/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>        /* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"

static MPT_CALLBACK     mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH     30000
#define MAX_CHAIN_DEPTH         100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
        " max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0);
MODULE_PARM_DESC(irqpoll_weight,
        "irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
        " enable detection of firmware fault and halt firmware - (default=0)");
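
/*
 * Usage note (illustrative, not driver code): the options above are
 * plain load-time module parameters, e.g.
 *
 *      modprobe mpt3sas max_queue_depth=1024 max_msix_vectors=8
 *
 * Parameters registered with permission 0 do not appear in sysfs;
 * mpt3sas_fwfault_debug is instead registered with 0644 via
 * module_param_call() further below, so it stays writable at runtime.
 */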

static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);

/**
 * mpt3sas_base_check_cmd_timeout - check whether an internal command
 *              really timed out or was terminated by a host reset.
 *
 * @ioc:        per adapter object.
 * @status:     status of the issued command.
 * @mpi_request: mf request pointer.
 * @sz:         size of buffer.
 *
 * Return: 1 if a host reset should be issued, 0 if the command was
 * terminated by a host reset already in progress.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
                u8 status, void *mpi_request, int sz)
{
        u8 issue_reset = 0;

        if (!(status & MPT3_CMD_RESET))
                issue_reset = 1;

        ioc_err(ioc, "Command %s\n",
                issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
        _debug_dump_mf(mpi_request, sz);

        return issue_reset;
}
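
/*
 * Illustrative caller sketch (hypothetical, not a call site in this
 * file): an internal-command path that just timed out would typically
 * do something like
 *
 *      if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE))
 *              issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
 *                  ioc->base_cmds.status, mpi_request,
 *                  sizeof(Mpi2SasIoUnitControlRequest_t)/4);
 *
 * i.e. the helper only decides whether a host reset is warranted and
 * dumps the frame; the caller remains responsible for issuing the reset.
 */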

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: parameter value string
 * @kp: kernel parameter descriptor
 *
 * Return: 0 on success, or the error from param_set_int() on failure.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;

        if (ret)
                return ret;

        /* global ioc spinlock to protect controller list on list operations */
        pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
        spin_lock(&gioc_lock);
        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
                ioc->fwfault_debug = mpt3sas_fwfault_debug;
        spin_unlock(&gioc_lock);
        return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
        param_get_int, &mpt3sas_fwfault_debug, 0644);
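
/*
 * Because the parameter is registered with mode 0644, fwfault_debug can
 * be flipped on a live system (illustrative):
 *
 *      echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * The setter above then propagates the new value to every IOC on
 * mpt3sas_ioc_list under gioc_lock.
 */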

/**
 * _base_readl_aero - retry readl() up to three times
 * @addr: MPT Fusion system interface register address
 *
 * Retry readl() up to three times if it returns a zero value
 * while reading the system interface register.
 *
 * Return: register value.
 */
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
{
        u32 i = 0, ret_val;

        do {
                ret_val = readl(addr);
                i++;
        } while (ret_val == 0 && i < 3);

        return ret_val;
}
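
/*
 * Design note: _base_readl_aero() treats an all-zero readl() as
 * potentially spurious and retries up to two more times; judging by the
 * name it is presumably wired into ioc->base_readl only for Aero/Sea
 * generation controllers, while _base_readl() below is the plain
 * wrapper used everywhere else.
 */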

static inline u32
_base_readl(const volatile void __iomem *addr)
{
        return readl(addr);
}

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *                                in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
                u32 index)
{
        /*
         * The first 256 bytes of BAR0 are the system registers.
         * MPI frames start at offset 256; at most 32 frames of 128
         * bytes are supported, i.e. 32 * 128 = 4K. The clone of the
         * reply free queue for the mCPU starts right after them.
         */
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
                        MPI_FRAME_START_OFFSET +
                        (cmd_credit * ioc->request_sz) + (index * sizeof(u32));

        writel(reply, reply_free_iomem);
}
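
/*
 * Worked example of the offset arithmetic above, using the sample mCPU
 * layout documented in _clone_sg_entries() (RequestCredit 31, i.e.
 * cmd_credit 32, and request_sz 128):
 *
 *      reply_free_iomem = BAR0 + 256 + 32 * 128 + index * 4
 *                       = BAR0 + 4352 + index * 4
 *
 * which is exactly where the reply free pool begins in that layout.
 */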

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *                              to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)src;

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                                (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)(src);

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                        (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and returns the virtual chain address
 *                       for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                u8 sge_chain_count)
{
        void __iomem *base_chain, *chain_virt;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_virt;
}

/**
 * _base_get_chain_phys - Calculates and returns the physical address
 *                      in BAR0 for scatter gather chains, for
 *                      the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                u8 sge_chain_count)
{
        phys_addr_t base_chain_phys, chain_phys;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_phys;
}
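
/*
 * Worked example for the two helpers above, again with the sample mCPU
 * layout (cmd_credit 32, request_sz 128, MaxChainDepth 3, and a 512
 * byte REPLY_FREE_POOL_SIZE): the chain region starts at
 *
 *      256 + 32 * 128 + 512 = 4864
 *
 * and smid N, chain C maps to 4864 + (N * 3 + C) * 128, which matches
 * the "4864 - 17152 SGE chain element" range quoted in the BAR0 layout
 * comment in _clone_sg_entries().
 */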

/**
 * _base_get_buffer_bar0 - Calculates and returns the BAR0 mapped host
 *                      buffer address for the provided smid
 *                      (each smid owns a 64K window, starting at
 *                      offset 17024).
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        /* Added extra 1 to reach end of chain */
        void __iomem *chain_end = _base_get_chain(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and returns the BAR0 mapped
 *              host buffer physical address for the provided smid
 *              (each smid owns a 64K window, starting at offset 17024).
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Physical address of the buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end_phys + (smid * 64 * 1024);
}
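
/*
 * Worked example: passing smid = cmd_credit + 1 and sge_chain_count =
 * MaxChainDepth to _base_get_chain{,_phys}() computes an address one
 * full chain set past the last valid smid, i.e. the end of the chain
 * region; each smid then owns a 64K window after that point, so smid 1
 * starts 64K into the host buffer region, smid 2 at 128K, and so on.
 */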

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates over the chain
 *                      lookup list and returns the chain_buffer
 *                      address for the matching dma address.
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to the chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
                dma_addr_t chain_buffer_dma)
{
        u16 index, j;
        struct chain_tracker *ct;

        for (index = 0; index < ioc->scsiio_depth; index++) {
                for (j = 0; j < ioc->chains_needed_per_io; j++) {
                        ct = &ioc->chain_lookup[index].chains_per_smid[j];
                        if (ct && ct->chain_buffer_dma == chain_buffer_dma)
                                return ct->chain_buffer;
                }
        }
        ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
        return NULL;
}

/**
 * _clone_sg_entries - handle MPI endpoint SCSI IO and config requests.
 *                      Base function for double buffering the requests
 *                      before they are submitted.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
                void *mpi_request, u16 smid)
{
        Mpi2SGESimple32_t *sgel, *sgel_next;
        u32  sgl_flags, sge_chain_count = 0;
        bool is_write = 0;
        u16 i = 0;
        void __iomem *buffer_iomem;
        phys_addr_t buffer_iomem_phys;
        void __iomem *buff_ptr;
        phys_addr_t buff_ptr_phys;
        void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        phys_addr_t dst_addr_phys;
        MPI2RequestHeader_t *request_hdr;
        struct scsi_cmnd *scmd;
        struct scatterlist *sg_scmd = NULL;
        int is_scsiio_req = 0;

        request_hdr = (MPI2RequestHeader_t *) mpi_request;

        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
                Mpi25SCSIIORequest_t *scsiio_request =
                        (Mpi25SCSIIORequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
                is_scsiio_req = 1;
        } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
                Mpi2ConfigRequest_t  *config_req =
                        (Mpi2ConfigRequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
        } else
                return;

        /* From the smid we can get the scsi_cmnd; once we have sg_scmd,
         * we just need sg_virt and sg_next to get the virtual
         * address associated with sgel->Address.
         */

        if (is_scsiio_req) {
                /* Get scsi_cmd using smid */
                scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
                if (scmd == NULL) {
                        ioc_err(ioc, "scmd is NULL\n");
                        return;
                }

                /* Get sg_scmd from scmd provided */
                sg_scmd = scsi_sglist(scmd);
        }

        /*
         * 0 - 255      System registers
         * 256 - 4352   MPI frames (based on maxCredit 32)
         * 4352 - 4864  Reply_free pool (512 bytes are reserved
         *              considering maxCredit 32; replies need extra
         *              room, so for the mCPU case four times
         *              maxCredit is kept).
         * 4864 - 17152 SGE chain elements (32 cmds * 3 chains of
         *              128 bytes = 12288)
         * 17152 - x    Host buffer mapped with smid
         *              (each smid can have a 64K max IO).
         * BAR0+Last 1K MSIX Addr and Data
         * Total size in use: 2113664 bytes of the 4MB BAR0
         */

        buffer_iomem = _base_get_buffer_bar0(ioc, smid);
        buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

        buff_ptr = buffer_iomem;
        buff_ptr_phys = buffer_iomem_phys;
        WARN_ON(buff_ptr_phys > U32_MAX);

        if (le32_to_cpu(sgel->FlagsLength) &
                        (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
                is_write = 1;

        for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

                sgl_flags =
                    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

                switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
                case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
                        /*
                         * Helper function which on passing
                         * chain_buffer_dma returns chain_buffer. Get
                         * the virtual address for sgel->Address
                         */
                        sgel_next =
                                _base_get_chain_buffer_dma_to_chain_buffer(ioc,
                                                le32_to_cpu(sgel->Address));
                        if (sgel_next == NULL)
                                return;
                        /*
                         * This is copying a 128 byte chain
                         * frame (not a host buffer)
                         */
                        dst_chain_addr[sge_chain_count] =
                                _base_get_chain(ioc,
                                        smid, sge_chain_count);
                        src_chain_addr[sge_chain_count] =
                                                (void *) sgel_next;
                        dst_addr_phys = _base_get_chain_phys(ioc,
                                                smid, sge_chain_count);
                        WARN_ON(dst_addr_phys > U32_MAX);
                        sgel->Address =
                                cpu_to_le32(lower_32_bits(dst_addr_phys));
                        sgel = sgel_next;
                        sge_chain_count++;
                        break;
                case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
                        if (is_write) {
                                if (is_scsiio_req) {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            sg_virt(sg_scmd),
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        /*
                                         * FIXME: this relies on a zero
                                         * PCI mem_offset.
                                         */
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                } else {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            ioc->config_vaddr,
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                }
                        }
                        buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        if ((le32_to_cpu(sgel->FlagsLength) &
                            (MPI2_SGE_FLAGS_END_OF_BUFFER
                                        << MPI2_SGE_FLAGS_SHIFT)))
                                goto eob_clone_chain;
                        else {
                                /*
                                 * Every single element in MPT will have
                                 * an associated sg_next. Better to
                                 * sanity-check that sg_next is not NULL;
                                 * it would be a driver bug if it were.
                                 */
                                if (is_scsiio_req) {
                                        sg_scmd = sg_next(sg_scmd);
                                        if (sg_scmd)
                                                sgel++;
                                        else
                                                goto eob_clone_chain;
                                }
                        }
                        break;
                }
        }

eob_clone_chain:
        for (i = 0; i < sge_chain_count; i++) {
                if (is_scsiio_req)
                        _base_clone_to_sys_mem(dst_chain_addr[i],
                                src_chain_addr[i], ioc->request_sz);
        }
}
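
/*
 * Compact reference for how _clone_sg_entries() picks apart a 32-bit
 * MPI2 simple SGE (a restatement of the decoding used above, not new
 * driver logic):
 *
 *      u32 fl     = le32_to_cpu(sgel->FlagsLength);
 *      u32 length = fl & 0x00ffffff;             (byte count, low 24 bits)
 *      u32 flags  = fl >> MPI2_SGE_FLAGS_SHIFT;  (top byte)
 *      chain element:  (flags & MPI2_SGE_FLAGS_ELEMENT_MASK) ==
 *                      MPI2_SGE_FLAGS_CHAIN_ELEMENT
 *      last element:   flags & MPI2_SGE_FLAGS_END_OF_BUFFER
 *
 * i.e. the length lives in the low 24 bits and the element type plus
 * direction flags in the top byte.
 */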

/**
 *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 otherwise.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
        struct pci_dev *pdev;

        if (!ioc)
                return -1;

        pdev = ioc->pdev;
        if (!pdev)
                return -1;
        pci_stop_and_remove_bus_device_locked(pdev);
        return 0;
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
        struct MPT3SAS_ADAPTER *ioc =
            container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
        unsigned long    flags;
        u32 doorbell;
        int rc;
        struct task_struct *p;


        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->shost_recovery || ioc->pci_error_recovery)
                goto rearm_timer;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
                ioc_err(ioc, "SAS host is non-operational !!!!\n");

                /* It may be possible that EEH recovery can resolve some
                 * PCI bus failure issues rather than removing the dead
                 * ioc function outright for a controller in a
                 * non-operational state, so priority is given to EEH
                 * recovery here. If it does not resolve the issue, the
                 * mpt3sas driver declares the controller non-operational
                 * and removes the dead ioc function.
                 */
                if (ioc->non_operational_loop++ < 5) {
                        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                                                         flags);
                        goto rearm_timer;
                }

                /*
                 * Call the _scsih_flush_pending_cmds callback so that we
                 * flush all pending commands back to the OS. This call is
                 * required to avoid a deadlock at the block layer. A dead
                 * IOC will fail the diag reset, and this call is safe
                 * since a dead ioc will never return any command back
                 * from the HW.
                 */
                ioc->schedule_dead_ioc_flush_running_cmds(ioc);
                /*
                 * Set remove_host flag early since kernel thread will
                 * take some time to execute.
                 */
                ioc->remove_host = 1;
                /* Remove the Dead Host */
                p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
                    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
                if (IS_ERR(p))
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                                __func__);
                else
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                                __func__);
                return; /* don't rearm timer */
        }

        ioc->non_operational_loop = 0;

        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                ioc_warn(ioc, "%s: hard reset: %s\n",
                         __func__, rc == 0 ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
                if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
                        mpt3sas_base_fault_info(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
                    MPI2_IOC_STATE_OPERATIONAL)
                        return; /* don't rearm timer */
        }

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long    flags;

        if (ioc->fault_reset_work_q)
                return;

        /* initialize fault polling */

        INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
        snprintf(ioc->fault_reset_work_q_name,
            sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
            ioc->driver_name, ioc->id);
        ioc->fault_reset_work_q =
                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
        if (!ioc->fault_reset_work_q) {
                ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
                return;
        }
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;
        struct workqueue_struct *wq;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        wq = ioc->fault_reset_work_q;
        ioc->fault_reset_work_q = NULL;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        if (wq) {
                if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
                        flush_workqueue(wq);
                destroy_workqueue(wq);
        }
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt the controller firmware. The
 * intent is to stop both the driver and the firmware so that the
 * end user can obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
        u32 doorbell;

        if (!ioc->fwfault_debug)
                return;

        dump_stack();

        doorbell = ioc->base_readl(&ioc->chip->Doorbell);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
                mpt3sas_base_fault_info(ioc, doorbell);
        else {
                writel(0xC0FFEE00, &ioc->chip->Doorbell);
                ioc_err(ioc, "Firmware is halted due to command timeout\n");
        }

        if (ioc->fwfault_debug == 2)
                for (;;)
                        ;
        else
                panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
        MPI2RequestHeader_t *request_hdr)
{
        u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        char *desc = NULL;
        u16 frame_sz;
        char *func_str = NULL;

        /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
            request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
            request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
                return;

        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
                return;

        switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

        case MPI2_IOCSTATUS_INVALID_FUNCTION:
                desc = "invalid function";
                break;
        case MPI2_IOCSTATUS_BUSY:
                desc = "busy";
                break;
        case MPI2_IOCSTATUS_INVALID_SGL:
                desc = "invalid sgl";
                break;
        case MPI2_IOCSTATUS_INTERNAL_ERROR:
                desc = "internal error";
                break;
        case MPI2_IOCSTATUS_INVALID_VPID:
                desc = "invalid vpid";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
                desc = "insufficient resources";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
                desc = "insufficient power";
                break;
        case MPI2_IOCSTATUS_INVALID_FIELD:
                desc = "invalid field";
                break;
        case MPI2_IOCSTATUS_INVALID_STATE:
                desc = "invalid state";
                break;
        case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
                desc = "op state not supported";
                break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

        case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
                desc = "config invalid action";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
                desc = "config invalid type";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
                desc = "config invalid page";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
                desc = "config invalid data";
                break;
        case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
                desc = "config no defaults";
                break;
        case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
                desc = "config cant commit";
                break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

        case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
        case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
        case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
        case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
        case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
        case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
        case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
        case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
                break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

        case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
                desc = "eedp guard error";
                break;
        case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
                desc = "eedp ref tag error";
                break;
        case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
                desc = "eedp app tag error";
                break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

        case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
                desc = "target invalid io index";
                break;
        case MPI2_IOCSTATUS_TARGET_ABORTED:
                desc = "target aborted";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
                desc = "target no conn retryable";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
                desc = "target no connection";
                break;
        case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
                desc = "target xfer count mismatch";
                break;
        case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
                desc = "target data offset error";
                break;
        case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
                desc = "target too much write data";
                break;
        case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
                desc = "target iu too short";
                break;
        case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
                desc = "target ack nak timeout";
                break;
        case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
                desc = "target nak received";
                break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

        case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
                desc = "smp request failed";
                break;
        case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
                desc = "smp data overrun";
                break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

        case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
                desc = "diagnostic released";
                break;
        default:
                break;
        }

        if (!desc)
                return;

        switch (request_hdr->Function) {
        case MPI2_FUNCTION_CONFIG:
                frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
                func_str = "config_page";
                break;
        case MPI2_FUNCTION_SCSI_TASK_MGMT:
                frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
                func_str = "task_mgmt";
                break;
        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
                frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
                func_str = "sas_iounit_ctl";
                break;
        case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
                frame_sz = sizeof(Mpi2SepRequest_t);
                func_str = "enclosure";
                break;
        case MPI2_FUNCTION_IOC_INIT:
                frame_sz = sizeof(Mpi2IOCInitRequest_t);
                func_str = "ioc_init";
                break;
        case MPI2_FUNCTION_PORT_ENABLE:
                frame_sz = sizeof(Mpi2PortEnableRequest_t);
                func_str = "port_enable";
                break;
        case MPI2_FUNCTION_SMP_PASSTHROUGH:
                frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
                func_str = "smp_passthru";
                break;
        case MPI2_FUNCTION_NVME_ENCAPSULATED:
                frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
                    ioc->sge_size;
                func_str = "nvme_encapsulated";
                break;
        default:
                frame_sz = 32;
                func_str = "unknown";
                break;
        }

        ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
                 desc, ioc_status, request_hdr, func_str);

        _debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        Mpi2EventNotificationReply_t *mpi_reply)
{
        char *desc = NULL;
        u16 event;

        if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
                return;

        event = le16_to_cpu(mpi_reply->Event);

        switch (event) {
        case MPI2_EVENT_LOG_DATA:
                desc = "Log Data";
                break;
        case MPI2_EVENT_STATE_CHANGE:
                desc = "Status Change";
                break;
        case MPI2_EVENT_HARD_RESET_RECEIVED:
                desc = "Hard Reset Received";
                break;
        case MPI2_EVENT_EVENT_CHANGE:
                desc = "Event Change";
                break;
        case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
                desc = "Device Status Change";
                break;
        case MPI2_EVENT_IR_OPERATION_STATUS:
                if (!ioc->hide_ir_msg)
                        desc = "IR Operation Status";
                break;
        case MPI2_EVENT_SAS_DISCOVERY:
        {
                Mpi2EventDataSasDiscovery_t *event_data =
                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
                ioc_info(ioc, "Discovery: (%s)",
                         event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
                         "start" : "stop");
                if (event_data->DiscoveryStatus)
                        pr_cont(" discovery_status(0x%08x)",
                            le32_to_cpu(event_data->DiscoveryStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
                desc = "SAS Broadcast Primitive";
                break;
        case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
                desc = "SAS Init Device Status Change";
                break;
        case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
                desc = "SAS Init Table Overflow";
                break;
        case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
                desc = "SAS Topology Change List";
                break;
        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
                desc = "SAS Enclosure Device Status Change";
                break;
        case MPI2_EVENT_IR_VOLUME:
                if (!ioc->hide_ir_msg)
                        desc = "IR Volume";
                break;
        case MPI2_EVENT_IR_PHYSICAL_DISK:
                if (!ioc->hide_ir_msg)
                        desc = "IR Physical Disk";
                break;
        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
                if (!ioc->hide_ir_msg)
                        desc = "IR Configuration Change List";
                break;
        case MPI2_EVENT_LOG_ENTRY_ADDED:
                if (!ioc->hide_ir_msg)
                        desc = "Log Entry Added";
                break;
        case MPI2_EVENT_TEMP_THRESHOLD:
                desc = "Temperature Threshold";
                break;
        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
                desc = "Cable Event";
                break;
        case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
                desc = "SAS Device Discovery Error";
                break;
        case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
                desc = "PCIE Device Status Change";
                break;
        case MPI2_EVENT_PCIE_ENUMERATION:
        {
                Mpi26EventDataPCIeEnumeration_t *event_data =
                        (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
                ioc_info(ioc, "PCIE Enumeration: (%s)",
                         event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
                         "start" : "stop");
                if (event_data->EnumerationStatus)
                        pr_cont(" enumeration_status(0x%08x)",
                                le32_to_cpu(event_data->EnumerationStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
                desc = "PCIE Topology Change List";
                break;
        }

        if (!desc)
                return;

        ioc_info(ioc, "%s\n", desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
        union loginfo_type {
                u32     loginfo;
                struct {
                        u32     subcode:16;
                        u32     code:8;
                        u32     originator:4;
                        u32     bus_type:4;
                } dw;
        };
        union loginfo_type sas_loginfo;
        char *originator_str = NULL;

        sas_loginfo.loginfo = log_info;
        if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
                return;

        /* each nexus loss loginfo */
        if (log_info == 0x31170000)
                return;

        /* eat the loginfos associated with task aborts */
        if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
            0x31140000 || log_info == 0x31130000))
                return;

        switch (sas_loginfo.dw.originator) {
        case 0:
                originator_str = "IOP";
                break;
        case 1:
                originator_str = "PL";
                break;
        case 2:
                if (!ioc->hide_ir_msg)
                        originator_str = "IR";
                else
                        originator_str = "WarpDrive";
                break;
        }

        ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
                 log_info,
                 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
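
/*
 * Worked example of the bitfield split above (little-endian layout, as
 * this union assumes): log_info 0x31170000 decodes as bus_type 0x3
 * (SAS), originator 0x1 ("PL"), code 0x17, sub_code 0x0000; that exact
 * value is the per-device nexus loss loginfo filtered out before the
 * switch is reached.
 */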

/**
 * _base_display_reply_info - displays the ioc status and log info of a reply
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;
        u16 ioc_status;
        u32 loginfo = 0;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (unlikely(!mpi_reply)) {
                ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
                        __FILE__, __LINE__, __func__);
                return;
        }
        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

        if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
            (ioc->logging_level & MPT_DEBUG_REPLY)) {
                _base_sas_ioc_info(ioc, mpi_reply,
                   mpt3sas_base_get_msg_frame(ioc, smid));
        }

        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
                loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
                _base_sas_log_info(ioc, loginfo);
        }

        if (ioc_status || loginfo) {
                ioc_status &= MPI2_IOCSTATUS_MASK;
                mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
        }
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        MPI2DefaultReply_t *mpi_reply;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
                return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

        if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
                return 1;

        ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
        if (mpi_reply) {
                ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
                memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
        }
        ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

        complete(&ioc->base_cmds.done);
        return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
        Mpi2EventNotificationReply_t *mpi_reply;
        Mpi2EventAckRequest_t *ack_request;
        u16 smid;
        struct _event_ack_list *delayed_event_ack;

        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (!mpi_reply)
                return 1;
        if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
                return 1;

        _base_display_event_data(ioc, mpi_reply);

        if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
                goto out;
        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
        if (!smid) {
                delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
                                        GFP_ATOMIC);
                if (!delayed_event_ack)
                        goto out;
                INIT_LIST_HEAD(&delayed_event_ack->list);
                delayed_event_ack->Event = mpi_reply->Event;
                delayed_event_ack->EventContext = mpi_reply->EventContext;
                list_add_tail(&delayed_event_ack->list,
                                &ioc->delayed_event_ack_list);
                dewtprintk(ioc,
                           ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
                                    le16_to_cpu(mpi_reply->Event)));
                goto out;
        }

        ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
        ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
        ack_request->Event = mpi_reply->Event;
        ack_request->EventContext = mpi_reply->EventContext;
        ack_request->VF_ID = 0;  /* TODO */
        ack_request->VP_ID = 0;
        mpt3sas_base_put_smid_default(ioc, smid);

 out:

        /* scsih callback handler */
        mpt3sas_scsih_event_callback(ioc, msix_index, reply);

        /* ctl callback handler */
        mpt3sas_ctl_event_callback(ioc, msix_index, reply);

        return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        struct scsi_cmnd *cmd;

        if (WARN_ON(!smid) ||
            WARN_ON(smid >= ioc->hi_priority_smid))
                return NULL;

        cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
        if (cmd)
                return scsi_cmd_priv(cmd);

        return NULL;
}

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        int i;
        u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
        u8 cb_idx = 0xFF;

        if (smid < ioc->hi_priority_smid) {
                struct scsiio_tracker *st;

                if (smid < ctl_smid) {
                        st = _get_st_from_smid(ioc, smid);
                        if (st)
                                cb_idx = st->cb_idx;
                } else if (smid == ctl_smid)
                        cb_idx = ioc->ctl_cb_idx;
        } else if (smid < ioc->internal_smid) {
                i = smid - ioc->hi_priority_smid;
                cb_idx = ioc->hpr_lookup[i].cb_idx;
        } else if (smid <= ioc->hba_queue_depth) {
                i = smid - ioc->internal_smid;
                cb_idx = ioc->internal_lookup[i].cb_idx;
        }
        return cb_idx;
}
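
/*
 * Summary of the smid ranges walked above (boundaries come from the
 * ioc fields, not fixed constants):
 *
 *      [1 .. ctl_smid)                      SCSI IO, cb_idx from the
 *                                           scsiio_tracker of the command
 *      ctl_smid                             ctl (ioctl) pass-through slot
 *      [hi_priority_smid .. internal_smid)  high priority, hpr_lookup[]
 *      [internal_smid .. hba_queue_depth]   internal commands,
 *                                           internal_lookup[]
 */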

/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
        u32 him_register;

        ioc->mask_interrupts = 1;
        him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
        him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
        writel(him_register, &ioc->chip->HostInterruptMask);
        ioc->base_readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
        u32 him_register;

        him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
        him_register &= ~MPI2_HIM_RIM;
        writel(him_register, &ioc->chip->HostInterruptMask);
        ioc->mask_interrupts = 0;
}

union reply_descriptor {
        u64 word;
        struct {
                u32 low;
                u32 high;
        } u;
};

static u32 base_mod64(u64 dividend, u32 divisor)
{
        u32 remainder;

        if (!divisor)
                pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
        remainder = do_div(dividend, divisor);
        return remainder;
}
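
/*
 * do_div() is used here instead of the '%' operator because the
 * dividend is 64-bit; a plain 64-bit modulo would need a libgcc helper
 * on 32-bit platforms. do_div() divides its first argument in place and
 * returns the remainder, which is safe because 'dividend' is the
 * function's own by-value copy. E.g. base_mod64(10, 3) == 1.
 */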

/**
 * _base_process_reply_queue - Process reply descriptors from reply
 *              descriptor post queue.
 * @reply_q: per IRQ's reply queue object.
 *
 * Return: number of reply descriptors processed from reply
 *              descriptor queue.
 */
static int
_base_process_reply_queue(struct adapter_reply_queue *reply_q)
{
        union reply_descriptor rd;
        u64 completed_cmds;
        u8 request_descript_type;
        u16 smid;
        u8 cb_idx;
        u32 reply;
        u8 msix_index = reply_q->msix_index;
        struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
        Mpi2ReplyDescriptorsUnion_t *rpf;
        u8 rc;

        completed_cmds = 0;
        if (!atomic_add_unless(&reply_q->busy, 1, 1))
                return completed_cmds;

        rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
        request_descript_type = rpf->Default.ReplyFlags
             & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
        if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
                atomic_dec(&reply_q->busy);
                return completed_cmds;
        }

        cb_idx = 0xFF;
        do {
                rd.word = le64_to_cpu(rpf->Words);
                if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
                        goto out;
                reply = 0;
                smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
                if (request_descript_type ==
                    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
                    request_descript_type ==
                    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
                    request_descript_type ==
                    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
                        cb_idx = _base_get_cb_idx(ioc, smid);
                        if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                            (likely(mpt_callbacks[cb_idx] != NULL))) {
                                rc = mpt_callbacks[cb_idx](ioc, smid,
                                    msix_index, 0);
                                if (rc)
                                        mpt3sas_base_free_smid(ioc, smid);
                        }
                } else if (request_descript_type ==
                    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
                        reply = le32_to_cpu(
                            rpf->AddressReply.ReplyFrameAddress);
                        if (reply > ioc->reply_dma_max_address ||
                            reply < ioc->reply_dma_min_address)
                                reply = 0;
                        if (smid) {
                                cb_idx = _base_get_cb_idx(ioc, smid);
                                if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                                    (likely(mpt_callbacks[cb_idx] != NULL))) {
                                        rc = mpt_callbacks[cb_idx](ioc, smid,
                                            msix_index, reply);
                                        if (reply)
                                                _base_display_reply_info(ioc,
                                                    smid, msix_index, reply);
                                        if (rc)
                                                mpt3sas_base_free_smid(ioc,
                                                    smid);
                                }
                        } else {
                                _base_async_event(ioc, msix_index, reply);
                        }

                        /* reply free queue handling */
                        if (reply) {
                                ioc->reply_free_host_index =
                                    (ioc->reply_free_host_index ==
                                    (ioc->reply_free_queue_depth - 1)) ?
                                    0 : ioc->reply_free_host_index + 1;
                                ioc->reply_free[ioc->reply_free_host_index] =
                                    cpu_to_le32(reply);
                                if (ioc->is_mcpu_endpoint)
                                        _base_clone_reply_to_sys_mem(ioc,
1489                                                 reply,
1490                                                 ioc->reply_free_host_index);
1491                                 writel(ioc->reply_free_host_index,
1492                                     &ioc->chip->ReplyFreeHostIndex);
1493                         }
1494                 }
1495
1496                 rpf->Words = cpu_to_le64(ULLONG_MAX);
1497                 reply_q->reply_post_host_index =
1498                     (reply_q->reply_post_host_index ==
1499                     (ioc->reply_post_queue_depth - 1)) ? 0 :
1500                     reply_q->reply_post_host_index + 1;
1501                 request_descript_type =
1502                     reply_q->reply_post_free[reply_q->reply_post_host_index].
1503                     Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1504                 completed_cmds++;
1505                 /* Update the reply post host index after continuously
1506                  * processing the threshold number of Reply Descriptors,
1507                  * so that the firmware can find enough entries to post
1508                  * new Reply Descriptors in the reply descriptor post queue.
1509                  */
1510                 if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
1511                         if (ioc->combined_reply_queue) {
1512                                 writel(reply_q->reply_post_host_index |
1513                                                 ((msix_index  & 7) <<
1514                                                  MPI2_RPHI_MSIX_INDEX_SHIFT),
1515                                     ioc->replyPostRegisterIndex[msix_index/8]);
1516                         } else {
1517                                 writel(reply_q->reply_post_host_index |
1518                                                 (msix_index <<
1519                                                  MPI2_RPHI_MSIX_INDEX_SHIFT),
1520                                                 &ioc->chip->ReplyPostHostIndex);
1521                         }
1522                         if (!reply_q->irq_poll_scheduled) {
1523                                 reply_q->irq_poll_scheduled = true;
1524                                 irq_poll_sched(&reply_q->irqpoll);
1525                         }
1526                         atomic_dec(&reply_q->busy);
1527                         return completed_cmds;
1528                 }
1529                 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1530                         goto out;
1531                 if (!reply_q->reply_post_host_index)
1532                         rpf = reply_q->reply_post_free;
1533                 else
1534                         rpf++;
1535         } while (1);
1536
1537  out:
1538
1539         if (!completed_cmds) {
1540                 atomic_dec(&reply_q->busy);
1541                 return completed_cmds;
1542         }
1543
1544         if (ioc->is_warpdrive) {
1545                 writel(reply_q->reply_post_host_index,
1546                 ioc->reply_post_host_index[msix_index]);
1547                 atomic_dec(&reply_q->busy);
1548                 return completed_cmds;
1549         }
1550
1551         /* Update the Reply Post Host Index.
1552          * For HBAs that support the combined reply queue feature:
1553          * 1. Get the correct Supplemental Reply Post Host Index Register,
1554          *    i.e. the (msix_index / 8)th entry from the Supplemental Reply
1555          *    Post Host Index Register address bank replyPostRegisterIndex[].
1556          * 2. Update this register with the new reply host index value in
1557          *    the ReplyPostIndex field and, in the MSIxIndex field, the
1558          *    msix_index value reduced to a value between 0 and 7 using a
1559          *    modulo 8 operation, since each Supplemental Reply Post Host
1560          *    Index Register supports 8 MSI-X vectors.
1561          *
1562          * For other HBAs just update the Reply Post Host Index register
1563          * with the new reply host index value in the ReplyPostIndex field
1564          * and the msix_index value in the MSIxIndex field.
1565          */
1566         if (ioc->combined_reply_queue)
1567                 writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1568                         MPI2_RPHI_MSIX_INDEX_SHIFT),
1569                         ioc->replyPostRegisterIndex[msix_index/8]);
1570         else
1571                 writel(reply_q->reply_post_host_index | (msix_index <<
1572                         MPI2_RPHI_MSIX_INDEX_SHIFT),
1573                         &ioc->chip->ReplyPostHostIndex);
1574         atomic_dec(&reply_q->busy);
1575         return completed_cmds;
1576 }
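
/*
 * Illustrative sketch, not referenced by the driver: both the reply free
 * queue and the reply descriptor post queue handled above are circular
 * buffers whose host index wraps back to 0 once it reaches depth - 1.
 * _base_process_reply_queue() open-codes the wrap with a ternary; a
 * hypothetical equivalent helper:
 */
static inline u32 base_example_next_ring_index(u32 index, u32 depth)
{
	return (index == depth - 1) ? 0 : index + 1;
}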
1577
1578 /**
1579  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1580  * @irq: irq number (not used)
1581  * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue structure
1582  *
1583  * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1584  */
1585 static irqreturn_t
1586 _base_interrupt(int irq, void *bus_id)
1587 {
1588         struct adapter_reply_queue *reply_q = bus_id;
1589         struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1590
1591         if (ioc->mask_interrupts)
1592                 return IRQ_NONE;
1593         if (reply_q->irq_poll_scheduled)
1594                 return IRQ_HANDLED;
1595         return ((_base_process_reply_queue(reply_q) > 0) ?
1596                         IRQ_HANDLED : IRQ_NONE);
1597 }
1598
1599 /**
1600  * _base_irqpoll - IRQ poll callback handler
1601  * @irqpoll: irq_poll object
1602  * @budget: irq poll weight
1603  *
1604  * Return: number of reply descriptors processed
1605  */
1606 static int
1607 _base_irqpoll(struct irq_poll *irqpoll, int budget)
1608 {
1609         struct adapter_reply_queue *reply_q;
1610         int num_entries = 0;
1611
1612         reply_q = container_of(irqpoll, struct adapter_reply_queue,
1613                         irqpoll);
1614         if (reply_q->irq_line_enable) {
1615                 disable_irq(reply_q->os_irq);
1616                 reply_q->irq_line_enable = false;
1617         }
1618         num_entries = _base_process_reply_queue(reply_q);
1619         if (num_entries < budget) {
1620                 irq_poll_complete(irqpoll);
1621                 reply_q->irq_poll_scheduled = false;
1622                 reply_q->irq_line_enable = true;
1623                 enable_irq(reply_q->os_irq);
1624         }
1625
1626         return num_entries;
1627 }
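
/*
 * Note on the handshake between _base_interrupt() and _base_irqpoll():
 * while irq_poll_scheduled is set the hard interrupt handler returns
 * IRQ_HANDLED without touching the queue, the IRQ line itself stays
 * disabled for the duration of polling, and the queue drops back to
 * interrupt mode only once a poll pass consumes fewer descriptors than
 * 'budget', mirroring NAPI semantics.
 */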
1628
1629 /**
1630  * _base_init_irqpolls - initialize IRQ polls
1631  * @ioc: per adapter object
1632  *
1633  * Return: nothing
1634  */
1635 static void
1636 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1637 {
1638         struct adapter_reply_queue *reply_q, *next;
1639
1640         if (list_empty(&ioc->reply_queue_list))
1641                 return;
1642
1643         list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1644                 irq_poll_init(&reply_q->irqpoll,
1645                         ioc->hba_queue_depth/4, _base_irqpoll);
1646                 reply_q->irq_poll_scheduled = false;
1647                 reply_q->irq_line_enable = true;
1648                 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1649                     reply_q->msix_index);
1650         }
1651 }
1652
1653 /**
1654  * _base_is_controller_msix_enabled - check whether the controller supports multi-reply queues
1655  * @ioc: per adapter object
1656  *
1657  * Return: Whether or not MSI/X is enabled.
1658  */
1659 static inline int
1660 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1661 {
1662         return (ioc->facts.IOCCapabilities &
1663             MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1664 }
1665
1666 /**
1667  * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1668  * @ioc: per adapter object
1669  * Context: non-ISR context
1670  *
1671  * Called when a Task Management request has completed.
1672  */
1673 void
1674 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
1675 {
1676         struct adapter_reply_queue *reply_q;
1677
1678         /* If MSIX capability is turned off
1679          * then multi-queues are not enabled
1680          */
1681         if (!_base_is_controller_msix_enabled(ioc))
1682                 return;
1683
1684         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1685                 if (ioc->shost_recovery || ioc->remove_host ||
1686                                 ioc->pci_error_recovery)
1687                         return;
1688                 /* TMs are on msix_index == 0 */
1689                 if (reply_q->msix_index == 0)
1690                         continue;
1691                 if (reply_q->irq_poll_scheduled) {
1692                         /* Calling irq_poll_disable will wait for any pending
1693                          * callbacks to have completed.
1694                          */
1695                         irq_poll_disable(&reply_q->irqpoll);
1696                         irq_poll_enable(&reply_q->irqpoll);
1697                         reply_q->irq_poll_scheduled = false;
1698                         reply_q->irq_line_enable = true;
1699                         enable_irq(reply_q->os_irq);
1700                         continue;
1701                 }
1702                 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1703         }
1704 }
1705
1706 /**
1707  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1708  * @cb_idx: callback index
1709  */
1710 void
1711 mpt3sas_base_release_callback_handler(u8 cb_idx)
1712 {
1713         mpt_callbacks[cb_idx] = NULL;
1714 }
1715
1716 /**
1717  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1718  * @cb_func: callback function
1719  *
1720  * Return: Index of @cb_func.
1721  */
1722 u8
1723 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1724 {
1725         u8 cb_idx;
1726
1727         for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1728                 if (mpt_callbacks[cb_idx] == NULL)
1729                         break;
1730
1731         mpt_callbacks[cb_idx] = cb_func;
1732         return cb_idx;
1733 }
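
/*
 * Illustrative sketch of hypothetical client code, not part of this file:
 * a sub-module obtains a callback index once at load time; the index is
 * later mapped back through _base_get_cb_idx() when a reply descriptor
 * arrives. 'example_done' and 'example_cb_idx' are made-up names.
 */
static u8 example_cb_idx;

static u8
example_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	/* returning 1 asks the base driver to free the smid */
	return 1;
}

static inline void example_register_done_handler(void)
{
	example_cb_idx = mpt3sas_base_register_callback_handler(example_done);
}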
1734
1735 /**
1736  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1737  */
1738 void
1739 mpt3sas_base_initialize_callback_handler(void)
1740 {
1741         u8 cb_idx;
1742
1743         for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1744                 mpt3sas_base_release_callback_handler(cb_idx);
1745 }
1746
1747
1748 /**
1749  * _base_build_zero_len_sge - build zero length sg entry
1750  * @ioc: per adapter object
1751  * @paddr: virtual address for SGE
1752  *
1753  * Create a zero length scatter gather entry to ensure the IOC's hardware has
1754  * something to use if the target device goes brain dead and tries
1755  * to send data even when none is asked for.
1756  */
1757 static void
1758 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1759 {
1760         u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1761             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1762             MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1763             MPI2_SGE_FLAGS_SHIFT);
1764         ioc->base_add_sg_single(paddr, flags_length, -1);
1765 }
1766
1767 /**
1768  * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1769  * @paddr: virtual address for SGE
1770  * @flags_length: SGE flags and data transfer length
1771  * @dma_addr: Physical address
1772  */
1773 static void
1774 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1775 {
1776         Mpi2SGESimple32_t *sgel = paddr;
1777
1778         flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1779             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1780         sgel->FlagsLength = cpu_to_le32(flags_length);
1781         sgel->Address = cpu_to_le32(dma_addr);
1782 }
1783
1784
1785 /**
1786  * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1787  * @paddr: virtual address for SGE
1788  * @flags_length: SGE flags and data transfer length
1789  * @dma_addr: Physical address
1790  */
1791 static void
1792 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1793 {
1794         Mpi2SGESimple64_t *sgel = paddr;
1795
1796         flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1797             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1798         sgel->FlagsLength = cpu_to_le32(flags_length);
1799         sgel->Address = cpu_to_le64(dma_addr);
1800 }
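
/*
 * Illustrative sketch, not referenced by the driver: an MPI2 SGE packs its
 * flags into the top byte of the 32-bit FlagsLength word and the transfer
 * length into the low 24 bits, which is why the callers above OR
 * pre-shifted flags with a byte count. A hypothetical helper making the
 * packing explicit:
 */
static inline u32 example_sge_flags_length(u8 flags, u32 length)
{
	return ((u32)flags << MPI2_SGE_FLAGS_SHIFT) | (length & 0x00ffffff);
}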
1801
1802 /**
1803  * _base_get_chain_buffer_tracker - obtain chain tracker
1804  * @ioc: per adapter object
1805  * @scmd: SCSI commands of the IO request
1806  *
1807  * Return: chain tracker from chain_lookup table using key as
1808  * smid and smid's chain_offset.
1809  */
1810 static struct chain_tracker *
1811 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1812                                struct scsi_cmnd *scmd)
1813 {
1814         struct chain_tracker *chain_req;
1815         struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1816         u16 smid = st->smid;
1817         u8 chain_offset =
1818            atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
1819
1820         if (chain_offset == ioc->chains_needed_per_io)
1821                 return NULL;
1822
1823         chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
1824         atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
1825         return chain_req;
1826 }
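
/*
 * Note on the tracker above: chain_lookup[smid - 1].chain_offset acts as an
 * atomic per-smid cursor into a fixed array of chains_needed_per_io chain
 * buffers, so chain allocation for different smids never contends on a
 * shared free list; the cursor is reset again when the smid is freed (not
 * shown here).
 */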
1827
1828
1829 /**
1830  * _base_build_sg - build generic sg
1831  * @ioc: per adapter object
1832  * @psge: virtual address for SGE
1833  * @data_out_dma: physical address for WRITES
1834  * @data_out_sz: data xfer size for WRITES
1835  * @data_in_dma: physical address for READS
1836  * @data_in_sz: data xfer size for READS
1837  */
1838 static void
1839 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1840         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1841         size_t data_in_sz)
1842 {
1843         u32 sgl_flags;
1844
1845         if (!data_out_sz && !data_in_sz) {
1846                 _base_build_zero_len_sge(ioc, psge);
1847                 return;
1848         }
1849
1850         if (data_out_sz && data_in_sz) {
1851                 /* WRITE sgel first */
1852                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1853                     MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1854                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1855                 ioc->base_add_sg_single(psge, sgl_flags |
1856                     data_out_sz, data_out_dma);
1857
1858                 /* incr sgel */
1859                 psge += ioc->sge_size;
1860
1861                 /* READ sgel last */
1862                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1863                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1864                     MPI2_SGE_FLAGS_END_OF_LIST);
1865                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1866                 ioc->base_add_sg_single(psge, sgl_flags |
1867                     data_in_sz, data_in_dma);
1868         } else if (data_out_sz) /* WRITE */ {
1869                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1870                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1871                     MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1872                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1873                 ioc->base_add_sg_single(psge, sgl_flags |
1874                     data_out_sz, data_out_dma);
1875         } else if (data_in_sz) /* READ */ {
1876                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1877                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1878                     MPI2_SGE_FLAGS_END_OF_LIST);
1879                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1880                 ioc->base_add_sg_single(psge, sgl_flags |
1881                     data_in_sz, data_in_dma);
1882         }
1883 }
1884
1885 /* IEEE format sgls */
1886
1887 /**
1888  * _base_build_nvme_prp - This function is called for NVMe end devices to build
1889  * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
1890  * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
1891  * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
1892  * used to describe a larger data buffer.  If the data buffer is too large to
1893  * describe using the two PRP entries inside the NVMe message, then PRP1
1894  * describes the first data memory segment, and PRP2 contains a pointer to a PRP
1895  * list located elsewhere in memory to describe the remaining data memory
1896  * segments.  The PRP list will be contiguous.
1897  *
1898  * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
1899  * consists of a list of PRP entries to describe a number of noncontiguous
1900  * physical memory segments as a single memory buffer, just as an SGL does.  Note
1901  * however, that this function is only used by the IOCTL call, so the memory
1902  * given will be guaranteed to be contiguous.  There is no need to translate
1903  * non-contiguous SGL into a PRP in this case.  All PRPs will describe
1904  * contiguous space that is one page size each.
1905  *
1906  * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
1907  * a PRP list pointer or a PRP element, depending upon the command.  PRP2
1908  * contains the second PRP element if the memory being described fits within 2
1909  * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
1910  *
1911  * A PRP list pointer contains the address of a PRP list, structured as a linear
1912  * array of PRP entries.  Each PRP entry in this list describes a segment of
1913  * physical memory.
1914  *
1915  * Each 64-bit PRP entry comprises an address and an offset field.  The address
1916  * always points at the beginning of a 4KB physical memory page, and the offset
1917  * describes where within that 4KB page the memory segment begins.  Only the
1918  * first element in a PRP list may contain a non-zero offset, implying that all
1919  * memory segments following the first begin at the start of a 4KB page.
1920  *
1921  * Each PRP element normally describes 4KB of physical memory, with exceptions
1922  * for the first and last elements in the list.  If the memory being described
1923  * by the list begins at a non-zero offset within the first 4KB page, then the
1924  * first PRP element will contain a non-zero offset indicating where the region
1925  * begins within the 4KB page.  The last memory segment may end before the end
1926  * of the 4KB segment, depending upon the overall size of the memory being
1927  * described by the PRP list.
1928  *
1929  * Since PRP entries lack any indication of size, the overall data buffer length
1930  * is used to determine where the end of the data memory buffer is located, and
1931  * how many PRP entries are required to describe it.
1932  *
1933  * @ioc: per adapter object
1934  * @smid: system request message index for getting associated SGL
1935  * @nvme_encap_request: the NVMe request msg frame pointer
1936  * @data_out_dma: physical address for WRITES
1937  * @data_out_sz: data xfer size for WRITES
1938  * @data_in_dma: physical address for READS
1939  * @data_in_sz: data xfer size for READS
1940  */
1941 static void
1942 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
1943         Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
1944         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1945         size_t data_in_sz)
1946 {
1947         int             prp_size = NVME_PRP_SIZE;
1948         __le64          *prp_entry, *prp1_entry, *prp2_entry;
1949         __le64          *prp_page;
1950         dma_addr_t      prp_entry_dma, prp_page_dma, dma_addr;
1951         u32             offset, entry_len;
1952         u32             page_mask_result, page_mask;
1953         size_t          length;
1954         struct mpt3sas_nvme_cmd *nvme_cmd =
1955                 (void *)nvme_encap_request->NVMe_Command;
1956
1957         /*
1958          * Not all commands require a data transfer. If no data, just return
1959          * without constructing any PRP.
1960          */
1961         if (!data_in_sz && !data_out_sz)
1962                 return;
1963         prp1_entry = &nvme_cmd->prp1;
1964         prp2_entry = &nvme_cmd->prp2;
1965         prp_entry = prp1_entry;
1966         /*
1967          * For the PRP entries, use the specially allocated buffer of
1968          * contiguous memory.
1969          */
1970         prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
1971         prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
1972
1973         /*
1974          * Check if we are within 1 entry of a page boundary; we don't
1975          * want our first entry to be a PRP List entry.
1976          */
1977         page_mask = ioc->page_size - 1;
1978         page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
1979         if (!page_mask_result) {
1980                 /* Bump up to next page boundary. */
1981                 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
1982                 prp_page_dma = prp_page_dma + prp_size;
1983         }
1984
1985         /*
1986          * Set PRP physical pointer, which initially points to the current PRP
1987          * DMA memory page.
1988          */
1989         prp_entry_dma = prp_page_dma;
1990
1991         /* Get physical address and length of the data buffer. */
1992         if (data_in_sz) {
1993                 dma_addr = data_in_dma;
1994                 length = data_in_sz;
1995         } else {
1996                 dma_addr = data_out_dma;
1997                 length = data_out_sz;
1998         }
1999
2000         /* Loop while the length is not zero. */
2001         while (length) {
2002                 /*
2003                  * Check if we need to put a list pointer here if we are at
2004                  * page boundary - prp_size (8 bytes).
2005                  */
2006                 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2007                 if (!page_mask_result) {
2008                         /*
2009                          * This is the last entry in a PRP List, so we need to
2010                          * put a PRP list pointer here.  What this does is:
2011                          *   - bump the current memory pointer to the next
2012                          *     address, which will be the next full page.
2013                          *   - set the PRP Entry to point to that page.  This
2014                          *     is now the PRP List pointer.
2015          *   - bump the PRP Entry pointer to the start of the
2016                          *     next page.  Since all of this PRP memory is
2017                          *     contiguous, no need to get a new page - it's
2018                          *     just the next address.
2019                          */
2020                         prp_entry_dma += prp_size;
2021                         *prp_entry = cpu_to_le64(prp_entry_dma);
2022                         prp_entry++;
2023                 }
2024
2025                 /* Need to handle if entry will be part of a page. */
2026                 offset = dma_addr & page_mask;
2027                 entry_len = ioc->page_size - offset;
2028
2029                 if (prp_entry == prp1_entry) {
2030                         /*
2031                          * Must fill in the first PRP pointer (PRP1) before
2032                          * moving on.
2033                          */
2034                         *prp1_entry = cpu_to_le64(dma_addr);
2035
2036                         /*
2037                          * Now point to the second PRP entry within the
2038                          * command (PRP2).
2039                          */
2040                         prp_entry = prp2_entry;
2041                 } else if (prp_entry == prp2_entry) {
2042                         /*
2043                          * Should the PRP2 entry be a PRP List pointer or just
2044                          * a regular PRP pointer?  If more than one page of
2045                          * data remains, a PRP List pointer must be used.
2046                          */
2047                         if (length > ioc->page_size) {
2048                                 /*
2049                                  * PRP2 will contain a PRP List pointer because
2050                                  * more PRP's are needed with this command. The
2051                                  * list will start at the beginning of the
2052                                  * contiguous buffer.
2053                                  */
2054                                 *prp2_entry = cpu_to_le64(prp_entry_dma);
2055
2056                                 /*
2057                                  * The next PRP Entry will be the start of the
2058                                  * first PRP List.
2059                                  */
2060                                 prp_entry = prp_page;
2061                         } else {
2062                                 /*
2063                                  * After this, the PRP Entries are complete.
2064                                  * This command uses 2 PRP's and no PRP list.
2065                                  */
2066                                 *prp2_entry = cpu_to_le64(dma_addr);
2067                         }
2068                 } else {
2069                         /*
2070                          * Put entry in list and bump the addresses.
2071                          *
2072                          * After PRP1 and PRP2 are filled in, this will fill in
2073                          * all remaining PRP entries in a PRP List, one per
2074                          * each time through the loop.
2075                          */
2076                         *prp_entry = cpu_to_le64(dma_addr);
2077                         prp_entry++;
2078                         prp_entry_dma += prp_size;
2079                 }
2080
2081                 /*
2082                  * Bump the phys address of the command's data buffer by the
2083                  * entry_len.
2084                  */
2085                 dma_addr += entry_len;
2086
2087                 /* Decrement length accounting for last partial page. */
2088                 if (entry_len > length)
2089                         length = 0;
2090                 else
2091                         length -= entry_len;
2092         }
2093 }
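
/*
 * Illustrative sketch, not referenced by the driver: the loop above emits
 * one PRP entry for the (possibly partial) first page and one per page
 * thereafter. A hypothetical helper computing how many data entries a
 * buffer needs (PRP list pointer entries excluded):
 */
static inline u32 example_nvme_prp_count(dma_addr_t dma_addr, size_t length,
	u32 page_size)
{
	u32 offset = dma_addr & (page_size - 1);

	return DIV_ROUND_UP(offset + length, page_size);
}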
2094
2095 /**
2096  * base_make_prp_nvme -
2097  * Prepare PRPs (Physical Region Page) - SGLs specific to NVMe drives only
2098  *
2099  * @ioc:                per adapter object
2100  * @scmd:               SCSI command from the mid-layer
2101  * @mpi_request:        mpi request
2102  * @smid:               msg Index
2103  * @sge_count:          scatter gather element count.
2104  *
2105  * Return:              nothing; the PRP list is built directly into the
2106  *                      request frame and the contiguous PCIe SGL buffer.
2107  */
2108 static void
2109 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2110                 struct scsi_cmnd *scmd,
2111                 Mpi25SCSIIORequest_t *mpi_request,
2112                 u16 smid, int sge_count)
2113 {
2114         int sge_len, num_prp_in_chain = 0;
2115         Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2116         __le64 *curr_buff;
2117         dma_addr_t msg_dma, sge_addr, offset;
2118         u32 page_mask, page_mask_result;
2119         struct scatterlist *sg_scmd;
2120         u32 first_prp_len;
2121         int data_len = scsi_bufflen(scmd);
2122         u32 nvme_pg_size;
2123
2124         nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2125         /*
2126          * NVMe has a very convoluted PRP format.  One PRP is required for
2127          * each page or partial page.  The driver needs to split up the OS
2128          * sg_list entries if they are longer than one page or cross a page
2129          * boundary.  The driver also has to insert a PRP list pointer entry
2130          * as the last entry in each physical page of the PRP list.
2131          *
2132          * NOTE: The first PRP "entry" is actually placed in the first
2133          * SGL entry in the main message as IEEE 64 format.  The 2nd
2134          * entry in the main message is the chain element, and the rest
2135          * of the PRP entries are built in the contiguous pcie buffer.
2136          */
2137         page_mask = nvme_pg_size - 1;
2138
2139         /*
2140          * Native SGL is needed.
2141          * Put a chain element in main message frame that points to the first
2142          * chain buffer.
2143          *
2144          * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
2145          *        a native SGL.
2146          */
2147
2148         /* Set main message chain element pointer */
2149         main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2150         /*
2151          * For NVMe the chain element needs to be the 2nd SG entry in the main
2152          * message.
2153          */
2154         main_chain_element = (Mpi25IeeeSgeChain64_t *)
2155                 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2156
2157         /*
2158          * For the PRP entries, use the specially allocated buffer of
2159          * contiguous memory.  Normal chain buffers can't be used
2160          * because each chain buffer would need to be the size of an OS
2161          * page (4k).
2162          */
2163         curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2164         msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2165
2166         main_chain_element->Address = cpu_to_le64(msg_dma);
2167         main_chain_element->NextChainOffset = 0;
2168         main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2169                         MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2170                         MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2171
2172         /* Build the first PRP; the SGE need not be page aligned */
2173         ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2174         sg_scmd = scsi_sglist(scmd);
2175         sge_addr = sg_dma_address(sg_scmd);
2176         sge_len = sg_dma_len(sg_scmd);
2177
2178         offset = sge_addr & page_mask;
2179         first_prp_len = nvme_pg_size - offset;
2180
2181         ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2182         ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2183
2184         data_len -= first_prp_len;
2185
2186         if (sge_len > first_prp_len) {
2187                 sge_addr += first_prp_len;
2188                 sge_len -= first_prp_len;
2189         } else if (data_len && (sge_len == first_prp_len)) {
2190                 sg_scmd = sg_next(sg_scmd);
2191                 sge_addr = sg_dma_address(sg_scmd);
2192                 sge_len = sg_dma_len(sg_scmd);
2193         }
2194
2195         for (;;) {
2196                 offset = sge_addr & page_mask;
2197
2198                 /* Put PRP pointer due to page boundary*/
2199                 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2200                 if (unlikely(!page_mask_result)) {
2201                         scmd_printk(KERN_NOTICE,
2202                                 scmd, "page boundary curr_buff: 0x%p\n",
2203                                 curr_buff);
2204                         msg_dma += 8;
2205                         *curr_buff = cpu_to_le64(msg_dma);
2206                         curr_buff++;
2207                         num_prp_in_chain++;
2208                 }
2209
2210                 *curr_buff = cpu_to_le64(sge_addr);
2211                 curr_buff++;
2212                 msg_dma += 8;
2213                 num_prp_in_chain++;
2214
2215                 sge_addr += nvme_pg_size;
2216                 sge_len -= nvme_pg_size;
2217                 data_len -= nvme_pg_size;
2218
2219                 if (data_len <= 0)
2220                         break;
2221
2222                 if (sge_len > 0)
2223                         continue;
2224
2225                 sg_scmd = sg_next(sg_scmd);
2226                 sge_addr = sg_dma_address(sg_scmd);
2227                 sge_len = sg_dma_len(sg_scmd);
2228         }
2229
2230         main_chain_element->Length =
2231                 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2233 }
2234
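/**
 * base_is_prp_possible - check whether a native NVMe PRP can be used
 * @ioc: per adapter object
 * @pcie_device: points to the PCIe device's info
 * @scmd: SCSI command from the mid-layer
 * @sge_count: scatter gather element count
 *
 * Return: true if a native NVMe PRP should be built, false if an IEEE SGL
 * should be used instead.
 */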
2235 static bool
2236 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2237         struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2238 {
2239         u32 data_length = 0;
2240         bool build_prp = true;
2241
2242         data_length = scsi_bufflen(scmd);
2243
2244         /* If the data length is <= 16K and the number of SGEs is <= 2,
2245          * we build an IEEE SGL.
2246          */
2247         if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2248                 build_prp = false;
2249
2250         return build_prp;
2251 }
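
/*
 * Note on the threshold above: with the usual 4KB NVMe page size,
 * NVME_PRP_PAGE_SIZE * 4 is 16KB, so transfers of at most 16KB that fit in
 * two SGEs skip PRP construction and go out as plain IEEE SGLs, which the
 * firmware is presumably able to translate cheaply for such small IOs.
 */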
2252
2253 /**
2254  * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2255  * determine if the driver needs to build a native SGL.  If so, that native
2256  * SGL is built in the special contiguous buffers allocated especially for
2257  * PCIe SGL creation.  If the driver will not build a native SGL, return
2258  * 1 and a normal IEEE SGL will be built.  Currently this routine
2259  * supports NVMe.
2260  * @ioc: per adapter object
2261  * @mpi_request: mf request pointer
2262  * @smid: system request message index
2263  * @scmd: scsi command
2264  * @pcie_device: points to the PCIe device's info
2265  *
2266  * Return: 0 if native SGL was built, 1 if no SGL was built
2267  */
2268 static int
2269 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2270         Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2271         struct _pcie_device *pcie_device)
2272 {
2273         int sges_left;
2274
2275         /* Get the SG list pointer and info. */
2276         sges_left = scsi_dma_map(scmd);
2277         if (sges_left < 0) {
2278                 sdev_printk(KERN_ERR, scmd->device,
2279                         "scsi_dma_map failed: request for %d bytes!\n",
2280                         scsi_bufflen(scmd));
2281                 return 1;
2282         }
2283
2284         /* Check if we need to build a native SG list. */
2285         if (base_is_prp_possible(ioc, pcie_device,
2286                                 scmd, sges_left) == 0) {
2287                 /* PRP is not possible for this IO; fall back to IEEE SGL. */
2288                 goto out;
2289         }
2290
2291         /*
2292          * Build native NVMe PRP.
2293          */
2294         base_make_prp_nvme(ioc, scmd, mpi_request,
2295                         smid, sges_left);
2296
2297         return 0;
2298 out:
2299         scsi_dma_unmap(scmd);
2300         return 1;
2301 }
2302
2303 /**
2304  * _base_add_sg_single_ieee - add sg element for IEEE format
2305  * @paddr: virtual address for SGE
2306  * @flags: SGE flags
2307  * @chain_offset: number of 128 byte elements from start of segment
2308  * @length: data transfer length
2309  * @dma_addr: Physical address
2310  */
2311 static void
2312 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2313         dma_addr_t dma_addr)
2314 {
2315         Mpi25IeeeSgeChain64_t *sgel = paddr;
2316
2317         sgel->Flags = flags;
2318         sgel->NextChainOffset = chain_offset;
2319         sgel->Length = cpu_to_le32(length);
2320         sgel->Address = cpu_to_le64(dma_addr);
2321 }
2322
2323 /**
2324  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2325  * @ioc: per adapter object
2326  * @paddr: virtual address for SGE
2327  *
2328  * Create a zero length scatter gather entry to ensure the IOC's hardware has
2329  * something to use if the target device goes brain dead and tries
2330  * to send data even when none is asked for.
2331  */
2332 static void
2333 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2334 {
2335         u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2336                 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2337                 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2338
2339         _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2340 }
2341
2342 /**
2343  * _base_build_sg_scmd - main sg creation routine
2344  *              pcie_device is unused here!
2345  * @ioc: per adapter object
2346  * @scmd: scsi command
2347  * @smid: system request message index
2348  * @unused: unused pcie_device pointer
2349  * Context: none.
2350  *
2351  * The main routine that builds scatter gather table from a given
2352  * scsi request sent via the .queuecommand main handler.
2353  *
2354  * Return: 0 success, anything else error
2355  */
2356 static int
2357 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2358         struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2359 {
2360         Mpi2SCSIIORequest_t *mpi_request;
2361         dma_addr_t chain_dma;
2362         struct scatterlist *sg_scmd;
2363         void *sg_local, *chain;
2364         u32 chain_offset;
2365         u32 chain_length;
2366         u32 chain_flags;
2367         int sges_left;
2368         u32 sges_in_segment;
2369         u32 sgl_flags;
2370         u32 sgl_flags_last_element;
2371         u32 sgl_flags_end_buffer;
2372         struct chain_tracker *chain_req;
2373
2374         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2375
2376         /* init scatter gather flags */
2377         sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2378         if (scmd->sc_data_direction == DMA_TO_DEVICE)
2379                 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2380         sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2381             << MPI2_SGE_FLAGS_SHIFT;
2382         sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2383             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2384             << MPI2_SGE_FLAGS_SHIFT;
2385         sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2386
2387         sg_scmd = scsi_sglist(scmd);
2388         sges_left = scsi_dma_map(scmd);
2389         if (sges_left < 0) {
2390                 sdev_printk(KERN_ERR, scmd->device,
2391                  "scsi_dma_map failed: request for %d bytes!\n",
2392                  scsi_bufflen(scmd));
2393                 return -ENOMEM;
2394         }
2395
2396         sg_local = &mpi_request->SGL;
2397         sges_in_segment = ioc->max_sges_in_main_message;
2398         if (sges_left <= sges_in_segment)
2399                 goto fill_in_last_segment;
2400
2401         mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2402             (sges_in_segment * ioc->sge_size))/4;
2403
2404         /* fill in main message segment when there is a chain following */
2405         while (sges_in_segment) {
2406                 if (sges_in_segment == 1)
2407                         ioc->base_add_sg_single(sg_local,
2408                             sgl_flags_last_element | sg_dma_len(sg_scmd),
2409                             sg_dma_address(sg_scmd));
2410                 else
2411                         ioc->base_add_sg_single(sg_local, sgl_flags |
2412                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2413                 sg_scmd = sg_next(sg_scmd);
2414                 sg_local += ioc->sge_size;
2415                 sges_left--;
2416                 sges_in_segment--;
2417         }
2418
2419         /* initializing the chain flags and pointers */
2420         chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2421         chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2422         if (!chain_req)
2423                 return -1;
2424         chain = chain_req->chain_buffer;
2425         chain_dma = chain_req->chain_buffer_dma;
2426         do {
2427                 sges_in_segment = (sges_left <=
2428                     ioc->max_sges_in_chain_message) ? sges_left :
2429                     ioc->max_sges_in_chain_message;
2430                 chain_offset = (sges_left == sges_in_segment) ?
2431                     0 : (sges_in_segment * ioc->sge_size)/4;
2432                 chain_length = sges_in_segment * ioc->sge_size;
2433                 if (chain_offset) {
2434                         chain_offset = chain_offset <<
2435                             MPI2_SGE_CHAIN_OFFSET_SHIFT;
2436                         chain_length += ioc->sge_size;
2437                 }
2438                 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2439                     chain_length, chain_dma);
2440                 sg_local = chain;
2441                 if (!chain_offset)
2442                         goto fill_in_last_segment;
2443
2444                 /* fill in chain segments */
2445                 while (sges_in_segment) {
2446                         if (sges_in_segment == 1)
2447                                 ioc->base_add_sg_single(sg_local,
2448                                     sgl_flags_last_element |
2449                                     sg_dma_len(sg_scmd),
2450                                     sg_dma_address(sg_scmd));
2451                         else
2452                                 ioc->base_add_sg_single(sg_local, sgl_flags |
2453                                     sg_dma_len(sg_scmd),
2454                                     sg_dma_address(sg_scmd));
2455                         sg_scmd = sg_next(sg_scmd);
2456                         sg_local += ioc->sge_size;
2457                         sges_left--;
2458                         sges_in_segment--;
2459                 }
2460
2461                 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2462                 if (!chain_req)
2463                         return -1;
2464                 chain = chain_req->chain_buffer;
2465                 chain_dma = chain_req->chain_buffer_dma;
2466         } while (1);
2467
2468
2469  fill_in_last_segment:
2470
2471         /* fill the last segment */
2472         while (sges_left) {
2473                 if (sges_left == 1)
2474                         ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2475                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2476                 else
2477                         ioc->base_add_sg_single(sg_local, sgl_flags |
2478                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2479                 sg_scmd = sg_next(sg_scmd);
2480                 sg_local += ioc->sge_size;
2481                 sges_left--;
2482         }
2483
2484         return 0;
2485 }
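
/*
 * Note on the ChainOffset arithmetic above: for MPI2 SGEs the ChainOffset
 * field counts 32-bit words from the start of the request frame, hence the
 * divisions by 4; the IEEE variant below counts whole SGE elements instead.
 */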
2486
2487 /**
2488  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2489  * @ioc: per adapter object
2490  * @scmd: scsi command
2491  * @smid: system request message index
2492  * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2493  * constructed as needed.
2494  * Context: none.
2495  *
2496  * The main routine that builds scatter gather table from a given
2497  * scsi request sent via the .queuecommand main handler.
2498  *
2499  * Return: 0 success, anything else error
2500  */
2501 static int
2502 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2503         struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2504 {
2505         Mpi25SCSIIORequest_t *mpi_request;
2506         dma_addr_t chain_dma;
2507         struct scatterlist *sg_scmd;
2508         void *sg_local, *chain;
2509         u32 chain_offset;
2510         u32 chain_length;
2511         int sges_left;
2512         u32 sges_in_segment;
2513         u8 simple_sgl_flags;
2514         u8 simple_sgl_flags_last;
2515         u8 chain_sgl_flags;
2516         struct chain_tracker *chain_req;
2517
2518         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2519
2520         /* init scatter gather flags */
2521         simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2522             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2523         simple_sgl_flags_last = simple_sgl_flags |
2524             MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2525         chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2526             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2527
2528         /* Check if we need to build a native SG list. */
2529         if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2530                         smid, scmd, pcie_device) == 0)) {
2531                 /* We built a native SG list, just return. */
2532                 return 0;
2533         }
2534
2535         sg_scmd = scsi_sglist(scmd);
2536         sges_left = scsi_dma_map(scmd);
2537         if (sges_left < 0) {
2538                 sdev_printk(KERN_ERR, scmd->device,
2539                         "scsi_dma_map failed: request for %d bytes!\n",
2540                         scsi_bufflen(scmd));
2541                 return -ENOMEM;
2542         }
2543
2544         sg_local = &mpi_request->SGL;
2545         sges_in_segment = (ioc->request_sz -
2546                    offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2547         if (sges_left <= sges_in_segment)
2548                 goto fill_in_last_segment;
2549
2550         mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2551             (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2552
2553         /* fill in main message segment when there is a chain following */
2554         while (sges_in_segment > 1) {
2555                 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2556                     sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2557                 sg_scmd = sg_next(sg_scmd);
2558                 sg_local += ioc->sge_size_ieee;
2559                 sges_left--;
2560                 sges_in_segment--;
2561         }
2562
2563         /* initializing the pointers */
2564         chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2565         if (!chain_req)
2566                 return -1;
2567         chain = chain_req->chain_buffer;
2568         chain_dma = chain_req->chain_buffer_dma;
2569         do {
2570                 sges_in_segment = (sges_left <=
2571                     ioc->max_sges_in_chain_message) ? sges_left :
2572                     ioc->max_sges_in_chain_message;
2573                 chain_offset = (sges_left == sges_in_segment) ?
2574                     0 : sges_in_segment;
2575                 chain_length = sges_in_segment * ioc->sge_size_ieee;
2576                 if (chain_offset)
2577                         chain_length += ioc->sge_size_ieee;
2578                 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2579                     chain_offset, chain_length, chain_dma);
2580
2581                 sg_local = chain;
2582                 if (!chain_offset)
2583                         goto fill_in_last_segment;
2584
2585                 /* fill in chain segments */
2586                 while (sges_in_segment) {
2587                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2588                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2589                         sg_scmd = sg_next(sg_scmd);
2590                         sg_local += ioc->sge_size_ieee;
2591                         sges_left--;
2592                         sges_in_segment--;
2593                 }
2594
2595                 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2596                 if (!chain_req)
2597                         return -1;
2598                 chain = chain_req->chain_buffer;
2599                 chain_dma = chain_req->chain_buffer_dma;
2600         } while (1);
2601
2602
2603  fill_in_last_segment:
2604
2605         /* fill the last segment */
2606         while (sges_left > 0) {
2607                 if (sges_left == 1)
2608                         _base_add_sg_single_ieee(sg_local,
2609                             simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2610                             sg_dma_address(sg_scmd));
2611                 else
2612                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2613                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2614                 sg_scmd = sg_next(sg_scmd);
2615                 sg_local += ioc->sge_size_ieee;
2616                 sges_left--;
2617         }
2618
2619         return 0;
2620 }
2621
2622 /**
2623  * _base_build_sg_ieee - build generic sg for IEEE format
2624  * @ioc: per adapter object
2625  * @psge: virtual address for SGE
2626  * @data_out_dma: physical address for WRITES
2627  * @data_out_sz: data xfer size for WRITES
2628  * @data_in_dma: physical address for READS
2629  * @data_in_sz: data xfer size for READS
2630  */
2631 static void
2632 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2633         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2634         size_t data_in_sz)
2635 {
2636         u8 sgl_flags;
2637
2638         if (!data_out_sz && !data_in_sz) {
2639                 _base_build_zero_len_sge_ieee(ioc, psge);
2640                 return;
2641         }
2642
2643         if (data_out_sz && data_in_sz) {
2644                 /* WRITE sgel first */
2645                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2646                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2647                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2648                     data_out_dma);
2649
2650                 /* incr sgel */
2651                 psge += ioc->sge_size_ieee;
2652
2653                 /* READ sgel last */
2654                 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2655                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2656                     data_in_dma);
2657         } else if (data_out_sz) /* WRITE */ {
2658                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2659                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2660                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2661                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2662                     data_out_dma);
2663         } else if (data_in_sz) /* READ */ {
2664                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2665                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2666                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2667                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2668                     data_in_dma);
2669         }
2670 }
2671
2672 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
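
/*
 * si_meminfo() reports totalram in pages, so shifting left by
 * (PAGE_SHIFT - 10) converts pages to kilobytes (multiply by the page
 * size, divide by 1024); with 4KB pages the shift is 2, e.g. 1000 pages
 * becomes 4000 kB.
 */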
2673
2674 /**
2675  * _base_config_dma_addressing - set dma addressing
2676  * @ioc: per adapter object
2677  * @pdev: PCI device struct
2678  *
2679  * Return: 0 for success, non-zero for failure.
2680  */
2681 static int
2682 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2683 {
2684         u64 required_mask, coherent_mask;
2685         struct sysinfo s;
2686
2687         if (ioc->is_mcpu_endpoint)
2688                 goto try_32bit;
2689
2690         required_mask = dma_get_required_mask(&pdev->dev);
2691         if (sizeof(dma_addr_t) == 4 || required_mask <= DMA_BIT_MASK(32))
2692                 goto try_32bit;
2693
2694         if (ioc->dma_mask)
2695                 coherent_mask = DMA_BIT_MASK(64);
2696         else
2697                 coherent_mask = DMA_BIT_MASK(32);
2698
2699         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
2700             dma_set_coherent_mask(&pdev->dev, coherent_mask))
2701                 goto try_32bit;
2702
2703         ioc->base_add_sg_single = &_base_add_sg_single_64;
2704         ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2705         ioc->dma_mask = 64;
2706         goto out;
2707
2708  try_32bit:
2709         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
2710                 return -ENODEV;
2711
2712         ioc->base_add_sg_single = &_base_add_sg_single_32;
2713         ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2714         ioc->dma_mask = 32;
2715  out:
2716         si_meminfo(&s);
2717         ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2718                  ioc->dma_mask, convert_to_kb(s.totalram));
2719
2720         return 0;
2721 }
2722
2723 static int
2724 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
2725                                       struct pci_dev *pdev)
2726 {
2727         if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2728                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2729                         return -ENODEV;
2730         }
2731         return 0;
2732 }
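
/*
 * Note: pci_set_consistent_dma_mask() is the legacy PCI wrapper around
 * dma_set_coherent_mask(); the 64-then-32 fallback here mirrors the
 * coherent mask selection in _base_config_dma_addressing() above.
 */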
2733
2734 /**
2735  * _base_check_enable_msix - check whether the controller is MSI-X capable
2736  * @ioc: per adapter object
2737  *
2738  * Check to see if card is capable of MSIX, and set number
2739  * of available msix vectors
2740  */
2741 static int
2742 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2743 {
2744         int base;
2745         u16 message_control;
2746
2747         /* Check whether this is a SAS2008 B0 controller;
2748          * if so, use IO-APIC instead of MSI-X.
2749          */
2750         if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2751             ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2752                 return -EINVAL;
2753         }
2754
2755         base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2756         if (!base) {
2757                 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2758                 return -EINVAL;
2759         }
2760
2761         /* get msix vector count */
2762         /* NUMA_IO not supported for older controllers */
2763         if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2764             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2765             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2766             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2767             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2768             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2769             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2770                 ioc->msix_vector_count = 1;
2771         else {
2772                 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2773                 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2774         }
2775         dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2776                                   ioc->msix_vector_count));
2777         return 0;
2778 }
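/*
 * Editor's note: the PCI MSI-X Message Control register encodes the table
 * size as N-1, hence the "+ 1" above.  Illustrative example (hypothetical
 * register value): message_control == 0x005F gives
 * (0x5F & 0x3FF) + 1 == 96 MSI-X vectors.
 */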
2779
2780 /**
2781  * _base_free_irq - free irq
2782  * @ioc: per adapter object
2783  *
2784  * Free each reply_queue on the list, releasing its IRQ.
2785  */
2786 static void
2787 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2788 {
2789         struct adapter_reply_queue *reply_q, *next;
2790
2791         if (list_empty(&ioc->reply_queue_list))
2792                 return;
2793
2794         list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2795                 list_del(&reply_q->list);
2796                 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2797                          reply_q);
2798                 kfree(reply_q);
2799         }
2800 }
2801
2802 /**
2803  * _base_request_irq - request irq
2804  * @ioc: per adapter object
2805  * @index: msix index into vector table
2806  *
2807  * Allocate a reply_queue, request its IRQ, and insert it into the list.
2808  */
2809 static int
2810 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2811 {
2812         struct pci_dev *pdev = ioc->pdev;
2813         struct adapter_reply_queue *reply_q;
2814         int r;
2815
2816         reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2817         if (!reply_q) {
2818                 ioc_err(ioc, "unable to allocate memory %zu!\n",
2819                         sizeof(struct adapter_reply_queue));
2820                 return -ENOMEM;
2821         }
2822         reply_q->ioc = ioc;
2823         reply_q->msix_index = index;
2824
2825         atomic_set(&reply_q->busy, 0);
2826         if (ioc->msix_enable)
2827                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2828                     ioc->driver_name, ioc->id, index);
2829         else
2830                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2831                     ioc->driver_name, ioc->id);
2832         r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2833                         IRQF_SHARED, reply_q->name, reply_q);
2834         if (r) {
2835                 pr_err("%s: unable to allocate interrupt %d!\n",
2836                        reply_q->name, pci_irq_vector(pdev, index));
2837                 kfree(reply_q);
2838                 return -EBUSY;
2839         }
2840
2841         INIT_LIST_HEAD(&reply_q->list);
2842         list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2843         return 0;
2844 }
2845
2846 /**
2847  * _base_assign_reply_queues - assign an msix index to each cpu
2848  * @ioc: per adapter object
2849  *
2850  * The end user would need to set the affinity via /proc/irq/#/smp_affinity
2851  *
2852  * It would be nice if we could call irq_set_affinity, however it is not
2853  * an exported symbol
2854  */
2855 static void
2856 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2857 {
2858         unsigned int cpu, nr_cpus, nr_msix, index = 0;
2859         struct adapter_reply_queue *reply_q;
2860
2861         if (!_base_is_controller_msix_enabled(ioc))
2862                 return;
2863         ioc->msix_load_balance = false;
2864         if (ioc->reply_queue_count < num_online_cpus()) {
2865                 ioc->msix_load_balance = true;
2866                 return;
2867         }
2868
2869         memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2870
2871         nr_cpus = num_online_cpus();
2872         nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2873                                                ioc->facts.MaxMSIxVectors);
2874         if (!nr_msix)
2875                 return;
2876
2877         if (smp_affinity_enable) {
2878                 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2879                         const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
2880                                                         reply_q->msix_index);
2881                         if (!mask) {
2882                                 ioc_warn(ioc, "no affinity for msi %x\n",
2883                                          reply_q->msix_index);
2884                                 continue;
2885                         }
2886
2887                         for_each_cpu_and(cpu, mask, cpu_online_mask) {
2888                                 if (cpu >= ioc->cpu_msix_table_sz)
2889                                         break;
2890                                 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2891                         }
2892                 }
2893                 return;
2894         }
2895         cpu = cpumask_first(cpu_online_mask);
2896
2897         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2898
2899                 unsigned int i, group = nr_cpus / nr_msix;
2900
2901                 if (cpu >= nr_cpus)
2902                         break;
2903
2904                 if (index < nr_cpus % nr_msix)
2905                         group++;
2906
2907                 for (i = 0 ; i < group ; i++) {
2908                         ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2909                         cpu = cpumask_next(cpu, cpu_online_mask);
2910                 }
2911                 index++;
2912         }
2913 }
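/*
 * Editor's note: a worked example of the fallback (non-affinity) mapping
 * above, with hypothetical counts nr_cpus = 16 and nr_msix = 6:
 * group = 16 / 6 = 2 and 16 % 6 = 4, so the first 4 reply queues each get
 * 3 CPUs and the remaining 2 queues get 2 CPUs (4 * 3 + 2 * 2 = 16), i.e.
 * online CPUs are handed out to MSI-X vectors in contiguous blocks.
 */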
2914
2915 /**
2916  * _base_disable_msix - disables msix
2917  * @ioc: per adapter object
2918  *
2919  */
2920 static void
2921 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
2922 {
2923         if (!ioc->msix_enable)
2924                 return;
2925         pci_disable_msix(ioc->pdev);
2926         ioc->msix_enable = 0;
2927 }
2928
2929 /**
2930  * _base_enable_msix - enable msix, falling back to io_apic
2931  * @ioc: per adapter object
2932  *
2933  */
2934 static int
2935 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2936 {
2937         int r;
2938         int i, local_max_msix_vectors;
2939         u8 try_msix = 0;
2940         unsigned int irq_flags = PCI_IRQ_MSIX;
2941
2942         if (msix_disable == -1 || msix_disable == 0)
2943                 try_msix = 1;
2944
2945         if (!try_msix)
2946                 goto try_ioapic;
2947
2948         if (_base_check_enable_msix(ioc) != 0)
2949                 goto try_ioapic;
2950
2951         ioc->reply_queue_count = min_t(int, ioc->cpu_count,
2952                 ioc->msix_vector_count);
2953
2954         ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
2955                  ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
2956
2957         if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
2958                 local_max_msix_vectors = (reset_devices) ? 1 : 8;
2959         else
2960                 local_max_msix_vectors = max_msix_vectors;
2961
2962         if (local_max_msix_vectors > 0)
2963                 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
2964                         ioc->reply_queue_count);
2965         else if (local_max_msix_vectors == 0)
2966                 goto try_ioapic;
2967
2968         if (ioc->msix_vector_count < ioc->cpu_count)
2969                 smp_affinity_enable = 0;
2970
2971         if (smp_affinity_enable)
2972                 irq_flags |= PCI_IRQ_AFFINITY;
2973
2974         r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
2975                                   irq_flags);
2976         if (r < 0) {
2977                 dfailprintk(ioc,
2978                             ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
2979                                      r));
2980                 goto try_ioapic;
2981         }
2982
2983         ioc->msix_enable = 1;
2984         ioc->reply_queue_count = r;
2985         for (i = 0; i < ioc->reply_queue_count; i++) {
2986                 r = _base_request_irq(ioc, i);
2987                 if (r) {
2988                         _base_free_irq(ioc);
2989                         _base_disable_msix(ioc);
2990                         goto try_ioapic;
2991                 }
2992         }
2993
2994         return 0;
2995
2996 /* fall back to io_apic interrupt routing */
2997  try_ioapic:
2998
2999         ioc->reply_queue_count = 1;
3000         r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3001         if (r < 0) {
3002                 dfailprintk(ioc,
3003                             ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3004                                      r));
3005         } else
3006                 r = _base_request_irq(ioc, 0);
3007
3008         return r;
3009 }
3010
3011 /**
3012  * mpt3sas_base_unmap_resources - free controller resources
3013  * @ioc: per adapter object
3014  */
3015 static void
3016 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3017 {
3018         struct pci_dev *pdev = ioc->pdev;
3019
3020         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3021
3022         _base_free_irq(ioc);
3023         _base_disable_msix(ioc);
3024
3025         kfree(ioc->replyPostRegisterIndex);
3026         ioc->replyPostRegisterIndex = NULL;
3027
3028
3029         if (ioc->chip_phys) {
3030                 iounmap(ioc->chip);
3031                 ioc->chip_phys = 0;
3032         }
3033
3034         if (pci_is_enabled(pdev)) {
3035                 pci_release_selected_regions(ioc->pdev, ioc->bars);
3036                 pci_disable_pcie_error_reporting(pdev);
3037                 pci_disable_device(pdev);
3038         }
3039 }
3040
3041 /**
3042  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3043  * @ioc: per adapter object
3044  *
3045  * Return: 0 for success, non-zero for failure.
3046  */
3047 int
3048 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3049 {
3050         struct pci_dev *pdev = ioc->pdev;
3051         u32 memap_sz;
3052         u32 pio_sz;
3053         int i, r = 0;
3054         u64 pio_chip = 0;
3055         phys_addr_t chip_phys = 0;
3056         struct adapter_reply_queue *reply_q;
3057
3058         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3059
3060         ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3061         if (pci_enable_device_mem(pdev)) {
3062                 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3063                 ioc->bars = 0;
3064                 return -ENODEV;
3065         }
3066
3067
3068         if (pci_request_selected_regions(pdev, ioc->bars,
3069             ioc->driver_name)) {
3070                 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3071                 ioc->bars = 0;
3072                 r = -ENODEV;
3073                 goto out_fail;
3074         }
3075
3076 /* AER (Advanced Error Reporting) hooks */
3077         pci_enable_pcie_error_reporting(pdev);
3078
3079         pci_set_master(pdev);
3080
3081
3082         if (_base_config_dma_addressing(ioc, pdev) != 0) {
3083                 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3084                 r = -ENODEV;
3085                 goto out_fail;
3086         }
3087
3088         for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3089              (!memap_sz || !pio_sz); i++) {
3090                 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3091                         if (pio_sz)
3092                                 continue;
3093                         pio_chip = (u64)pci_resource_start(pdev, i);
3094                         pio_sz = pci_resource_len(pdev, i);
3095                 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3096                         if (memap_sz)
3097                                 continue;
3098                         ioc->chip_phys = pci_resource_start(pdev, i);
3099                         chip_phys = ioc->chip_phys;
3100                         memap_sz = pci_resource_len(pdev, i);
3101                         ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3102                 }
3103         }
3104
3105         if (ioc->chip == NULL) {
3106                 ioc_err(ioc, "unable to map adapter memory or resource not found!\n");
3107                 r = -EINVAL;
3108                 goto out_fail;
3109         }
3110
3111         _base_mask_interrupts(ioc);
3112
3113         r = _base_get_ioc_facts(ioc);
3114         if (r)
3115                 goto out_fail;
3116
3117         if (!ioc->rdpq_array_enable_assigned) {
3118                 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3119                 ioc->rdpq_array_enable_assigned = 1;
3120         }
3121
3122         r = _base_enable_msix(ioc);
3123         if (r)
3124                 goto out_fail;
3125
3126         if (!ioc->is_driver_loading)
3127                 _base_init_irqpolls(ioc);
3128         /* Use the Combined reply queue feature only for SAS3 C0 & higher
3129          * revision HBAs and also only when reply queue count is greater than 8
3130          */
3131         if (ioc->combined_reply_queue) {
3132                 /* Determine the Supplemental Reply Post Host Index Register
3133                  * addresses. These registers start at offset
3134                  * MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and each subsequent
3135                  * register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes
3136                  * after the previous one.
3137                  */
3138                 ioc->replyPostRegisterIndex = kcalloc(
3139                      ioc->combined_reply_index_count,
3140                      sizeof(resource_size_t *), GFP_KERNEL);
3141                 if (!ioc->replyPostRegisterIndex) {
3142                         dfailprintk(ioc,
3143                                     ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
3144                         r = -ENOMEM;
3145                         goto out_fail;
3146                 }
3147
3148                 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3149                         ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3150                              ((u8 __force *)&ioc->chip->Doorbell +
3151                              MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3152                              (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3153                 }
3154         }
3155
3156         if (ioc->is_warpdrive) {
3157                 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3158                     &ioc->chip->ReplyPostHostIndex;
3159
3160                 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3161                         ioc->reply_post_host_index[i] =
3162                         (resource_size_t __iomem *)
3163                         ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3164                         * 4)));
3165         }
3166
3167         list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3168                 pr_info("%s: %s enabled: IRQ %d\n",
3169                         reply_q->name,
3170                         ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3171                         pci_irq_vector(ioc->pdev, reply_q->msix_index));
3172
3173         ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3174                  &chip_phys, ioc->chip, memap_sz);
3175         ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3176                  (unsigned long long)pio_chip, pio_sz);
3177
3178         /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3179         pci_save_state(pdev);
3180         return 0;
3181
3182  out_fail:
3183         mpt3sas_base_unmap_resources(ioc);
3184         return r;
3185 }
3186
3187 /**
3188  * mpt3sas_base_get_msg_frame - obtain request mf pointer
3189  * @ioc: per adapter object
3190  * @smid: system request message index(smid zero is invalid)
3191  *
3192  * Return: virt pointer to message frame.
3193  */
3194 void *
3195 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3196 {
3197         return (void *)(ioc->request + (smid * ioc->request_sz));
3198 }
3199
3200 /**
3201  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3202  * @ioc: per adapter object
3203  * @smid: system request message index
3204  *
3205  * Return: virt pointer to sense buffer.
3206  */
3207 void *
3208 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3209 {
3210         return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3211 }
3212
3213 /**
3214  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3215  * @ioc: per adapter object
3216  * @smid: system request message index
3217  *
3218  * Return: phys pointer to the low 32bit address of the sense buffer.
3219  */
3220 __le32
3221 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3222 {
3223         return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3224             SCSI_SENSE_BUFFERSIZE));
3225 }
3226
3227 /**
3228  * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3229  * @ioc: per adapter object
3230  * @smid: system request message index
3231  *
3232  * Return: virt pointer to a PCIe SGL.
3233  */
3234 void *
3235 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3236 {
3237         return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3238 }
3239
3240 /**
3241  * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3242  * @ioc: per adapter object
3243  * @smid: system request message index
3244  *
3245  * Return: phys pointer to the address of the PCIe buffer.
3246  */
3247 dma_addr_t
3248 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3249 {
3250         return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3251 }
3252
3253 /**
3254  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3255  * @ioc: per adapter object
3256  * @phys_addr: lower 32 physical addr of the reply
3257  *
3258  * Converts 32bit lower physical addr into a virt address.
3259  */
3260 void *
3261 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3262 {
3263         if (!phys_addr)
3264                 return NULL;
3265         return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3266 }
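/*
 * Editor's note: the firmware hands back only the low 32 bits of the reply
 * frame's DMA address, so the virtual address is recovered as an offset
 * from the start of the reply pool.  With hypothetical values
 * (u32)ioc->reply_dma == 0xf0000000 and phys_addr == 0xf0000200, the
 * returned pointer is ioc->reply + 0x200.
 */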
3267
3268 static inline u8
3269 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
3270 {
3271         /* Enables reply_queue load balancing */
3272         if (ioc->msix_load_balance)
3273                 return ioc->reply_queue_count ?
3274                     base_mod64(atomic64_add_return(1,
3275                     &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3276
3277         return ioc->cpu_msix_table[raw_smp_processor_id()];
3278 }
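/*
 * Editor's note: with msix_load_balance set and, say, reply_queue_count == 8,
 * total_io_cnt increments once per I/O and base_mod64() cycles the result
 * through indices 1, 2, ... 7, 0, 1, ... so submissions are spread
 * round-robin across all reply queues; otherwise the index comes from the
 * per-CPU table filled in by _base_assign_reply_queues().
 */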
3279
3280 /**
3281  * mpt3sas_base_get_smid - obtain a free smid from internal queue
3282  * @ioc: per adapter object
3283  * @cb_idx: callback index
3284  *
3285  * Return: smid (zero is invalid)
3286  */
3287 u16
3288 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3289 {
3290         unsigned long flags;
3291         struct request_tracker *request;
3292         u16 smid;
3293
3294         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3295         if (list_empty(&ioc->internal_free_list)) {
3296                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3297                 ioc_err(ioc, "%s: smid not available\n", __func__);
3298                 return 0;
3299         }
3300
3301         request = list_entry(ioc->internal_free_list.next,
3302             struct request_tracker, tracker_list);
3303         request->cb_idx = cb_idx;
3304         smid = request->smid;
3305         list_del(&request->tracker_list);
3306         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3307         return smid;
3308 }
3309
3310 /**
3311  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3312  * @ioc: per adapter object
3313  * @cb_idx: callback index
3314  * @scmd: pointer to scsi command object
3315  *
3316  * Return: smid (zero is invalid)
3317  */
3318 u16
3319 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3320         struct scsi_cmnd *scmd)
3321 {
3322         struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3323         unsigned int tag = scmd->request->tag;
3324         u16 smid;
3325
3326         smid = tag + 1;
3327         request->cb_idx = cb_idx;
3328         request->msix_io = _base_get_msix_index(ioc);
3329         request->smid = smid;
3330         INIT_LIST_HEAD(&request->chain_list);
3331         return smid;
3332 }
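/*
 * Editor's note: SCSI I/O smids are derived directly from the block layer
 * tag rather than taken from a free list; since smid 0 is reserved as
 * invalid, tag 0 maps to smid 1, tag 1 to smid 2, and so on.
 */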
3333
3334 /**
3335  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3336  * @ioc: per adapter object
3337  * @cb_idx: callback index
3338  *
3339  * Return: smid (zero is invalid)
3340  */
3341 u16
3342 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3343 {
3344         unsigned long flags;
3345         struct request_tracker *request;
3346         u16 smid;
3347
3348         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3349         if (list_empty(&ioc->hpr_free_list)) {
3350                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3351                 return 0;
3352         }
3353
3354         request = list_entry(ioc->hpr_free_list.next,
3355             struct request_tracker, tracker_list);
3356         request->cb_idx = cb_idx;
3357         smid = request->smid;
3358         list_del(&request->tracker_list);
3359         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3360         return smid;
3361 }
3362
3363 static void
3364 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3365 {
3366         /*
3367          * See the _wait_for_commands_to_complete() call with regard to this code.
3368          */
3369         if (ioc->shost_recovery && ioc->pending_io_count) {
3370                 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3371                 if (ioc->pending_io_count == 0)
3372                         wake_up(&ioc->reset_wq);
3373         }
3374 }
3375
3376 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3377                            struct scsiio_tracker *st)
3378 {
3379         if (WARN_ON(st->smid == 0))
3380                 return;
3381         st->cb_idx = 0xFF;
3382         st->direct_io = 0;
3383         atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3384         st->smid = 0;
3385 }
3386
3387 /**
3388  * mpt3sas_base_free_smid - put smid back on free_list
3389  * @ioc: per adapter object
3390  * @smid: system request message index
3391  */
3392 void
3393 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3394 {
3395         unsigned long flags;
3396         int i;
3397
3398         if (smid < ioc->hi_priority_smid) {
3399                 struct scsiio_tracker *st;
3400                 void *request;
3401
3402                 st = _get_st_from_smid(ioc, smid);
3403                 if (!st) {
3404                         _base_recovery_check(ioc);
3405                         return;
3406                 }
3407
3408                 /* Clear MPI request frame */
3409                 request = mpt3sas_base_get_msg_frame(ioc, smid);
3410                 memset(request, 0, ioc->request_sz);
3411
3412                 mpt3sas_base_clear_st(ioc, st);
3413                 _base_recovery_check(ioc);
3414                 return;
3415         }
3416
3417         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3418         if (smid < ioc->internal_smid) {
3419                 /* hi-priority */
3420                 i = smid - ioc->hi_priority_smid;
3421                 ioc->hpr_lookup[i].cb_idx = 0xFF;
3422                 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3423         } else if (smid <= ioc->hba_queue_depth) {
3424                 /* internal queue */
3425                 i = smid - ioc->internal_smid;
3426                 ioc->internal_lookup[i].cb_idx = 0xFF;
3427                 list_add(&ioc->internal_lookup[i].tracker_list,
3428                     &ioc->internal_free_list);
3429         }
3430         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3431 }
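/*
 * Editor's note: the smid ranges handled above partition the request pool as
 * follows: 1 .. hi_priority_smid-1 are SCSI I/O smids (tracked in the scsi
 * command private data), hi_priority_smid .. internal_smid-1 are hi-priority
 * smids, and internal_smid .. hba_queue_depth are internal (driver-originated)
 * smids kept on free lists.
 */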
3432
3433 /**
3434  * _base_mpi_ep_writeq - 64 bit write to MMIO, done as two 32 bit writes
3435  * @b: data payload
3436  * @addr: address in MMIO space
3437  * @writeq_lock: spin lock
3438  *
3439  * This is special handling for the MPI endpoint to cope with a 32 bit
3440  * environment, where it is not guaranteed that the entire 64 bit word
3441  * is sent in one transfer.
3442  */
3443 static inline void
3444 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3445                                         spinlock_t *writeq_lock)
3446 {
3447         unsigned long flags;
3448
3449         spin_lock_irqsave(writeq_lock, flags);
3450         __raw_writel((u32)(b), addr);
3451         __raw_writel((u32)(b >> 32), (addr + 4));
3452         spin_unlock_irqrestore(writeq_lock, flags);
3453 }
3454
3455 /**
3456  * _base_writeq - 64 bit write to MMIO
3457  * @b: data payload
3458  * @addr: address in MMIO space
3459  * @writeq_lock: spin lock
3460  *
3461  * Glue for handling an atomic 64 bit word write to MMIO. This special
3462  * handling takes care of a 32 bit environment, where it is not guaranteed
3463  * that the entire word is sent in one transfer.
3464  */
3465 #if defined(writeq) && defined(CONFIG_64BIT)
3466 static inline void
3467 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3468 {
3469         wmb();
3470         __raw_writeq(b, addr);
3471         barrier();
3472 }
3473 #else
3474 static inline void
3475 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3476 {
3477         _base_mpi_ep_writeq(b, addr, writeq_lock);
3478 }
3479 #endif
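/*
 * Editor's note: on targets without a native 64 bit writeq(), the request
 * descriptor is posted as two raw 32 bit writes under writeq_lock, low half
 * first.  For an illustrative payload b == 0x1122334455667788ULL:
 *
 *	__raw_writel(0x55667788, addr);		// (u32)b
 *	__raw_writel(0x11223344, addr + 4);	// (u32)(b >> 32)
 *
 * The lock keeps the two halves of one descriptor from interleaving with
 * another CPU posting a descriptor at the same time.
 */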
3480
3481 /**
3482  * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3483  * @ioc: per adapter object
3484  * @smid: system request message index
3485  * @handle: device handle
3486  */
3487 static void
3488 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3489 {
3490         Mpi2RequestDescriptorUnion_t descriptor;
3491         u64 *request = (u64 *)&descriptor;
3492         void *mpi_req_iomem;
3493         __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3494
3495         _clone_sg_entries(ioc, (void *) mfp, smid);
3496         mpi_req_iomem = (void __force *)ioc->chip +
3497                         MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3498         _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3499                                         ioc->request_sz);
3500         descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3501         descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
3502         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3503         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3504         descriptor.SCSIIO.LMID = 0;
3505         _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3506             &ioc->scsi_lookup_lock);
3507 }
3508
3509 /**
3510  * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3511  * @ioc: per adapter object
3512  * @smid: system request message index
3513  * @handle: device handle
3514  */
3515 static void
3516 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3517 {
3518         Mpi2RequestDescriptorUnion_t descriptor;
3519         u64 *request = (u64 *)&descriptor;
3520
3521
3522         descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3523         descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
3524         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3525         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3526         descriptor.SCSIIO.LMID = 0;
3527         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3528             &ioc->scsi_lookup_lock);
3529 }
3530
3531 /**
3532  * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
3533  * @ioc: per adapter object
3534  * @smid: system request message index
3535  * @handle: device handle
3536  */
3537 void
3538 mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3539         u16 handle)
3540 {
3541         Mpi2RequestDescriptorUnion_t descriptor;
3542         u64 *request = (u64 *)&descriptor;
3543
3544         descriptor.SCSIIO.RequestFlags =
3545             MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3546         descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3547         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3548         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3549         descriptor.SCSIIO.LMID = 0;
3550         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3551             &ioc->scsi_lookup_lock);
3552 }
3553
3554 /**
3555  * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
3556  * @ioc: per adapter object
3557  * @smid: system request message index
3558  * @msix_task: same as the MSI-X index of the I/O in case of task abort, else 0.
3559  */
3560 void
3561 mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3562         u16 msix_task)
3563 {
3564         Mpi2RequestDescriptorUnion_t descriptor;
3565         void *mpi_req_iomem;
3566         u64 *request;
3567
3568         if (ioc->is_mcpu_endpoint) {
3569                 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3570
3571                 /* TBD 256 is offset within sys register. */
3572                 mpi_req_iomem = (void __force *)ioc->chip
3573                                         + MPI_FRAME_START_OFFSET
3574                                         + (smid * ioc->request_sz);
3575                 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3576                                                         ioc->request_sz);
3577         }
3578
3579         request = (u64 *)&descriptor;
3580
3581         descriptor.HighPriority.RequestFlags =
3582             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3583         descriptor.HighPriority.MSIxIndex =  msix_task;
3584         descriptor.HighPriority.SMID = cpu_to_le16(smid);
3585         descriptor.HighPriority.LMID = 0;
3586         descriptor.HighPriority.Reserved1 = 0;
3587         if (ioc->is_mcpu_endpoint)
3588                 _base_mpi_ep_writeq(*request,
3589                                 &ioc->chip->RequestDescriptorPostLow,
3590                                 &ioc->scsi_lookup_lock);
3591         else
3592                 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3593                     &ioc->scsi_lookup_lock);
3594 }
3595
3596 /**
3597  * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
3598  *  firmware
3599  * @ioc: per adapter object
3600  * @smid: system request message index
3601  */
3602 void
3603 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3604 {
3605         Mpi2RequestDescriptorUnion_t descriptor;
3606         u64 *request = (u64 *)&descriptor;
3607
3608         descriptor.Default.RequestFlags =
3609                 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3610         descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
3611         descriptor.Default.SMID = cpu_to_le16(smid);
3612         descriptor.Default.LMID = 0;
3613         descriptor.Default.DescriptorTypeDependent = 0;
3614         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3615             &ioc->scsi_lookup_lock);
3616 }
3617
3618 /**
3619  * mpt3sas_base_put_smid_default - Default, primarily used for config pages
3620  * @ioc: per adapter object
3621  * @smid: system request message index
3622  */
3623 void
3624 mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3625 {
3626         Mpi2RequestDescriptorUnion_t descriptor;
3627         void *mpi_req_iomem;
3628         u64 *request;
3629
3630         if (ioc->is_mcpu_endpoint) {
3631                 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3632
3633                 _clone_sg_entries(ioc, (void *) mfp, smid);
3634                 /* TBD 256 is offset within sys register */
3635                 mpi_req_iomem = (void __force *)ioc->chip +
3636                         MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3637                 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3638                                                         ioc->request_sz);
3639         }
3640         request = (u64 *)&descriptor;
3641         descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3642         descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
3643         descriptor.Default.SMID = cpu_to_le16(smid);
3644         descriptor.Default.LMID = 0;
3645         descriptor.Default.DescriptorTypeDependent = 0;
3646         if (ioc->is_mcpu_endpoint)
3647                 _base_mpi_ep_writeq(*request,
3648                                 &ioc->chip->RequestDescriptorPostLow,
3649                                 &ioc->scsi_lookup_lock);
3650         else
3651                 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3652                                 &ioc->scsi_lookup_lock);
3653 }
3654
3655 /**
3656  * _base_display_OEMs_branding - Display branding string
3657  * @ioc: per adapter object
3658  */
3659 static void
3660 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
3661 {
3665         switch (ioc->pdev->subsystem_vendor) {
3666         case PCI_VENDOR_ID_INTEL:
3667                 switch (ioc->pdev->device) {
3668                 case MPI2_MFGPAGE_DEVID_SAS2008:
3669                         switch (ioc->pdev->subsystem_device) {
3670                         case MPT2SAS_INTEL_RMS2LL080_SSDID:
3671                                 ioc_info(ioc, "%s\n",
3672                                          MPT2SAS_INTEL_RMS2LL080_BRANDING);
3673                                 break;
3674                         case MPT2SAS_INTEL_RMS2LL040_SSDID:
3675                                 ioc_info(ioc, "%s\n",
3676                                          MPT2SAS_INTEL_RMS2LL040_BRANDING);
3677                                 break;
3678                         case MPT2SAS_INTEL_SSD910_SSDID:
3679                                 ioc_info(ioc, "%s\n",
3680                                          MPT2SAS_INTEL_SSD910_BRANDING);
3681                                 break;
3682                         default:
3683                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3684                                          ioc->pdev->subsystem_device);
3685                                 break;
3686                         }
3687                         break;
3688                 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3689                         switch (ioc->pdev->subsystem_device) {
3690                         case MPT2SAS_INTEL_RS25GB008_SSDID:
3691                                 ioc_info(ioc, "%s\n",
3692                                          MPT2SAS_INTEL_RS25GB008_BRANDING);
3693                                 break;
3694                         case MPT2SAS_INTEL_RMS25JB080_SSDID:
3695                                 ioc_info(ioc, "%s\n",
3696                                          MPT2SAS_INTEL_RMS25JB080_BRANDING);
3697                                 break;
3698                         case MPT2SAS_INTEL_RMS25JB040_SSDID:
3699                                 ioc_info(ioc, "%s\n",
3700                                          MPT2SAS_INTEL_RMS25JB040_BRANDING);
3701                                 break;
3702                         case MPT2SAS_INTEL_RMS25KB080_SSDID:
3703                                 ioc_info(ioc, "%s\n",
3704                                          MPT2SAS_INTEL_RMS25KB080_BRANDING);
3705                                 break;
3706                         case MPT2SAS_INTEL_RMS25KB040_SSDID:
3707                                 ioc_info(ioc, "%s\n",
3708                                          MPT2SAS_INTEL_RMS25KB040_BRANDING);
3709                                 break;
3710                         case MPT2SAS_INTEL_RMS25LB040_SSDID:
3711                                 ioc_info(ioc, "%s\n",
3712                                          MPT2SAS_INTEL_RMS25LB040_BRANDING);
3713                                 break;
3714                         case MPT2SAS_INTEL_RMS25LB080_SSDID:
3715                                 ioc_info(ioc, "%s\n",
3716                                          MPT2SAS_INTEL_RMS25LB080_BRANDING);
3717                                 break;
3718                         default:
3719                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3720                                          ioc->pdev->subsystem_device);
3721                                 break;
3722                         }
3723                         break;
3724                 case MPI25_MFGPAGE_DEVID_SAS3008:
3725                         switch (ioc->pdev->subsystem_device) {
3726                         case MPT3SAS_INTEL_RMS3JC080_SSDID:
3727                                 ioc_info(ioc, "%s\n",
3728                                          MPT3SAS_INTEL_RMS3JC080_BRANDING);
3729                                 break;
3730
3731                         case MPT3SAS_INTEL_RS3GC008_SSDID:
3732                                 ioc_info(ioc, "%s\n",
3733                                          MPT3SAS_INTEL_RS3GC008_BRANDING);
3734                                 break;
3735                         case MPT3SAS_INTEL_RS3FC044_SSDID:
3736                                 ioc_info(ioc, "%s\n",
3737                                          MPT3SAS_INTEL_RS3FC044_BRANDING);
3738                                 break;
3739                         case MPT3SAS_INTEL_RS3UC080_SSDID:
3740                                 ioc_info(ioc, "%s\n",
3741                                          MPT3SAS_INTEL_RS3UC080_BRANDING);
3742                                 break;
3743                         default:
3744                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3745                                          ioc->pdev->subsystem_device);
3746                                 break;
3747                         }
3748                         break;
3749                 default:
3750                         ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3751                                  ioc->pdev->subsystem_device);
3752                         break;
3753                 }
3754                 break;
3755         case PCI_VENDOR_ID_DELL:
3756                 switch (ioc->pdev->device) {
3757                 case MPI2_MFGPAGE_DEVID_SAS2008:
3758                         switch (ioc->pdev->subsystem_device) {
3759                         case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
3760                                 ioc_info(ioc, "%s\n",
3761                                          MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
3762                                 break;
3763                         case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
3764                                 ioc_info(ioc, "%s\n",
3765                                          MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
3766                                 break;
3767                         case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
3768                                 ioc_info(ioc, "%s\n",
3769                                          MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
3770                                 break;
3771                         case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
3772                                 ioc_info(ioc, "%s\n",
3773                                          MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
3774                                 break;
3775                         case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
3776                                 ioc_info(ioc, "%s\n",
3777                                          MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
3778                                 break;
3779                         case MPT2SAS_DELL_PERC_H200_SSDID:
3780                                 ioc_info(ioc, "%s\n",
3781                                          MPT2SAS_DELL_PERC_H200_BRANDING);
3782                                 break;
3783                         case MPT2SAS_DELL_6GBPS_SAS_SSDID:
3784                                 ioc_info(ioc, "%s\n",
3785                                          MPT2SAS_DELL_6GBPS_SAS_BRANDING);
3786                                 break;
3787                         default:
3788                                 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
3789                                          ioc->pdev->subsystem_device);
3790                                 break;
3791                         }
3792                         break;
3793                 case MPI25_MFGPAGE_DEVID_SAS3008:
3794                         switch (ioc->pdev->subsystem_device) {
3795                         case MPT3SAS_DELL_12G_HBA_SSDID:
3796                                 ioc_info(ioc, "%s\n",
3797                                          MPT3SAS_DELL_12G_HBA_BRANDING);
3798                                 break;
3799                         default:
3800                                 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
3801                                          ioc->pdev->subsystem_device);
3802                                 break;
3803                         }
3804                         break;
3805                 default:
3806                         ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
3807                                  ioc->pdev->subsystem_device);
3808                         break;
3809                 }
3810                 break;
3811         case PCI_VENDOR_ID_CISCO:
3812                 switch (ioc->pdev->device) {
3813                 case MPI25_MFGPAGE_DEVID_SAS3008:
3814                         switch (ioc->pdev->subsystem_device) {
3815                         case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
3816                                 ioc_info(ioc, "%s\n",
3817                                          MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
3818                                 break;
3819                         case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
3820                                 ioc_info(ioc, "%s\n",
3821                                          MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
3822                                 break;
3823                         case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3824                                 ioc_info(ioc, "%s\n",
3825                                          MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3826                                 break;
3827                         default:
3828                                 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3829                                          ioc->pdev->subsystem_device);
3830                                 break;
3831                         }
3832                         break;
3833                 case MPI25_MFGPAGE_DEVID_SAS3108_1:
3834                         switch (ioc->pdev->subsystem_device) {
3835                         case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3836                                 ioc_info(ioc, "%s\n",
3837                                          MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3838                                 break;
3839                         case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
3840                                 ioc_info(ioc, "%s\n",
3841                                          MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
3842                                 break;
3843                         default:
3844                                 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3845                                          ioc->pdev->subsystem_device);
3846                                 break;
3847                         }
3848                         break;
3849                 default:
3850                         ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
3851                                  ioc->pdev->subsystem_device);
3852                         break;
3853                 }
3854                 break;
3855         case MPT2SAS_HP_3PAR_SSVID:
3856                 switch (ioc->pdev->device) {
3857                 case MPI2_MFGPAGE_DEVID_SAS2004:
3858                         switch (ioc->pdev->subsystem_device) {
3859                         case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
3860                                 ioc_info(ioc, "%s\n",
3861                                          MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
3862                                 break;
3863                         default:
3864                                 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3865                                          ioc->pdev->subsystem_device);
3866                                 break;
3867                         }
3868                         break;
3869                 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3870                         switch (ioc->pdev->subsystem_device) {
3871                         case MPT2SAS_HP_2_4_INTERNAL_SSDID:
3872                                 ioc_info(ioc, "%s\n",
3873                                          MPT2SAS_HP_2_4_INTERNAL_BRANDING);
3874                                 break;
3875                         case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
3876                                 ioc_info(ioc, "%s\n",
3877                                          MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
3878                                 break;
3879                         case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
3880                                 ioc_info(ioc, "%s\n",
3881                                          MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
3882                                 break;
3883                         case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
3884                                 ioc_info(ioc, "%s\n",
3885                                          MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
3886                                 break;
3887                         default:
3888                                 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3889                                          ioc->pdev->subsystem_device);
3890                                 break;
3891                         }
3892                         break;
3893                 default:
3894                         ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
3895                                  ioc->pdev->subsystem_device);
3896                         break;
3897                 }
3898         default:
3899                 break;
3900         }
3901 }
3902
3903 /**
3904  * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
3905  *                              version from FW Image Header.
3906  * @ioc: per adapter object
3907  *
3908  * Return: 0 for success, non-zero for failure.
3909  */
3910 static int
3911 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
3912 {
3913         Mpi2FWImageHeader_t *FWImgHdr;
3914         Mpi25FWUploadRequest_t *mpi_request;
3915         Mpi2FWUploadReply_t mpi_reply;
3916         int r = 0;
3917         void *fwpkg_data = NULL;
3918         dma_addr_t fwpkg_data_dma;
3919         u16 smid, ioc_status;
3920         size_t data_length;
3921
3922         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3923
3924         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
3925                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
3926                 return -EAGAIN;
3927         }
3928
3929         data_length = sizeof(Mpi2FWImageHeader_t);
3930         fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
3931                         &fwpkg_data_dma, GFP_KERNEL);
3932         if (!fwpkg_data) {
3933                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
3934                         __FILE__, __LINE__, __func__);
3935                 return -ENOMEM;
3936         }
3937
3938         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3939         if (!smid) {
3940                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3941                 r = -EAGAIN;
3942                 goto out;
3943         }
3944
3945         ioc->base_cmds.status = MPT3_CMD_PENDING;
3946         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3947         ioc->base_cmds.smid = smid;
3948         memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
3949         mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
3950         mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
3951         mpi_request->ImageSize = cpu_to_le32(data_length);
3952         ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
3953                         data_length);
3954         init_completion(&ioc->base_cmds.done);
3955         mpt3sas_base_put_smid_default(ioc, smid);
3956         /* Wait for 15 seconds */
3957         wait_for_completion_timeout(&ioc->base_cmds.done,
3958                         FW_IMG_HDR_READ_TIMEOUT*HZ);
3959         ioc_info(ioc, "%s: complete\n", __func__);
3960         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3961                 ioc_err(ioc, "%s: timeout\n", __func__);
3962                 _debug_dump_mf(mpi_request,
3963                                 sizeof(Mpi25FWUploadRequest_t)/4);
3964                 r = -ETIME;
3965         } else {
3966                 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
3967                 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
3968                         memcpy(&mpi_reply, ioc->base_cmds.reply,
3969                                         sizeof(Mpi2FWUploadReply_t));
3970                         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3971                                                 MPI2_IOCSTATUS_MASK;
3972                         if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3973                                 FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
3974                                 if (FWImgHdr->PackageVersion.Word) {
3975                                         ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
3976                                                  FWImgHdr->PackageVersion.Struct.Major,
3977                                                  FWImgHdr->PackageVersion.Struct.Minor,
3978                                                  FWImgHdr->PackageVersion.Struct.Unit,
3979                                                  FWImgHdr->PackageVersion.Struct.Dev);
3980                                 }
3981                         } else {
3982                                 _debug_dump_mf(&mpi_reply,
3983                                                 sizeof(Mpi2FWUploadReply_t)/4);
3984                         }
3985                 }
3986         }
3987         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3988 out:
3989         if (fwpkg_data)
3990                 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
3991                                 fwpkg_data_dma);
3992         return r;
3993 }
3994
3995 /**
3996  * _base_display_ioc_capabilities - Display IOC's capabilities.
3997  * @ioc: per adapter object
3998  */
3999 static void
4000 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4001 {
4002         int i = 0;
4003         char desc[16];
4004         u32 iounit_pg1_flags;
4005         u32 bios_version;
4006
4007         bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4008         strncpy(desc, ioc->manu_pg0.ChipName, 16);
4009         ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4010                  desc,
4011                  (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4012                  (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4013                  (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4014                  ioc->facts.FWVersion.Word & 0x000000FF,
4015                  ioc->pdev->revision,
4016                  (bios_version & 0xFF000000) >> 24,
4017                  (bios_version & 0x00FF0000) >> 16,
4018                  (bios_version & 0x0000FF00) >> 8,
4019                  bios_version & 0x000000FF);
4020
4021         _base_display_OEMs_branding(ioc);
4022
4023         ioc_info(ioc, "Protocol=(");
4024
4025         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4026                 pr_cont("Initiator");
4027                 i++;
4028         }
4029
4030         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4031                 pr_cont("%sTarget", i ? "," : "");
4032                 i++;
4033         }
4034
4035         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4036                 pr_cont("%sNVMe", i ? "," : "");
4037                 i++;
4038         }
4039
4040         i = 0;
4041         pr_cont("), Capabilities=(");
4042
4043         if (!ioc->hide_ir_msg) {
4044                 if (ioc->facts.IOCCapabilities &
4045                     MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4046                         pr_cont("Raid");
4047                         i++;
4048                 }
4049         }
4050
4051         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4052                 pr_cont("%sTLR", i ? "," : "");
4053                 i++;
4054         }
4055
4056         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4057                 pr_cont("%sMulticast", i ? "," : "");
4058                 i++;
4059         }
4060
4061         if (ioc->facts.IOCCapabilities &
4062             MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4063                 pr_cont("%sBIDI Target", i ? "," : "");
4064                 i++;
4065         }
4066
4067         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4068                 pr_cont("%sEEDP", i ? "," : "");
4069                 i++;
4070         }
4071
4072         if (ioc->facts.IOCCapabilities &
4073             MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4074                 pr_cont("%sSnapshot Buffer", i ? "," : "");
4075                 i++;
4076         }
4077
4078         if (ioc->facts.IOCCapabilities &
4079             MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4080                 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4081                 i++;
4082         }
4083
4084         if (ioc->facts.IOCCapabilities &
4085             MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4086                 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4087                 i++;
4088         }
4089
4090         if (ioc->facts.IOCCapabilities &
4091             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4092                 pr_cont("%sTask Set Full", i ? "," : "");
4093                 i++;
4094         }
4095
4096         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4097         if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4098                 pr_cont("%sNCQ", i ? "," : "");
4099                 i++;
4100         }
4101
4102         pr_cont(")\n");
4103 }
4104
4105 /**
4106  * mpt3sas_base_update_missing_delay - change the missing delay timers
4107  * @ioc: per adapter object
4108  * @device_missing_delay: amount of time until a missing device is reported
4109  * @io_missing_delay: interval after which IO is returned for a missing device
4110  *
4111  * Both values are taken from the command line. This function programs the
4112  * device missing delay and the IO missing delay into SAS IO Unit Page 1;
4113  * it should be called at driver load time.
4114  */
4115 void
4116 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4117         u16 device_missing_delay, u8 io_missing_delay)
4118 {
4119         u16 dmd, dmd_new, dmd_original;
4120         u8 io_missing_delay_original;
4121         u16 sz;
4122         Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4123         Mpi2ConfigReply_t mpi_reply;
4124         u8 num_phys = 0;
4125         u16 ioc_status;
4126
4127         mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4128         if (!num_phys)
4129                 return;
4130
4131         sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4132             sizeof(Mpi2SasIOUnit1PhyData_t));
4133         sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4134         if (!sas_iounit_pg1) {
4135                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4136                         __FILE__, __LINE__, __func__);
4137                 goto out;
4138         }
4139         if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4140             sas_iounit_pg1, sz))) {
4141                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4142                         __FILE__, __LINE__, __func__);
4143                 goto out;
4144         }
4145         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4146             MPI2_IOCSTATUS_MASK;
4147         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4148                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4149                         __FILE__, __LINE__, __func__);
4150                 goto out;
4151         }
4152
4153         /* device missing delay */
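        /*
         * Example of the UNIT_16 encoding below (illustrative values):
         * a requested device_missing_delay of 300 seconds exceeds 0x7F,
         * so it is clamped to at most 0x7F0 (2032), divided by 16
         * (300 / 16 = 18) and stored with the UNIT_16 flag set, for an
         * effective delay of 18 * 16 = 288 seconds.
         */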
4154         dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4155         if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4156                 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4157         else
4158                 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4159         dmd_original = dmd;
4160         if (device_missing_delay > 0x7F) {
4161                 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4162                     device_missing_delay;
4163                 dmd = dmd / 16;
4164                 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4165         } else
4166                 dmd = device_missing_delay;
4167         sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4168
4169         /* io missing delay */
4170         io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4171         sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4172
4173         if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4174             sz)) {
4175                 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4176                         dmd_new = (dmd &
4177                             MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4178                 else
4179                         dmd_new =
4180                     dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4181                 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4182                          dmd_original, dmd_new);
4183                 ioc_info(ioc, "io_missing_delay: old(%d), new(%d)\n",
4184                          io_missing_delay_original,
4185                          io_missing_delay);
4186                 ioc->device_missing_delay = dmd_new;
4187                 ioc->io_missing_delay = io_missing_delay;
4188         }
4189
4190 out:
4191         kfree(sas_iounit_pg1);
4192 }
4193
4194 /**
4195  * _base_static_config_pages - static start of day config pages
4196  * @ioc: per adapter object
4197  */
4198 static void
4199 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4200 {
4201         Mpi2ConfigReply_t mpi_reply;
4202         u32 iounit_pg1_flags;
4203
4204         ioc->nvme_abort_timeout = 30;
4205         mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4206         if (ioc->ir_firmware)
4207                 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4208                     &ioc->manu_pg10);
4209
4210         /*
4211          * Ensure correct T10 PI operation if vendor left EEDPTagMode
4212          * flag unset in NVDATA.
4213          */
4214         mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4215         if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
4216                 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4217                     ioc->name);
4218                 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4219                 ioc->manu_pg11.EEDPTagMode |= 0x1;
4220                 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4221                     &ioc->manu_pg11);
4222         }
4223         if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4224                 ioc->tm_custom_handling = 1;
4225         else {
4226                 ioc->tm_custom_handling = 0;
4227                 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4228                         ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4229                 else if (ioc->manu_pg11.NVMeAbortTO >
4230                                         NVME_TASK_ABORT_MAX_TIMEOUT)
4231                         ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4232                 else
4233                         ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4234         }
4235
4236         mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4237         mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4238         mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4239         mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4240         mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4241         mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4242         _base_display_ioc_capabilities(ioc);
4243
4244         /*
4245          * Enable task_set_full handling in iounit_pg1 when the
4246          * facts capabilities indicate that it's supported.
4247          */
4248         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4249         if ((ioc->facts.IOCCapabilities &
4250             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4251                 iounit_pg1_flags &=
4252                     ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4253         else
4254                 iounit_pg1_flags |=
4255                     MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4256         ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4257         mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4258
4259         if (ioc->iounit_pg8.NumSensors)
4260                 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4261 }
4262
4263 /**
4264  * mpt3sas_free_enclosure_list - release memory
4265  * @ioc: per adapter object
4266  *
4267  * Free memory allocated during enclosure add.
4268  */
4269 void
4270 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4271 {
4272         struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4273
4274         /* Free enclosure list */
4275         list_for_each_entry_safe(enclosure_dev,
4276                         enclosure_dev_next, &ioc->enclosure_list, list) {
4277                 list_del(&enclosure_dev->list);
4278                 kfree(enclosure_dev);
4279         }
4280 }
4281
4282 /**
4283  * _base_release_memory_pools - release memory
4284  * @ioc: per adapter object
4285  *
4286  * Free memory allocated from _base_allocate_memory_pools.
4287  */
4288 static void
4289 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4290 {
4291         int i = 0;
4292         int j = 0;
4293         struct chain_tracker *ct;
4294         struct reply_post_struct *rps;
4295
4296         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4297
4298         if (ioc->request) {
4299                 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
4300                     ioc->request,  ioc->request_dma);
4301                 dexitprintk(ioc,
4302                             ioc_info(ioc, "request_pool(0x%p): free\n",
4303                                      ioc->request));
4304                 ioc->request = NULL;
4305         }
4306
4307         if (ioc->sense) {
4308                 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4309                 dma_pool_destroy(ioc->sense_dma_pool);
4310                 dexitprintk(ioc,
4311                             ioc_info(ioc, "sense_pool(0x%p): free\n",
4312                                      ioc->sense));
4313                 ioc->sense = NULL;
4314         }
4315
4316         if (ioc->reply) {
4317                 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4318                 dma_pool_destroy(ioc->reply_dma_pool);
4319                 dexitprintk(ioc,
4320                             ioc_info(ioc, "reply_pool(0x%p): free\n",
4321                                      ioc->reply));
4322                 ioc->reply = NULL;
4323         }
4324
4325         if (ioc->reply_free) {
4326                 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4327                     ioc->reply_free_dma);
4328                 dma_pool_destroy(ioc->reply_free_dma_pool);
4329                 dexitprintk(ioc,
4330                             ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4331                                      ioc->reply_free));
4332                 ioc->reply_free = NULL;
4333         }
4334
4335         if (ioc->reply_post) {
4336                 do {
4337                         rps = &ioc->reply_post[i];
4338                         if (rps->reply_post_free) {
4339                                 dma_pool_free(
4340                                     ioc->reply_post_free_dma_pool,
4341                                     rps->reply_post_free,
4342                                     rps->reply_post_free_dma);
4343                                 dexitprintk(ioc,
4344                                             ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
4345                                                      rps->reply_post_free));
4346                                 rps->reply_post_free = NULL;
4347                         }
4348                 } while (ioc->rdpq_array_enable &&
4349                            (++i < ioc->reply_queue_count));
4350                 if (ioc->reply_post_free_array &&
4351                         ioc->rdpq_array_enable) {
4352                         dma_pool_free(ioc->reply_post_free_array_dma_pool,
4353                                 ioc->reply_post_free_array,
4354                                 ioc->reply_post_free_array_dma);
4355                         ioc->reply_post_free_array = NULL;
4356                 }
4357                 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4358                 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4359                 kfree(ioc->reply_post);
4360         }
4361
4362         if (ioc->pcie_sgl_dma_pool) {
4363                 for (i = 0; i < ioc->scsiio_depth; i++) {
4364                         dma_pool_free(ioc->pcie_sgl_dma_pool,
4365                                         ioc->pcie_sg_lookup[i].pcie_sgl,
4366                                         ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4367                 }
4368                 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4370         }
4371
4372         if (ioc->config_page) {
4373                 dexitprintk(ioc,
4374                             ioc_info(ioc, "config_page(0x%p): free\n",
4375                                      ioc->config_page));
4376                 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
4377                     ioc->config_page, ioc->config_page_dma);
4378         }
4379
4380         kfree(ioc->hpr_lookup);
4381         kfree(ioc->internal_lookup);
4382         if (ioc->chain_lookup) {
4383                 for (i = 0; i < ioc->scsiio_depth; i++) {
4384                         for (j = ioc->chains_per_prp_buffer;
4385                             j < ioc->chains_needed_per_io; j++) {
4386                                 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4387                                 if (ct && ct->chain_buffer)
4388                                         dma_pool_free(ioc->chain_dma_pool,
4389                                                 ct->chain_buffer,
4390                                                 ct->chain_buffer_dma);
4391                         }
4392                         kfree(ioc->chain_lookup[i].chains_per_smid);
4393                 }
4394                 dma_pool_destroy(ioc->chain_dma_pool);
4395                 kfree(ioc->chain_lookup);
4396                 ioc->chain_lookup = NULL;
4397         }
4398 }
4399
4400 /**
4401  * is_MSB_are_same - checks whether all reply queues in a set have the
4402  *      same upper 32 bits in their base memory address.
4403  * @reply_pool_start_address: Base address of a reply queue set
4404  * @pool_sz: Size of a single Reply Descriptor Post Queue pool
4405  *
4406  * Return: 1 if the reply queues in a set have the same upper 32 bits in
4407  * their base memory address, else 0.
4408  */
4410 static int
4411 is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
4412 {
4413         long reply_pool_end_address;
4414
4415         reply_pool_end_address = reply_pool_start_address + pool_sz;
4416
4417         return upper_32_bits(reply_pool_start_address) ==
4418             upper_32_bits(reply_pool_end_address);
4419 }
4423
4424 /**
4425  * _base_allocate_memory_pools - allocate start of day memory pools
4426  * @ioc: per adapter object
4427  *
4428  * Return: 0 success, anything else error.
4429  */
4430 static int
4431 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4432 {
4433         struct mpt3sas_facts *facts;
4434         u16 max_sge_elements;
4435         u16 chains_needed_per_io;
4436         u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
4437         u32 retry_sz;
4438         u16 max_request_credit, nvme_blocks_needed;
4439         unsigned short sg_tablesize;
4440         u16 sge_size;
4441         int i, j;
4442         struct chain_tracker *ct;
4443
4444         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4445
4446
4447         retry_sz = 0;
4448         facts = &ioc->facts;
4449
4450         /* command line tunables for max sgl entries */
4451         if (max_sgl_entries != -1)
4452                 sg_tablesize = max_sgl_entries;
4453         else {
4454                 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
4455                         sg_tablesize = MPT2SAS_SG_DEPTH;
4456                 else
4457                         sg_tablesize = MPT3SAS_SG_DEPTH;
4458         }
4459
4460         /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
4461         if (reset_devices)
4462                 sg_tablesize = min_t(unsigned short, sg_tablesize,
4463                    MPT_KDUMP_MIN_PHYS_SEGMENTS);
4464
4465         if (ioc->is_mcpu_endpoint)
4466                 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4467         else {
4468                 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
4469                         sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4470                 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
4471                         sg_tablesize = min_t(unsigned short, sg_tablesize,
4472                                         SG_MAX_SEGMENTS);
4473                         ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
4474                                  sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
4475                 }
4476                 ioc->shost->sg_tablesize = sg_tablesize;
4477         }
4478
4479         ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
4480                 (facts->RequestCredit / 4));
4481         if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
4482                 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
4483                                 INTERNAL_SCSIIO_CMDS_COUNT)) {
4484                         ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
4485                                 facts->RequestCredit);
4486                         return -ENOMEM;
4487                 }
4488                 ioc->internal_depth = 10;
4489         }
4490
4491         ioc->hi_priority_depth = ioc->internal_depth - (5);
4492         /* command line tunables for max controller queue depth */
4493         if (max_queue_depth != -1 && max_queue_depth != 0) {
4494                 max_request_credit = min_t(u16, max_queue_depth +
4495                         ioc->internal_depth, facts->RequestCredit);
4496                 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
4497                         max_request_credit =  MAX_HBA_QUEUE_DEPTH;
4498         } else if (reset_devices)
4499                 max_request_credit = min_t(u16, facts->RequestCredit,
4500                     (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
4501         else
4502                 max_request_credit = min_t(u16, facts->RequestCredit,
4503                     MAX_HBA_QUEUE_DEPTH);
4504
4505         /* Firmware maintains an additional facts->HighPriorityCredit number of
4506          * credits for HiPriority Request messages, so the hba queue depth is the
4507          * sum of max_request_credit and the high priority queue depth.
4508          */
4509         ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
4510
4511         /* request frame size */
4512         ioc->request_sz = facts->IOCRequestFrameSize * 4;
4513
4514         /* reply frame size */
4515         ioc->reply_sz = facts->ReplyFrameSize * 4;
4516
4517         /* chain segment size */
4518         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4519                 if (facts->IOCMaxChainSegmentSize)
4520                         ioc->chain_segment_sz =
4521                                         facts->IOCMaxChainSegmentSize *
4522                                         MAX_CHAIN_ELEMT_SZ;
4523                 else
4524                         /* set to 128 bytes if IOCMaxChainSegmentSize is zero */
4525                         ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
4526                                                     MAX_CHAIN_ELEMT_SZ;
4527         } else
4528                 ioc->chain_segment_sz = ioc->request_sz;
4529
4530         /* calculate the max scatter element size */
4531         sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
4532
4533  retry_allocation:
4534         total_sz = 0;
4535         /* calculate number of sg elements left over in the 1st frame */
4536         max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
4537             sizeof(Mpi2SGEIOUnion_t)) + sge_size);
4538         ioc->max_sges_in_main_message = max_sge_elements/sge_size;
4539
4540         /* now do the same for a chain buffer */
4541         max_sge_elements = ioc->chain_segment_sz - sge_size;
4542         ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
4543
4544         /*
4545          *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
4546          */
4547         chains_needed_per_io = ((ioc->shost->sg_tablesize -
4548            ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
4549             + 1;
4550         if (chains_needed_per_io > facts->MaxChainDepth) {
4551                 chains_needed_per_io = facts->MaxChainDepth;
4552                 ioc->shost->sg_tablesize = min_t(u16,
4553                 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
4554                 * chains_needed_per_io), ioc->shost->sg_tablesize);
4555         }
4556         ioc->chains_needed_per_io = chains_needed_per_io;
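        /*
         * Illustrative numbers only (real values depend on the IOC and the
         * request frame size): with sg_tablesize = 128, 19 SGEs fitting in
         * the main message and 9 SGEs per chain segment, each I/O needs
         * ((128 - 19) / 9) + 1 = 13 chain buffers.
         */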
4557
4558         /* reply free queue sizing - taking into account 64 FW events */
4559         ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4560
4561         /* mCPU endpoints manage a single counter for simplicity */
4562         if (ioc->is_mcpu_endpoint)
4563                 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
4564         else {
4565                 /* calculate reply descriptor post queue depth */
4566                 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
4567                         ioc->reply_free_queue_depth +  1;
4568                 /* align the reply post queue on the next 16 count boundary */
4569                 if (ioc->reply_post_queue_depth % 16)
4570                         ioc->reply_post_queue_depth += 16 -
4571                                 (ioc->reply_post_queue_depth % 16);
4572         }
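        /*
         * Worked example with an assumed hba_queue_depth of 1024:
         * reply_free_queue_depth = 1088, so the raw post queue depth is
         * 1024 + 1088 + 1 = 2113, rounded up to the next multiple of 16,
         * i.e. 2128.
         */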
4573
4574         if (ioc->reply_post_queue_depth >
4575             facts->MaxReplyDescriptorPostQueueDepth) {
4576                 ioc->reply_post_queue_depth =
4577                                 facts->MaxReplyDescriptorPostQueueDepth -
4578                     (facts->MaxReplyDescriptorPostQueueDepth % 16);
4579                 ioc->hba_queue_depth =
4580                                 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
4581                 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4582         }
4583
4584         dinitprintk(ioc,
4585                     ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
4586                              ioc->max_sges_in_main_message,
4587                              ioc->max_sges_in_chain_message,
4588                              ioc->shost->sg_tablesize,
4589                              ioc->chains_needed_per_io));
4590
4591         /* reply post queue, 16 byte align */
4592         reply_post_free_sz = ioc->reply_post_queue_depth *
4593             sizeof(Mpi2DefaultReplyDescriptor_t);
4594
4595         sz = reply_post_free_sz;
4596         if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
4597                 sz *= ioc->reply_queue_count;
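        /*
         * Each reply descriptor is 8 bytes, so a post queue depth of e.g.
         * 2128 needs 2128 * 8 = 17024 bytes per queue; without RDPQ, sz is
         * further multiplied above by the number of reply queues.
         */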
4598
4599         ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
4600             (ioc->reply_queue_count):1,
4601             sizeof(struct reply_post_struct), GFP_KERNEL);
4602
4603         if (!ioc->reply_post) {
4604                 ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
4605                 goto out;
4606         }
4607         ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
4608             &ioc->pdev->dev, sz, 16, 0);
4609         if (!ioc->reply_post_free_dma_pool) {
4610                 ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
4611                 goto out;
4612         }
4613         i = 0;
4614         do {
4615                 ioc->reply_post[i].reply_post_free =
4616                     dma_pool_zalloc(ioc->reply_post_free_dma_pool,
4617                     GFP_KERNEL,
4618                     &ioc->reply_post[i].reply_post_free_dma);
4619                 if (!ioc->reply_post[i].reply_post_free) {
4620                         ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
4621                         goto out;
4622                 }
4623                 dinitprintk(ioc,
4624                             ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4625                                      ioc->reply_post[i].reply_post_free,
4626                                      ioc->reply_post_queue_depth,
4627                                      8, sz / 1024));
4628                 dinitprintk(ioc,
4629                             ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
4630                                      (u64)ioc->reply_post[i].reply_post_free_dma));
4631                 total_sz += sz;
4632         } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
4633
4634         if (ioc->dma_mask == 64) {
4635                 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
4636                         ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
4637                                  pci_name(ioc->pdev));
4638                         goto out;
4639                 }
4640         }
4641
4642         ioc->scsiio_depth = ioc->hba_queue_depth -
4643             ioc->hi_priority_depth - ioc->internal_depth;
4644
4645         /* set the scsi host can_queue depth, reserving room for internal
4646          * commands that could be outstanding
4647          */
4648         ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
4649         dinitprintk(ioc,
4650                     ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
4651                              ioc->shost->can_queue));
4652
4653
4654         /* contiguous pool for request and chains, 16 byte align, one extra
4655          * frame for smid=0
4656          */
4657         ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
4658         sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
4659
4660         /* hi-priority queue */
4661         sz += (ioc->hi_priority_depth * ioc->request_sz);
4662
4663         /* internal queue */
4664         sz += (ioc->internal_depth * ioc->request_sz);
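        /*
         * Rough sizing example under assumed values (128-byte request
         * frames, scsiio_depth = 500, internal_depth = 13 and thus
         * hi_priority_depth = 8): sz = (501 + 8 + 13) * 128 = 66816 bytes,
         * allocated below as one contiguous DMA-coherent region.
         */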
4665
4666         ioc->request_dma_sz = sz;
4667         ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
4668                         &ioc->request_dma, GFP_KERNEL);
4669         if (!ioc->request) {
4670                 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
4671                         ioc->hba_queue_depth, ioc->chains_needed_per_io,
4672                         ioc->request_sz, sz / 1024);
4673                 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
4674                         goto out;
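                /* otherwise shrink the queue depth by 64 and retry the
                 * contiguous allocation
                 */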
4675                 retry_sz = 64;
4676                 ioc->hba_queue_depth -= retry_sz;
4677                 _base_release_memory_pools(ioc);
4678                 goto retry_allocation;
4679         }
4680
4681         if (retry_sz)
4682                 ioc_err(ioc, "request pool: dma_alloc_coherent succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
4683                         ioc->hba_queue_depth, ioc->chains_needed_per_io,
4684                         ioc->request_sz, sz / 1024);
4685
4686         /* hi-priority queue */
4687         ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
4688             ioc->request_sz);
4689         ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
4690             ioc->request_sz);
4691
4692         /* internal queue */
4693         ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
4694             ioc->request_sz);
4695         ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
4696             ioc->request_sz);
4697
4698         dinitprintk(ioc,
4699                     ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4700                              ioc->request, ioc->hba_queue_depth,
4701                              ioc->request_sz,
4702                              (ioc->hba_queue_depth * ioc->request_sz) / 1024));
4703
4704         dinitprintk(ioc,
4705                     ioc_info(ioc, "request pool: dma(0x%llx)\n",
4706                              (unsigned long long)ioc->request_dma));
4707         total_sz += sz;
4708
4709         dinitprintk(ioc,
4710                     ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
4711                              ioc->request, ioc->scsiio_depth));
4712
4713         ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
4714         sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
4715         ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
4716         if (!ioc->chain_lookup) {
4717                 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
4718                 goto out;
4719         }
4720
4721         sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
4722         for (i = 0; i < ioc->scsiio_depth; i++) {
4723                 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
4724                 if (!ioc->chain_lookup[i].chains_per_smid) {
4725                         ioc_err(ioc, "chain_lookup: kzalloc failed\n");
4726                         goto out;
4727                 }
4728         }
4729
4730         /* initialize hi-priority queue smid's */
4731         ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
4732             sizeof(struct request_tracker), GFP_KERNEL);
4733         if (!ioc->hpr_lookup) {
4734                 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
4735                 goto out;
4736         }
4737         ioc->hi_priority_smid = ioc->scsiio_depth + 1;
4738         dinitprintk(ioc,
4739                     ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
4740                              ioc->hi_priority,
4741                              ioc->hi_priority_depth, ioc->hi_priority_smid));
4742
4743         /* initialize internal queue smid's */
4744         ioc->internal_lookup = kcalloc(ioc->internal_depth,
4745             sizeof(struct request_tracker), GFP_KERNEL);
4746         if (!ioc->internal_lookup) {
4747                 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
4748                 goto out;
4749         }
4750         ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
4751         dinitprintk(ioc,
4752                     ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
4753                              ioc->internal,
4754                              ioc->internal_depth, ioc->internal_smid));
4755         /*
4756          * The number of NVMe page sized blocks needed is:
4757          *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
4758          * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
4759          * that is placed in the main message frame.  8 is the size of each PRP
4760          * entry or PRP list pointer entry.  8 is subtracted from page_size
4761          * because of the PRP list pointer entry at the end of a page, so this
4762          * is not counted as a PRP entry.  The 1 added page is a round up.
4763          *
4764          * To avoid allocation failures due to the amount of memory that could
4765          * be required for NVMe PRP's, only each set of NVMe blocks will be
4766          * contiguous, so a new set is allocated for each possible I/O.
4767          */
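        /*
         * Worked example, assuming sg_tablesize = 128 and a 4 KiB IOC page:
         * (((128 * 8) - 1) / (4096 - 8)) + 1 = (1023 / 4088) + 1 = 1, so a
         * single page-sized block per I/O covers all PRP entries.
         */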
4768         ioc->chains_per_prp_buffer = 0;
4769         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4770                 nvme_blocks_needed =
4771                         (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
4772                 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
4773                 nvme_blocks_needed++;
4774
4775                 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
4776                 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
4777                 if (!ioc->pcie_sg_lookup) {
4778                         ioc_err(ioc, "PCIe SGL lookup: kzalloc failed\n");
4779                         goto out;
4780                 }
4781                 sz = nvme_blocks_needed * ioc->page_size;
4782                 ioc->pcie_sgl_dma_pool =
4783                         dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
4784                 if (!ioc->pcie_sgl_dma_pool) {
4785                         ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
4786                         goto out;
4787                 }
4788
4789                 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
4790                 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
4791                                                 ioc->chains_needed_per_io);
4792
4793                 for (i = 0; i < ioc->scsiio_depth; i++) {
4794                         ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
4795                                 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
4796                                 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4797                         if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
4798                                 ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
4799                                 goto out;
4800                         }
4801                         for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
4802                                 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4803                                 ct->chain_buffer =
4804                                     ioc->pcie_sg_lookup[i].pcie_sgl +
4805                                     (j * ioc->chain_segment_sz);
4806                                 ct->chain_buffer_dma =
4807                                     ioc->pcie_sg_lookup[i].pcie_sgl_dma +
4808                                     (j * ioc->chain_segment_sz);
4809                         }
4810                 }
4811
4812                 dinitprintk(ioc,
4813                             ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
4814                                      ioc->scsiio_depth, sz,
4815                                      (sz * ioc->scsiio_depth) / 1024));
4816                 dinitprintk(ioc,
4817                             ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
4818                                      ioc->chains_per_prp_buffer));
4819                 total_sz += sz * ioc->scsiio_depth;
4820         }
4821
4822         ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
4823             ioc->chain_segment_sz, 16, 0);
4824         if (!ioc->chain_dma_pool) {
4825                 ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
4826                 goto out;
4827         }
4828         for (i = 0; i < ioc->scsiio_depth; i++) {
4829                 for (j = ioc->chains_per_prp_buffer;
4830                                 j < ioc->chains_needed_per_io; j++) {
4831                         ct = &ioc->chain_lookup[i].chains_per_smid[j];
4832                         ct->chain_buffer = dma_pool_alloc(
4833                                         ioc->chain_dma_pool, GFP_KERNEL,
4834                                         &ct->chain_buffer_dma);
4835                         if (!ct->chain_buffer) {
4836                                 ioc_err(ioc, "chain_lookup: dma_pool_alloc failed\n");
4837                                 _base_release_memory_pools(ioc);
4838                                 goto out;
4839                         }
4840                 }
4841                 total_sz += ioc->chain_segment_sz;
4842         }
4843
4844         dinitprintk(ioc,
4845                     ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
4846                              ioc->chain_depth, ioc->chain_segment_sz,
4847                              (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
4848
4849         /* sense buffers, 4 byte align */
4850         sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
4851         ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4852                                               4, 0);
4853         if (!ioc->sense_dma_pool) {
4854                 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
4855                 goto out;
4856         }
4857         ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4858             &ioc->sense_dma);
4859         if (!ioc->sense) {
4860                 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
4861                 goto out;
4862         }
4863         /* The sense buffer pool must not cross a 4 GB boundary; that is,
4864          * the start and end DMA addresses must share the same upper 32
4865          * bits. is_MSB_are_same() checks this. On failure, the pool is
4866          * destroyed and recreated with a power-of-two alignment of at
4867          * least the pool size, which guarantees that any successful
4868          * allocation stays within a single 4 GB region.
4869          */
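        /*
         * Example with assumed values scsiio_depth = 500 and a 96-byte
         * sense buffer: sz = 48000 bytes and roundup_pow_of_two(48000) =
         * 65536, and a 48000-byte allocation aligned to 64 KiB can never
         * straddle a 4 GB boundary.
         */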
4872         if (!is_MSB_are_same((long)ioc->sense, sz)) {
4873                 /* release the sense pool and reallocate */
4874                 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4875                 dma_pool_destroy(ioc->sense_dma_pool);
4876                 ioc->sense = NULL;
4877
4878                 ioc->sense_dma_pool =
4879                         dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4880                                                 roundup_pow_of_two(sz), 0);
4881                 if (!ioc->sense_dma_pool) {
4882                         ioc_err(ioc, "sense pool: dma_pool_create failed\n");
4883                         goto out;
4884                 }
4885                 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4886                                 &ioc->sense_dma);
4887                 if (!ioc->sense) {
4888                         ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
4889                         goto out;
4890                 }
4891         }
4892         dinitprintk(ioc,
4893                     ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4894                              ioc->sense, ioc->scsiio_depth,
4895                              SCSI_SENSE_BUFFERSIZE, sz / 1024));
4896         dinitprintk(ioc,
4897                     ioc_info(ioc, "sense_dma(0x%llx)\n",
4898                              (unsigned long long)ioc->sense_dma));
4899         total_sz += sz;
4900
4901         /* reply pool, 4 byte align */
4902         sz = ioc->reply_free_queue_depth * ioc->reply_sz;
4903         ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
4904                                               4, 0);
4905         if (!ioc->reply_dma_pool) {
4906                 ioc_err(ioc, "reply pool: dma_pool_create failed\n");
4907                 goto out;
4908         }
4909         ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
4910             &ioc->reply_dma);
4911         if (!ioc->reply) {
4912                 ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
4913                 goto out;
4914         }
4915         ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
4916         ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
4917         dinitprintk(ioc,
4918                     ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4919                              ioc->reply, ioc->reply_free_queue_depth,
4920                              ioc->reply_sz, sz / 1024));
4921         dinitprintk(ioc,
4922                     ioc_info(ioc, "reply_dma(0x%llx)\n",
4923                              (unsigned long long)ioc->reply_dma));
4924         total_sz += sz;
4925
4926         /* reply free queue, 16 byte align */
4927         sz = ioc->reply_free_queue_depth * 4;
4928         ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
4929             &ioc->pdev->dev, sz, 16, 0);
4930         if (!ioc->reply_free_dma_pool) {
4931                 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
4932                 goto out;
4933         }
4934         ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
4935             &ioc->reply_free_dma);
4936         if (!ioc->reply_free) {
4937                 ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
4938                 goto out;
4939         }
4940         dinitprintk(ioc,
4941                     ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4942                              ioc->reply_free, ioc->reply_free_queue_depth,
4943                              4, sz / 1024));
4944         dinitprintk(ioc,
4945                     ioc_info(ioc, "reply_free_dma (0x%llx)\n",
4946                              (unsigned long long)ioc->reply_free_dma));
4947         total_sz += sz;
4948
4949         if (ioc->rdpq_array_enable) {
4950                 reply_post_free_array_sz = ioc->reply_queue_count *
4951                     sizeof(Mpi2IOCInitRDPQArrayEntry);
4952                 ioc->reply_post_free_array_dma_pool =
4953                     dma_pool_create("reply_post_free_array pool",
4954                     &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
4955                 if (!ioc->reply_post_free_array_dma_pool) {
4956                         dinitprintk(ioc,
4957                                     ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
4958                         goto out;
4959                 }
4960                 ioc->reply_post_free_array =
4961                     dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
4962                     GFP_KERNEL, &ioc->reply_post_free_array_dma);
4963                 if (!ioc->reply_post_free_array) {
4964                         dinitprintk(ioc,
4965                                     ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
4966                         goto out;
4967                 }
4968         }
4969         ioc->config_page_sz = 512;
4970         ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
4971                         ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
4972         if (!ioc->config_page) {
4973                 ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
4974                 goto out;
4975         }
4976         dinitprintk(ioc,
4977                     ioc_info(ioc, "config page(0x%p): size(%d)\n",
4978                              ioc->config_page, ioc->config_page_sz));
4979         dinitprintk(ioc,
4980                     ioc_info(ioc, "config_page_dma(0x%llx)\n",
4981                              (unsigned long long)ioc->config_page_dma));
4982         total_sz += ioc->config_page_sz;
4983
4984         ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
4985                  total_sz / 1024);
4986         ioc_info(ioc, "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
4987                  ioc->shost->can_queue, facts->RequestCredit);
4988         ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
4989                  ioc->shost->sg_tablesize);
4990         return 0;
4991
4992  out:
4993         return -ENOMEM;
4994 }
4995
4996 /**
4997  * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
4998  * @ioc: Pointer to MPT3SAS_ADAPTER structure
4999  * @cooked: Request raw or cooked IOC state
5000  *
5001  * Return: all IOC Doorbell register bits if cooked==0, else just the
5002  * Doorbell bits in MPI2_IOC_STATE_MASK.
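 *
 * The cooked state is one of MPI2_IOC_STATE_RESET, MPI2_IOC_STATE_READY,
 * MPI2_IOC_STATE_OPERATIONAL or MPI2_IOC_STATE_FAULT.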
5003  */
5004 u32
5005 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
5006 {
5007         u32 s, sc;
5008
5009         s = ioc->base_readl(&ioc->chip->Doorbell);
5010         sc = s & MPI2_IOC_STATE_MASK;
5011         return cooked ? sc : s;
5012 }
5013
5014 /**
5015  * _base_wait_on_iocstate - waiting on a particular ioc state
5016  * @ioc: per adapter object
5017  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
5018  * @timeout: timeout in seconds
5019  *
5020  * Return: 0 for success, non-zero for failure.
5021  */
5022 static int
5023 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5024 {
5025         u32 count, cntdn;
5026         u32 current_state;
5027
5028         count = 0;
5029         cntdn = 1000 * timeout;
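        /* usleep_range() below sleeps ~1 ms per iteration, so cntdn
         * approximates "timeout" seconds
         */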
5030         do {
5031                 current_state = mpt3sas_base_get_iocstate(ioc, 1);
5032                 if (current_state == ioc_state)
5033                         return 0;
5034                 if (count && current_state == MPI2_IOC_STATE_FAULT)
5035                         break;
5036
5037                 usleep_range(1000, 1500);
5038                 count++;
5039         } while (--cntdn);
5040
5041         return current_state;
5042 }
5043
5044 static int
5045 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
5046
5047 /**
5048  * _base_wait_for_doorbell_int - wait for a doorbell interrupt from the IOC
5049  * @ioc: per adapter object
5050  * @timeout: timeout in seconds
5051  *
5052  * Return: 0 for success, non-zero for failure.
5053  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5054  */
5055
5056 static int
5057 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5058 {
5059         u32 cntdn, count;
5060         u32 int_status;
5061
5062         count = 0;
5063         cntdn = 1000 * timeout;
5064         do {
5065                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5066                 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5067                         dhsprintk(ioc,
5068                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5069                                            __func__, count, timeout));
5070                         return 0;
5071                 }
5072
5073                 usleep_range(1000, 1500);
5074                 count++;
5075         } while (--cntdn);
5076
5077         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5078                 __func__, count, int_status);
5079         return -EFAULT;
5080 }
5081
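/**
 * _base_spin_on_doorbell_int - busy-poll for a doorbell interrupt from the IOC
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Like _base_wait_for_doorbell_int(), but polls with udelay() rather than
 * sleeping, for use where sleeping is undesirable.
 */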
5082 static int
5083 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5084 {
5085         u32 cntdn, count;
5086         u32 int_status;
5087
5088         count = 0;
5089         cntdn = 2000 * timeout;
5090         do {
5091                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5092                 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5093                         dhsprintk(ioc,
5094                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5095                                            __func__, count, timeout));
5096                         return 0;
5097                 }
5098
5099                 udelay(500);
5100                 count++;
5101         } while (--cntdn);
5102
5103         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5104                 __func__, count, int_status);
5105         return -EFAULT;
5106 }
5108
5109 /**
5110  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
5111  * @ioc: per adapter object
5112  * @timeout: timeout in seconds
5113  *
5114  * Return: 0 for success, non-zero for failure.
5115  *
5116  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
5117  * doorbell.
5118  */
5119 static int
5120 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
5121 {
5122         u32 cntdn, count;
5123         u32 int_status;
5124         u32 doorbell;
5125
5126         count = 0;
5127         cntdn = 1000 * timeout;
5128         do {
5129                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5130                 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5131                         dhsprintk(ioc,
5132                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5133                                            __func__, count, timeout));
5134                         return 0;
5135                 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5136                         doorbell = ioc->base_readl(&ioc->chip->Doorbell);
5137                         if ((doorbell & MPI2_IOC_STATE_MASK) ==
5138                             MPI2_IOC_STATE_FAULT) {
5139                                 mpt3sas_base_fault_info(ioc, doorbell);
5140                                 return -EFAULT;
5141                         }
5142                 } else if (int_status == 0xFFFFFFFF)
5143                         goto out;
5144
5145                 usleep_range(1000, 1500);
5146                 count++;
5147         } while (--cntdn);
5148
5149  out:
5150         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5151                 __func__, count, int_status);
5152         return -EFAULT;
5153 }
5154
5155 /**
5156  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5157  * @ioc: per adapter object
5158  * @timeout: timeout in seconds
5159  *
5160  * Return: 0 for success, non-zero for failure.
5161  */
5162 static int
5163 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5164 {
5165         u32 cntdn, count;
5166         u32 doorbell_reg;
5167
5168         count = 0;
5169         cntdn = 1000 * timeout;
5170         do {
5171                 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5172                 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5173                         dhsprintk(ioc,
5174                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5175                                            __func__, count, timeout));
5176                         return 0;
5177                 }
5178
5179                 usleep_range(1000, 1500);
5180                 count++;
5181         } while (--cntdn);
5182
5183         ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5184                 __func__, count, doorbell_reg);
5185         return -EFAULT;
5186 }
5187
5188 /**
5189  * _base_send_ioc_reset - send doorbell reset
5190  * @ioc: per adapter object
5191  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
5192  * @timeout: timeout in seconds
5193  *
5194  * Return: 0 for success, non-zero for failure.
5195  */
5196 static int
5197 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5198 {
5199         u32 ioc_state;
5200         int r = 0;
5201
5202         if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5203                 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
5204                 return -EFAULT;
5205         }
5206
5207         if (!(ioc->facts.IOCCapabilities &
5208            MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5209                 return -EFAULT;
5210
5211         ioc_info(ioc, "sending message unit reset !!\n");
5212
5213         writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5214             &ioc->chip->Doorbell);
5215         if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5216                 r = -EFAULT;
5217                 goto out;
5218         }
5219         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5220         if (ioc_state) {
5221                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5222                         __func__, ioc_state);
5223                 r = -EFAULT;
5224                 goto out;
5225         }
5226  out:
5227         ioc_info(ioc, "message unit reset: %s\n",
5228                  r == 0 ? "SUCCESS" : "FAILED");
5229         return r;
5230 }
5231
5232 /**
5233  * mpt3sas_wait_for_ioc - wait for the IOC to become operational
5234  * @ioc: per adapter object
5235  * @timeout: timeout in seconds
5236  *
5237  * Waits up to @timeout seconds for the IOC to become operational.
5238  *
5239  * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
5240  */
5241 int
5243 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5244 {
5245         int wait_state_count = 0;
5246         u32 ioc_state;
5247
5248         do {
5249                 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5250                 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5251                         break;
5252                 ssleep(1);
5253                 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5254                                 __func__, ++wait_state_count);
5255         } while (--timeout);
5256         if (!timeout) {
5257                 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5258                 return -EFAULT;
5259         }
5260         if (wait_state_count)
5261                 ioc_info(ioc, "ioc is operational\n");
5262         return 0;
5263 }
5264
5265 /**
5266  * _base_handshake_req_reply_wait - send a request through the doorbell interface
5267  * @ioc: per adapter object
5268  * @request_bytes: request length
5269  * @request: pointer to the request payload
5270  * @reply_bytes: reply length
5271  * @reply: pointer to the reply payload
5272  * @timeout: timeout in seconds
5273  *
5274  * Return: 0 for success, non-zero for failure.
5275  */
5276 static int
5277 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5278         u32 *request, int reply_bytes, u16 *reply, int timeout)
5279 {
5280         MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5281         int i;
5282         u8 failed;
5283         __le32 *mfp;
5284
5285         /* make sure doorbell is not in use */
5286         if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5287                 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5288                 return -EFAULT;
5289         }
5290
5291         /* clear pending doorbell interrupts from previous state changes */
5292         if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5293             MPI2_HIS_IOC2SYS_DB_STATUS)
5294                 writel(0, &ioc->chip->HostInterruptStatus);
5295
5296         /* send message to ioc */
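        /*
         * The doorbell write encodes the MPI function in bits 31:24 and the
         * request length, in dwords, in bits 23:16 (per the
         * MPI2_DOORBELL_*_SHIFT definitions); e.g. a 12-dword request is
         * sent as (MPI2_FUNCTION_HANDSHAKE << 24) | (12 << 16).
         */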
5297         writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5298             ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5299             &ioc->chip->Doorbell);
5300
5301         if ((_base_spin_on_doorbell_int(ioc, 5))) {
5302                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5303                         __LINE__);
5304                 return -EFAULT;
5305         }
5306         writel(0, &ioc->chip->HostInterruptStatus);
5307
5308         if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5309                 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5310                         __LINE__);
5311                 return -EFAULT;
5312         }
5313
5314         /* send the message 32 bits at a time */
5315         for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5316                 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
5317                 if ((_base_wait_for_doorbell_ack(ioc, 5)))
5318                         failed = 1;
5319         }
5320
5321         if (failed) {
5322                 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5323                         __LINE__);
5324                 return -EFAULT;
5325         }
5326
5327         /* now wait for the reply */
5328         if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5329                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5330                         __LINE__);
5331                 return -EFAULT;
5332         }
5333
5334         /* read the first two 16-bit words; they give the total length of the reply */
5335         reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5336             & MPI2_DOORBELL_DATA_MASK);
5337         writel(0, &ioc->chip->HostInterruptStatus);
5338         if ((_base_wait_for_doorbell_int(ioc, 5))) {
5339                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5340                         __LINE__);
5341                 return -EFAULT;
5342         }
5343         reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5344             & MPI2_DOORBELL_DATA_MASK);
5345         writel(0, &ioc->chip->HostInterruptStatus);
5346
5347         for (i = 2; i < default_reply->MsgLength * 2; i++) {
5348                 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5349                         ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5350                                 __LINE__);
5351                         return -EFAULT;
5352                 }
5353                 if (i >= reply_bytes/2) /* overflow case */
5354                         ioc->base_readl(&ioc->chip->Doorbell);
5355                 else
5356                         reply[i] = le16_to_cpu(
5357                             ioc->base_readl(&ioc->chip->Doorbell)
5358                             & MPI2_DOORBELL_DATA_MASK);
5359                 writel(0, &ioc->chip->HostInterruptStatus);
5360         }
5361
5362         _base_wait_for_doorbell_int(ioc, 5);
5363         if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5364                 dhsprintk(ioc,
5365                           ioc_info(ioc, "doorbell is in use (line=%d)\n",
5366                                    __LINE__));
5367         }
5368         writel(0, &ioc->chip->HostInterruptStatus);
5369
5370         if (ioc->logging_level & MPT_DEBUG_INIT) {
5371                 mfp = (__le32 *)reply;
5372                 pr_info("\toffset:data\n");
5373                 for (i = 0; i < reply_bytes/4; i++)
5374                         pr_info("\t[0x%02x]:%08x\n", i*4,
5375                             le32_to_cpu(mfp[i]));
5376         }
5377         return 0;
5378 }
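
/*
 * Editorial sketch (not part of the driver): a minimal caller of
 * _base_handshake_req_reply_wait() above, exercising the doorbell
 * handshake with an IOC Facts request. Request and reply sizes are in
 * bytes and must be dword/word multiples, since the loops above walk
 * the payload 32 and 16 bits at a time. The "_example_" name is ours.
 */
static int __maybe_unused
_example_doorbell_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
{
        Mpi2IOCFactsRequest_t mpi_request;
        Mpi2IOCFactsReply_t mpi_reply;

        memset(&mpi_request, 0, sizeof(mpi_request));
        mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
        /* 5 second timeout, matching the real callers below */
        return _base_handshake_req_reply_wait(ioc, sizeof(mpi_request),
            (u32 *)&mpi_request, sizeof(mpi_reply), (u16 *)&mpi_reply, 5);
}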
5379
5380 /**
5381  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
5382  * @ioc: per adapter object
5383  * @mpi_reply: the reply payload from FW
5384  * @mpi_request: the request payload sent to FW
5385  *
5386  * The SAS IO Unit Control Request message allows the host to perform
5387  * low-level operations such as resets on the PHYs of the IO Unit. It also
5388  * allows the host to obtain the IOC-assigned device handle for a device,
5389  * given other identifying information about the device, and to remove IOC
5390  * resources associated with the device.
5391  *
5392  * Return: 0 for success, non-zero for failure.
5393  */
5394 int
5395 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5396         Mpi2SasIoUnitControlReply_t *mpi_reply,
5397         Mpi2SasIoUnitControlRequest_t *mpi_request)
5398 {
5399         u16 smid;
5400         u8 issue_reset = 0;
5401         int rc;
5402         void *request;
5403
5404         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5405
5406         mutex_lock(&ioc->base_cmds.mutex);
5407
5408         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
5409                 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
5410                 rc = -EAGAIN;
5411                 goto out;
5412         }
5413
5414         rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
5415         if (rc)
5416                 goto out;
5417
5418         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5419         if (!smid) {
5420                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5421                 rc = -EAGAIN;
5422                 goto out;
5423         }
5424
5425         rc = 0;
5426         ioc->base_cmds.status = MPT3_CMD_PENDING;
5427         request = mpt3sas_base_get_msg_frame(ioc, smid);
5428         ioc->base_cmds.smid = smid;
5429         memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
5430         if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5431             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
5432                 ioc->ioc_link_reset_in_progress = 1;
5433         init_completion(&ioc->base_cmds.done);
5434         mpt3sas_base_put_smid_default(ioc, smid);
5435         wait_for_completion_timeout(&ioc->base_cmds.done,
5436             msecs_to_jiffies(10000));
5437         if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5438             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
5439             ioc->ioc_link_reset_in_progress)
5440                 ioc->ioc_link_reset_in_progress = 0;
5441         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5442                 issue_reset =
5443                         mpt3sas_base_check_cmd_timeout(ioc,
5444                                 ioc->base_cmds.status, mpi_request,
5445                                 sizeof(Mpi2SasIoUnitControlRequest_t)/4);
5446                 goto issue_host_reset;
5447         }
5448         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5449                 memcpy(mpi_reply, ioc->base_cmds.reply,
5450                     sizeof(Mpi2SasIoUnitControlReply_t));
5451         else
5452                 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
5453         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5454         goto out;
5455
5456  issue_host_reset:
5457         if (issue_reset)
5458                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
5459         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5460         rc = -EFAULT;
5461  out:
5462         mutex_unlock(&ioc->base_cmds.mutex);
5463         return rc;
5464 }
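
/*
 * Editorial sketch (not part of the driver): issuing a PHY hard reset
 * through mpt3sas_base_sas_iounit_control() above, modeled on how the
 * transport layer drives this helper. "phy_num" and the "_example_"
 * name are ours.
 */
static int __maybe_unused
_example_phy_hard_reset(struct MPT3SAS_ADAPTER *ioc, u8 phy_num)
{
        Mpi2SasIoUnitControlRequest_t mpi_request;
        Mpi2SasIoUnitControlReply_t mpi_reply;

        memset(&mpi_request, 0, sizeof(mpi_request));
        mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
        mpi_request.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
        mpi_request.PhyNum = phy_num;
        return mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request);
}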
5465
5466 /**
5467  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
5468  * @ioc: per adapter object
5469  * @mpi_reply: the reply payload from FW
5470  * @mpi_request: the request payload sent to FW
5471  *
5472  * The SCSI Enclosure Processor request message causes the IOC to
5473  * communicate with SES devices to control LED status signals.
5474  *
5475  * Return: 0 for success, non-zero for failure.
5476  */
5477 int
5478 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
5479         Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
5480 {
5481         u16 smid;
5482         u8 issue_reset = 0;
5483         int rc;
5484         void *request;
5485
5486         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5487
5488         mutex_lock(&ioc->base_cmds.mutex);
5489
5490         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
5491                 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
5492                 rc = -EAGAIN;
5493                 goto out;
5494         }
5495
5496         rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
5497         if (rc)
5498                 goto out;
5499
5500         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5501         if (!smid) {
5502                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5503                 rc = -EAGAIN;
5504                 goto out;
5505         }
5506
5507         rc = 0;
5508         ioc->base_cmds.status = MPT3_CMD_PENDING;
5509         request = mpt3sas_base_get_msg_frame(ioc, smid);
5510         ioc->base_cmds.smid = smid;
5511         memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
5512         init_completion(&ioc->base_cmds.done);
5513         mpt3sas_base_put_smid_default(ioc, smid);
5514         wait_for_completion_timeout(&ioc->base_cmds.done,
5515             msecs_to_jiffies(10000));
5516         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5517                 issue_reset =
5518                         mpt3sas_base_check_cmd_timeout(ioc,
5519                                 ioc->base_cmds.status, mpi_request,
5520                                 sizeof(Mpi2SepRequest_t)/4);
5521                 goto issue_host_reset;
5522         }
5523         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5524                 memcpy(mpi_reply, ioc->base_cmds.reply,
5525                     sizeof(Mpi2SepReply_t));
5526         else
5527                 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
5528         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5529         goto out;
5530
5531  issue_host_reset:
5532         if (issue_reset)
5533                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
5534         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5535         rc = -EFAULT;
5536  out:
5537         mutex_unlock(&ioc->base_cmds.mutex);
5538         return rc;
5539 }
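
/*
 * Editorial sketch (not part of the driver): lighting the "predicted
 * fault" LED for a device handle through the helper above, following
 * the pattern used by the scsih module. The "_example_" name is ours.
 */
static int __maybe_unused
_example_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
        Mpi2SepRequest_t mpi_request;
        Mpi2SepReply_t mpi_reply;

        memset(&mpi_request, 0, sizeof(mpi_request));
        mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
        mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
        mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
        mpi_request.SlotStatus =
            cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
        mpi_request.DevHandle = cpu_to_le16(handle);
        return mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
            &mpi_request);
}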
5540
5541 /**
5542  * _base_get_port_facts - obtain port facts reply and save in ioc
5543  * @ioc: per adapter object
5544  * @port: port number (zero-based, up to ioc facts NumberOfPorts)
5545  *
5546  * Return: 0 for success, non-zero for failure.
5547  */
5548 static int
5549 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
5550 {
5551         Mpi2PortFactsRequest_t mpi_request;
5552         Mpi2PortFactsReply_t mpi_reply;
5553         struct mpt3sas_port_facts *pfacts;
5554         int mpi_reply_sz, mpi_request_sz, r;
5555
5556         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5557
5558         mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
5559         mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
5560         memset(&mpi_request, 0, mpi_request_sz);
5561         mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
5562         mpi_request.PortNumber = port;
5563         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
5564             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
5565
5566         if (r != 0) {
5567                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
5568                 return r;
5569         }
5570
5571         pfacts = &ioc->pfacts[port];
5572         memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
5573         pfacts->PortNumber = mpi_reply.PortNumber;
5574         pfacts->VP_ID = mpi_reply.VP_ID;
5575         pfacts->VF_ID = mpi_reply.VF_ID;
5576         pfacts->MaxPostedCmdBuffers =
5577             le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
5578
5579         return 0;
5580 }
5581
5582 /**
5583  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
5584  * @ioc: per adapter object
5585  * @timeout: timeout in seconds
5586  *
5587  * Return: 0 for success, non-zero for failure.
5588  */
5589 static int
5590 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
5591 {
5592         u32 ioc_state;
5593         int rc;
5594
5595         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5596
5597         if (ioc->pci_error_recovery) {
5598                 dfailprintk(ioc,
5599                             ioc_info(ioc, "%s: host in pci error recovery\n",
5600                                      __func__));
5601                 return -EFAULT;
5602         }
5603
5604         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5605         dhsprintk(ioc,
5606                   ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
5607                            __func__, ioc_state));
5608
5609         if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
5610             (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
5611                 return 0;
5612
5613         if (ioc_state & MPI2_DOORBELL_USED) {
5614                 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
5615                 goto issue_diag_reset;
5616         }
5617
5618         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
5619                 mpt3sas_base_fault_info(ioc, ioc_state &
5620                     MPI2_DOORBELL_DATA_MASK);
5621                 goto issue_diag_reset;
5622         }
5623
5624         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5625         if (ioc_state) {
5626                 dfailprintk(ioc,
5627                             ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5628                                      __func__, ioc_state));
5629                 return -EFAULT;
5630         }
5631         return 0;
5632  issue_diag_reset:
5633         rc = _base_diag_reset(ioc);
5634         return rc;
5635 }
5636
5637 /**
5638  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
5639  * @ioc: per adapter object
5640  *
5641  * Return: 0 for success, non-zero for failure.
5642  */
5643 static int
5644 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
5645 {
5646         Mpi2IOCFactsRequest_t mpi_request;
5647         Mpi2IOCFactsReply_t mpi_reply;
5648         struct mpt3sas_facts *facts;
5649         int mpi_reply_sz, mpi_request_sz, r;
5650
5651         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5652
5653         r = _base_wait_for_iocstate(ioc, 10);
5654         if (r) {
5655                 dfailprintk(ioc,
5656                             ioc_info(ioc, "%s: failed getting to correct state\n",
5657                                      __func__));
5658                 return r;
5659         }
5660         mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
5661         mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
5662         memset(&mpi_request, 0, mpi_request_sz);
5663         mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
5664         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
5665             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
5666
5667         if (r != 0) {
5668                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
5669                 return r;
5670         }
5671
5672         facts = &ioc->facts;
5673         memset(facts, 0, sizeof(struct mpt3sas_facts));
5674         facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
5675         facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
5676         facts->VP_ID = mpi_reply.VP_ID;
5677         facts->VF_ID = mpi_reply.VF_ID;
5678         facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
5679         facts->MaxChainDepth = mpi_reply.MaxChainDepth;
5680         facts->WhoInit = mpi_reply.WhoInit;
5681         facts->NumberOfPorts = mpi_reply.NumberOfPorts;
5682         facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
5683         if (ioc->msix_enable && (facts->MaxMSIxVectors <=
5684             MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
5685                 ioc->combined_reply_queue = 0;
5686         facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
5687         facts->MaxReplyDescriptorPostQueueDepth =
5688             le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
5689         facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
5690         facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
5691         if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
5692                 ioc->ir_firmware = 1;
5693         if ((facts->IOCCapabilities &
5694               MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
5695                 ioc->rdpq_array_capable = 1;
5696         facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
5697         facts->IOCRequestFrameSize =
5698             le16_to_cpu(mpi_reply.IOCRequestFrameSize);
5699         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5700                 facts->IOCMaxChainSegmentSize =
5701                         le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
5702         }
5703         facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
5704         facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
5705         ioc->shost->max_id = -1;
5706         facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
5707         facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
5708         facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
5709         facts->HighPriorityCredit =
5710             le16_to_cpu(mpi_reply.HighPriorityCredit);
5711         facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
5712         facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
5713         facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
5714
5715         /*
5716          * Get the Page Size from IOC Facts. If it's 0, default to 4k.
5717          */
5718         ioc->page_size = 1 << facts->CurrentHostPageSize;
5719         if (ioc->page_size == 1) {
5720                 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
5721                 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
5722         }
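        /* e.g. CurrentHostPageSize == 12 (MPT3SAS_HOST_PAGE_SIZE_4K) -> 1 << 12 == 4 KiB */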
5723         dinitprintk(ioc,
5724                     ioc_info(ioc, "CurrentHostPageSize(%d)\n",
5725                              facts->CurrentHostPageSize));
5726
5727         dinitprintk(ioc,
5728                     ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
5729                              facts->RequestCredit, facts->MaxChainDepth));
5730         dinitprintk(ioc,
5731                     ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
5732                              facts->IOCRequestFrameSize * 4,
5733                              facts->ReplyFrameSize * 4));
5734         return 0;
5735 }
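
/*
 * Editorial sketch (not part of the driver): testing an IOC Facts
 * capability bit after the call above, the same pattern used for
 * INTEGRATED_RAID and RDPQ_ARRAY_CAPABLE. The "_example_" name is ours.
 */
static bool __maybe_unused
_example_ioc_supports_eedp(struct MPT3SAS_ADAPTER *ioc)
{
        return !!(ioc->facts.IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_EEDP);
}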
5736
5737 /**
5738  * _base_send_ioc_init - send ioc_init to firmware
5739  * @ioc: per adapter object
5740  *
5741  * Return: 0 for success, non-zero for failure.
5742  */
5743 static int
5744 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
5745 {
5746         Mpi2IOCInitRequest_t mpi_request;
5747         Mpi2IOCInitReply_t mpi_reply;
5748         int i, r = 0;
5749         ktime_t current_time;
5750         u16 ioc_status;
5751         u32 reply_post_free_array_sz = 0;
5752
5753         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5754
5755         memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
5756         mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
5757         mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
5758         mpi_request.VF_ID = 0; /* TODO */
5759         mpi_request.VP_ID = 0;
5760         mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
5761         mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
5762         mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
5763
5764         if (_base_is_controller_msix_enabled(ioc))
5765                 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
5766         mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
5767         mpi_request.ReplyDescriptorPostQueueDepth =
5768             cpu_to_le16(ioc->reply_post_queue_depth);
5769         mpi_request.ReplyFreeQueueDepth =
5770             cpu_to_le16(ioc->reply_free_queue_depth);
5771
5772         mpi_request.SenseBufferAddressHigh =
5773             cpu_to_le32((u64)ioc->sense_dma >> 32);
5774         mpi_request.SystemReplyAddressHigh =
5775             cpu_to_le32((u64)ioc->reply_dma >> 32);
5776         mpi_request.SystemRequestFrameBaseAddress =
5777             cpu_to_le64((u64)ioc->request_dma);
5778         mpi_request.ReplyFreeQueueAddress =
5779             cpu_to_le64((u64)ioc->reply_free_dma);
5780
5781         if (ioc->rdpq_array_enable) {
5782                 reply_post_free_array_sz = ioc->reply_queue_count *
5783                     sizeof(Mpi2IOCInitRDPQArrayEntry);
5784                 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
5785                 for (i = 0; i < ioc->reply_queue_count; i++)
5786                         ioc->reply_post_free_array[i].RDPQBaseAddress =
5787                             cpu_to_le64(
5788                                 (u64)ioc->reply_post[i].reply_post_free_dma);
5789                 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
5790                 mpi_request.ReplyDescriptorPostQueueAddress =
5791                     cpu_to_le64((u64)ioc->reply_post_free_array_dma);
5792         } else {
5793                 mpi_request.ReplyDescriptorPostQueueAddress =
5794                     cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
5795         }
5796
5797         /* This timestamp specifies the number of milliseconds since
5798          * the epoch, i.e. midnight, January 1, 1970.
5799          */
5800         current_time = ktime_get_real();
5801         mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
5802
5803         if (ioc->logging_level & MPT_DEBUG_INIT) {
5804                 __le32 *mfp;
5805                 int i;
5806
5807                 mfp = (__le32 *)&mpi_request;
5808                 pr_info("\toffset:data\n");
5809                 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
5810                         pr_info("\t[0x%02x]:%08x\n", i*4,
5811                             le32_to_cpu(mfp[i]));
5812         }
5813
5814         r = _base_handshake_req_reply_wait(ioc,
5815             sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
5816             sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
5817
5818         if (r != 0) {
5819                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
5820                 return r;
5821         }
5822
5823         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5824         if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
5825             mpi_reply.IOCLogInfo) {
5826                 ioc_err(ioc, "%s: failed\n", __func__);
5827                 r = -EIO;
5828         }
5829
5830         return r;
5831 }
5832
5833 /**
5834  * mpt3sas_port_enable_done - command completion routine for port enable
5835  * @ioc: per adapter object
5836  * @smid: system request message index
5837  * @msix_index: MSIX table index supplied by the OS
5838  * @reply: reply message frame (lower 32-bit addr)
5839  *
5840  * Return: 1 meaning mf should be freed from _base_interrupt
5841  *          0 means the mf is freed from this function.
5842  */
5843 u8
5844 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
5845         u32 reply)
5846 {
5847         MPI2DefaultReply_t *mpi_reply;
5848         u16 ioc_status;
5849
5850         if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
5851                 return 1;
5852
5853         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5854         if (!mpi_reply)
5855                 return 1;
5856
5857         if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
5858                 return 1;
5859
5860         ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
5861         ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
5862         ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
5863         memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
5864         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5865         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5866                 ioc->port_enable_failed = 1;
5867
5868         if (ioc->is_driver_loading) {
5869                 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
5870                         mpt3sas_port_enable_complete(ioc);
5871                         return 1;
5872                 } else {
5873                         ioc->start_scan_failed = ioc_status;
5874                         ioc->start_scan = 0;
5875                         return 1;
5876                 }
5877         }
5878         complete(&ioc->port_enable_cmds.done);
5879         return 1;
5880 }
5881
5882 /**
5883  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
5884  * @ioc: per adapter object
5885  *
5886  * Return: 0 for success, non-zero for failure.
5887  */
5888 static int
5889 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
5890 {
5891         Mpi2PortEnableRequest_t *mpi_request;
5892         Mpi2PortEnableReply_t *mpi_reply;
5893         int r = 0;
5894         u16 smid;
5895         u16 ioc_status;
5896
5897         ioc_info(ioc, "sending port enable !!\n");
5898
5899         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5900                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
5901                 return -EAGAIN;
5902         }
5903
5904         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5905         if (!smid) {
5906                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5907                 return -EAGAIN;
5908         }
5909
5910         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5911         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5912         ioc->port_enable_cmds.smid = smid;
5913         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5914         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5915
5916         init_completion(&ioc->port_enable_cmds.done);
5917         mpt3sas_base_put_smid_default(ioc, smid);
5918         wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
5919         if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
5920                 ioc_err(ioc, "%s: timeout\n", __func__);
5921                 _debug_dump_mf(mpi_request,
5922                     sizeof(Mpi2PortEnableRequest_t)/4);
5923                 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
5924                         r = -EFAULT;
5925                 else
5926                         r = -ETIME;
5927                 goto out;
5928         }
5929
5930         mpi_reply = ioc->port_enable_cmds.reply;
5931         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5932         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5933                 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
5934                         __func__, ioc_status);
5935                 r = -EFAULT;
5936                 goto out;
5937         }
5938
5939  out:
5940         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5941         ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
5942         return r;
5943 }
5944
5945 /**
5946  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
5947  * @ioc: per adapter object
5948  *
5949  * Return: 0 for success, non-zero for failure.
5950  */
5951 int
5952 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
5953 {
5954         Mpi2PortEnableRequest_t *mpi_request;
5955         u16 smid;
5956
5957         ioc_info(ioc, "sending port enable !!\n");
5958
5959         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5960                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
5961                 return -EAGAIN;
5962         }
5963
5964         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5965         if (!smid) {
5966                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5967                 return -EAGAIN;
5968         }
5969
5970         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5971         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5972         ioc->port_enable_cmds.smid = smid;
5973         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5974         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5975
5976         mpt3sas_base_put_smid_default(ioc, smid);
5977         return 0;
5978 }
5979
5980 /**
5981  * _base_determine_wait_on_discovery - disposition
5982  * @ioc: per adapter object
5983  *
5984  * Decide whether to wait on discovery to complete. Used to either
5985  * locate boot device, or report volumes ahead of physical devices.
5986  *
5987  * Return: 1 for wait, 0 for don't wait.
5988  */
5989 static int
5990 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
5991 {
5992         /* We wait for discovery to complete if IR firmware is loaded.
5993          * The sas topology events arrive before PD events, so we need time to
5994          * turn on the bit in ioc->pd_handles to indicate a PD.
5995          * Also, it may be required to report Volumes ahead of physical
5996          * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
5997          */
5998         if (ioc->ir_firmware)
5999                 return 1;
6000
6001         /* if no Bios, then we don't need to wait */
6002         if (!ioc->bios_pg3.BiosVersion)
6003                 return 0;
6004
6005         /* The Bios is present, so we drop down here.
6006          *
6007          * If there are any entries in the Bios Page 2, then we wait
6008          * for discovery to complete.
6009          */
6010
6011         /* Current Boot Device */
6012         if ((ioc->bios_pg2.CurrentBootDeviceForm &
6013             MPI2_BIOSPAGE2_FORM_MASK) ==
6014             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6015         /* Request Boot Device */
6016            (ioc->bios_pg2.ReqBootDeviceForm &
6017             MPI2_BIOSPAGE2_FORM_MASK) ==
6018             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6019         /* Alternate Request Boot Device */
6020            (ioc->bios_pg2.ReqAltBootDeviceForm &
6021             MPI2_BIOSPAGE2_FORM_MASK) ==
6022             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
6023                 return 0;
6024
6025         return 1;
6026 }
6027
6028 /**
6029  * _base_unmask_events - turn on notification for this event
6030  * @ioc: per adapter object
6031  * @event: firmware event
6032  *
6033  * The mask is stored in ioc->event_masks.
6034  */
6035 static void
6036 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
6037 {
6038         u32 desired_event;
6039
6040         if (event >= 128)
6041                 return;
6042
6043         desired_event = (1 << (event % 32));
6044
6045         if (event < 32)
6046                 ioc->event_masks[0] &= ~desired_event;
6047         else if (event < 64)
6048                 ioc->event_masks[1] &= ~desired_event;
6049         else if (event < 96)
6050                 ioc->event_masks[2] &= ~desired_event;
6051         else if (event < 128)
6052                 ioc->event_masks[3] &= ~desired_event;
6053 }
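
/*
 * Editorial sketch (not part of the driver): reading the mask back. A
 * cleared bit in ioc->event_masks means notification for that event is
 * enabled; e.g. event 70 lands in word 70 / 32 == 2, bit 70 % 32 == 6.
 * The "_example_" name is ours.
 */
static bool __maybe_unused
_example_event_unmasked(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
        if (event >= 128)
                return false;
        return !(ioc->event_masks[event / 32] & (1U << (event % 32)));
}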
6054
6055 /**
6056  * _base_event_notification - send event notification
6057  * @ioc: per adapter object
6058  *
6059  * Return: 0 for success, non-zero for failure.
6060  */
6061 static int
6062 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
6063 {
6064         Mpi2EventNotificationRequest_t *mpi_request;
6065         u16 smid;
6066         int r = 0;
6067         int i;
6068
6069         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6070
6071         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6072                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6073                 return -EAGAIN;
6074         }
6075
6076         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6077         if (!smid) {
6078                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6079                 return -EAGAIN;
6080         }
6081         ioc->base_cmds.status = MPT3_CMD_PENDING;
6082         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6083         ioc->base_cmds.smid = smid;
6084         memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
6085         mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
6086         mpi_request->VF_ID = 0; /* TODO */
6087         mpi_request->VP_ID = 0;
6088         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6089                 mpi_request->EventMasks[i] =
6090                     cpu_to_le32(ioc->event_masks[i]);
6091         init_completion(&ioc->base_cmds.done);
6092         mpt3sas_base_put_smid_default(ioc, smid);
6093         wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
6094         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6095                 ioc_err(ioc, "%s: timeout\n", __func__);
6096                 _debug_dump_mf(mpi_request,
6097                     sizeof(Mpi2EventNotificationRequest_t)/4);
6098                 if (ioc->base_cmds.status & MPT3_CMD_RESET)
6099                         r = -EFAULT;
6100                 else
6101                         r = -ETIME;
6102         } else
6103                 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
6104         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6105         return r;
6106 }
6107
6108 /**
6109  * mpt3sas_base_validate_event_type - validating event types
6110  * @ioc: per adapter object
6111  * @event_type: firmware event
6112  *
6113  * This will turn on firmware event notification when an application
6114  * asks for that event. We don't mask events that are already enabled.
6115  */
6116 void
6117 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
6118 {
6119         int i, j;
6120         u32 event_mask, desired_event;
6121         u8 send_update_to_fw;
6122
6123         for (i = 0, send_update_to_fw = 0; i <
6124             MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
6125                 event_mask = ~event_type[i];
6126                 desired_event = 1;
6127                 for (j = 0; j < 32; j++) {
6128                         if (!(event_mask & desired_event) &&
6129                             (ioc->event_masks[i] & desired_event)) {
6130                                 ioc->event_masks[i] &= ~desired_event;
6131                                 send_update_to_fw = 1;
6132                         }
6133                         desired_event = (desired_event << 1);
6134                 }
6135         }
6136
6137         if (!send_update_to_fw)
6138                 return;
6139
6140         mutex_lock(&ioc->base_cmds.mutex);
6141         _base_event_notification(ioc);
6142         mutex_unlock(&ioc->base_cmds.mutex);
6143 }
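
/*
 * Editorial sketch (not part of the driver): a ctl-style caller asking
 * for one extra event. A set bit in event_type[] requests that event,
 * mirroring the "event_mask = ~event_type[i]" inversion above. The
 * "_example_" name is ours.
 */
static void __maybe_unused
_example_request_discovery_events(struct MPT3SAS_ADAPTER *ioc)
{
        u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS] = { 0 };

        event_type[MPI2_EVENT_SAS_DISCOVERY / 32] |=
            1U << (MPI2_EVENT_SAS_DISCOVERY % 32);
        mpt3sas_base_validate_event_type(ioc, event_type);
}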
6144
6145 /**
6146  * _base_diag_reset - the "big hammer" start of day reset
6147  * @ioc: per adapter object
6148  *
6149  * Return: 0 for success, non-zero for failure.
6150  */
6151 static int
6152 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6153 {
6154         u32 host_diagnostic;
6155         u32 ioc_state;
6156         u32 count;
6157         u32 hcb_size;
6158
6159         ioc_info(ioc, "sending diag reset !!\n");
6160
6161         drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
6162
6163         count = 0;
6164         do {
6165                 /* Write magic sequence to WriteSequence register
6166                  * Loop until in diagnostic mode
6167                  */
6168                 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
6169                 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6170                 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
6171                 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
6172                 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
6173                 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
6174                 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
6175                 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
6176
6177                 /* wait 100 msec */
6178                 msleep(100);
6179
6180                 if (count++ > 20)
6181                         goto out;
6182
6183                 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6184                 drsprintk(ioc,
6185                           ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6186                                    count, host_diagnostic));
6187
6188         } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
6189
6190         hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
6191
6192         drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
6193         writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
6194              &ioc->chip->HostDiagnostic);
6195
6196         /* This delay allows the chip PCIe hardware time to finish reset tasks */
6197         msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
6198
6199         /* Approximately 300 seconds max wait */
6200         for (count = 0; count < (300000000 /
6201                 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
6202
6203                 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6204
6205                 if (host_diagnostic == 0xFFFFFFFF)
6206                         goto out;
6207                 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
6208                         break;
6209
6210                 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
6211         }
6212
6213         if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
6214
6215                 drsprintk(ioc,
6216                           ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
6217                 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
6218                 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
6219                 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
6220
6221                 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
6222                 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
6223                     &ioc->chip->HCBSize);
6224         }
6225
6226         drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
6227         writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
6228             &ioc->chip->HostDiagnostic);
6229
6230         drsprintk(ioc,
6231                   ioc_info(ioc, "disable writes to the diagnostic register\n"));
6232         writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6233
6234         drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
6235         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
6236         if (ioc_state) {
6237                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6238                         __func__, ioc_state);
6239                 goto out;
6240         }
6241
6242         ioc_info(ioc, "diag reset: SUCCESS\n");
6243         return 0;
6244
6245  out:
6246         ioc_err(ioc, "diag reset: FAILED\n");
6247         return -EFAULT;
6248 }
6249
6250 /**
6251  * _base_make_ioc_ready - put controller in READY state
6252  * @ioc: per adapter object
6253  * @type: FORCE_BIG_HAMMER or SOFT_RESET
6254  *
6255  * Return: 0 for success, non-zero for failure.
6256  */
6257 static int
6258 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
6259 {
6260         u32 ioc_state;
6261         int rc;
6262         int count;
6263
6264         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6265
6266         if (ioc->pci_error_recovery)
6267                 return 0;
6268
6269         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6270         dhsprintk(ioc,
6271                   ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6272                            __func__, ioc_state));
6273
6274         /* if in RESET state, it should move to READY state shortly */
6275         count = 0;
6276         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
6277                 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
6278                     MPI2_IOC_STATE_READY) {
6279                         if (count++ == 10) {
6280                                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6281                                         __func__, ioc_state);
6282                                 return -EFAULT;
6283                         }
6284                         ssleep(1);
6285                         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6286                 }
6287         }
6288
6289         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
6290                 return 0;
6291
6292         if (ioc_state & MPI2_DOORBELL_USED) {
6293                 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6294                 goto issue_diag_reset;
6295         }
6296
6297         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6298                 mpt3sas_base_fault_info(ioc, ioc_state &
6299                     MPI2_DOORBELL_DATA_MASK);
6300                 goto issue_diag_reset;
6301         }
6302
6303         if (type == FORCE_BIG_HAMMER)
6304                 goto issue_diag_reset;
6305
6306         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6307                 if (!(_base_send_ioc_reset(ioc,
6308                     MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
6309                         return 0;
6310                 }
6311
6312  issue_diag_reset:
6313         rc = _base_diag_reset(ioc);
6314         return rc;
6315 }
6316
6317 /**
6318  * _base_make_ioc_operational - put controller in OPERATIONAL state
6319  * @ioc: per adapter object
6320  *
6321  * Return: 0 for success, non-zero for failure.
6322  */
6323 static int
6324 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6325 {
6326         int r, i, index;
6327         unsigned long   flags;
6328         u32 reply_address;
6329         u16 smid;
6330         struct _tr_list *delayed_tr, *delayed_tr_next;
6331         struct _sc_list *delayed_sc, *delayed_sc_next;
6332         struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
6333         u8 hide_flag;
6334         struct adapter_reply_queue *reply_q;
6335         Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
6336
6337         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6338
6339         /* clean the delayed target reset list */
6340         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6341             &ioc->delayed_tr_list, list) {
6342                 list_del(&delayed_tr->list);
6343                 kfree(delayed_tr);
6344         }
6345
6346
6347         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6348             &ioc->delayed_tr_volume_list, list) {
6349                 list_del(&delayed_tr->list);
6350                 kfree(delayed_tr);
6351         }
6352
6353         list_for_each_entry_safe(delayed_sc, delayed_sc_next,
6354             &ioc->delayed_sc_list, list) {
6355                 list_del(&delayed_sc->list);
6356                 kfree(delayed_sc);
6357         }
6358
6359         list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
6360             &ioc->delayed_event_ack_list, list) {
6361                 list_del(&delayed_event_ack->list);
6362                 kfree(delayed_event_ack);
6363         }
6364
6365         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
6366
6367         /* hi-priority queue */
6368         INIT_LIST_HEAD(&ioc->hpr_free_list);
6369         smid = ioc->hi_priority_smid;
6370         for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
6371                 ioc->hpr_lookup[i].cb_idx = 0xFF;
6372                 ioc->hpr_lookup[i].smid = smid;
6373                 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
6374                     &ioc->hpr_free_list);
6375         }
6376
6377         /* internal queue */
6378         INIT_LIST_HEAD(&ioc->internal_free_list);
6379         smid = ioc->internal_smid;
6380         for (i = 0; i < ioc->internal_depth; i++, smid++) {
6381                 ioc->internal_lookup[i].cb_idx = 0xFF;
6382                 ioc->internal_lookup[i].smid = smid;
6383                 list_add_tail(&ioc->internal_lookup[i].tracker_list,
6384                     &ioc->internal_free_list);
6385         }
6386
6387         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
6388
6389         /* initialize Reply Free Queue */
6390         for (i = 0, reply_address = (u32)ioc->reply_dma ;
6391             i < ioc->reply_free_queue_depth ; i++, reply_address +=
6392             ioc->reply_sz) {
6393                 ioc->reply_free[i] = cpu_to_le32(reply_address);
6394                 if (ioc->is_mcpu_endpoint)
6395                         _base_clone_reply_to_sys_mem(ioc,
6396                                         reply_address, i);
6397         }
6398
6399         /* initialize reply queues */
6400         if (ioc->is_driver_loading)
6401                 _base_assign_reply_queues(ioc);
6402
6403         /* initialize Reply Post Free Queue */
6404         index = 0;
6405         reply_post_free_contig = ioc->reply_post[0].reply_post_free;
6406         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
6407                 /*
6408                  * If RDPQ is enabled, switch to the next allocation.
6409                  * Otherwise advance within the contiguous region.
6410                  */
6411                 if (ioc->rdpq_array_enable) {
6412                         reply_q->reply_post_free =
6413                                 ioc->reply_post[index++].reply_post_free;
6414                 } else {
6415                         reply_q->reply_post_free = reply_post_free_contig;
6416                         reply_post_free_contig += ioc->reply_post_queue_depth;
6417                 }
6418
6419                 reply_q->reply_post_host_index = 0;
6420                 for (i = 0; i < ioc->reply_post_queue_depth; i++)
6421                         reply_q->reply_post_free[i].Words =
6422                             cpu_to_le64(ULLONG_MAX);
6423                 if (!_base_is_controller_msix_enabled(ioc))
6424                         goto skip_init_reply_post_free_queue;
6425         }
6426  skip_init_reply_post_free_queue:
6427
6428         r = _base_send_ioc_init(ioc);
6429         if (r)
6430                 return r;
6431
6432         /* initialize reply free host index */
6433         ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
6434         writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
6435
6436         /* initialize reply post host index */
6437         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
6438                 if (ioc->combined_reply_queue)
6439                         writel((reply_q->msix_index & 7)<<
6440                            MPI2_RPHI_MSIX_INDEX_SHIFT,
6441                            ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
6442                 else
6443                         writel(reply_q->msix_index <<
6444                                 MPI2_RPHI_MSIX_INDEX_SHIFT,
6445                                 &ioc->chip->ReplyPostHostIndex);
6446
6447                 if (!_base_is_controller_msix_enabled(ioc))
6448                         goto skip_init_reply_post_host_index;
6449         }
6450
6451  skip_init_reply_post_host_index:
6452
6453         _base_unmask_interrupts(ioc);
6454
6455         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6456                 r = _base_display_fwpkg_version(ioc);
6457                 if (r)
6458                         return r;
6459         }
6460
6461         _base_static_config_pages(ioc);
6462         r = _base_event_notification(ioc);
6463         if (r)
6464                 return r;
6465
6466         if (ioc->is_driver_loading) {
6467
6468                 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
6469                     == 0x80) {
6470                         hide_flag = (u8) (
6471                             le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
6472                             MFG_PAGE10_HIDE_SSDS_MASK);
6473                         if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
6474                                 ioc->mfg_pg10_hide_flag = hide_flag;
6475                 }
6476
6477                 ioc->wait_for_discovery_to_complete =
6478                     _base_determine_wait_on_discovery(ioc);
6479
6480                 return r; /* scan_start and scan_finished support */
6481         }
6482
6483         r = _base_send_port_enable(ioc);
6484         if (r)
6485                 return r;
6486
6487         return r;
6488 }
6489
6490 /**
6491  * mpt3sas_base_free_resources - free controller resources
6492  * @ioc: per adapter object
6493  */
6494 void
6495 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
6496 {
6497         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6498
6499         /* synchronize freeing resources with the pci_access_mutex lock */
6500         mutex_lock(&ioc->pci_access_mutex);
6501         if (ioc->chip_phys && ioc->chip) {
6502                 _base_mask_interrupts(ioc);
6503                 ioc->shost_recovery = 1;
6504                 _base_make_ioc_ready(ioc, SOFT_RESET);
6505                 ioc->shost_recovery = 0;
6506         }
6507
6508         mpt3sas_base_unmap_resources(ioc);
6509         mutex_unlock(&ioc->pci_access_mutex);
6510         return;
6511 }
6512
6513 /**
6514  * mpt3sas_base_attach - attach controller instance
6515  * @ioc: per adapter object
6516  *
6517  * Return: 0 for success, non-zero for failure.
6518  */
6519 int
6520 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6521 {
6522         int r, i;
6523         int cpu_id, last_cpu_id = 0;
6524
6525         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6526
6527         /* setup cpu_msix_table */
6528         ioc->cpu_count = num_online_cpus();
6529         for_each_online_cpu(cpu_id)
6530                 last_cpu_id = cpu_id;
6531         ioc->cpu_msix_table_sz = last_cpu_id + 1;
6532         ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
6533         ioc->reply_queue_count = 1;
6534         if (!ioc->cpu_msix_table) {
6535                 dfailprintk(ioc,
6536                             ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
6537                 r = -ENOMEM;
6538                 goto out_free_resources;
6539         }
6540
6541         if (ioc->is_warpdrive) {
6542                 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
6543                     sizeof(resource_size_t *), GFP_KERNEL);
6544                 if (!ioc->reply_post_host_index) {
6545                         dfailprintk(ioc,
6546                                     ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
6547                         r = -ENOMEM;
6548                         goto out_free_resources;
6549                 }
6550         }
6551
6552         ioc->rdpq_array_enable_assigned = 0;
6553         ioc->dma_mask = 0;
6554         if (ioc->is_aero_ioc)
6555                 ioc->base_readl = &_base_readl_aero;
6556         else
6557                 ioc->base_readl = &_base_readl;
6558         r = mpt3sas_base_map_resources(ioc);
6559         if (r)
6560                 goto out_free_resources;
6561
6562         pci_set_drvdata(ioc->pdev, ioc->shost);
6563         r = _base_get_ioc_facts(ioc);
6564         if (r)
6565                 goto out_free_resources;
6566
6567         switch (ioc->hba_mpi_version_belonged) {
6568         case MPI2_VERSION:
6569                 ioc->build_sg_scmd = &_base_build_sg_scmd;
6570                 ioc->build_sg = &_base_build_sg;
6571                 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
6572                 break;
6573         case MPI25_VERSION:
6574         case MPI26_VERSION:
6575                 /*
6576                  * In SAS3.0,
6577                  * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
6578                  * Target Status - all require the IEEE formated scatter gather
6579                  * elements.
6580                  */
6581                 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
6582                 ioc->build_sg = &_base_build_sg_ieee;
6583                 ioc->build_nvme_prp = &_base_build_nvme_prp;
6584                 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
6585                 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
6586
6587                 break;
6588         }
6589
6590         if (ioc->is_mcpu_endpoint)
6591                 ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
6592         else
6593                 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
6594
6595         /*
6596          * These function pointers are for other requests that don't
6597          * require the IEEE scatter gather elements.
6598          *
6599          * For example, Configuration Pages and SAS IOUNIT Control don't.
6600          */
6601         ioc->build_sg_mpi = &_base_build_sg;
6602         ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
6603
6604         r = _base_make_ioc_ready(ioc, SOFT_RESET);
6605         if (r)
6606                 goto out_free_resources;
6607
6608         ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
6609             sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
6610         if (!ioc->pfacts) {
6611                 r = -ENOMEM;
6612                 goto out_free_resources;
6613         }
6614
6615         for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
6616                 r = _base_get_port_facts(ioc, i);
6617                 if (r)
6618                         goto out_free_resources;
6619         }
6620
6621         r = _base_allocate_memory_pools(ioc);
6622         if (r)
6623                 goto out_free_resources;
6624
6625         if (irqpoll_weight > 0)
6626                 ioc->thresh_hold = irqpoll_weight;
6627         else
6628                 ioc->thresh_hold = ioc->hba_queue_depth/4;
6629
6630         _base_init_irqpolls(ioc);
6631         init_waitqueue_head(&ioc->reset_wq);
6632
6633         /* allocate memory pd handle bitmask list */
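        /* one bit per device handle, rounded up to whole bytes;
         * e.g. MaxDevHandle == 1020 -> 127 + 1 == 128 bytes
         */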
6634         ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
6635         if (ioc->facts.MaxDevHandle % 8)
6636                 ioc->pd_handles_sz++;
6637         ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
6638             GFP_KERNEL);
6639         if (!ioc->pd_handles) {
6640                 r = -ENOMEM;
6641                 goto out_free_resources;
6642         }
6643         ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
6644             GFP_KERNEL);
6645         if (!ioc->blocking_handles) {
6646                 r = -ENOMEM;
6647                 goto out_free_resources;
6648         }
6649
6650         /* allocate memory for pending OS device add list */
6651         ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
6652         if (ioc->facts.MaxDevHandle % 8)
6653                 ioc->pend_os_device_add_sz++;
6654         ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
6655             GFP_KERNEL);
6656         if (!ioc->pend_os_device_add)
6657                 goto out_free_resources;
6658
6659         ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
6660         ioc->device_remove_in_progress =
6661                 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
6662         if (!ioc->device_remove_in_progress)
6663                 goto out_free_resources;
6664
6665         ioc->fwfault_debug = mpt3sas_fwfault_debug;
6666
6667         /* base internal command bits */
6668         mutex_init(&ioc->base_cmds.mutex);
6669         ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6670         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6671
6672         /* port_enable command bits */
6673         ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6674         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6675
6676         /* transport internal command bits */
6677         ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6678         ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
6679         mutex_init(&ioc->transport_cmds.mutex);
6680
6681         /* scsih internal command bits */
6682         ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6683         ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
6684         mutex_init(&ioc->scsih_cmds.mutex);
6685
6686         /* task management internal command bits */
6687         ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6688         ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
6689         mutex_init(&ioc->tm_cmds.mutex);
6690
6691         /* config page internal command bits */
6692         ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6693         ioc->config_cmds.status = MPT3_CMD_NOT_USED;
6694         mutex_init(&ioc->config_cmds.mutex);
6695
6696         /* ctl module internal command bits */
6697         ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6698         ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
6699         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
6700         mutex_init(&ioc->ctl_cmds.mutex);
6701
6702         if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
6703             !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
6704             !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
6705             !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
6706                 r = -ENOMEM;
6707                 goto out_free_resources;
6708         }
6709
6710         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6711                 ioc->event_masks[i] = -1;
6712
6713         /* here we enable the events we care about */
6714         _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
6715         _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
6716         _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
6717         _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
6718         _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
6719         _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
6720         _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
6721         _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
6722         _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
6723         _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
6724         _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
6725         _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
6726         _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
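             /* Gen3.5 controllers additionally report PCIe (NVMe) device events */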
6727         if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
6728                 if (ioc->is_gen35_ioc) {
6729                         _base_unmask_events(ioc,
6730                                 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
6731                         _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
6732                         _base_unmask_events(ioc,
6733                                 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
6734                 }
6735         }
6736         r = _base_make_ioc_operational(ioc);
6737         if (r)
6738                 goto out_free_resources;
6739
6740         ioc->non_operational_loop = 0;
6741         ioc->got_task_abort_from_ioctl = 0;
6742         return 0;
6743
6744  out_free_resources:
6745
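             /* error unwind: flag the host for removal and free all allocations */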
6746         ioc->remove_host = 1;
6747
6748         mpt3sas_base_free_resources(ioc);
6749         _base_release_memory_pools(ioc);
6750         pci_set_drvdata(ioc->pdev, NULL);
6751         kfree(ioc->cpu_msix_table);
6752         if (ioc->is_warpdrive)
6753                 kfree(ioc->reply_post_host_index);
6754         kfree(ioc->pd_handles);
6755         kfree(ioc->blocking_handles);
6756         kfree(ioc->device_remove_in_progress);
6757         kfree(ioc->pend_os_device_add);
6758         kfree(ioc->tm_cmds.reply);
6759         kfree(ioc->transport_cmds.reply);
6760         kfree(ioc->scsih_cmds.reply);
6761         kfree(ioc->config_cmds.reply);
6762         kfree(ioc->base_cmds.reply);
6763         kfree(ioc->port_enable_cmds.reply);
6764         kfree(ioc->ctl_cmds.reply);
6765         kfree(ioc->ctl_cmds.sense);
6766         kfree(ioc->pfacts);
6767         ioc->ctl_cmds.reply = NULL;
6768         ioc->base_cmds.reply = NULL;
6769         ioc->tm_cmds.reply = NULL;
6770         ioc->scsih_cmds.reply = NULL;
6771         ioc->transport_cmds.reply = NULL;
6772         ioc->config_cmds.reply = NULL;
6773         ioc->pfacts = NULL;
6774         return r;
6775 }
6776
6777
6778 /**
6779  * mpt3sas_base_detach - remove controller instance
6780  * @ioc: per adapter object
6781  */
6782 void
6783 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
6784 {
6785         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6786
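             /* stop the fault-polling watchdog before tearing down resources */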
6787         mpt3sas_base_stop_watchdog(ioc);
6788         mpt3sas_base_free_resources(ioc);
6789         _base_release_memory_pools(ioc);
6790         mpt3sas_free_enclosure_list(ioc);
6791         pci_set_drvdata(ioc->pdev, NULL);
6792         kfree(ioc->cpu_msix_table);
6793         if (ioc->is_warpdrive)
6794                 kfree(ioc->reply_post_host_index);
6795         kfree(ioc->pd_handles);
6796         kfree(ioc->blocking_handles);
6797         kfree(ioc->device_remove_in_progress);
6798         kfree(ioc->pend_os_device_add);
6799         kfree(ioc->pfacts);
6800         kfree(ioc->ctl_cmds.reply);
6801         kfree(ioc->ctl_cmds.sense);
6802         kfree(ioc->base_cmds.reply);
6803         kfree(ioc->port_enable_cmds.reply);
6804         kfree(ioc->tm_cmds.reply);
6805         kfree(ioc->transport_cmds.reply);
6806         kfree(ioc->scsih_cmds.reply);
6807         kfree(ioc->config_cmds.reply);
6808 }
6809
6810 /**
6811  * _base_pre_reset_handler - pre reset handler
6812  * @ioc: per adapter object
6813  */
6814 static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
6815 {
6816         mpt3sas_scsih_pre_reset_handler(ioc);
6817         mpt3sas_ctl_pre_reset_handler(ioc);
6818         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
6819 }
6820
6821 /**
6822  * _base_after_reset_handler - after reset handler
6823  * @ioc: per adapter object
6824  */
6825 static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
6826 {
6827         mpt3sas_scsih_after_reset_handler(ioc);
6828         mpt3sas_ctl_after_reset_handler(ioc);
6829         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
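             /* complete, with a reset status, any internal commands still pending */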
6830         if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
6831                 ioc->transport_cmds.status |= MPT3_CMD_RESET;
6832                 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
6833                 complete(&ioc->transport_cmds.done);
6834         }
6835         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6836                 ioc->base_cmds.status |= MPT3_CMD_RESET;
6837                 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
6838                 complete(&ioc->base_cmds.done);
6839         }
6840         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6841                 ioc->port_enable_failed = 1;
6842                 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
6843                 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
6844                 if (ioc->is_driver_loading) {
6845                         ioc->start_scan_failed =
6846                                 MPI2_IOCSTATUS_INTERNAL_ERROR;
6847                         ioc->start_scan = 0;
6848                         ioc->port_enable_cmds.status =
6849                                 MPT3_CMD_NOT_USED;
6850                 } else {
6851                         complete(&ioc->port_enable_cmds.done);
6852                 }
6853         }
6854         if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
6855                 ioc->config_cmds.status |= MPT3_CMD_RESET;
6856                 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
6857                 ioc->config_cmds.smid = USHRT_MAX;
6858                 complete(&ioc->config_cmds.done);
6859         }
6860 }
6861
6862 /**
6863  * _base_reset_done_handler - reset done handler
6864  * @ioc: per adapter object
6865  */
6866 static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
6867 {
6868         mpt3sas_scsih_reset_done_handler(ioc);
6869         mpt3sas_ctl_reset_done_handler(ioc);
6870         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
6871 }
6872
6873 /**
6874  * mpt3sas_wait_for_commands_to_complete - wait for pending commands
6875  * @ioc: Pointer to MPT3SAS_ADAPTER structure
6876  *
6877  * This function waits up to 10 seconds for all pending commands to
6878  * complete before putting the controller into reset.
6879  */
6880 void
6881 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
6882 {
6883         u32 ioc_state;
6884
6885         ioc->pending_io_count = 0;
6886
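             /* no commands can be outstanding unless the IOC is OPERATIONAL */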
6887         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6888         if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
6889                 return;
6890
6891         /* pending command count */
6892         ioc->pending_io_count = scsi_host_busy(ioc->shost);
6893
6894         if (!ioc->pending_io_count)
6895                 return;
6896
6897         /* wait for pending commands to complete */
6898         wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
6899 }
6900
6901 /**
6902  * mpt3sas_base_hard_reset_handler - reset controller
6903  * @ioc: Pointer to MPT3SAS_ADAPTER structure
6904  * @type: FORCE_BIG_HAMMER or SOFT_RESET
6905  *
6906  * Return: 0 for success, non-zero for failure.
6907  */
6908 int
6909 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
6910         enum reset_type type)
6911 {
6912         int r;
6913         unsigned long flags;
6914         u32 ioc_state;
6915         u8 is_fault = 0, is_trigger = 0;
6916
6917         dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
6918
6919         if (ioc->pci_error_recovery) {
6920                 ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
6921                 r = 0;
6922                 goto out_unlocked;
6923         }
6924
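             /* with fwfault_debug set, halt the firmware to preserve its fault state */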
6925         if (mpt3sas_fwfault_debug)
6926                 mpt3sas_halt_firmware(ioc);
6927
6928         /* wait for any reset already in progress to complete */
6929         mutex_lock(&ioc->reset_in_progress_mutex);
6930
6931         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6932         ioc->shost_recovery = 1;
6933         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6934
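             /* a registered trace buffer that has not been released means a
              * master trigger must be fired once this reset completes
              */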
6935         if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
6936             MPT3_DIAG_BUFFER_IS_REGISTERED) &&
6937             (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
6938             MPT3_DIAG_BUFFER_IS_RELEASED))) {
6939                 is_trigger = 1;
6940                 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6941                 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
6942                         is_fault = 1;
6943         }
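             /* quiesce: notify scsih/ctl, drain outstanding I/O and mask interrupts */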
6944         _base_pre_reset_handler(ioc);
6945         mpt3sas_wait_for_commands_to_complete(ioc);
6946         _base_mask_interrupts(ioc);
6947         r = _base_make_ioc_ready(ioc, type);
6948         if (r)
6949                 goto out;
6950         _base_after_reset_handler(ioc);
6951
6952         /* If this hard reset is called while port enable is active, then
6953          * there is no reason to call make_ioc_operational
6954          */
6955         if (ioc->is_driver_loading && ioc->port_enable_failed) {
6956                 ioc->remove_host = 1;
6957                 r = -EFAULT;
6958                 goto out;
6959         }
6960         r = _base_get_ioc_facts(ioc);
6961         if (r)
6962                 goto out;
6963
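             /* losing RDPQ capability across a reset implies the firmware was downgraded */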
6964         if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
6965                 panic("%s: Issue occurred with flashing controller firmware."
6966                       "Please reboot the system and ensure that the correct"
6967                       " firmware version is running\n", ioc->name);
6968
6969         r = _base_make_ioc_operational(ioc);
6970         if (!r)
6971                 _base_reset_done_handler(ioc);
6972
6973  out:
6974         dtmprintk(ioc,
6975                   ioc_info(ioc, "%s: %s\n",
6976                            __func__, r == 0 ? "SUCCESS" : "FAILED"));
6977
6978         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6979         ioc->shost_recovery = 0;
6980         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6981         ioc->ioc_reset_count++;
6982         mutex_unlock(&ioc->reset_in_progress_mutex);
6983
6984  out_unlocked:
6985         if ((r == 0) && is_trigger) {
6986                 if (is_fault)
6987                         mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
6988                 else
6989                         mpt3sas_trigger_master(ioc,
6990                             MASTER_TRIGGER_ADAPTER_RESET);
6991         }
6992         dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
6993         return r;
6994 }