1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12
13 static int
14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
17         struct mpi3_ioc_facts_data *facts_data);
18 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
19         struct mpi3mr_drv_cmd *drv_cmd);
20
21 static int poll_queues;
22 module_param(poll_queues, int, 0444);
23 MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24
25 #if defined(writeq) && defined(CONFIG_64BIT)
26 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
27 {
28         writeq(b, addr);
29 }
30 #else
31 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
32 {
33         __u64 data_out = b;
34
35         writel((u32)(data_out), addr);
36         writel((u32)(data_out >> 32), (addr + 4));
37 }
38 #endif
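/*
 * Illustrative note (not part of the driver): on builds without a native
 * writeq(), the 64-bit MMIO write above is split into two 32-bit writes,
 * low dword first, so the update is not atomic with respect to the device.
 * For example, with a hypothetical value:
 *
 *      u64 val = 0x1122334455667788ULL;
 *      writel((u32)val, addr);             // 0x55667788 at offset 0
 *      writel((u32)(val >> 32), addr + 4); // 0x11223344 at offset 4
 */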
39
40 static inline bool
41 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
42 {
43         u16 pi, ci, max_entries;
44         bool is_qfull = false;
45
46         pi = op_req_q->pi;
47         ci = READ_ONCE(op_req_q->ci);
48         max_entries = op_req_q->num_requests;
49
50         if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
51                 is_qfull = true;
52
53         return is_qfull;
54 }
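/*
 * Worked example for the queue-full check above (illustrative values):
 * with num_requests = 4, the ring is treated as full when the producer
 * index is one slot behind the consumer index, i.e. one entry always
 * stays unused:
 *
 *      pi = 2, ci = 3  ->  ci == pi + 1                   -> full
 *      pi = 3, ci = 0  ->  !ci && pi == max_entries - 1   -> full
 *      pi = 1, ci = 3  ->  neither condition              -> not full
 */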
55
56 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
57 {
58         u16 i, max_vectors;
59
60         max_vectors = mrioc->intr_info_count;
61
62         for (i = 0; i < max_vectors; i++)
63                 synchronize_irq(pci_irq_vector(mrioc->pdev, i));
64 }
65
66 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
67 {
68         mrioc->intr_enabled = 0;
69         mpi3mr_sync_irqs(mrioc);
70 }
71
72 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
73 {
74         mrioc->intr_enabled = 1;
75 }
76
77 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
78 {
79         u16 i;
80
81         mpi3mr_ioc_disable_intr(mrioc);
82
83         if (!mrioc->intr_info)
84                 return;
85
86         for (i = 0; i < mrioc->intr_info_count; i++)
87                 free_irq(pci_irq_vector(mrioc->pdev, i),
88                     (mrioc->intr_info + i));
89
90         kfree(mrioc->intr_info);
91         mrioc->intr_info = NULL;
92         mrioc->intr_info_count = 0;
93         mrioc->is_intr_info_set = false;
94         pci_free_irq_vectors(mrioc->pdev);
95 }
96
97 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
98         dma_addr_t dma_addr)
99 {
100         struct mpi3_sge_common *sgel = paddr;
101
102         sgel->flags = flags;
103         sgel->length = cpu_to_le32(length);
104         sgel->address = cpu_to_le64(dma_addr);
105 }
106
107 void mpi3mr_build_zero_len_sge(void *paddr)
108 {
109         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
110
111         mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
112 }
113
114 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
115         dma_addr_t phys_addr)
116 {
117         if (!phys_addr)
118                 return NULL;
119
120         if ((phys_addr < mrioc->reply_buf_dma) ||
121             (phys_addr > mrioc->reply_buf_dma_max_address))
122                 return NULL;
123
124         return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
125 }
126
127 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
128         dma_addr_t phys_addr)
129 {
130         if (!phys_addr)
131                 return NULL;
132
133         return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
134 }
135
136 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
137         u64 reply_dma)
138 {
139         u32 old_idx = 0;
140         unsigned long flags;
141
142         spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
143         old_idx  =  mrioc->reply_free_queue_host_index;
144         mrioc->reply_free_queue_host_index = (
145             (mrioc->reply_free_queue_host_index ==
146             (mrioc->reply_free_qsz - 1)) ? 0 :
147             (mrioc->reply_free_queue_host_index + 1));
148         mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
149         writel(mrioc->reply_free_queue_host_index,
150             &mrioc->sysif_regs->reply_free_host_index);
151         spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
152 }
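/*
 * Illustrative note: the reply free queue host index above advances as a
 * simple ring (e.g. with reply_free_qsz = 64: 62 -> 63 -> 0 -> 1 -> ...).
 * The freed reply frame's DMA address is stored in the slot the index
 * occupied before the advance, and the new index is then posted to the
 * reply_free_host_index register so the controller can reuse the frame.
 */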
153
154 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
155         u64 sense_buf_dma)
156 {
157         u32 old_idx = 0;
158         unsigned long flags;
159
160         spin_lock_irqsave(&mrioc->sbq_lock, flags);
161         old_idx  =  mrioc->sbq_host_index;
162         mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
163             (mrioc->sense_buf_q_sz - 1)) ? 0 :
164             (mrioc->sbq_host_index + 1));
165         mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
166         writel(mrioc->sbq_host_index,
167             &mrioc->sysif_regs->sense_buffer_free_host_index);
168         spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
169 }
170
171 static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
172         struct mpi3_event_notification_reply *event_reply)
173 {
174         char *desc = NULL;
175         u16 event;
176
177         event = event_reply->event;
178
179         switch (event) {
180         case MPI3_EVENT_LOG_DATA:
181                 desc = "Log Data";
182                 break;
183         case MPI3_EVENT_CHANGE:
184                 desc = "Event Change";
185                 break;
186         case MPI3_EVENT_GPIO_INTERRUPT:
187                 desc = "GPIO Interrupt";
188                 break;
189         case MPI3_EVENT_CABLE_MGMT:
190                 desc = "Cable Management";
191                 break;
192         case MPI3_EVENT_ENERGY_PACK_CHANGE:
193                 desc = "Energy Pack Change";
194                 break;
195         case MPI3_EVENT_DEVICE_ADDED:
196         {
197                 struct mpi3_device_page0 *event_data =
198                     (struct mpi3_device_page0 *)event_reply->event_data;
199                 ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
200                     event_data->dev_handle, event_data->device_form);
201                 return;
202         }
203         case MPI3_EVENT_DEVICE_INFO_CHANGED:
204         {
205                 struct mpi3_device_page0 *event_data =
206                     (struct mpi3_device_page0 *)event_reply->event_data;
207                 ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
208                     event_data->dev_handle, event_data->device_form);
209                 return;
210         }
211         case MPI3_EVENT_DEVICE_STATUS_CHANGE:
212         {
213                 struct mpi3_event_data_device_status_change *event_data =
214                     (struct mpi3_event_data_device_status_change *)event_reply->event_data;
215                 ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
216                     event_data->dev_handle, event_data->reason_code);
217                 return;
218         }
219         case MPI3_EVENT_SAS_DISCOVERY:
220         {
221                 struct mpi3_event_data_sas_discovery *event_data =
222                     (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
223                 ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
224                     (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
225                     "start" : "stop",
226                     le32_to_cpu(event_data->discovery_status));
227                 return;
228         }
229         case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
230                 desc = "SAS Broadcast Primitive";
231                 break;
232         case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
233                 desc = "SAS Notify Primitive";
234                 break;
235         case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
236                 desc = "SAS Init Device Status Change";
237                 break;
238         case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
239                 desc = "SAS Init Table Overflow";
240                 break;
241         case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
242                 desc = "SAS Topology Change List";
243                 break;
244         case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
245                 desc = "Enclosure Device Status Change";
246                 break;
247         case MPI3_EVENT_ENCL_DEVICE_ADDED:
248                 desc = "Enclosure Added";
249                 break;
250         case MPI3_EVENT_HARD_RESET_RECEIVED:
251                 desc = "Hard Reset Received";
252                 break;
253         case MPI3_EVENT_SAS_PHY_COUNTER:
254                 desc = "SAS PHY Counter";
255                 break;
256         case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
257                 desc = "SAS Device Discovery Error";
258                 break;
259         case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
260                 desc = "PCIE Topology Change List";
261                 break;
262         case MPI3_EVENT_PCIE_ENUMERATION:
263         {
264                 struct mpi3_event_data_pcie_enumeration *event_data =
265                     (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
266                 ioc_info(mrioc, "PCIE Enumeration: (%s)",
267                     (event_data->reason_code ==
268                     MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
269                 if (event_data->enumeration_status)
270                         ioc_info(mrioc, "enumeration_status(0x%08x)\n",
271                             le32_to_cpu(event_data->enumeration_status));
272                 return;
273         }
274         case MPI3_EVENT_PREPARE_FOR_RESET:
275                 desc = "Prepare For Reset";
276                 break;
277         }
278
279         if (!desc)
280                 return;
281
282         ioc_info(mrioc, "%s\n", desc);
283 }
284
285 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
286         struct mpi3_default_reply *def_reply)
287 {
288         struct mpi3_event_notification_reply *event_reply =
289             (struct mpi3_event_notification_reply *)def_reply;
290
291         mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
292         mpi3mr_print_event_data(mrioc, event_reply);
293         mpi3mr_os_handle_events(mrioc, event_reply);
294 }
295
296 static struct mpi3mr_drv_cmd *
297 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
298         struct mpi3_default_reply *def_reply)
299 {
300         u16 idx;
301
302         switch (host_tag) {
303         case MPI3MR_HOSTTAG_INITCMDS:
304                 return &mrioc->init_cmds;
305         case MPI3MR_HOSTTAG_CFG_CMDS:
306                 return &mrioc->cfg_cmds;
307         case MPI3MR_HOSTTAG_BSG_CMDS:
308                 return &mrioc->bsg_cmds;
309         case MPI3MR_HOSTTAG_BLK_TMS:
310                 return &mrioc->host_tm_cmds;
311         case MPI3MR_HOSTTAG_PEL_ABORT:
312                 return &mrioc->pel_abort_cmd;
313         case MPI3MR_HOSTTAG_PEL_WAIT:
314                 return &mrioc->pel_cmds;
315         case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
316                 return &mrioc->transport_cmds;
317         case MPI3MR_HOSTTAG_INVALID:
318                 if (def_reply && def_reply->function ==
319                     MPI3_FUNCTION_EVENT_NOTIFICATION)
320                         mpi3mr_handle_events(mrioc, def_reply);
321                 return NULL;
322         default:
323                 break;
324         }
325         if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
326             host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
327                 idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
328                 return &mrioc->dev_rmhs_cmds[idx];
329         }
330
331         if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
332             host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
333                 idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
334                 return &mrioc->evtack_cmds[idx];
335         }
336
337         return NULL;
338 }
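/*
 * Illustrative mapping for mpi3mr_get_drv_cmd() (example values of the
 * ranges handled above, not new definitions):
 *
 *      MPI3MR_HOSTTAG_INITCMDS         -> &mrioc->init_cmds
 *      MPI3MR_HOSTTAG_DEVRMCMD_MIN + 2 -> &mrioc->dev_rmhs_cmds[2]
 *      MPI3MR_HOSTTAG_INVALID          -> NULL (event notification replies
 *                                         are routed to mpi3mr_handle_events())
 */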
339
340 static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
341         struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
342 {
343         u16 reply_desc_type, host_tag = 0;
344         u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
345         u32 ioc_loginfo = 0;
346         struct mpi3_status_reply_descriptor *status_desc;
347         struct mpi3_address_reply_descriptor *addr_desc;
348         struct mpi3_success_reply_descriptor *success_desc;
349         struct mpi3_default_reply *def_reply = NULL;
350         struct mpi3mr_drv_cmd *cmdptr = NULL;
351         struct mpi3_scsi_io_reply *scsi_reply;
352         u8 *sense_buf = NULL;
353
354         *reply_dma = 0;
355         reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
356             MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
357         switch (reply_desc_type) {
358         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
359                 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
360                 host_tag = le16_to_cpu(status_desc->host_tag);
361                 ioc_status = le16_to_cpu(status_desc->ioc_status);
362                 if (ioc_status &
363                     MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
364                         ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
365                 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
366                 break;
367         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
368                 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
369                 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
370                 def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
371                 if (!def_reply)
372                         goto out;
373                 host_tag = le16_to_cpu(def_reply->host_tag);
374                 ioc_status = le16_to_cpu(def_reply->ioc_status);
375                 if (ioc_status &
376                     MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
377                         ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
378                 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
379                 if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
380                         scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
381                         sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
382                             le64_to_cpu(scsi_reply->sense_data_buffer_address));
383                 }
384                 break;
385         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
386                 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
387                 host_tag = le16_to_cpu(success_desc->host_tag);
388                 break;
389         default:
390                 break;
391         }
392
393         cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
394         if (cmdptr) {
395                 if (cmdptr->state & MPI3MR_CMD_PENDING) {
396                         cmdptr->state |= MPI3MR_CMD_COMPLETE;
397                         cmdptr->ioc_loginfo = ioc_loginfo;
398                         cmdptr->ioc_status = ioc_status;
399                         cmdptr->state &= ~MPI3MR_CMD_PENDING;
400                         if (def_reply) {
401                                 cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
402                                 memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
403                                     mrioc->reply_sz);
404                         }
405                         if (sense_buf && cmdptr->sensebuf) {
406                                 cmdptr->is_sense = 1;
407                                 memcpy(cmdptr->sensebuf, sense_buf,
408                                        MPI3MR_SENSE_BUF_SZ);
409                         }
410                         if (cmdptr->is_waiting) {
411                                 complete(&cmdptr->done);
412                                 cmdptr->is_waiting = 0;
413                         } else if (cmdptr->callback)
414                                 cmdptr->callback(mrioc, cmdptr);
415                 }
416         }
417 out:
418         if (sense_buf)
419                 mpi3mr_repost_sense_buf(mrioc,
420                     le64_to_cpu(scsi_reply->sense_data_buffer_address));
421 }
422
423 int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
424 {
425         u32 exp_phase = mrioc->admin_reply_ephase;
426         u32 admin_reply_ci = mrioc->admin_reply_ci;
427         u32 num_admin_replies = 0;
428         u64 reply_dma = 0;
429         struct mpi3_default_reply_descriptor *reply_desc;
430
431         if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
432                 return 0;
433
434         reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
435             admin_reply_ci;
436
437         if ((le16_to_cpu(reply_desc->reply_flags) &
438             MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
439                 atomic_dec(&mrioc->admin_reply_q_in_use);
440                 return 0;
441         }
442
443         do {
444                 if (mrioc->unrecoverable)
445                         break;
446
447                 mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
448                 mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
449                 if (reply_dma)
450                         mpi3mr_repost_reply_buf(mrioc, reply_dma);
451                 num_admin_replies++;
452                 if (++admin_reply_ci == mrioc->num_admin_replies) {
453                         admin_reply_ci = 0;
454                         exp_phase ^= 1;
455                 }
456                 reply_desc =
457                     (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
458                     admin_reply_ci;
459                 if ((le16_to_cpu(reply_desc->reply_flags) &
460                     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
461                         break;
462         } while (1);
463
464         writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
465         mrioc->admin_reply_ci = admin_reply_ci;
466         mrioc->admin_reply_ephase = exp_phase;
467         atomic_dec(&mrioc->admin_reply_q_in_use);
468
469         return num_admin_replies;
470 }
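/*
 * Sketch of the phase-bit protocol used above (illustrative, assuming the
 * expected phase is initialized to 1 when the queue is set up): a
 * descriptor whose phase bit differs from the expected phase has not yet
 * been written by the controller, and the expected phase toggles on every
 * consumer-index wrap:
 *
 *      while ((le16_to_cpu(desc->reply_flags) &
 *              MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) == exp_phase) {
 *              consume(desc);
 *              if (++ci == num_entries) {
 *                      ci = 0;
 *                      exp_phase ^= 1;
 *              }
 *              desc = base + ci;
 *      }
 *      writel(ci, &mrioc->sysif_regs->admin_reply_queue_ci);
 */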
471
472 /**
473  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
474  *      queue's consumer index from operational reply descriptor queue.
475  * @op_reply_q: op_reply_qinfo object
476  * @reply_ci: operational reply descriptor's queue consumer index
477  *
 478  * Return: reply descriptor frame address
479  */
480 static inline struct mpi3_default_reply_descriptor *
481 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
482 {
483         void *segment_base_addr;
484         struct segments *segments = op_reply_q->q_segments;
485         struct mpi3_default_reply_descriptor *reply_desc = NULL;
486
487         segment_base_addr =
488             segments[reply_ci / op_reply_q->segment_qd].segment;
489         reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
490             (reply_ci % op_reply_q->segment_qd);
491         return reply_desc;
492 }
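/*
 * Worked example for the segmented lookup above (illustrative values):
 * with segment_qd = 128 descriptors per segment, consumer index 300
 * resolves to segment 300 / 128 = 2 and entry 300 % 128 = 44 within that
 * segment's contiguous buffer.
 */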
493
494 /**
495  * mpi3mr_process_op_reply_q - Operational reply queue handler
496  * @mrioc: Adapter instance reference
497  * @op_reply_q: Operational reply queue info
498  *
 499  * Checks the specific operational reply queue, drains the reply
 500  * queue entries until the queue is empty, and processes the
 501  * individual reply descriptors.
 502  *
 503  * Return: 0 if the queue is already being processed, or the number
 504  *          of reply descriptors processed.
505  */
506 int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
507         struct op_reply_qinfo *op_reply_q)
508 {
509         struct op_req_qinfo *op_req_q;
510         u32 exp_phase;
511         u32 reply_ci;
512         u32 num_op_reply = 0;
513         u64 reply_dma = 0;
514         struct mpi3_default_reply_descriptor *reply_desc;
515         u16 req_q_idx = 0, reply_qidx;
516
517         reply_qidx = op_reply_q->qid - 1;
518
519         if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
520                 return 0;
521
522         exp_phase = op_reply_q->ephase;
523         reply_ci = op_reply_q->ci;
524
525         reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
526         if ((le16_to_cpu(reply_desc->reply_flags) &
527             MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
528                 atomic_dec(&op_reply_q->in_use);
529                 return 0;
530         }
531
532         do {
533                 if (mrioc->unrecoverable)
534                         break;
535
536                 req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
537                 op_req_q = &mrioc->req_qinfo[req_q_idx];
538
539                 WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
540                 mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
541                     reply_qidx);
542                 atomic_dec(&op_reply_q->pend_ios);
543                 if (reply_dma)
544                         mpi3mr_repost_reply_buf(mrioc, reply_dma);
545                 num_op_reply++;
546
547                 if (++reply_ci == op_reply_q->num_replies) {
548                         reply_ci = 0;
549                         exp_phase ^= 1;
550                 }
551
552                 reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
553
554                 if ((le16_to_cpu(reply_desc->reply_flags) &
555                     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
556                         break;
557 #ifndef CONFIG_PREEMPT_RT
558                 /*
559                  * Exit completion loop to avoid CPU lockup
560                  * Ensure remaining completion happens from threaded ISR.
561                  */
562                 if (num_op_reply > mrioc->max_host_ios) {
563                         op_reply_q->enable_irq_poll = true;
564                         break;
565                 }
566 #endif
567         } while (1);
568
569         writel(reply_ci,
570             &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
571         op_reply_q->ci = reply_ci;
572         op_reply_q->ephase = exp_phase;
573
574         atomic_dec(&op_reply_q->in_use);
575         return num_op_reply;
576 }
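/*
 * Note on the completion budget above (illustrative): on non-PREEMPT_RT
 * kernels, once more than max_host_ios descriptors have been drained in a
 * single pass, the loop sets enable_irq_poll and returns so that the
 * remaining completions are reaped from the threaded handler
 * (mpi3mr_isr_poll) instead of monopolizing the CPU in hard IRQ context.
 */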
577
578 /**
579  * mpi3mr_blk_mq_poll - Operational reply queue handler
580  * @shost: SCSI Host reference
 581  * @queue_num: Request queue number (w.r.t. the OS it is the hardware context number)
 582  *
 583  * Checks the specific operational reply queue, drains the reply
 584  * queue entries until the queue is empty, and processes the
 585  * individual reply descriptors.
 586  *
 587  * Return: 0 if the queue is already being processed, or the number
 588  *          of reply descriptors processed.
589  */
590 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
591 {
592         int num_entries = 0;
593         struct mpi3mr_ioc *mrioc;
594
595         mrioc = (struct mpi3mr_ioc *)shost->hostdata;
596
597         if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
598             mrioc->unrecoverable))
599                 return 0;
600
601         num_entries = mpi3mr_process_op_reply_q(mrioc,
602                         &mrioc->op_reply_qinfo[queue_num]);
603
604         return num_entries;
605 }
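/*
 * Usage note (illustrative): reply queues reserved through the poll_queues
 * module parameter (e.g. "modprobe mpi3mr poll_queues=4") are intended for
 * polled I/O such as io_uring with IORING_SETUP_IOPOLL; for those hardware
 * contexts the block layer calls this hook and completions are reaped by
 * mpi3mr_process_op_reply_q() rather than delivered by an interrupt.
 */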
606
607 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
608 {
609         struct mpi3mr_intr_info *intr_info = privdata;
610         struct mpi3mr_ioc *mrioc;
611         u16 midx;
612         u32 num_admin_replies = 0, num_op_reply = 0;
613
614         if (!intr_info)
615                 return IRQ_NONE;
616
617         mrioc = intr_info->mrioc;
618
619         if (!mrioc->intr_enabled)
620                 return IRQ_NONE;
621
622         midx = intr_info->msix_index;
623
624         if (!midx)
625                 num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
626         if (intr_info->op_reply_q)
627                 num_op_reply = mpi3mr_process_op_reply_q(mrioc,
628                     intr_info->op_reply_q);
629
630         if (num_admin_replies || num_op_reply)
631                 return IRQ_HANDLED;
632         else
633                 return IRQ_NONE;
634 }
635
636 #ifndef CONFIG_PREEMPT_RT
637
638 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
639 {
640         struct mpi3mr_intr_info *intr_info = privdata;
641         int ret;
642
643         if (!intr_info)
644                 return IRQ_NONE;
645
646         /* Call primary ISR routine */
647         ret = mpi3mr_isr_primary(irq, privdata);
648
649         /*
650          * If more IOs are expected, schedule IRQ polling thread.
651          * Otherwise exit from ISR.
652          */
653         if (!intr_info->op_reply_q)
654                 return ret;
655
656         if (!intr_info->op_reply_q->enable_irq_poll ||
657             !atomic_read(&intr_info->op_reply_q->pend_ios))
658                 return ret;
659
660         disable_irq_nosync(intr_info->os_irq);
661
662         return IRQ_WAKE_THREAD;
663 }
664
665 /**
666  * mpi3mr_isr_poll - Reply queue polling routine
667  * @irq: IRQ
668  * @privdata: Interrupt info
669  *
 670  * Poll for pending I/O completions in a loop until no pending I/Os
 671  * remain or a controller queue depth worth of I/Os are processed.
672  *
673  * Return: IRQ_NONE or IRQ_HANDLED
674  */
675 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
676 {
677         struct mpi3mr_intr_info *intr_info = privdata;
678         struct mpi3mr_ioc *mrioc;
679         u16 midx;
680         u32 num_op_reply = 0;
681
682         if (!intr_info || !intr_info->op_reply_q)
683                 return IRQ_NONE;
684
685         mrioc = intr_info->mrioc;
686         midx = intr_info->msix_index;
687
 688         /* Poll for pending I/O completions */
689         do {
690                 if (!mrioc->intr_enabled || mrioc->unrecoverable)
691                         break;
692
693                 if (!midx)
694                         mpi3mr_process_admin_reply_q(mrioc);
695                 if (intr_info->op_reply_q)
696                         num_op_reply +=
697                             mpi3mr_process_op_reply_q(mrioc,
698                                 intr_info->op_reply_q);
699
700                 usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
701
702         } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
703             (num_op_reply < mrioc->max_host_ios));
704
705         intr_info->op_reply_q->enable_irq_poll = false;
706         enable_irq(intr_info->os_irq);
707
708         return IRQ_HANDLED;
709 }
710
711 #endif
712
713 /**
714  * mpi3mr_request_irq - Request IRQ and register ISR
715  * @mrioc: Adapter instance reference
716  * @index: IRQ vector index
717  *
 718  * Request a threaded IRQ with a primary handler and a secondary (threaded) handler.
719  *
 720  * Return: 0 on success and non-zero on failure.
721  */
722 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
723 {
724         struct pci_dev *pdev = mrioc->pdev;
725         struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
726         int retval = 0;
727
728         intr_info->mrioc = mrioc;
729         intr_info->msix_index = index;
730         intr_info->op_reply_q = NULL;
731
732         snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
733             mrioc->driver_name, mrioc->id, index);
734
735 #ifndef CONFIG_PREEMPT_RT
736         retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
737             mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
738 #else
739         retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
740             NULL, IRQF_SHARED, intr_info->name, intr_info);
741 #endif
742         if (retval) {
743                 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
744                     intr_info->name, pci_irq_vector(pdev, index));
745                 return retval;
746         }
747
748         intr_info->os_irq = pci_irq_vector(pdev, index);
749         return retval;
750 }
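/*
 * Illustrative note: on non-PREEMPT_RT kernels each vector is registered
 * with request_threaded_irq(), using mpi3mr_isr as the hard handler and
 * mpi3mr_isr_poll as the thread function; the hard handler returns
 * IRQ_WAKE_THREAD (after disable_irq_nosync()) only when IRQ polling has
 * been enabled for the queue and I/Os are still pending, otherwise all
 * completions finish in hard IRQ context. On PREEMPT_RT only
 * mpi3mr_isr_primary is registered.
 */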
751
752 static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
753 {
754         if (!mrioc->requested_poll_qcount)
755                 return;
756
757         /* Reserved for Admin and Default Queue */
758         if (max_vectors > 2 &&
759                 (mrioc->requested_poll_qcount < max_vectors - 2)) {
760                 ioc_info(mrioc,
761                     "enabled polled queues (%d) msix (%d)\n",
762                     mrioc->requested_poll_qcount, max_vectors);
763         } else {
764                 ioc_info(mrioc,
765                     "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
766                     mrioc->requested_poll_qcount, max_vectors);
767                 mrioc->requested_poll_qcount = 0;
768         }
769 }
770
771 /**
772  * mpi3mr_setup_isr - Setup ISR for the controller
773  * @mrioc: Adapter instance reference
 774  * @setup_one: Request a single IRQ vector only when set, otherwise all vectors
775  *
776  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
777  *
 778  * Return: 0 on success and non-zero on failure.
779  */
780 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
781 {
782         unsigned int irq_flags = PCI_IRQ_MSIX;
783         int max_vectors, min_vec;
784         int retval;
785         int i;
786         struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
787
788         if (mrioc->is_intr_info_set)
789                 return 0;
790
791         mpi3mr_cleanup_isr(mrioc);
792
793         if (setup_one || reset_devices) {
794                 max_vectors = 1;
795                 retval = pci_alloc_irq_vectors(mrioc->pdev,
796                     1, max_vectors, irq_flags);
797                 if (retval < 0) {
798                         ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
799                             retval);
800                         goto out_failed;
801                 }
802         } else {
803                 max_vectors =
804                     min_t(int, mrioc->cpu_count + 1 +
805                         mrioc->requested_poll_qcount, mrioc->msix_count);
806
807                 mpi3mr_calc_poll_queues(mrioc, max_vectors);
808
809                 ioc_info(mrioc,
 810                     "MSI-X vectors supported: %d, number of cores: %d,",
811                     mrioc->msix_count, mrioc->cpu_count);
812                 ioc_info(mrioc,
 813                     "MSI-X vectors requested: %d, poll_queues: %d\n",
814                     max_vectors, mrioc->requested_poll_qcount);
815
816                 desc.post_vectors = mrioc->requested_poll_qcount;
817                 min_vec = desc.pre_vectors + desc.post_vectors;
818                 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
819
820                 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
821                         min_vec, max_vectors, irq_flags, &desc);
822
823                 if (retval < 0) {
824                         ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
825                             retval);
826                         goto out_failed;
827                 }
828
829
830                 /*
831                  * If only one MSI-x is allocated, then MSI-x 0 will be shared
832                  * between Admin queue and operational queue
833                  */
834                 if (retval == min_vec)
835                         mrioc->op_reply_q_offset = 0;
836                 else if (retval != (max_vectors)) {
837                         ioc_info(mrioc,
838                             "allocated vectors (%d) are less than configured (%d)\n",
839                             retval, max_vectors);
840                 }
841
842                 max_vectors = retval;
843                 mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
844
845                 mpi3mr_calc_poll_queues(mrioc, max_vectors);
846
847         }
848
849         mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
850             GFP_KERNEL);
851         if (!mrioc->intr_info) {
852                 retval = -ENOMEM;
853                 pci_free_irq_vectors(mrioc->pdev);
854                 goto out_failed;
855         }
856         for (i = 0; i < max_vectors; i++) {
857                 retval = mpi3mr_request_irq(mrioc, i);
858                 if (retval) {
859                         mrioc->intr_info_count = i;
860                         goto out_failed;
861                 }
862         }
863         if (reset_devices || !setup_one)
864                 mrioc->is_intr_info_set = true;
865         mrioc->intr_info_count = max_vectors;
866         mpi3mr_ioc_enable_intr(mrioc);
867         return 0;
868
869 out_failed:
870         mpi3mr_cleanup_isr(mrioc);
871
872         return retval;
873 }
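/*
 * Illustrative sketch of the vector layout requested above: with, say,
 * cpu_count = 8 and requested_poll_qcount = 2, up to 8 + 1 + 2 = 11 MSI-X
 * vectors are requested and described to the IRQ core as
 *
 *      struct irq_affinity desc = {
 *              .pre_vectors  = 1,      // admin/default queue, not spread
 *              .post_vectors = 2,      // poll queues, not spread
 *      };
 *
 * so only the middle vectors are affinity-spread across CPUs. If fewer
 * vectors are granted, op_reply_q_offset and the poll queue count are
 * recalculated from the number actually allocated.
 */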
874
875 static const struct {
876         enum mpi3mr_iocstate value;
877         char *name;
878 } mrioc_states[] = {
879         { MRIOC_STATE_READY, "ready" },
880         { MRIOC_STATE_FAULT, "fault" },
881         { MRIOC_STATE_RESET, "reset" },
882         { MRIOC_STATE_BECOMING_READY, "becoming ready" },
883         { MRIOC_STATE_RESET_REQUESTED, "reset requested" },
884         { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
885 };
886
887 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
888 {
889         int i;
890         char *name = NULL;
891
892         for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
893                 if (mrioc_states[i].value == mrioc_state) {
894                         name = mrioc_states[i].name;
895                         break;
896                 }
897         }
898         return name;
899 }
900
 901 /* Reset reason to name mapper structure */
902 static const struct {
903         enum mpi3mr_reset_reason value;
904         char *name;
905 } mpi3mr_reset_reason_codes[] = {
906         { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
907         { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
908         { MPI3MR_RESET_FROM_APP, "application invocation" },
909         { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
910         { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
911         { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
912         { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
913         { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
914         { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
915         { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
916         { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
917         { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
918         { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
 919         {
 920                 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
 921                 "create reply queue timeout"
 922         },
 923         {
 924                 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
 925                 "create request queue timeout"
 926         },
927         { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
928         { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
929         { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
930         { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
931         {
932                 MPI3MR_RESET_FROM_CIACTVRST_TIMER,
933                 "component image activation timeout"
934         },
935         {
936                 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
937                 "get package version timeout"
938         },
939         { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
940         { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
941         { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
942         { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
943         { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
944 };
945
946 /**
947  * mpi3mr_reset_rc_name - get reset reason code name
948  * @reason_code: reset reason code value
949  *
 950  * Map the reset reason to a NULL-terminated ASCII string
951  *
952  * Return: name corresponding to reset reason value or NULL.
953  */
954 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
955 {
956         int i;
957         char *name = NULL;
958
959         for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
960                 if (mpi3mr_reset_reason_codes[i].value == reason_code) {
961                         name = mpi3mr_reset_reason_codes[i].name;
962                         break;
963                 }
964         }
965         return name;
966 }
967
 968 /* Reset type to name mapper structure */
969 static const struct {
970         u16 reset_type;
971         char *name;
972 } mpi3mr_reset_types[] = {
973         { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
974         { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
975 };
976
977 /**
978  * mpi3mr_reset_type_name - get reset type name
979  * @reset_type: reset type value
980  *
 981  * Map the reset type to a NULL-terminated ASCII string
982  *
983  * Return: name corresponding to reset type value or NULL.
984  */
985 static const char *mpi3mr_reset_type_name(u16 reset_type)
986 {
987         int i;
988         char *name = NULL;
989
990         for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
991                 if (mpi3mr_reset_types[i].reset_type == reset_type) {
992                         name = mpi3mr_reset_types[i].name;
993                         break;
994                 }
995         }
996         return name;
997 }
998
999 /**
1000  * mpi3mr_print_fault_info - Display fault information
1001  * @mrioc: Adapter instance reference
1002  *
1003  * Display the controller fault information if there is a
1004  * controller fault.
1005  *
1006  * Return: Nothing.
1007  */
1008 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1009 {
1010         u32 ioc_status, code, code1, code2, code3;
1011
1012         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1013
1014         if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1015                 code = readl(&mrioc->sysif_regs->fault);
1016                 code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1017                 code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1018                 code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1019
1020                 ioc_info(mrioc,
1021                     "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1022                     code, code1, code2, code3);
1023         }
1024 }
1025
1026 /**
1027  * mpi3mr_get_iocstate - Get IOC State
1028  * @mrioc: Adapter instance reference
1029  *
 1030  * Return the proper IOC state enum based on the IOC status, the
 1031  * IOC configuration, and the unrecoverable state of the controller.
1032  *
1033  * Return: Current IOC state.
1034  */
1035 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1036 {
1037         u32 ioc_status, ioc_config;
1038         u8 ready, enabled;
1039
1040         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1041         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1042
1043         if (mrioc->unrecoverable)
1044                 return MRIOC_STATE_UNRECOVERABLE;
1045         if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1046                 return MRIOC_STATE_FAULT;
1047
1048         ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1049         enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1050
1051         if (ready && enabled)
1052                 return MRIOC_STATE_READY;
1053         if ((!ready) && (!enabled))
1054                 return MRIOC_STATE_RESET;
1055         if ((!ready) && (enabled))
1056                 return MRIOC_STATE_BECOMING_READY;
1057
1058         return MRIOC_STATE_RESET_REQUESTED;
1059 }
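/*
 * Decoding table for the checks above (illustrative): after the
 * unrecoverable and FAULT checks, the READY bit of ioc_status and the
 * ENABLE_IOC bit of ioc_configuration map to the driver state as
 *
 *      ready=1 enabled=1 -> MRIOC_STATE_READY
 *      ready=0 enabled=0 -> MRIOC_STATE_RESET
 *      ready=0 enabled=1 -> MRIOC_STATE_BECOMING_READY
 *      ready=1 enabled=0 -> MRIOC_STATE_RESET_REQUESTED
 */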
1060
1061 /**
1062  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1063  * @mrioc: Adapter instance reference
1064  *
1065  * Free the DMA memory allocated for IOCTL handling purpose.
1066  *
1067  * Return: None
1068  */
1069 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1070 {
1071         struct dma_memory_desc *mem_desc;
1072         u16 i;
1073
1074         if (!mrioc->ioctl_dma_pool)
1075                 return;
1076
1077         for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1078                 mem_desc = &mrioc->ioctl_sge[i];
1079                 if (mem_desc->addr) {
1080                         dma_pool_free(mrioc->ioctl_dma_pool,
1081                                       mem_desc->addr,
1082                                       mem_desc->dma_addr);
1083                         mem_desc->addr = NULL;
1084                 }
1085         }
1086         dma_pool_destroy(mrioc->ioctl_dma_pool);
1087         mrioc->ioctl_dma_pool = NULL;
1088         mem_desc = &mrioc->ioctl_chain_sge;
1089
1090         if (mem_desc->addr) {
1091                 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1092                                   mem_desc->addr, mem_desc->dma_addr);
1093                 mem_desc->addr = NULL;
1094         }
1095         mem_desc = &mrioc->ioctl_resp_sge;
1096         if (mem_desc->addr) {
1097                 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1098                                   mem_desc->addr, mem_desc->dma_addr);
1099                 mem_desc->addr = NULL;
1100         }
1101
1102         mrioc->ioctl_sges_allocated = false;
1103 }
1104
1105 /**
1106  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1107  * @mrioc: Adapter instance reference
1108  *
 1109  * This function allocates the DMA-able memory required to handle
 1110  * application-issued MPI3 IOCTL requests.
1111  *
1112  * Return: None
1113  */
1114 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1115
1116 {
1117         struct dma_memory_desc *mem_desc;
1118         u16 i;
1119
1120         mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1121                                                 &mrioc->pdev->dev,
1122                                                 MPI3MR_IOCTL_SGE_SIZE,
1123                                                 MPI3MR_PAGE_SIZE_4K, 0);
1124
1125         if (!mrioc->ioctl_dma_pool) {
1126                 ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1127                 goto out_failed;
1128         }
1129
1130         for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1131                 mem_desc = &mrioc->ioctl_sge[i];
1132                 mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1133                 mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1134                                                  GFP_KERNEL,
1135                                                  &mem_desc->dma_addr);
1136                 if (!mem_desc->addr)
1137                         goto out_failed;
1138         }
1139
1140         mem_desc = &mrioc->ioctl_chain_sge;
1141         mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1142         mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1143                                             mem_desc->size,
1144                                             &mem_desc->dma_addr,
1145                                             GFP_KERNEL);
1146         if (!mem_desc->addr)
1147                 goto out_failed;
1148
1149         mem_desc = &mrioc->ioctl_resp_sge;
1150         mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1151         mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1152                                             mem_desc->size,
1153                                             &mem_desc->dma_addr,
1154                                             GFP_KERNEL);
1155         if (!mem_desc->addr)
1156                 goto out_failed;
1157
1158         mrioc->ioctl_sges_allocated = true;
1159
1160         return;
1161 out_failed:
 1162         ioc_warn(mrioc, "cannot allocate DMA memory for the MPT commands "
 1163                  "issued by applications, the application interface for MPT commands is disabled\n");
1164         mpi3mr_free_ioctl_dma_memory(mrioc);
1165 }
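/*
 * Illustrative sketch of the buffers allocated above: MPI3MR_NUM_IOCTL_SGE
 * buffers of MPI3MR_IOCTL_SGE_SIZE each come from a 4K-aligned dma_pool,
 * plus one 4K chain buffer and one 4K response buffer from
 * dma_alloc_coherent(). The generic dma_pool pattern is
 *
 *      pool  = dma_pool_create("name", &pdev->dev, size, align, 0);
 *      vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma_handle);
 *      ...
 *      dma_pool_free(pool, vaddr, dma_handle);
 *      dma_pool_destroy(pool);
 */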
1166
1167 /**
1168  * mpi3mr_clear_reset_history - clear reset history
1169  * @mrioc: Adapter instance reference
1170  *
1171  * Write the reset history bit in IOC status to clear the bit,
1172  * if it is already set.
1173  *
1174  * Return: Nothing.
1175  */
1176 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1177 {
1178         u32 ioc_status;
1179
1180         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1181         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1182                 writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1183 }
1184
1185 /**
1186  * mpi3mr_issue_and_process_mur - Message unit Reset handler
1187  * @mrioc: Adapter instance reference
1188  * @reset_reason: Reset reason code
1189  *
1190  * Issue Message unit Reset to the controller and wait for it to
 1191  * Issue a Message Unit Reset to the controller and wait for it to
 1192  * complete.
1193  * Return: 0 on success, -1 on failure.
1194  */
1195 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1196         u32 reset_reason)
1197 {
1198         u32 ioc_config, timeout, ioc_status;
1199         int retval = -1;
1200
1201         ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1202         if (mrioc->unrecoverable) {
1203                 ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
1204                 return retval;
1205         }
1206         mpi3mr_clear_reset_history(mrioc);
1207         writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1208         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1209         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1210         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1211
1212         timeout = MPI3MR_MUR_TIMEOUT * 10;
1213         do {
1214                 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1215                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1216                         mpi3mr_clear_reset_history(mrioc);
1217                         break;
1218                 }
1219                 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1220                         mpi3mr_print_fault_info(mrioc);
1221                         break;
1222                 }
1223                 msleep(100);
1224         } while (--timeout);
1225
1226         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1227         if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1228               (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1229               (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1230                 retval = 0;
1231
1232         ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
1233             (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1234         return retval;
1235 }
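/*
 * Sequence sketch for the MUR above (illustrative): the reset reason is
 * written to scratchpad[0], ENABLE_IOC is cleared in ioc_configuration,
 * and ioc_status is then polled every 100 ms for up to MPI3MR_MUR_TIMEOUT
 * seconds, treating RESET_HISTORY as completion and FAULT as failure.
 * Success additionally requires that READY, FAULT and ENABLE_IOC are all
 * clear once the wait finishes.
 */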
1236
1237 /**
1238  * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1239  * during reset/resume
1240  * @mrioc: Adapter instance reference
1241  *
 1242  * Return: zero if the new IOCFacts parameter values are compatible
 1243  * with the older values, else -EPERM.
1244  */
1245 static int
1246 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1247 {
1248         unsigned long *removepend_bitmap;
1249
1250         if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1251                 ioc_err(mrioc,
1252                     "cannot increase reply size from %d to %d\n",
1253                     mrioc->reply_sz, mrioc->facts.reply_sz);
1254                 return -EPERM;
1255         }
1256
1257         if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1258                 ioc_err(mrioc,
1259                     "cannot reduce number of operational reply queues from %d to %d\n",
1260                     mrioc->num_op_reply_q,
1261                     mrioc->facts.max_op_reply_q);
1262                 return -EPERM;
1263         }
1264
1265         if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1266                 ioc_err(mrioc,
1267                     "cannot reduce number of operational request queues from %d to %d\n",
1268                     mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1269                 return -EPERM;
1270         }
1271
1272         if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
1273                 ioc_err(mrioc, "Warning: The maximum data transfer length\n"
1274                             "\tchanged after reset: previous(%d), new(%d),\n"
1275                             "the driver cannot change this at run time\n",
1276                             mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
1277
1278         if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
1279             MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
1280                 ioc_err(mrioc,
1281                     "critical error: multipath capability is enabled at the\n"
1282                     "\tcontroller while sas transport support is enabled at the\n"
1283                     "\tdriver, please reboot the system or reload the driver\n");
1284
1285         if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
1286                 removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
1287                                                   GFP_KERNEL);
1288                 if (!removepend_bitmap) {
1289                         ioc_err(mrioc,
1290                                 "failed to increase removepend_bitmap bits from %d to %d\n",
1291                                 mrioc->dev_handle_bitmap_bits,
1292                                 mrioc->facts.max_devhandle);
1293                         return -EPERM;
1294                 }
1295                 bitmap_free(mrioc->removepend_bitmap);
1296                 mrioc->removepend_bitmap = removepend_bitmap;
1297                 ioc_info(mrioc,
1298                          "increased bits of dev_handle_bitmap from %d to %d\n",
1299                          mrioc->dev_handle_bitmap_bits,
1300                          mrioc->facts.max_devhandle);
1301                 mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
1302         }
1303
1304         return 0;
1305 }
1306
1307 /**
1308  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1309  * @mrioc: Adapter instance reference
1310  *
1311  * Set Enable IOC bit in IOC configuration register and wait for
1312  * the controller to become ready.
1313  *
1314  * Return: 0 on success, appropriate error on failure.
1315  */
1316 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1317 {
1318         u32 ioc_config, ioc_status, timeout, host_diagnostic;
1319         int retval = 0;
1320         enum mpi3mr_iocstate ioc_state;
1321         u64 base_info;
1322
1323         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1324         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1325         base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1326         ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1327             ioc_status, ioc_config, base_info);
1328
 1329         /* The timeout value is in units of 2 seconds, convert it to seconds */
1330         mrioc->ready_timeout =
1331             ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1332             MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1333
1334         ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1335
1336         ioc_state = mpi3mr_get_iocstate(mrioc);
1337         ioc_info(mrioc, "controller is in %s state during detection\n",
1338             mpi3mr_iocstate_name(ioc_state));
1339
1340         if (ioc_state == MRIOC_STATE_BECOMING_READY ||
1341             ioc_state == MRIOC_STATE_RESET_REQUESTED) {
1342                 timeout = mrioc->ready_timeout * 10;
1343                 do {
1344                         msleep(100);
1345                 } while (--timeout);
1346
1347                 if (!pci_device_is_present(mrioc->pdev)) {
1348                         mrioc->unrecoverable = 1;
1349                         ioc_err(mrioc,
1350                             "controller is not present while waiting to reset\n");
1351                         retval = -1;
1352                         goto out_device_not_present;
1353                 }
1354
1355                 ioc_state = mpi3mr_get_iocstate(mrioc);
1356                 ioc_info(mrioc,
1357                     "controller is in %s state after waiting to reset\n",
1358                     mpi3mr_iocstate_name(ioc_state));
1359         }
1360
1361         if (ioc_state == MRIOC_STATE_READY) {
1362                 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1363                 retval = mpi3mr_issue_and_process_mur(mrioc,
1364                     MPI3MR_RESET_FROM_BRINGUP);
1365                 ioc_state = mpi3mr_get_iocstate(mrioc);
1366                 if (retval)
1367                         ioc_err(mrioc,
1368                             "message unit reset failed with error %d current state %s\n",
1369                             retval, mpi3mr_iocstate_name(ioc_state));
1370         }
1371         if (ioc_state != MRIOC_STATE_RESET) {
1372                 if (ioc_state == MRIOC_STATE_FAULT) {
1373                         timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1374                         mpi3mr_print_fault_info(mrioc);
1375                         do {
1376                                 host_diagnostic =
1377                                         readl(&mrioc->sysif_regs->host_diagnostic);
1378                                 if (!(host_diagnostic &
1379                                       MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1380                                         break;
1381                                 if (!pci_device_is_present(mrioc->pdev)) {
1382                                         mrioc->unrecoverable = 1;
1383                                         ioc_err(mrioc, "controller is not present at the bringup\n");
1384                                         goto out_device_not_present;
1385                                 }
1386                                 msleep(100);
1387                         } while (--timeout);
1388                 }
1389                 mpi3mr_print_fault_info(mrioc);
1390                 ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1391                 retval = mpi3mr_issue_reset(mrioc,
1392                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1393                     MPI3MR_RESET_FROM_BRINGUP);
1394                 if (retval) {
1395                         ioc_err(mrioc,
1396                             "soft reset failed with error %d\n", retval);
1397                         goto out_failed;
1398                 }
1399         }
1400         ioc_state = mpi3mr_get_iocstate(mrioc);
1401         if (ioc_state != MRIOC_STATE_RESET) {
1402                 ioc_err(mrioc,
1403                     "cannot bring controller to reset state, current state: %s\n",
1404                     mpi3mr_iocstate_name(ioc_state));
1405                 goto out_failed;
1406         }
1407         mpi3mr_clear_reset_history(mrioc);
1408         retval = mpi3mr_setup_admin_qpair(mrioc);
1409         if (retval) {
1410                 ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1411                     retval);
1412                 goto out_failed;
1413         }
1414
1415         ioc_info(mrioc, "bringing controller to ready state\n");
1416         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1417         ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1418         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1419
1420         timeout = mrioc->ready_timeout * 10;
1421         do {
1422                 ioc_state = mpi3mr_get_iocstate(mrioc);
1423                 if (ioc_state == MRIOC_STATE_READY) {
1424                         ioc_info(mrioc,
1425                             "successfully transitioned to %s state\n",
1426                             mpi3mr_iocstate_name(ioc_state));
1427                         return 0;
1428                 }
1429                 if (!pci_device_is_present(mrioc->pdev)) {
1430                         mrioc->unrecoverable = 1;
1431                         ioc_err(mrioc,
1432                             "controller is not present at the bringup\n");
1433                         retval = -1;
1434                         goto out_device_not_present;
1435                 }
1436                 msleep(100);
1437         } while (--timeout);
1438
1439 out_failed:
1440         ioc_state = mpi3mr_get_iocstate(mrioc);
1441         ioc_err(mrioc,
1442             "failed to bring to ready state, current state: %s\n",
1443             mpi3mr_iocstate_name(ioc_state));
1444 out_device_not_present:
1445         return retval;
1446 }
1447
1448 /**
1449  * mpi3mr_soft_reset_success - Check whether soft reset succeeded
1450  * @ioc_status: IOC status register value
1451  * @ioc_config: IOC config register value
1452  *
1453  * Check whether the soft reset is successful or not based on
1454  * IOC status and IOC config register values.
1455  *
1456  * Return: True when the soft reset is successful, false otherwise.
1457  */
1458 static inline bool
1459 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1460 {
1461         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1462             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1463                 return true;
1464         return false;
1465 }
1466
1467 /**
1468  * mpi3mr_diagfault_success - Check whether diag fault reset succeeded
1469  * @mrioc: Adapter reference
1470  * @ioc_status: IOC status register value
1471  *
1472  * Check whether the controller hit the diag reset fault code.
1473  *
1474  * Return: True when there is a diag fault, false otherwise.
1475  */
1476 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1477         u32 ioc_status)
1478 {
1479         u32 fault;
1480
1481         if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1482                 return false;
1483         fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1484         if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1485                 mpi3mr_print_fault_info(mrioc);
1486                 return true;
1487         }
1488         return false;
1489 }
1490
1491 /**
1492  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1493  * @mrioc: Adapter reference
1494  *
1495  * Set diag save bit in IOC configuration register to enable
1496  * snapdump.
1497  *
1498  * Return: Nothing.
1499  */
1500 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1501 {
1502         u32 ioc_config;
1503
1504         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1505         ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1506         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1507 }
1508
1509 /**
1510  * mpi3mr_issue_reset - Issue reset to the controller
1511  * @mrioc: Adapter reference
1512  * @reset_type: Reset type
1513  * @reset_reason: Reset reason code
1514  *
1515  * Unlock the host diagnostic register and write the specific
1516  * reset type to it, then wait for reset acknowledgment from the
1517  * controller. If the unlock sequence fails, it is retried a
1518  * predefined number of times before giving up.
1519  *
1520  * Return: 0 on success, non-zero on failure.
1521  */
1522 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1523         u32 reset_reason)
1524 {
1525         int retval = -1;
1526         u8 unlock_retry_count = 0;
1527         u32 host_diagnostic, ioc_status, ioc_config;
1528         u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1529
1530         if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1531             (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1532                 return retval;
1533         if (mrioc->unrecoverable)
1534                 return retval;
1535         if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1536                 retval = 0;
1537                 return retval;
1538         }
1539
1540         ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1541             mpi3mr_reset_type_name(reset_type),
1542             mpi3mr_reset_rc_name(reset_reason), reset_reason);
1543
1544         mpi3mr_clear_reset_history(mrioc);
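             /*
              * Unlock the host diagnostic register by writing the magic key
              * sequence below; the loop retries until DIAG_WRITE_ENABLE is
              * observed or MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT attempts have
              * been made, after which the controller is marked unrecoverable.
              */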
1545         do {
1546                 ioc_info(mrioc,
1547                     "Write magic sequence to unlock host diag register (retry=%d)\n",
1548                     ++unlock_retry_count);
1549                 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1550                         ioc_err(mrioc,
1551                             "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1552                             mpi3mr_reset_type_name(reset_type),
1553                             host_diagnostic);
1554                         mrioc->unrecoverable = 1;
1555                         return retval;
1556                 }
1557
1558                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1559                     &mrioc->sysif_regs->write_sequence);
1560                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1561                     &mrioc->sysif_regs->write_sequence);
1562                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1563                     &mrioc->sysif_regs->write_sequence);
1564                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1565                     &mrioc->sysif_regs->write_sequence);
1566                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1567                     &mrioc->sysif_regs->write_sequence);
1568                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1569                     &mrioc->sysif_regs->write_sequence);
1570                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1571                     &mrioc->sysif_regs->write_sequence);
1572                 usleep_range(1000, 1100);
1573                 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1574                 ioc_info(mrioc,
1575                     "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1576                     unlock_retry_count, host_diagnostic);
1577         } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1578
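             /*
              * Record the reset reason in scratchpad[0], trigger the reset by
              * setting the requested reset action in host_diagnostic, and
              * then poll for completion based on the reset type.
              */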
1579         writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1580         writel(host_diagnostic | reset_type,
1581             &mrioc->sysif_regs->host_diagnostic);
1582         switch (reset_type) {
1583         case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1584                 do {
1585                         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1586                         ioc_config =
1587                             readl(&mrioc->sysif_regs->ioc_configuration);
1588                         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1589                             && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1590                             ) {
1591                                 mpi3mr_clear_reset_history(mrioc);
1592                                 retval = 0;
1593                                 break;
1594                         }
1595                         msleep(100);
1596                 } while (--timeout);
1597                 mpi3mr_print_fault_info(mrioc);
1598                 break;
1599         case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1600                 do {
1601                         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1602                         if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1603                                 retval = 0;
1604                                 break;
1605                         }
1606                         msleep(100);
1607                 } while (--timeout);
1608                 break;
1609         default:
1610                 break;
1611         }
1612
1613         writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1614             &mrioc->sysif_regs->write_sequence);
1615
1616         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1617         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1618         ioc_info(mrioc,
1619             "ioc_status/ioc_config after %s reset is (0x%x)/(0x%x)\n",
1620             (!retval)?"successful":"failed", ioc_status,
1621             ioc_config);
1622         if (retval)
1623                 mrioc->unrecoverable = 1;
1624         return retval;
1625 }
1626
1627 /**
1628  * mpi3mr_admin_request_post - Post request to admin queue
1629  * @mrioc: Adapter reference
1630  * @admin_req: MPI3 request
1631  * @admin_req_sz: Request size
1632  * @ignore_reset: Ignore reset in process
1633  *
1634  * Post the MPI3 request to the admin request queue and
1635  * inform the controller; if the queue is full, return an
1636  * appropriate error.
1637  *
1638  * Return: 0 on success, non-zero on failure.
1639  */
1640 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1641         u16 admin_req_sz, u8 ignore_reset)
1642 {
1643         u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1644         int retval = 0;
1645         unsigned long flags;
1646         u8 *areq_entry;
1647
1648         if (mrioc->unrecoverable) {
1649                 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1650                 return -EFAULT;
1651         }
1652
1653         spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1654         areq_pi = mrioc->admin_req_pi;
1655         areq_ci = mrioc->admin_req_ci;
1656         max_entries = mrioc->num_admin_req;
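             /*
              * The circular admin request queue is full when the producer
              * index is one slot behind the consumer index (taking wraparound
              * into account).
              */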
1657         if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1658             (areq_pi == (max_entries - 1)))) {
1659                 ioc_err(mrioc, "AdminReqQ full condition detected\n");
1660                 retval = -EAGAIN;
1661                 goto out;
1662         }
1663         if (!ignore_reset && mrioc->reset_in_progress) {
1664                 ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1665                 retval = -EAGAIN;
1666                 goto out;
1667         }
1668         areq_entry = (u8 *)mrioc->admin_req_base +
1669             (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1670         memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1671         memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1672
1673         if (++areq_pi == max_entries)
1674                 areq_pi = 0;
1675         mrioc->admin_req_pi = areq_pi;
1676
1677         writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1678
1679 out:
1680         spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1681
1682         return retval;
1683 }
1684
1685 /**
1686  * mpi3mr_free_op_req_q_segments - free request memory segments
1687  * @mrioc: Adapter instance reference
1688  * @q_idx: operational request queue index
1689  *
1690  * Free memory segments allocated for operational request queue
1691  *
1692  * Return: Nothing.
1693  */
1694 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1695 {
1696         u16 j;
1697         int size;
1698         struct segments *segments;
1699
1700         segments = mrioc->req_qinfo[q_idx].q_segments;
1701         if (!segments)
1702                 return;
1703
1704         if (mrioc->enable_segqueue) {
1705                 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1706                 if (mrioc->req_qinfo[q_idx].q_segment_list) {
1707                         dma_free_coherent(&mrioc->pdev->dev,
1708                             MPI3MR_MAX_SEG_LIST_SIZE,
1709                             mrioc->req_qinfo[q_idx].q_segment_list,
1710                             mrioc->req_qinfo[q_idx].q_segment_list_dma);
1711                         mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1712                 }
1713         } else
1714                 size = mrioc->req_qinfo[q_idx].segment_qd *
1715                     mrioc->facts.op_req_sz;
1716
1717         for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1718                 if (!segments[j].segment)
1719                         continue;
1720                 dma_free_coherent(&mrioc->pdev->dev,
1721                     size, segments[j].segment, segments[j].segment_dma);
1722                 segments[j].segment = NULL;
1723         }
1724         kfree(mrioc->req_qinfo[q_idx].q_segments);
1725         mrioc->req_qinfo[q_idx].q_segments = NULL;
1726         mrioc->req_qinfo[q_idx].qid = 0;
1727 }
1728
1729 /**
1730  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1731  * @mrioc: Adapter instance reference
1732  * @q_idx: operational reply queue index
1733  *
1734  * Free memory segments allocated for operational reply queue
1735  *
1736  * Return: Nothing.
1737  */
1738 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1739 {
1740         u16 j;
1741         int size;
1742         struct segments *segments;
1743
1744         segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1745         if (!segments)
1746                 return;
1747
1748         if (mrioc->enable_segqueue) {
1749                 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1750                 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1751                         dma_free_coherent(&mrioc->pdev->dev,
1752                             MPI3MR_MAX_SEG_LIST_SIZE,
1753                             mrioc->op_reply_qinfo[q_idx].q_segment_list,
1754                             mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1755                         mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1756                 }
1757         } else
1758                 size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1759                     mrioc->op_reply_desc_sz;
1760
1761         for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1762                 if (!segments[j].segment)
1763                         continue;
1764                 dma_free_coherent(&mrioc->pdev->dev,
1765                     size, segments[j].segment, segments[j].segment_dma);
1766                 segments[j].segment = NULL;
1767         }
1768
1769         kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1770         mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1771         mrioc->op_reply_qinfo[q_idx].qid = 0;
1772 }
1773
1774 /**
1775  * mpi3mr_delete_op_reply_q - delete operational reply queue
1776  * @mrioc: Adapter instance reference
1777  * @qidx: operational reply queue index
1778  *
1779  * Delete the operational reply queue by issuing an MPI request
1780  * through the admin queue.
1781  *
1782  * Return:  0 on success, non-zero on failure.
1783  */
1784 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1785 {
1786         struct mpi3_delete_reply_queue_request delq_req;
1787         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1788         int retval = 0;
1789         u16 reply_qid = 0, midx;
1790
1791         reply_qid = op_reply_q->qid;
1792
1793         midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1794
1795         if (!reply_qid) {
1796                 retval = -1;
1797                 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReplyQID\n");
1798                 goto out;
1799         }
1800
1801         (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
1802             mrioc->active_poll_qcount--;
1803
1804         memset(&delq_req, 0, sizeof(delq_req));
1805         mutex_lock(&mrioc->init_cmds.mutex);
1806         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1807                 retval = -1;
1808                 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
1809                 mutex_unlock(&mrioc->init_cmds.mutex);
1810                 goto out;
1811         }
1812         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1813         mrioc->init_cmds.is_waiting = 1;
1814         mrioc->init_cmds.callback = NULL;
1815         delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1816         delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
1817         delq_req.queue_id = cpu_to_le16(reply_qid);
1818
1819         init_completion(&mrioc->init_cmds.done);
1820         retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
1821             1);
1822         if (retval) {
1823                 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
1824                 goto out_unlock;
1825         }
1826         wait_for_completion_timeout(&mrioc->init_cmds.done,
1827             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1828         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1829                 ioc_err(mrioc, "delete reply queue timed out\n");
1830                 mpi3mr_check_rh_fault_ioc(mrioc,
1831                     MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
1832                 retval = -1;
1833                 goto out_unlock;
1834         }
1835         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1836             != MPI3_IOCSTATUS_SUCCESS) {
1837                 ioc_err(mrioc,
1838                     "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1839                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1840                     mrioc->init_cmds.ioc_loginfo);
1841                 retval = -1;
1842                 goto out_unlock;
1843         }
1844         mrioc->intr_info[midx].op_reply_q = NULL;
1845
1846         mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1847 out_unlock:
1848         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1849         mutex_unlock(&mrioc->init_cmds.mutex);
1850 out:
1851
1852         return retval;
1853 }
1854
1855 /**
1856  * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool
1857  * @mrioc: Adapter instance reference
1858  * @qidx: operational reply queue index
1859  *
1860  * Allocate segmented memory pools for operational reply
1861  * queue.
1862  *
1863  * Return: 0 on success, non-zero on failure.
1864  */
1865 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1866 {
1867         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1868         int i, size;
1869         u64 *q_segment_list_entry = NULL;
1870         struct segments *segments;
1871
1872         if (mrioc->enable_segqueue) {
1873                 op_reply_q->segment_qd =
1874                     MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
1875
1876                 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1877
1878                 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1879                     MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
1880                     GFP_KERNEL);
1881                 if (!op_reply_q->q_segment_list)
1882                         return -ENOMEM;
1883                 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
1884         } else {
1885                 op_reply_q->segment_qd = op_reply_q->num_replies;
1886                 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
1887         }
1888
1889         op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
1890             op_reply_q->segment_qd);
1891
1892         op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
1893             sizeof(struct segments), GFP_KERNEL);
1894         if (!op_reply_q->q_segments)
1895                 return -ENOMEM;
1896
1897         segments = op_reply_q->q_segments;
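             /*
              * Allocate each reply queue segment; in segmented mode the DMA
              * address of every segment is also recorded in the segment list
              * consumed by the controller.
              */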
1898         for (i = 0; i < op_reply_q->num_segments; i++) {
1899                 segments[i].segment =
1900                     dma_alloc_coherent(&mrioc->pdev->dev,
1901                     size, &segments[i].segment_dma, GFP_KERNEL);
1902                 if (!segments[i].segment)
1903                         return -ENOMEM;
1904                 if (mrioc->enable_segqueue)
1905                         q_segment_list_entry[i] =
1906                             (unsigned long)segments[i].segment_dma;
1907         }
1908
1909         return 0;
1910 }
1911
1912 /**
1913  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
1914  * @mrioc: Adapter instance reference
1915  * @qidx: request queue index
1916  *
1917  * Allocate segmented memory pools for operational request
1918  * queue.
1919  *
1920  * Return: 0 on success, non-zero on failure.
1921  */
1922 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1923 {
1924         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
1925         int i, size;
1926         u64 *q_segment_list_entry = NULL;
1927         struct segments *segments;
1928
1929         if (mrioc->enable_segqueue) {
1930                 op_req_q->segment_qd =
1931                     MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
1932
1933                 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1934
1935                 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1936                     MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
1937                     GFP_KERNEL);
1938                 if (!op_req_q->q_segment_list)
1939                         return -ENOMEM;
1940                 q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
1941
1942         } else {
1943                 op_req_q->segment_qd = op_req_q->num_requests;
1944                 size = op_req_q->num_requests * mrioc->facts.op_req_sz;
1945         }
1946
1947         op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
1948             op_req_q->segment_qd);
1949
1950         op_req_q->q_segments = kcalloc(op_req_q->num_segments,
1951             sizeof(struct segments), GFP_KERNEL);
1952         if (!op_req_q->q_segments)
1953                 return -ENOMEM;
1954
1955         segments = op_req_q->q_segments;
1956         for (i = 0; i < op_req_q->num_segments; i++) {
1957                 segments[i].segment =
1958                     dma_alloc_coherent(&mrioc->pdev->dev,
1959                     size, &segments[i].segment_dma, GFP_KERNEL);
1960                 if (!segments[i].segment)
1961                         return -ENOMEM;
1962                 if (mrioc->enable_segqueue)
1963                         q_segment_list_entry[i] =
1964                             (unsigned long)segments[i].segment_dma;
1965         }
1966
1967         return 0;
1968 }
1969
1970 /**
1971  * mpi3mr_create_op_reply_q - create operational reply queue
1972  * @mrioc: Adapter instance reference
1973  * @qidx: operational reply queue index
1974  *
1975  * Create the operational reply queue by issuing an MPI request
1976  * through the admin queue.
1977  *
1978  * Return:  0 on success, non-zero on failure.
1979  */
1980 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1981 {
1982         struct mpi3_create_reply_queue_request create_req;
1983         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1984         int retval = 0;
1985         u16 reply_qid = 0, midx;
1986
1987         reply_qid = op_reply_q->qid;
1988
1989         midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1990
1991         if (reply_qid) {
1992                 retval = -1;
1993                 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
1994                     reply_qid);
1995
1996                 return retval;
1997         }
1998
1999         reply_qid = qidx + 1;
2000         op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
2001         if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
2002                 !mrioc->pdev->revision)
2003                 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
2004         op_reply_q->ci = 0;
2005         op_reply_q->ephase = 1;
2006         atomic_set(&op_reply_q->pend_ios, 0);
2007         atomic_set(&op_reply_q->in_use, 0);
2008         op_reply_q->enable_irq_poll = false;
2009
2010         if (!op_reply_q->q_segments) {
2011                 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
2012                 if (retval) {
2013                         mpi3mr_free_op_reply_q_segments(mrioc, qidx);
2014                         goto out;
2015                 }
2016         }
2017
2018         memset(&create_req, 0, sizeof(create_req));
2019         mutex_lock(&mrioc->init_cmds.mutex);
2020         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2021                 retval = -1;
2022                 ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
2023                 goto out_unlock;
2024         }
2025         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2026         mrioc->init_cmds.is_waiting = 1;
2027         mrioc->init_cmds.callback = NULL;
2028         create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2029         create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
2030         create_req.queue_id = cpu_to_le16(reply_qid);
2031
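             /*
              * MSI-X indexes below (intr_info_count - requested_poll_qcount)
              * back interrupt-driven default queues; the remaining indexes
              * are configured as poll queues with no interrupt enabled.
              */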
2032         if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
2033                 op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
2034         else
2035                 op_reply_q->qtype = MPI3MR_POLL_QUEUE;
2036
2037         if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
2038                 create_req.flags =
2039                         MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
2040                 create_req.msix_index =
2041                         cpu_to_le16(mrioc->intr_info[midx].msix_index);
2042         } else {
2043                 create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
2044                 ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
2045                         reply_qid, midx);
2046                 if (!mrioc->active_poll_qcount)
2047                         disable_irq_nosync(pci_irq_vector(mrioc->pdev,
2048                             mrioc->intr_info_count - 1));
2049         }
2050
2051         if (mrioc->enable_segqueue) {
2052                 create_req.flags |=
2053                     MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2054                 create_req.base_address = cpu_to_le64(
2055                     op_reply_q->q_segment_list_dma);
2056         } else
2057                 create_req.base_address = cpu_to_le64(
2058                     op_reply_q->q_segments[0].segment_dma);
2059
2060         create_req.size = cpu_to_le16(op_reply_q->num_replies);
2061
2062         init_completion(&mrioc->init_cmds.done);
2063         retval = mpi3mr_admin_request_post(mrioc, &create_req,
2064             sizeof(create_req), 1);
2065         if (retval) {
2066                 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
2067                 goto out_unlock;
2068         }
2069         wait_for_completion_timeout(&mrioc->init_cmds.done,
2070             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2071         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2072                 ioc_err(mrioc, "create reply queue timed out\n");
2073                 mpi3mr_check_rh_fault_ioc(mrioc,
2074                     MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
2075                 retval = -1;
2076                 goto out_unlock;
2077         }
2078         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2079             != MPI3_IOCSTATUS_SUCCESS) {
2080                 ioc_err(mrioc,
2081                     "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2082                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2083                     mrioc->init_cmds.ioc_loginfo);
2084                 retval = -1;
2085                 goto out_unlock;
2086         }
2087         op_reply_q->qid = reply_qid;
2088         if (midx < mrioc->intr_info_count)
2089                 mrioc->intr_info[midx].op_reply_q = op_reply_q;
2090
2091         (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
2092             mrioc->active_poll_qcount++;
2093
2094 out_unlock:
2095         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2096         mutex_unlock(&mrioc->init_cmds.mutex);
2097 out:
2098
2099         return retval;
2100 }
2101
2102 /**
2103  * mpi3mr_create_op_req_q - create operational request queue
2104  * @mrioc: Adapter instance reference
2105  * @idx: operational request queue index
2106  * @reply_qid: Reply queue ID
2107  *
2108  * Create the operational request queue by issuing an MPI request
2109  * through the admin queue.
2110  *
2111  * Return:  0 on success, non-zero on failure.
2112  */
2113 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
2114         u16 reply_qid)
2115 {
2116         struct mpi3_create_request_queue_request create_req;
2117         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
2118         int retval = 0;
2119         u16 req_qid = 0;
2120
2121         req_qid = op_req_q->qid;
2122
2123         if (req_qid) {
2124                 retval = -1;
2125                 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
2126                     req_qid);
2127
2128                 return retval;
2129         }
2130         req_qid = idx + 1;
2131
2132         op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
2133         op_req_q->ci = 0;
2134         op_req_q->pi = 0;
2135         op_req_q->reply_qid = reply_qid;
2136         spin_lock_init(&op_req_q->q_lock);
2137
2138         if (!op_req_q->q_segments) {
2139                 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
2140                 if (retval) {
2141                         mpi3mr_free_op_req_q_segments(mrioc, idx);
2142                         goto out;
2143                 }
2144         }
2145
2146         memset(&create_req, 0, sizeof(create_req));
2147         mutex_lock(&mrioc->init_cmds.mutex);
2148         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2149                 retval = -1;
2150                 ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
2151                 goto out_unlock;
2152         }
2153         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2154         mrioc->init_cmds.is_waiting = 1;
2155         mrioc->init_cmds.callback = NULL;
2156         create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2157         create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
2158         create_req.queue_id = cpu_to_le16(req_qid);
2159         if (mrioc->enable_segqueue) {
2160                 create_req.flags =
2161                     MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2162                 create_req.base_address = cpu_to_le64(
2163                     op_req_q->q_segment_list_dma);
2164         } else
2165                 create_req.base_address = cpu_to_le64(
2166                     op_req_q->q_segments[0].segment_dma);
2167         create_req.reply_queue_id = cpu_to_le16(reply_qid);
2168         create_req.size = cpu_to_le16(op_req_q->num_requests);
2169
2170         init_completion(&mrioc->init_cmds.done);
2171         retval = mpi3mr_admin_request_post(mrioc, &create_req,
2172             sizeof(create_req), 1);
2173         if (retval) {
2174                 ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
2175                 goto out_unlock;
2176         }
2177         wait_for_completion_timeout(&mrioc->init_cmds.done,
2178             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2179         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2180                 ioc_err(mrioc, "create request queue timed out\n");
2181                 mpi3mr_check_rh_fault_ioc(mrioc,
2182                     MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
2183                 retval = -1;
2184                 goto out_unlock;
2185         }
2186         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2187             != MPI3_IOCSTATUS_SUCCESS) {
2188                 ioc_err(mrioc,
2189                     "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2190                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2191                     mrioc->init_cmds.ioc_loginfo);
2192                 retval = -1;
2193                 goto out_unlock;
2194         }
2195         op_req_q->qid = req_qid;
2196
2197 out_unlock:
2198         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2199         mutex_unlock(&mrioc->init_cmds.mutex);
2200 out:
2201
2202         return retval;
2203 }
2204
2205 /**
2206  * mpi3mr_create_op_queues - create operational queue pairs
2207  * @mrioc: Adapter instance reference
2208  *
2209  * Allocate memory for operational queue metadata and call the
2210  * create request and reply queue functions.
2211  *
2212  * Return: 0 on success, non-zero on failures.
2213  */
2214 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2215 {
2216         int retval = 0;
2217         u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2218
2219         num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2220             mrioc->facts.max_op_req_q);
2221
2222         msix_count_op_q =
2223             mrioc->intr_info_count - mrioc->op_reply_q_offset;
2224         if (!mrioc->num_queues)
2225                 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2226         /*
2227          * During a reset, reuse the number of queues that were
2228          * created before the reset.
2229          */
2230         num_queues = mrioc->num_op_reply_q ?
2231             mrioc->num_op_reply_q : mrioc->num_queues;
2232         ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2233             num_queues);
2234
2235         if (!mrioc->req_qinfo) {
2236                 mrioc->req_qinfo = kcalloc(num_queues,
2237                     sizeof(struct op_req_qinfo), GFP_KERNEL);
2238                 if (!mrioc->req_qinfo) {
2239                         retval = -1;
2240                         goto out_failed;
2241                 }
2242
2243                 mrioc->op_reply_qinfo = kcalloc(num_queues,
2244                     sizeof(struct op_reply_qinfo), GFP_KERNEL);
2245                 if (!mrioc->op_reply_qinfo) {
2246                         retval = -1;
2247                         goto out_failed;
2248                 }
2249         }
2250
2251         if (mrioc->enable_segqueue)
2252                 ioc_info(mrioc,
2253                     "allocating operational queues through segmented queues\n");
2254
2255         for (i = 0; i < num_queues; i++) {
2256                 if (mpi3mr_create_op_reply_q(mrioc, i)) {
2257                         ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2258                         break;
2259                 }
2260                 if (mpi3mr_create_op_req_q(mrioc, i,
2261                     mrioc->op_reply_qinfo[i].qid)) {
2262                         ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2263                         mpi3mr_delete_op_reply_q(mrioc, i);
2264                         break;
2265                 }
2266         }
2267
2268         if (i == 0) {
2269                 /* Not even one queue was created successfully */
2270                 retval = -1;
2271                 goto out_failed;
2272         }
2273         mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2274         ioc_info(mrioc,
2275             "successfully created %d operational queue pairs (default/polled) = (%d/%d)\n",
2276             mrioc->num_op_reply_q, mrioc->default_qcount,
2277             mrioc->active_poll_qcount);
2278
2279         return retval;
2280 out_failed:
2281         kfree(mrioc->req_qinfo);
2282         mrioc->req_qinfo = NULL;
2283
2284         kfree(mrioc->op_reply_qinfo);
2285         mrioc->op_reply_qinfo = NULL;
2286
2287         return retval;
2288 }
2289
2290 /**
2291  * mpi3mr_op_request_post - Post request to operational queue
2292  * @mrioc: Adapter reference
2293  * @op_req_q: Operational request queue info
2294  * @req: MPI3 request
2295  *
2296  * Post the MPI3 request to the operational request queue and
2297  * inform the controller; if the queue is full, return an
2298  * appropriate error.
2299  *
2300  * Return: 0 on success, non-zero on failure.
2301  */
2302 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
2303         struct op_req_qinfo *op_req_q, u8 *req)
2304 {
2305         u16 pi = 0, max_entries, reply_qidx = 0, midx;
2306         int retval = 0;
2307         unsigned long flags;
2308         u8 *req_entry;
2309         void *segment_base_addr;
2310         u16 req_sz = mrioc->facts.op_req_sz;
2311         struct segments *segments = op_req_q->q_segments;
2312
2313         reply_qidx = op_req_q->reply_qid - 1;
2314
2315         if (mrioc->unrecoverable)
2316                 return -EFAULT;
2317
2318         spin_lock_irqsave(&op_req_q->q_lock, flags);
2319         pi = op_req_q->pi;
2320         max_entries = op_req_q->num_requests;
2321
2322         if (mpi3mr_check_req_qfull(op_req_q)) {
2323                 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
2324                     reply_qidx, mrioc->op_reply_q_offset);
2325                 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
2326
2327                 if (mpi3mr_check_req_qfull(op_req_q)) {
2328                         retval = -EAGAIN;
2329                         goto out;
2330                 }
2331         }
2332
2333         if (mrioc->reset_in_progress) {
2334                 ioc_err(mrioc, "OpReqQ submit reset in progress\n");
2335                 retval = -EAGAIN;
2336                 goto out;
2337         }
2338
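             /*
              * Locate the slot for this request: pick the segment holding the
              * current producer index and compute the request frame offset
              * within that segment.
              */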
2339         segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
2340         req_entry = (u8 *)segment_base_addr +
2341             ((pi % op_req_q->segment_qd) * req_sz);
2342
2343         memset(req_entry, 0, req_sz);
2344         memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
2345
2346         if (++pi == max_entries)
2347                 pi = 0;
2348         op_req_q->pi = pi;
2349
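             /*
              * Track pending I/Os on the paired reply queue; outside of
              * PREEMPT_RT, crossing MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT switches
              * that reply queue to IRQ-poll mode.
              */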
2350 #ifndef CONFIG_PREEMPT_RT
2351         if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
2352             > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
2353                 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
2354 #else
2355         atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
2356 #endif
2357
2358         writel(op_req_q->pi,
2359             &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
2360
2361 out:
2362         spin_unlock_irqrestore(&op_req_q->q_lock, flags);
2363         return retval;
2364 }
2365
2366 /**
2367  * mpi3mr_check_rh_fault_ioc - check reset history and fault
2368  * controller
2369  * @mrioc: Adapter instance reference
2370  * @reason_code: reason code for the fault.
2371  *
2372  * This routine will save a snapdump and fault the controller with
2373  * the given reason code if it is not already faulted or
2374  * asynchronously reset. It is used to handle initialization-time
2375  * faults/resets/timeouts, as in those cases an immediate soft
2376  * reset invocation is not required.
2377  *
2378  * Return:  None.
2379  */
2380 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
2381 {
2382         u32 ioc_status, host_diagnostic, timeout;
2383
2384         if (mrioc->unrecoverable) {
2385                 ioc_err(mrioc, "controller is unrecoverable\n");
2386                 return;
2387         }
2388
2389         if (!pci_device_is_present(mrioc->pdev)) {
2390                 mrioc->unrecoverable = 1;
2391                 ioc_err(mrioc, "controller is not present\n");
2392                 return;
2393         }
2394
2395         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2396         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2397             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
2398                 mpi3mr_print_fault_info(mrioc);
2399                 return;
2400         }
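             /*
              * Set the diag save bit and issue a diag fault reset so the
              * controller captures a snapdump, then wait for the save to
              * complete or the save timeout to expire.
              */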
2401         mpi3mr_set_diagsave(mrioc);
2402         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2403             reason_code);
2404         timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
2405         do {
2406                 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2407                 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2408                         break;
2409                 msleep(100);
2410         } while (--timeout);
2411 }
2412
2413 /**
2414  * mpi3mr_sync_timestamp - Issue time stamp sync request
2415  * @mrioc: Adapter reference
2416  *
2417  * Issue an IO unit control MPI request to synchronize the firmware
2418  * timestamp with the host time.
2419  *
2420  * Return: 0 on success, non-zero on failure.
2421  */
2422 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2423 {
2424         ktime_t current_time;
2425         struct mpi3_iounit_control_request iou_ctrl;
2426         int retval = 0;
2427
2428         memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2429         mutex_lock(&mrioc->init_cmds.mutex);
2430         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2431                 retval = -1;
2432                 ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2433                 mutex_unlock(&mrioc->init_cmds.mutex);
2434                 goto out;
2435         }
2436         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2437         mrioc->init_cmds.is_waiting = 1;
2438         mrioc->init_cmds.callback = NULL;
2439         iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2440         iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2441         iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
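             /* The timestamp is sent as milliseconds since the Unix epoch. */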
2442         current_time = ktime_get_real();
2443         iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2444
2445         init_completion(&mrioc->init_cmds.done);
2446         retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2447             sizeof(iou_ctrl), 0);
2448         if (retval) {
2449                 ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2450                 goto out_unlock;
2451         }
2452
2453         wait_for_completion_timeout(&mrioc->init_cmds.done,
2454             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2455         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2456                 ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2457                 mrioc->init_cmds.is_waiting = 0;
2458                 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2459                         mpi3mr_check_rh_fault_ioc(mrioc,
2460                             MPI3MR_RESET_FROM_TSU_TIMEOUT);
2461                 retval = -1;
2462                 goto out_unlock;
2463         }
2464         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2465             != MPI3_IOCSTATUS_SUCCESS) {
2466                 ioc_err(mrioc,
2467                     "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2468                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2469                     mrioc->init_cmds.ioc_loginfo);
2470                 retval = -1;
2471                 goto out_unlock;
2472         }
2473
2474 out_unlock:
2475         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2476         mutex_unlock(&mrioc->init_cmds.mutex);
2477
2478 out:
2479         return retval;
2480 }
2481
2482 /**
2483  * mpi3mr_print_pkg_ver - display controller fw package version
2484  * @mrioc: Adapter reference
2485  *
2486  * Retrieve firmware package version from the component image
2487  * header of the controller flash and display it.
2488  *
2489  * Return: 0 on success and non-zero on failure.
2490  */
2491 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2492 {
2493         struct mpi3_ci_upload_request ci_upload;
2494         int retval = -1;
2495         void *data = NULL;
2496         dma_addr_t data_dma;
2497         struct mpi3_ci_manifest_mpi *manifest;
2498         u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2499         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2500
2501         data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2502             GFP_KERNEL);
2503         if (!data)
2504                 return -ENOMEM;
2505
2506         memset(&ci_upload, 0, sizeof(ci_upload));
2507         mutex_lock(&mrioc->init_cmds.mutex);
2508         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2509                 ioc_err(mrioc, "sending get package version failed due to command in use\n");
2510                 mutex_unlock(&mrioc->init_cmds.mutex);
2511                 goto out;
2512         }
2513         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2514         mrioc->init_cmds.is_waiting = 1;
2515         mrioc->init_cmds.callback = NULL;
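             /*
              * Upload the component image manifest from the primary flash
              * location, starting just past the image header, so the package
              * version fields can be read out of it.
              */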
2516         ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2517         ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2518         ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2519         ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2520         ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2521         ci_upload.segment_size = cpu_to_le32(data_len);
2522
2523         mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2524             data_dma);
2525         init_completion(&mrioc->init_cmds.done);
2526         retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2527             sizeof(ci_upload), 1);
2528         if (retval) {
2529                 ioc_err(mrioc, "posting get package version failed\n");
2530                 goto out_unlock;
2531         }
2532         wait_for_completion_timeout(&mrioc->init_cmds.done,
2533             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2534         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2535                 ioc_err(mrioc, "get package version timed out\n");
2536                 mpi3mr_check_rh_fault_ioc(mrioc,
2537                     MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2538                 retval = -1;
2539                 goto out_unlock;
2540         }
2541         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2542             == MPI3_IOCSTATUS_SUCCESS) {
2543                 manifest = (struct mpi3_ci_manifest_mpi *) data;
2544                 if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2545                         ioc_info(mrioc,
2546                             "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2547                             manifest->package_version.gen_major,
2548                             manifest->package_version.gen_minor,
2549                             manifest->package_version.phase_major,
2550                             manifest->package_version.phase_minor,
2551                             manifest->package_version.customer_id,
2552                             manifest->package_version.build_num);
2553                 }
2554         }
2555         retval = 0;
2556 out_unlock:
2557         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2558         mutex_unlock(&mrioc->init_cmds.mutex);
2559
2560 out:
2561         if (data)
2562                 dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2563                     data_dma);
2564         return retval;
2565 }
2566
2567 /**
2568  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2569  * @work: work struct
2570  *
2571  * Watchdog work executed periodically (1 second interval) to
2572  * monitor for firmware faults and to issue a periodic timestamp
2573  * sync to the firmware.
2574  *
2575  * Return: Nothing.
2576  */
2577 static void mpi3mr_watchdog_work(struct work_struct *work)
2578 {
2579         struct mpi3mr_ioc *mrioc =
2580             container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2581         unsigned long flags;
2582         enum mpi3mr_iocstate ioc_state;
2583         u32 fault, host_diagnostic, ioc_status;
2584         u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
2585
2586         if (mrioc->reset_in_progress)
2587                 return;
2588
2589         if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
2590                 ioc_err(mrioc, "watchdog could not detect the controller\n");
2591                 mrioc->unrecoverable = 1;
2592         }
2593
2594         if (mrioc->unrecoverable) {
2595                 ioc_err(mrioc,
2596                     "flush pending commands for unrecoverable controller\n");
2597                 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
2598                 return;
2599         }
2600
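             /*
              * Resync the firmware timestamp once every
              * MPI3MR_TSUPDATE_INTERVAL watchdog iterations.
              */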
2601         if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
2602                 mrioc->ts_update_counter = 0;
2603                 mpi3mr_sync_timestamp(mrioc);
2604         }
2605
2606         if ((mrioc->prepare_for_reset) &&
2607             ((mrioc->prepare_for_reset_timeout_counter++) >=
2608              MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
2609                 mpi3mr_soft_reset_handler(mrioc,
2610                     MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
2611                 return;
2612         }
2613
2614         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2615         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2616                 mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
2617                 return;
2618         }
2619
2620         /* Check for fault state every second and issue a soft reset if needed */
2621         ioc_state = mpi3mr_get_iocstate(mrioc);
2622         if (ioc_state != MRIOC_STATE_FAULT)
2623                 goto schedule_work;
2624
2625         fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
2626         host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2627         if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2628                 if (!mrioc->diagsave_timeout) {
2629                         mpi3mr_print_fault_info(mrioc);
2630                         ioc_warn(mrioc, "diag save in progress\n");
2631                 }
2632                 if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2633                         goto schedule_work;
2634         }
2635
2636         mpi3mr_print_fault_info(mrioc);
2637         mrioc->diagsave_timeout = 0;
2638
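             /*
              * Map the fault code to a recovery action: faults requiring a
              * complete reset or power cycle mark the controller
              * unrecoverable, an in-progress soft reset is left to finish,
              * and a CI activation fault only changes the reset reason for
              * the soft reset issued below.
              */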
2639         switch (fault) {
2640         case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
2641         case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
2642                 ioc_warn(mrioc,
2643                     "controller requires system power cycle, marking controller as unrecoverable\n");
2644                 mrioc->unrecoverable = 1;
2645                 goto schedule_work;
2646         case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
2647                 goto schedule_work;
2648         case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
2649                 reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
2650                 break;
2651         default:
2652                 break;
2653         }
2654         mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
2655         return;
2656
2657 schedule_work:
2658         spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2659         if (mrioc->watchdog_work_q)
2660                 queue_delayed_work(mrioc->watchdog_work_q,
2661                     &mrioc->watchdog_work,
2662                     msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2663         spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2664         return;
2665 }
2666
2667 /**
2668  * mpi3mr_start_watchdog - Start watchdog
2669  * @mrioc: Adapter instance reference
2670  *
2671  * Create and start the watchdog thread to monitor controller
2672  * faults.
2673  *
2674  * Return: Nothing.
2675  */
2676 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2677 {
2678         if (mrioc->watchdog_work_q)
2679                 return;
2680
2681         INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2682         snprintf(mrioc->watchdog_work_q_name,
2683             sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2684             mrioc->id);
2685         mrioc->watchdog_work_q =
2686             create_singlethread_workqueue(mrioc->watchdog_work_q_name);
2687         if (!mrioc->watchdog_work_q) {
2688                 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2689                 return;
2690         }
2691
2692         if (mrioc->watchdog_work_q)
2693                 queue_delayed_work(mrioc->watchdog_work_q,
2694                     &mrioc->watchdog_work,
2695                     msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2696 }
2697
2698 /**
2699  * mpi3mr_stop_watchdog - Stop watchdog
2700  * @mrioc: Adapter instance reference
2701  *
2702  * Stop the watchdog thread created to monitor controller
2703  * faults.
2704  *
2705  * Return: Nothing.
2706  */
2707 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2708 {
2709         unsigned long flags;
2710         struct workqueue_struct *wq;
2711
2712         spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2713         wq = mrioc->watchdog_work_q;
2714         mrioc->watchdog_work_q = NULL;
2715         spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2716         if (wq) {
2717                 if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2718                         flush_workqueue(wq);
2719                 destroy_workqueue(wq);
2720         }
2721 }
2722
2723 /**
2724  * mpi3mr_setup_admin_qpair - Setup admin queue pair
2725  * @mrioc: Adapter instance reference
2726  *
2727  * Allocate memory for admin queue pair if required and register
2728  * the admin queue with the controller.
2729  *
2730  * Return: 0 on success, non-zero on failures.
2731  */
2732 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
2733 {
2734         int retval = 0;
2735         u32 num_admin_entries = 0;
2736
2737         mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
2738         mrioc->num_admin_req = mrioc->admin_req_q_sz /
2739             MPI3MR_ADMIN_REQ_FRAME_SZ;
2740         mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
2741
2742         mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
2743         mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
2744             MPI3MR_ADMIN_REPLY_FRAME_SZ;
2745         mrioc->admin_reply_ci = 0;
2746         mrioc->admin_reply_ephase = 1;
2747         atomic_set(&mrioc->admin_reply_q_in_use, 0);
2748
2749         if (!mrioc->admin_req_base) {
2750                 mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
2751                     mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
2752
2753                 if (!mrioc->admin_req_base) {
2754                         retval = -1;
2755                         goto out_failed;
2756                 }
2757
2758                 mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
2759                     mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
2760                     GFP_KERNEL);
2761
2762                 if (!mrioc->admin_reply_base) {
2763                         retval = -1;
2764                         goto out_failed;
2765                 }
2766         }
2767
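             /*
              * Program the admin queue sizes and base addresses into the
              * system interface registers; the reply entry count occupies the
              * upper 16 bits of admin_queue_num_entries and the request entry
              * count the lower 16 bits.
              */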
2768         num_admin_entries = (mrioc->num_admin_replies << 16) |
2769             (mrioc->num_admin_req);
2770         writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
2771         mpi3mr_writeq(mrioc->admin_req_dma,
2772             &mrioc->sysif_regs->admin_request_queue_address);
2773         mpi3mr_writeq(mrioc->admin_reply_dma,
2774             &mrioc->sysif_regs->admin_reply_queue_address);
2775         writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
2776         writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
2777         return retval;
2778
2779 out_failed:
2780
2781         if (mrioc->admin_reply_base) {
2782                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2783                     mrioc->admin_reply_base, mrioc->admin_reply_dma);
2784                 mrioc->admin_reply_base = NULL;
2785         }
2786         if (mrioc->admin_req_base) {
2787                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2788                     mrioc->admin_req_base, mrioc->admin_req_dma);
2789                 mrioc->admin_req_base = NULL;
2790         }
2791         return retval;
2792 }
2793
2794 /**
2795  * mpi3mr_issue_iocfacts - Send IOC Facts
2796  * @mrioc: Adapter instance reference
2797  * @facts_data: Cached IOC facts data
2798  *
2799  * Issue IOC Facts MPI request through admin queue and wait for
2800  * the completion of it or time out.
2801  *
2802  * Return: 0 on success, non-zero on failures.
2803  */
2804 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
2805         struct mpi3_ioc_facts_data *facts_data)
2806 {
2807         struct mpi3_ioc_facts_request iocfacts_req;
2808         void *data = NULL;
2809         dma_addr_t data_dma;
2810         u32 data_len = sizeof(*facts_data);
2811         int retval = 0;
2812         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2813
2814         data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2815             GFP_KERNEL);
2816
2817         if (!data) {
2818                 retval = -1;
2819                 goto out;
2820         }
2821
2822         memset(&iocfacts_req, 0, sizeof(iocfacts_req));
2823         mutex_lock(&mrioc->init_cmds.mutex);
2824         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2825                 retval = -1;
2826                 ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
2827                 mutex_unlock(&mrioc->init_cmds.mutex);
2828                 goto out;
2829         }
2830         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2831         mrioc->init_cmds.is_waiting = 1;
2832         mrioc->init_cmds.callback = NULL;
2833         iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2834         iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
2835
2836         mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
2837             data_dma);
2838
2839         init_completion(&mrioc->init_cmds.done);
2840         retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
2841             sizeof(iocfacts_req), 1);
2842         if (retval) {
2843                 ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
2844                 goto out_unlock;
2845         }
2846         wait_for_completion_timeout(&mrioc->init_cmds.done,
2847             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2848         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2849                 ioc_err(mrioc, "ioc_facts timed out\n");
2850                 mpi3mr_check_rh_fault_ioc(mrioc,
2851                     MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
2852                 retval = -1;
2853                 goto out_unlock;
2854         }
2855         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2856             != MPI3_IOCSTATUS_SUCCESS) {
2857                 ioc_err(mrioc,
2858                     "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2859                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2860                     mrioc->init_cmds.ioc_loginfo);
2861                 retval = -1;
2862                 goto out_unlock;
2863         }
2864         memcpy(facts_data, (u8 *)data, data_len);
2865         mpi3mr_process_factsdata(mrioc, facts_data);
2866 out_unlock:
2867         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2868         mutex_unlock(&mrioc->init_cmds.mutex);
2869
2870 out:
2871         if (data)
2872                 dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
2873
2874         return retval;
2875 }
2876
2877 /**
2878  * mpi3mr_check_reset_dma_mask - Check and set DMA mask
2879  * @mrioc: Adapter instance reference
2880  *
2881  * Check whether the new DMA mask requested by the firmware
2882  * through IOCFacts needs to be set and, if so, set it.
2883  *
2884  * Return: 0 on success, non-zero on failure.
2885  */
2886 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
2887 {
2888         struct pci_dev *pdev = mrioc->pdev;
2889         int r;
2890         u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
2891
2892         if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
2893                 return 0;
2894
2895         ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
2896             mrioc->dma_mask, facts_dma_mask);
2897
2898         r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
2899         if (r) {
2900                 ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
2901                     facts_dma_mask, r);
2902                 return r;
2903         }
2904         mrioc->dma_mask = facts_dma_mask;
2905         return r;
2906 }
2907
2908 /**
2909  * mpi3mr_process_factsdata - Process IOC facts data
2910  * @mrioc: Adapter instance reference
2911  * @facts_data: Cached IOC facts data
2912  *
2913  * Convert IOC facts data into CPU endianness and cache it in
2914  * the driver.
2915  *
2916  * Return: Nothing.
2917  */
2918 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
2919         struct mpi3_ioc_facts_data *facts_data)
2920 {
2921         u32 ioc_config, req_sz, facts_flags;
2922
2923         if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
2924             (sizeof(*facts_data) / 4)) {
2925                 ioc_warn(mrioc,
2926                     "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
2927                     sizeof(*facts_data),
2928                     le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
2929         }
2930
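             /*
              * The operational request entry size is advertised by the
              * controller as a power-of-two exponent in the IOC configuration
              * register; decode it here so it can be cross-checked against the
              * request frame size reported in IOC Facts below.
              */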
2931         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
2932         req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
2933             MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
2934         if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
2935                 ioc_err(mrioc,
2936                     "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
2937                     req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
2938         }
2939
2940         memset(&mrioc->facts, 0, sizeof(mrioc->facts));
2941
2942         facts_flags = le32_to_cpu(facts_data->flags);
2943         mrioc->facts.op_req_sz = req_sz;
2944         mrioc->op_reply_desc_sz = 1 << ((ioc_config &
2945             MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
2946             MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
2947
2948         mrioc->facts.ioc_num = facts_data->ioc_number;
2949         mrioc->facts.who_init = facts_data->who_init;
2950         mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
2951         mrioc->facts.personality = (facts_flags &
2952             MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
2953         mrioc->facts.dma_mask = (facts_flags &
2954             MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
2955             MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
2956         mrioc->facts.protocol_flags = facts_data->protocol_flags;
2957         mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
2958         mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
2959         mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
2960         mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
2961         mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
2962         mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
2963         mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
2964         mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
2965         mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
2966         mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
2967         mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
2968         mrioc->facts.max_pcie_switches =
2969             le16_to_cpu(facts_data->max_pcie_switches);
2970         mrioc->facts.max_sasexpanders =
2971             le16_to_cpu(facts_data->max_sas_expanders);
2972         mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
2973         mrioc->facts.max_sasinitiators =
2974             le16_to_cpu(facts_data->max_sas_initiators);
2975         mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
2976         mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
2977         mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
2978         mrioc->facts.max_op_req_q =
2979             le16_to_cpu(facts_data->max_operational_request_queues);
2980         mrioc->facts.max_op_reply_q =
2981             le16_to_cpu(facts_data->max_operational_reply_queues);
2982         mrioc->facts.ioc_capabilities =
2983             le32_to_cpu(facts_data->ioc_capabilities);
2984         mrioc->facts.fw_ver.build_num =
2985             le16_to_cpu(facts_data->fw_version.build_num);
2986         mrioc->facts.fw_ver.cust_id =
2987             le16_to_cpu(facts_data->fw_version.customer_id);
2988         mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
2989         mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
2990         mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
2991         mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
2992         mrioc->msix_count = min_t(int, mrioc->msix_count,
2993             mrioc->facts.max_msix_vectors);
2994         mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
2995         mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
2996         mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
2997         mrioc->facts.shutdown_timeout =
2998             le16_to_cpu(facts_data->shutdown_timeout);
2999
3000         mrioc->facts.max_dev_per_tg =
3001             facts_data->max_devices_per_throttle_group;
3002         mrioc->facts.io_throttle_data_length =
3003             le16_to_cpu(facts_data->io_throttle_data_length);
3004         mrioc->facts.max_io_throttle_group =
3005             le16_to_cpu(facts_data->max_io_throttle_group);
3006         mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3007         mrioc->facts.io_throttle_high =
3008             le16_to_cpu(facts_data->io_throttle_high);
3009
3010         if (mrioc->facts.max_data_length ==
3011             MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3012                 mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3013         else
3014                 mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3015         /* firmware reports the value in 4KB units; store it as a 512-byte block count */
3016         if (mrioc->facts.io_throttle_data_length)
3017                 mrioc->io_throttle_data_length =
3018                     (mrioc->facts.io_throttle_data_length * 2 * 4);
3019         else
3020                 /* set the length to max_data_length + 1KB so throttling never triggers */
3021                 mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3022
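             /* io_throttle_high/low are reported in MiB; convert to 512-byte blocks */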
3023         mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3024         mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3025
3026         ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3027             mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3028             mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3029         ioc_info(mrioc,
3030             "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3031             mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3032             mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3033         ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3034             mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3035             mrioc->facts.sge_mod_shift);
3036         ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3037             mrioc->facts.dma_mask, (facts_flags &
3038             MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3039         ioc_info(mrioc,
3040             "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3041             mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3042         ioc_info(mrioc,
3043            "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3044            mrioc->facts.io_throttle_data_length * 4,
3045            mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3046 }
3047
3048 /**
3049  * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
3050  * @mrioc: Adapter instance reference
3051  *
3052  * Allocate and initialize the reply free buffers, sense
3053  * buffers, reply free queue and sense buffer queue.
3054  *
3055  * Return: 0 on success, non-zero on failures.
3056  */
3057 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
3058 {
3059         int retval = 0;
3060         u32 sz, i;
3061
3062         if (mrioc->init_cmds.reply)
3063                 return retval;
3064
3065         mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3066         if (!mrioc->init_cmds.reply)
3067                 goto out_failed;
3068
3069         mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3070         if (!mrioc->bsg_cmds.reply)
3071                 goto out_failed;
3072
3073         mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3074         if (!mrioc->transport_cmds.reply)
3075                 goto out_failed;
3076
3077         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3078                 mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
3079                     GFP_KERNEL);
3080                 if (!mrioc->dev_rmhs_cmds[i].reply)
3081                         goto out_failed;
3082         }
3083
3084         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
3085                 mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
3086                     GFP_KERNEL);
3087                 if (!mrioc->evtack_cmds[i].reply)
3088                         goto out_failed;
3089         }
3090
3091         mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3092         if (!mrioc->host_tm_cmds.reply)
3093                 goto out_failed;
3094
3095         mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3096         if (!mrioc->pel_cmds.reply)
3097                 goto out_failed;
3098
3099         mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3100         if (!mrioc->pel_abort_cmd.reply)
3101                 goto out_failed;
3102
3103         mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
3104         mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
3105                                                  GFP_KERNEL);
3106         if (!mrioc->removepend_bitmap)
3107                 goto out_failed;
3108
3109         mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
3110         if (!mrioc->devrem_bitmap)
3111                 goto out_failed;
3112
3113         mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
3114                                                   GFP_KERNEL);
3115         if (!mrioc->evtack_cmds_bitmap)
3116                 goto out_failed;
3117
3118         mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
3119         mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
3120         mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
3121         mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
3122
3123         /* reply buffer pool, 16 byte align */
3124         sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3125         mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
3126             &mrioc->pdev->dev, sz, 16, 0);
3127         if (!mrioc->reply_buf_pool) {
3128                 ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
3129                 goto out_failed;
3130         }
3131
3132         mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
3133             &mrioc->reply_buf_dma);
3134         if (!mrioc->reply_buf)
3135                 goto out_failed;
3136
3137         mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
3138
3139         /* reply free queue, 8 byte align */
3140         sz = mrioc->reply_free_qsz * 8;
3141         mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
3142             &mrioc->pdev->dev, sz, 8, 0);
3143         if (!mrioc->reply_free_q_pool) {
3144                 ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
3145                 goto out_failed;
3146         }
3147         mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
3148             GFP_KERNEL, &mrioc->reply_free_q_dma);
3149         if (!mrioc->reply_free_q)
3150                 goto out_failed;
3151
3152         /* sense buffer pool,  4 byte align */
3153         sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3154         mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
3155             &mrioc->pdev->dev, sz, 4, 0);
3156         if (!mrioc->sense_buf_pool) {
3157                 ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
3158                 goto out_failed;
3159         }
3160         mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
3161             &mrioc->sense_buf_dma);
3162         if (!mrioc->sense_buf)
3163                 goto out_failed;
3164
3165         /* sense buffer queue, 8 byte align */
3166         sz = mrioc->sense_buf_q_sz * 8;
3167         mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
3168             &mrioc->pdev->dev, sz, 8, 0);
3169         if (!mrioc->sense_buf_q_pool) {
3170                 ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
3171                 goto out_failed;
3172         }
3173         mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
3174             GFP_KERNEL, &mrioc->sense_buf_q_dma);
3175         if (!mrioc->sense_buf_q)
3176                 goto out_failed;
3177
3178         return retval;
3179
3180 out_failed:
3181         retval = -1;
3182         return retval;
3183 }
3184
3185 /**
3186  * mpimr_initialize_reply_sbuf_queues - initialize reply and
3187  * sense buffer queues
3188  * @mrioc: Adapter instance reference
3189  *
3190  * Helper function to initialize reply and sense buffers along
3191  * with some debug prints.
3192  *
3193  * Return: Nothing.
3194  */
3195 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3196 {
3197         u32 sz, i;
3198         dma_addr_t phy_addr;
3199
3200         sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3201         ioc_info(mrioc,
3202             "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3203             mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3204             (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3205         sz = mrioc->reply_free_qsz * 8;
3206         ioc_info(mrioc,
3207             "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3208             mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3209             (unsigned long long)mrioc->reply_free_q_dma);
3210         sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3211         ioc_info(mrioc,
3212             "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3213             mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3214             (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3215         sz = mrioc->sense_buf_q_sz * 8;
3216         ioc_info(mrioc,
3217             "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3218             mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3219             (unsigned long long)mrioc->sense_buf_q_dma);
3220
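             /*
              * Each free queue is sized one larger than its buffer count;
              * every entry holds the DMA address of one buffer and the final,
              * extra slot is left zero.
              */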
3221         /* initialize Reply buffer Queue */
3222         for (i = 0, phy_addr = mrioc->reply_buf_dma;
3223             i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3224                 mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3225         mrioc->reply_free_q[i] = cpu_to_le64(0);
3226
3227         /* initialize Sense Buffer Queue */
3228         for (i = 0, phy_addr = mrioc->sense_buf_dma;
3229             i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3230                 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3231         mrioc->sense_buf_q[i] = cpu_to_le64(0);
3232 }
3233
3234 /**
3235  * mpi3mr_issue_iocinit - Send IOC Init
3236  * @mrioc: Adapter instance reference
3237  *
3238  * Issue IOC Init MPI request through admin queue and wait for
3239  * the completion of it or time out.
3240  *
3241  * Return: 0 on success, non-zero on failures.
3242  */
3243 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
3244 {
3245         struct mpi3_ioc_init_request iocinit_req;
3246         struct mpi3_driver_info_layout *drv_info;
3247         dma_addr_t data_dma;
3248         u32 data_len = sizeof(*drv_info);
3249         int retval = 0;
3250         ktime_t current_time;
3251
3252         drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
3253             GFP_KERNEL);
3254         if (!drv_info) {
3255                 retval = -1;
3256                 goto out;
3257         }
3258         mpimr_initialize_reply_sbuf_queues(mrioc);
3259
3260         drv_info->information_length = cpu_to_le32(data_len);
3261         strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
3262         strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
3263         strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
3264         strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
3265         strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
3266         strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
3267             sizeof(drv_info->driver_release_date));
3268         drv_info->driver_capabilities = 0;
3269         memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
3270             sizeof(mrioc->driver_info));
3271
3272         memset(&iocinit_req, 0, sizeof(iocinit_req));
3273         mutex_lock(&mrioc->init_cmds.mutex);
3274         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3275                 retval = -1;
3276                 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
3277                 mutex_unlock(&mrioc->init_cmds.mutex);
3278                 goto out;
3279         }
3280         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3281         mrioc->init_cmds.is_waiting = 1;
3282         mrioc->init_cmds.callback = NULL;
3283         iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3284         iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
3285         iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
3286         iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
3287         iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
3288         iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
3289         iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
3290         iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
3291         iocinit_req.reply_free_queue_address =
3292             cpu_to_le64(mrioc->reply_free_q_dma);
3293         iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
3294         iocinit_req.sense_buffer_free_queue_depth =
3295             cpu_to_le16(mrioc->sense_buf_q_sz);
3296         iocinit_req.sense_buffer_free_queue_address =
3297             cpu_to_le64(mrioc->sense_buf_q_dma);
3298         iocinit_req.driver_information_address = cpu_to_le64(data_dma);
3299
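             /* report the current host time to the firmware in milliseconds since the epoch */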
3300         current_time = ktime_get_real();
3301         iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
3302
3303         iocinit_req.msg_flags |=
3304             MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
3305
3306         init_completion(&mrioc->init_cmds.done);
3307         retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
3308             sizeof(iocinit_req), 1);
3309         if (retval) {
3310                 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
3311                 goto out_unlock;
3312         }
3313         wait_for_completion_timeout(&mrioc->init_cmds.done,
3314             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3315         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3316                 mpi3mr_check_rh_fault_ioc(mrioc,
3317                     MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
3318                 ioc_err(mrioc, "ioc_init timed out\n");
3319                 retval = -1;
3320                 goto out_unlock;
3321         }
3322         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3323             != MPI3_IOCSTATUS_SUCCESS) {
3324                 ioc_err(mrioc,
3325                     "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3326                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3327                     mrioc->init_cmds.ioc_loginfo);
3328                 retval = -1;
3329                 goto out_unlock;
3330         }
3331
3332         mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
3333         writel(mrioc->reply_free_queue_host_index,
3334             &mrioc->sysif_regs->reply_free_host_index);
3335
3336         mrioc->sbq_host_index = mrioc->num_sense_bufs;
3337         writel(mrioc->sbq_host_index,
3338             &mrioc->sysif_regs->sense_buffer_free_host_index);
3339 out_unlock:
3340         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3341         mutex_unlock(&mrioc->init_cmds.mutex);
3342
3343 out:
3344         if (drv_info)
3345                 dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
3346                     data_dma);
3347
3348         return retval;
3349 }
3350
3351 /**
3352  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3353  * @mrioc: Adapter instance reference
3354  * @event: MPI event ID
3355  *
3356  * Unmask the specific event by clearing its bit in the event_masks
3357  * bitmap.
3358  *
3359  * Return: Nothing.
3360  */
3361 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3362 {
3363         u32 desired_event;
3364         u8 word;
3365
3366         if (event >= 128)
3367                 return;
3368
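             /*
              * event_masks[] is an array of 32-bit words; clear bit
              * (event % 32) of word (event / 32) to unmask the event.
              */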
3369         desired_event = (1 << (event % 32));
3370         word = event / 32;
3371
3372         mrioc->event_masks[word] &= ~desired_event;
3373 }
3374
3375 /**
3376  * mpi3mr_issue_event_notification - Send event notification
3377  * @mrioc: Adapter instance reference
3378  *
3379  * Issue event notification MPI request through admin queue and
3380  * wait for the completion of it or time out.
3381  *
3382  * Return: 0 on success, non-zero on failures.
3383  */
3384 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
3385 {
3386         struct mpi3_event_notification_request evtnotify_req;
3387         int retval = 0;
3388         u8 i;
3389
3390         memset(&evtnotify_req, 0, sizeof(evtnotify_req));
3391         mutex_lock(&mrioc->init_cmds.mutex);
3392         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3393                 retval = -1;
3394                 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
3395                 mutex_unlock(&mrioc->init_cmds.mutex);
3396                 goto out;
3397         }
3398         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3399         mrioc->init_cmds.is_waiting = 1;
3400         mrioc->init_cmds.callback = NULL;
3401         evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3402         evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
3403         for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3404                 evtnotify_req.event_masks[i] =
3405                     cpu_to_le32(mrioc->event_masks[i]);
3406         init_completion(&mrioc->init_cmds.done);
3407         retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
3408             sizeof(evtnotify_req), 1);
3409         if (retval) {
3410                 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
3411                 goto out_unlock;
3412         }
3413         wait_for_completion_timeout(&mrioc->init_cmds.done,
3414             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3415         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3416                 ioc_err(mrioc, "event notification timed out\n");
3417                 mpi3mr_check_rh_fault_ioc(mrioc,
3418                     MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
3419                 retval = -1;
3420                 goto out_unlock;
3421         }
3422         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3423             != MPI3_IOCSTATUS_SUCCESS) {
3424                 ioc_err(mrioc,
3425                     "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3426                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3427                     mrioc->init_cmds.ioc_loginfo);
3428                 retval = -1;
3429                 goto out_unlock;
3430         }
3431
3432 out_unlock:
3433         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3434         mutex_unlock(&mrioc->init_cmds.mutex);
3435 out:
3436         return retval;
3437 }
3438
3439 /**
3440  * mpi3mr_process_event_ack - Process event acknowledgment
3441  * @mrioc: Adapter instance reference
3442  * @event: MPI3 event ID
3443  * @event_ctx: event context
3444  *
3445  * Send event acknowledgment through admin queue and wait for
3446  * it to complete.
3447  *
3448  * Return: 0 on success, non-zero on failures.
3449  */
3450 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3451         u32 event_ctx)
3452 {
3453         struct mpi3_event_ack_request evtack_req;
3454         int retval = 0;
3455
3456         memset(&evtack_req, 0, sizeof(evtack_req));
3457         mutex_lock(&mrioc->init_cmds.mutex);
3458         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3459                 retval = -1;
3460                 ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3461                 mutex_unlock(&mrioc->init_cmds.mutex);
3462                 goto out;
3463         }
3464         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3465         mrioc->init_cmds.is_waiting = 1;
3466         mrioc->init_cmds.callback = NULL;
3467         evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3468         evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3469         evtack_req.event = event;
3470         evtack_req.event_context = cpu_to_le32(event_ctx);
3471
3472         init_completion(&mrioc->init_cmds.done);
3473         retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3474             sizeof(evtack_req), 1);
3475         if (retval) {
3476                 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3477                 goto out_unlock;
3478         }
3479         wait_for_completion_timeout(&mrioc->init_cmds.done,
3480             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3481         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3482                 ioc_err(mrioc, "Send EvtAck: command timed out\n");
3483                 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3484                         mpi3mr_check_rh_fault_ioc(mrioc,
3485                             MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
3486                 retval = -1;
3487                 goto out_unlock;
3488         }
3489         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3490             != MPI3_IOCSTATUS_SUCCESS) {
3491                 ioc_err(mrioc,
3492                     "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3493                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3494                     mrioc->init_cmds.ioc_loginfo);
3495                 retval = -1;
3496                 goto out_unlock;
3497         }
3498
3499 out_unlock:
3500         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3501         mutex_unlock(&mrioc->init_cmds.mutex);
3502 out:
3503         return retval;
3504 }
3505
3506 /**
3507  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3508  * @mrioc: Adapter instance reference
3509  *
3510  * Allocate chain buffers and set a bitmap to indicate free
3511  * chain buffers. Chain buffers are used to pass the SGE
3512  * information along with MPI3 SCSI IO requests for host I/O.
3513  *
3514  * Return: 0 on success, non-zero on failure
3515  */
3516 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3517 {
3518         int retval = 0;
3519         u32 sz, i;
3520         u16 num_chains;
3521
3522         if (mrioc->chain_sgl_list)
3523                 return retval;
3524
3525         num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3526
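             /*
              * Set aside additional chain buffers when any DIX protection
              * type is enabled, since protection data may require its own
              * SGE chains.
              */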
3527         if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3528             | SHOST_DIX_TYPE1_PROTECTION
3529             | SHOST_DIX_TYPE2_PROTECTION
3530             | SHOST_DIX_TYPE3_PROTECTION))
3531                 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3532
3533         mrioc->chain_buf_count = num_chains;
3534         sz = sizeof(struct chain_element) * num_chains;
3535         mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3536         if (!mrioc->chain_sgl_list)
3537                 goto out_failed;
3538
3539         if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
3540                 MPI3MR_PAGE_SIZE_4K))
3541                 mrioc->max_sgl_entries = mrioc->facts.max_data_length /
3542                         MPI3MR_PAGE_SIZE_4K;
3543         sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
3544         ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
3545                         mrioc->max_sgl_entries, sz/1024);
3546
3547         mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3548             &mrioc->pdev->dev, sz, 16, 0);
3549         if (!mrioc->chain_buf_pool) {
3550                 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3551                 goto out_failed;
3552         }
3553
3554         for (i = 0; i < num_chains; i++) {
3555                 mrioc->chain_sgl_list[i].addr =
3556                     dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3557                     &mrioc->chain_sgl_list[i].dma_addr);
3558
3559                 if (!mrioc->chain_sgl_list[i].addr)
3560                         goto out_failed;
3561         }
3562         mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
3563         if (!mrioc->chain_bitmap)
3564                 goto out_failed;
3565         return retval;
3566 out_failed:
3567         retval = -1;
3568         return retval;
3569 }
3570
3571 /**
3572  * mpi3mr_port_enable_complete - Mark port enable complete
3573  * @mrioc: Adapter instance reference
3574  * @drv_cmd: Internal command tracker
3575  *
3576  * Call back for asynchronous port enable request sets the
3577  * driver command to indicate port enable request is complete.
3578  *
3579  * Return: Nothing
3580  */
3581 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3582         struct mpi3mr_drv_cmd *drv_cmd)
3583 {
3584         drv_cmd->callback = NULL;
3585         mrioc->scan_started = 0;
3586         if (drv_cmd->state & MPI3MR_CMD_RESET)
3587                 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3588         else
3589                 mrioc->scan_failed = drv_cmd->ioc_status;
3590         drv_cmd->state = MPI3MR_CMD_NOTUSED;
3591 }
3592
3593 /**
3594  * mpi3mr_issue_port_enable - Issue Port Enable
3595  * @mrioc: Adapter instance reference
3596  * @async: Flag to wait for completion or not
3597  *
3598  * Issue Port Enable MPI request through admin queue and if the
3599  * async flag is not set wait for the completion of the port
3600  * enable or time out.
3601  *
3602  * Return: 0 on success, non-zero on failures.
3603  */
3604 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
3605 {
3606         struct mpi3_port_enable_request pe_req;
3607         int retval = 0;
3608         u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
3609
3610         memset(&pe_req, 0, sizeof(pe_req));
3611         mutex_lock(&mrioc->init_cmds.mutex);
3612         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3613                 retval = -1;
3614                 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
3615                 mutex_unlock(&mrioc->init_cmds.mutex);
3616                 goto out;
3617         }
3618         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3619         if (async) {
3620                 mrioc->init_cmds.is_waiting = 0;
3621                 mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
3622         } else {
3623                 mrioc->init_cmds.is_waiting = 1;
3624                 mrioc->init_cmds.callback = NULL;
3625                 init_completion(&mrioc->init_cmds.done);
3626         }
3627         pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3628         pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
3629
3630         retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
3631         if (retval) {
3632                 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
3633                 goto out_unlock;
3634         }
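             /*
              * In async mode the request is left pending; completion is
              * reported later through the mpi3mr_port_enable_complete()
              * callback.
              */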
3635         if (async) {
3636                 mutex_unlock(&mrioc->init_cmds.mutex);
3637                 goto out;
3638         }
3639
3640         wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
3641         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3642                 ioc_err(mrioc, "port enable timed out\n");
3643                 retval = -1;
3644                 mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
3645                 goto out_unlock;
3646         }
3647         mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
3648
3649 out_unlock:
3650         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3651         mutex_unlock(&mrioc->init_cmds.mutex);
3652 out:
3653         return retval;
3654 }
3655
3656 /* Protocol type to name mapper structure */
3657 static const struct {
3658         u8 protocol;
3659         char *name;
3660 } mpi3mr_protocols[] = {
3661         { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
3662         { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
3663         { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
3664 };
3665
3666 /* Capability to name mapper structure */
3667 static const struct {
3668         u32 capability;
3669         char *name;
3670 } mpi3mr_capabilities[] = {
3671         { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
3672         { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" },
3673 };
3674
3675 /**
3676  * mpi3mr_print_ioc_info - Display controller information
3677  * @mrioc: Adapter instance reference
3678  *
3679  * Display the controller personality, capabilities, supported
3680  * protocols, etc.
3681  *
3682  * Return: Nothing
3683  */
3684 static void
3685 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3686 {
3687         int i = 0, bytes_written = 0;
3688         char personality[16];
3689         char protocol[50] = {0};
3690         char capabilities[100] = {0};
3691         struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3692
3693         switch (mrioc->facts.personality) {
3694         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3695                 strncpy(personality, "Enhanced HBA", sizeof(personality));
3696                 break;
3697         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3698                 strncpy(personality, "RAID", sizeof(personality));
3699                 break;
3700         default:
3701                 strncpy(personality, "Unknown", sizeof(personality));
3702                 break;
3703         }
3704
3705         ioc_info(mrioc, "Running in %s Personality", personality);
3706
3707         ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3708             fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3709             fwver->ph_minor, fwver->cust_id, fwver->build_num);
3710
3711         for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
3712                 if (mrioc->facts.protocol_flags &
3713                     mpi3mr_protocols[i].protocol) {
3714                         bytes_written += scnprintf(protocol + bytes_written,
3715                                     sizeof(protocol) - bytes_written, "%s%s",
3716                                     bytes_written ? "," : "",
3717                                     mpi3mr_protocols[i].name);
3718                 }
3719         }
3720
3721         bytes_written = 0;
3722         for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
3723                 if (mrioc->facts.ioc_capabilities &
3724                     mpi3mr_capabilities[i].capability) {
3725                         bytes_written += scnprintf(capabilities + bytes_written,
3726                                     sizeof(capabilities) - bytes_written, "%s%s",
3727                                     bytes_written ? "," : "",
3728                                     mpi3mr_capabilities[i].name);
3729                 }
3730         }
3731
3732         ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
3733                  protocol, capabilities);
3734 }
3735
3736 /**
3737  * mpi3mr_cleanup_resources - Free PCI resources
3738  * @mrioc: Adapter instance reference
3739  *
3740  * Unmap PCI device memory and disable PCI device.
3741  *
3742  * Return: Nothing.
3743  */
3744 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
3745 {
3746         struct pci_dev *pdev = mrioc->pdev;
3747
3748         mpi3mr_cleanup_isr(mrioc);
3749
3750         if (mrioc->sysif_regs) {
3751                 iounmap((void __iomem *)mrioc->sysif_regs);
3752                 mrioc->sysif_regs = NULL;
3753         }
3754
3755         if (pci_is_enabled(pdev)) {
3756                 if (mrioc->bars)
3757                         pci_release_selected_regions(pdev, mrioc->bars);
3758                 pci_disable_device(pdev);
3759         }
3760 }
3761
3762 /**
3763  * mpi3mr_setup_resources - Enable PCI resources
3764  * @mrioc: Adapter instance reference
3765  *
3766  * Enable PCI device memory, MSI-x registers and set DMA mask.
3767  *
3768  * Return: 0 on success and non-zero on failure.
3769  */
3770 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
3771 {
3772         struct pci_dev *pdev = mrioc->pdev;
3773         u32 memap_sz = 0;
3774         int i, retval = 0, capb = 0;
3775         u16 message_control;
3776         u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
3777             ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
3778
3779         if (pci_enable_device_mem(pdev)) {
3780                 ioc_err(mrioc, "pci_enable_device_mem: failed\n");
3781                 retval = -ENODEV;
3782                 goto out_failed;
3783         }
3784
3785         capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3786         if (!capb) {
3787                 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
3788                 retval = -ENODEV;
3789                 goto out_failed;
3790         }
3791         mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3792
3793         if (pci_request_selected_regions(pdev, mrioc->bars,
3794             mrioc->driver_name)) {
3795                 ioc_err(mrioc, "pci_request_selected_regions: failed\n");
3796                 retval = -ENODEV;
3797                 goto out_failed;
3798         }
3799
3800         for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
3801                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3802                         mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
3803                         memap_sz = pci_resource_len(pdev, i);
3804                         mrioc->sysif_regs =
3805                             ioremap(mrioc->sysif_regs_phys, memap_sz);
3806                         break;
3807                 }
3808         }
3809
3810         pci_set_master(pdev);
3811
3812         retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
3813         if (retval) {
3814                 if (dma_mask != DMA_BIT_MASK(32)) {
3815                         ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
3816                         dma_mask = DMA_BIT_MASK(32);
3817                         retval = dma_set_mask_and_coherent(&pdev->dev,
3818                             dma_mask);
3819                 }
3820                 if (retval) {
3821                         mrioc->dma_mask = 0;
3822                         ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
3823                         goto out_failed;
3824                 }
3825         }
3826         mrioc->dma_mask = dma_mask;
3827
3828         if (!mrioc->sysif_regs) {
3829                 ioc_err(mrioc,
3830                     "Unable to map adapter memory or resource not found\n");
3831                 retval = -EINVAL;
3832                 goto out_failed;
3833         }
3834
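             /* the MSI-X Table Size field in Message Control is encoded as (vector count - 1) */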
3835         pci_read_config_word(pdev, capb + 2, &message_control);
3836         mrioc->msix_count = (message_control & 0x3FF) + 1;
3837
3838         pci_save_state(pdev);
3839
3840         pci_set_drvdata(pdev, mrioc->shost);
3841
3842         mpi3mr_ioc_disable_intr(mrioc);
3843
3844         ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
3845             (unsigned long long)mrioc->sysif_regs_phys,
3846             mrioc->sysif_regs, memap_sz);
3847         ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
3848             mrioc->msix_count);
3849
3850         if (!reset_devices && poll_queues > 0)
3851                 mrioc->requested_poll_qcount = min_t(int, poll_queues,
3852                                 mrioc->msix_count - 2);
3853         return retval;
3854
3855 out_failed:
3856         mpi3mr_cleanup_resources(mrioc);
3857         return retval;
3858 }
3859
3860 /**
3861  * mpi3mr_enable_events - Enable required events
3862  * @mrioc: Adapter instance reference
3863  *
3864  * This routine unmasks the events required by the driver by
3865  * sending the appropriate event mask bitmap through an event
3866  * notification request.
3867  *
3868  * Return: 0 on success and non-zero on failure.
3869  */
3870 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
3871 {
3872         int retval = 0;
3873         u32  i;
3874
3875         for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3876                 mrioc->event_masks[i] = -1;
3877
3878         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
3879         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
3880         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
3881         mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
3882         mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
3883         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3884         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
3885         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
3886         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
3887         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
3888         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
3889         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
3890         mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
3891         mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
3892
3893         retval = mpi3mr_issue_event_notification(mrioc);
3894         if (retval)
3895                 ioc_err(mrioc, "failed to issue event notification %d\n",
3896                     retval);
3897         return retval;
3898 }
3899
3900 /**
3901  * mpi3mr_init_ioc - Initialize the controller
3902  * @mrioc: Adapter instance reference
3903  *
3904  * This is the controller initialization routine, executed either
3905  * after soft reset or from pci probe callback.
3906  * Setup the required resources, memory map the controller
3907  * registers, create admin and operational reply queue pairs,
3908  * allocate required memory for reply pool, sense buffer pool,
3909  * issue IOC init request to the firmware, unmask the events and
3910  * issue port enable to discover SAS/SATA/NVMe devices and RAID
3911  * volumes.
3912  *
3913  * Return: 0 on success and non-zero on failure.
3914  */
3915 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
3916 {
3917         int retval = 0;
3918         u8 retry = 0;
3919         struct mpi3_ioc_facts_data facts_data;
3920         u32 sz;
3921
3922 retry_init:
3923         retval = mpi3mr_bring_ioc_ready(mrioc);
3924         if (retval) {
3925                 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
3926                     retval);
3927                 goto out_failed_noretry;
3928         }
3929
3930         retval = mpi3mr_setup_isr(mrioc, 1);
3931         if (retval) {
3932                 ioc_err(mrioc, "Failed to setup ISR error %d\n",
3933                     retval);
3934                 goto out_failed_noretry;
3935         }
3936
3937         retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3938         if (retval) {
3939                 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
3940                     retval);
3941                 goto out_failed;
3942         }
3943
3944         mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
3945         mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
3946         mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
3947         atomic_set(&mrioc->pend_large_data_sz, 0);
3948
3949         if (reset_devices)
3950                 mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
3951                     MPI3MR_HOST_IOS_KDUMP);
3952
3953         if (!(mrioc->facts.ioc_capabilities &
3954             MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) {
3955                 mrioc->sas_transport_enabled = 1;
3956                 mrioc->scsi_device_channel = 1;
3957                 mrioc->shost->max_channel = 1;
3958                 mrioc->shost->transportt = mpi3mr_transport_template;
3959         }
3960
3961         mrioc->reply_sz = mrioc->facts.reply_sz;
3962
3963         retval = mpi3mr_check_reset_dma_mask(mrioc);
3964         if (retval) {
3965                 ioc_err(mrioc, "Resetting dma mask failed %d\n",
3966                     retval);
3967                 goto out_failed_noretry;
3968         }
3969
3970         mpi3mr_print_ioc_info(mrioc);
3971
3972         if (!mrioc->cfg_page) {
3973                 dprint_init(mrioc, "allocating config page buffers\n");
3974                 mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
3975                 mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
3976                     mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
3977                 if (!mrioc->cfg_page) {
3978                         retval = -1;
3979                         goto out_failed_noretry;
3980                 }
3981         }
3982
3983         dprint_init(mrioc, "allocating ioctl dma buffers\n");
3984         mpi3mr_alloc_ioctl_dma_memory(mrioc);
3985
3986         if (!mrioc->init_cmds.reply) {
3987                 retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
3988                 if (retval) {
3989                         ioc_err(mrioc,
3990                             "%s :Failed to allocate reply sense buffers %d\n",
3991                             __func__, retval);
3992                         goto out_failed_noretry;
3993                 }
3994         }
3995
3996         if (!mrioc->chain_sgl_list) {
3997                 retval = mpi3mr_alloc_chain_bufs(mrioc);
3998                 if (retval) {
3999                 ioc_err(mrioc, "Failed to allocate chain buffers %d\n",
4000                             retval);
4001                         goto out_failed_noretry;
4002                 }
4003         }
4004
4005         retval = mpi3mr_issue_iocinit(mrioc);
4006         if (retval) {
4007                 ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
4008                     retval);
4009                 goto out_failed;
4010         }
4011
4012         retval = mpi3mr_print_pkg_ver(mrioc);
4013         if (retval) {
4014                 ioc_err(mrioc, "failed to get package version\n");
4015                 goto out_failed;
4016         }
4017
4018         retval = mpi3mr_setup_isr(mrioc, 0);
4019         if (retval) {
4020                 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
4021                     retval);
4022                 goto out_failed_noretry;
4023         }
4024
4025         retval = mpi3mr_create_op_queues(mrioc);
4026         if (retval) {
4027                 ioc_err(mrioc, "Failed to create OpQueues error %d\n",
4028                     retval);
4029                 goto out_failed;
4030         }
4031
4032         if (!mrioc->pel_seqnum_virt) {
4033                 dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
4034                 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4035                 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4036                     mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4037                     GFP_KERNEL);
4038                 if (!mrioc->pel_seqnum_virt) {
4039                         retval = -ENOMEM;
4040                         goto out_failed_noretry;
4041                 }
4042         }
4043
4044         if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
4045                 dprint_init(mrioc, "allocating memory for throttle groups\n");
4046                 sz = sizeof(struct mpi3mr_throttle_group_info);
4047                 mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
4048                 if (!mrioc->throttle_groups) {
4049                         retval = -1;
4050                         goto out_failed_noretry;
4051                 }
4052         }
4053
4054         retval = mpi3mr_enable_events(mrioc);
4055         if (retval) {
4056                 ioc_err(mrioc, "failed to enable events %d\n",
4057                     retval);
4058                 goto out_failed;
4059         }
4060
4061         ioc_info(mrioc, "controller initialization completed successfully\n");
4062         return retval;
4063 out_failed:
4064         if (retry < 2) {
4065                 retry++;
4066                 ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
4067                     retry);
4068                 mpi3mr_memset_buffers(mrioc);
4069                 goto retry_init;
4070         }
4071         retval = -1;
4072 out_failed_noretry:
4073         ioc_err(mrioc, "controller initialization failed\n");
4074         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4075             MPI3MR_RESET_FROM_CTLR_CLEANUP);
4076         mrioc->unrecoverable = 1;
4077         return retval;
4078 }
4079
4080 /**
4081  * mpi3mr_reinit_ioc - Re-Initialize the controller
4082  * @mrioc: Adapter instance reference
4083  * @is_resume: Called from resume or reset path
4084  *
4085  * This is the controller re-initialization routine, executed from
4086  * the soft reset handler or resume callback. Creates
4087  * operational reply queue pairs, allocate required memory for
4088  * reply pool, sense buffer pool, issue IOC init request to the
4089  * firmware, unmask the events and issue port enable to discover
4090  * SAS/SATA/NVMe devices and RAID volumes.
4091  *
4092  * Return: 0 on success and non-zero on failure.
4093  */
4094 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
4095 {
4096         int retval = 0;
4097         u8 retry = 0;
4098         struct mpi3_ioc_facts_data facts_data;
4099         u32 pe_timeout, ioc_status;
4100
4101 retry_init:
4102         pe_timeout =
4103             (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
4104
4105         dprint_reset(mrioc, "bringing up the controller to ready state\n");
4106         retval = mpi3mr_bring_ioc_ready(mrioc);
4107         if (retval) {
4108                 ioc_err(mrioc, "failed to bring to ready state\n");
4109                 goto out_failed_noretry;
4110         }
4111
4112         if (is_resume) {
4113                 dprint_reset(mrioc, "setting up single ISR\n");
4114                 retval = mpi3mr_setup_isr(mrioc, 1);
4115                 if (retval) {
4116                         ioc_err(mrioc, "failed to setup ISR\n");
4117                         goto out_failed_noretry;
4118                 }
4119         } else
4120                 mpi3mr_ioc_enable_intr(mrioc);
4121
4122         dprint_reset(mrioc, "getting ioc_facts\n");
4123         retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
4124         if (retval) {
4125                 ioc_err(mrioc, "failed to get ioc_facts\n");
4126                 goto out_failed;
4127         }
4128
4129         dprint_reset(mrioc, "validating ioc_facts\n");
4130         retval = mpi3mr_revalidate_factsdata(mrioc);
4131         if (retval) {
4132                 ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
4133                 goto out_failed_noretry;
4134         }
4135
4136         mpi3mr_print_ioc_info(mrioc);
4137
4138         dprint_reset(mrioc, "sending ioc_init\n");
4139         retval = mpi3mr_issue_iocinit(mrioc);
4140         if (retval) {
4141                 ioc_err(mrioc, "failed to send ioc_init\n");
4142                 goto out_failed;
4143         }
4144
4145         dprint_reset(mrioc, "getting package version\n");
4146         retval = mpi3mr_print_pkg_ver(mrioc);
4147         if (retval) {
4148                 ioc_err(mrioc, "failed to get package version\n");
4149                 goto out_failed;
4150         }
4151
4152         if (is_resume) {
4153                 dprint_reset(mrioc, "setting up multiple ISR\n");
4154                 retval = mpi3mr_setup_isr(mrioc, 0);
4155                 if (retval) {
4156                         ioc_err(mrioc, "failed to re-setup ISR\n");
4157                         goto out_failed_noretry;
4158                 }
4159         }
4160
4161         dprint_reset(mrioc, "creating operational queue pairs\n");
4162         retval = mpi3mr_create_op_queues(mrioc);
4163         if (retval) {
4164                 ioc_err(mrioc, "failed to create operational queue pairs\n");
4165                 goto out_failed;
4166         }
4167
4168         if (!mrioc->pel_seqnum_virt) {
4169                 dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
4170                 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4171                 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4172                     mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4173                     GFP_KERNEL);
4174                 if (!mrioc->pel_seqnum_virt) {
4175                         retval = -ENOMEM;
4176                         goto out_failed_noretry;
4177                 }
4178         }
4179
4180         if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
4181                 ioc_err(mrioc,
4182                     "cannot create minimum number of operational queues expected:%d created:%d\n",
4183                     mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
4184                 retval = -1;
4185                 goto out_failed_noretry;
4186         }
4187
4188         dprint_reset(mrioc, "enabling events\n");
4189         retval = mpi3mr_enable_events(mrioc);
4190         if (retval) {
4191                 ioc_err(mrioc, "failed to enable events\n");
4192                 goto out_failed;
4193         }
4194
4195         mrioc->device_refresh_on = 1;
4196         mpi3mr_add_event_wait_for_device_refresh(mrioc);
4197
4198         ioc_info(mrioc, "sending port enable\n");
4199         retval = mpi3mr_issue_port_enable(mrioc, 1);
4200         if (retval) {
4201                 ioc_err(mrioc, "failed to issue port enable\n");
4202                 goto out_failed;
4203         }
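             /*
              * Poll for port enable completion: init_cmds moves back to
              * MPI3MR_CMD_NOTUSED once the scan is done. Bail out early if
              * the PCI device disappears or the controller reports a fault
              * or a reset history.
              */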
4204         do {
4205                 ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
4206                 if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
4207                         break;
4208                 if (!pci_device_is_present(mrioc->pdev))
4209                         mrioc->unrecoverable = 1;
4210                 if (mrioc->unrecoverable) {
4211                         retval = -1;
4212                         goto out_failed_noretry;
4213                 }
4214                 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4215                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4216                     (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4217                         mpi3mr_print_fault_info(mrioc);
4218                         mrioc->init_cmds.is_waiting = 0;
4219                         mrioc->init_cmds.callback = NULL;
4220                         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4221                         goto out_failed;
4222                 }
4223         } while (--pe_timeout);
4224
4225         if (!pe_timeout) {
4226                 ioc_err(mrioc, "port enable timed out\n");
4227                 mpi3mr_check_rh_fault_ioc(mrioc,
4228                     MPI3MR_RESET_FROM_PE_TIMEOUT);
4229                 mrioc->init_cmds.is_waiting = 0;
4230                 mrioc->init_cmds.callback = NULL;
4231                 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4232                 goto out_failed;
4233         } else if (mrioc->scan_failed) {
4234                 ioc_err(mrioc,
4235                     "port enable failed with status=0x%04x\n",
4236                     mrioc->scan_failed);
4237         } else
4238                 ioc_info(mrioc, "port enable completed successfully\n");
4239
4240         ioc_info(mrioc, "controller %s completed successfully\n",
4241             (is_resume)?"resume":"re-initialization");
4242         return retval;
4243 out_failed:
4244         if (retry < 2) {
4245                 retry++;
4246                 ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
4247                     (is_resume)?"resume":"re-initialization", retry);
4248                 mpi3mr_memset_buffers(mrioc);
4249                 goto retry_init;
4250         }
4251         retval = -1;
4252 out_failed_noretry:
4253         ioc_err(mrioc, "controller %s failed\n",
4254             (is_resume)?"resume":"re-initialization");
4255         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4256             MPI3MR_RESET_FROM_CTLR_CLEANUP);
4257         mrioc->unrecoverable = 1;
4258         return retval;
4259 }
4260
4261 /**
4262  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4263  *                                      segments
4264  * @mrioc: Adapter instance reference
4265  * @qidx: Operational reply queue index
4266  *
4267  * Return: Nothing.
4268  */
4269 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4270 {
4271         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
4272         struct segments *segments;
4273         int i, size;
4274
4275         if (!op_reply_q->q_segments)
4276                 return;
4277
4278         size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
4279         segments = op_reply_q->q_segments;
4280         for (i = 0; i < op_reply_q->num_segments; i++)
4281                 memset(segments[i].segment, 0, size);
4282 }
4283
4284 /**
4285  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4286  *                                      segments
4287  * @mrioc: Adapter instance reference
4288  * @qidx: Operational request queue index
4289  *
4290  * Return: Nothing.
4291  */
4292 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4293 {
4294         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4295         struct segments *segments;
4296         int i, size;
4297
4298         if (!op_req_q->q_segments)
4299                 return;
4300
4301         size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4302         segments = op_req_q->q_segments;
4303         for (i = 0; i < op_req_q->num_segments; i++)
4304                 memset(segments[i].segment, 0, size);
4305 }
4306
4307 /**
4308  * mpi3mr_memset_buffers - memset memory for a controller
4309  * @mrioc: Adapter instance reference
4310  *
4311  * Clears all the memory allocated for a controller; typically
4312  * called post reset to reuse the memory allocated during the
4313  * controller init.
4314  *
4315  * Return: Nothing.
4316  */
4317 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4318 {
4319         u16 i;
4320         struct mpi3mr_throttle_group_info *tg;
4321
4322         mrioc->change_count = 0;
4323         mrioc->active_poll_qcount = 0;
4324         mrioc->default_qcount = 0;
4325         if (mrioc->admin_req_base)
4326                 memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4327         if (mrioc->admin_reply_base)
4328                 memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4329         atomic_set(&mrioc->admin_reply_q_in_use, 0);
4330
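             /*
              * Zero the reply buffers of the internal driver commands and
              * clear the device removal and event ack tracking bitmaps so
              * they can be reused after the reset.
              */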
4331         if (mrioc->init_cmds.reply) {
4332                 memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4333                 memset(mrioc->bsg_cmds.reply, 0,
4334                     sizeof(*mrioc->bsg_cmds.reply));
4335                 memset(mrioc->host_tm_cmds.reply, 0,
4336                     sizeof(*mrioc->host_tm_cmds.reply));
4337                 memset(mrioc->pel_cmds.reply, 0,
4338                     sizeof(*mrioc->pel_cmds.reply));
4339                 memset(mrioc->pel_abort_cmd.reply, 0,
4340                     sizeof(*mrioc->pel_abort_cmd.reply));
4341                 memset(mrioc->transport_cmds.reply, 0,
4342                     sizeof(*mrioc->transport_cmds.reply));
4343                 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4344                         memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4345                             sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4346                 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4347                         memset(mrioc->evtack_cmds[i].reply, 0,
4348                             sizeof(*mrioc->evtack_cmds[i].reply));
4349                 bitmap_clear(mrioc->removepend_bitmap, 0,
4350                              mrioc->dev_handle_bitmap_bits);
4351                 bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
4352                 bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
4353                              MPI3MR_NUM_EVTACKCMD);
4354         }
4355
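             /*
              * Reset the bookkeeping of every operational reply/request
              * queue pair and zero their segments; the queues themselves
              * are recreated during controller re-initialization.
              */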
4356         for (i = 0; i < mrioc->num_queues; i++) {
4357                 mrioc->op_reply_qinfo[i].qid = 0;
4358                 mrioc->op_reply_qinfo[i].ci = 0;
4359                 mrioc->op_reply_qinfo[i].num_replies = 0;
4360                 mrioc->op_reply_qinfo[i].ephase = 0;
4361                 atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4362                 atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4363                 mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4364
4365                 mrioc->req_qinfo[i].ci = 0;
4366                 mrioc->req_qinfo[i].pi = 0;
4367                 mrioc->req_qinfo[i].num_requests = 0;
4368                 mrioc->req_qinfo[i].qid = 0;
4369                 mrioc->req_qinfo[i].reply_qid = 0;
4370                 spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4371                 mpi3mr_memset_op_req_q_buffers(mrioc, i);
4372         }
4373
4374         atomic_set(&mrioc->pend_large_data_sz, 0);
4375         if (mrioc->throttle_groups) {
4376                 tg = mrioc->throttle_groups;
4377                 for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4378                         tg->id = 0;
4379                         tg->fw_qd = 0;
4380                         tg->modified_qd = 0;
4381                         tg->io_divert = 0;
4382                         tg->need_qd_reduction = 0;
4383                         tg->high = 0;
4384                         tg->low = 0;
4385                         tg->qd_reduction = 0;
4386                         atomic_set(&tg->pend_large_data_sz, 0);
4387                 }
4388         }
4389 }
4390
4391 /**
4392  * mpi3mr_free_mem - Free memory allocated for a controller
4393  * @mrioc: Adapter instance reference
4394  *
4395  * Free all the memory allocated for a controller.
4396  *
4397  * Return: Nothing.
4398  */
4399 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4400 {
4401         u16 i;
4402         struct mpi3mr_intr_info *intr_info;
4403
4404         mpi3mr_free_enclosure_list(mrioc);
4405         mpi3mr_free_ioctl_dma_memory(mrioc);
4406
4407         if (mrioc->sense_buf_pool) {
4408                 if (mrioc->sense_buf)
4409                         dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4410                             mrioc->sense_buf_dma);
4411                 dma_pool_destroy(mrioc->sense_buf_pool);
4412                 mrioc->sense_buf = NULL;
4413                 mrioc->sense_buf_pool = NULL;
4414         }
4415         if (mrioc->sense_buf_q_pool) {
4416                 if (mrioc->sense_buf_q)
4417                         dma_pool_free(mrioc->sense_buf_q_pool,
4418                             mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4419                 dma_pool_destroy(mrioc->sense_buf_q_pool);
4420                 mrioc->sense_buf_q = NULL;
4421                 mrioc->sense_buf_q_pool = NULL;
4422         }
4423
4424         if (mrioc->reply_buf_pool) {
4425                 if (mrioc->reply_buf)
4426                         dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4427                             mrioc->reply_buf_dma);
4428                 dma_pool_destroy(mrioc->reply_buf_pool);
4429                 mrioc->reply_buf = NULL;
4430                 mrioc->reply_buf_pool = NULL;
4431         }
4432         if (mrioc->reply_free_q_pool) {
4433                 if (mrioc->reply_free_q)
4434                         dma_pool_free(mrioc->reply_free_q_pool,
4435                             mrioc->reply_free_q, mrioc->reply_free_q_dma);
4436                 dma_pool_destroy(mrioc->reply_free_q_pool);
4437                 mrioc->reply_free_q = NULL;
4438                 mrioc->reply_free_q_pool = NULL;
4439         }
4440
4441         for (i = 0; i < mrioc->num_op_req_q; i++)
4442                 mpi3mr_free_op_req_q_segments(mrioc, i);
4443
4444         for (i = 0; i < mrioc->num_op_reply_q; i++)
4445                 mpi3mr_free_op_reply_q_segments(mrioc, i);
4446
4447         for (i = 0; i < mrioc->intr_info_count; i++) {
4448                 intr_info = mrioc->intr_info + i;
4449                 intr_info->op_reply_q = NULL;
4450         }
4451
4452         kfree(mrioc->req_qinfo);
4453         mrioc->req_qinfo = NULL;
4454         mrioc->num_op_req_q = 0;
4455
4456         kfree(mrioc->op_reply_qinfo);
4457         mrioc->op_reply_qinfo = NULL;
4458         mrioc->num_op_reply_q = 0;
4459
4460         kfree(mrioc->init_cmds.reply);
4461         mrioc->init_cmds.reply = NULL;
4462
4463         kfree(mrioc->bsg_cmds.reply);
4464         mrioc->bsg_cmds.reply = NULL;
4465
4466         kfree(mrioc->host_tm_cmds.reply);
4467         mrioc->host_tm_cmds.reply = NULL;
4468
4469         kfree(mrioc->pel_cmds.reply);
4470         mrioc->pel_cmds.reply = NULL;
4471
4472         kfree(mrioc->pel_abort_cmd.reply);
4473         mrioc->pel_abort_cmd.reply = NULL;
4474
4475         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4476                 kfree(mrioc->evtack_cmds[i].reply);
4477                 mrioc->evtack_cmds[i].reply = NULL;
4478         }
4479
4480         bitmap_free(mrioc->removepend_bitmap);
4481         mrioc->removepend_bitmap = NULL;
4482
4483         bitmap_free(mrioc->devrem_bitmap);
4484         mrioc->devrem_bitmap = NULL;
4485
4486         bitmap_free(mrioc->evtack_cmds_bitmap);
4487         mrioc->evtack_cmds_bitmap = NULL;
4488
4489         bitmap_free(mrioc->chain_bitmap);
4490         mrioc->chain_bitmap = NULL;
4491
4492         kfree(mrioc->transport_cmds.reply);
4493         mrioc->transport_cmds.reply = NULL;
4494
4495         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4496                 kfree(mrioc->dev_rmhs_cmds[i].reply);
4497                 mrioc->dev_rmhs_cmds[i].reply = NULL;
4498         }
4499
4500         if (mrioc->chain_buf_pool) {
4501                 for (i = 0; i < mrioc->chain_buf_count; i++) {
4502                         if (mrioc->chain_sgl_list[i].addr) {
4503                                 dma_pool_free(mrioc->chain_buf_pool,
4504                                     mrioc->chain_sgl_list[i].addr,
4505                                     mrioc->chain_sgl_list[i].dma_addr);
4506                                 mrioc->chain_sgl_list[i].addr = NULL;
4507                         }
4508                 }
4509                 dma_pool_destroy(mrioc->chain_buf_pool);
4510                 mrioc->chain_buf_pool = NULL;
4511         }
4512
4513         kfree(mrioc->chain_sgl_list);
4514         mrioc->chain_sgl_list = NULL;
4515
4516         if (mrioc->admin_reply_base) {
4517                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4518                     mrioc->admin_reply_base, mrioc->admin_reply_dma);
4519                 mrioc->admin_reply_base = NULL;
4520         }
4521         if (mrioc->admin_req_base) {
4522                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4523                     mrioc->admin_req_base, mrioc->admin_req_dma);
4524                 mrioc->admin_req_base = NULL;
4525         }
4526         if (mrioc->cfg_page) {
4527                 dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
4528                     mrioc->cfg_page, mrioc->cfg_page_dma);
4529                 mrioc->cfg_page = NULL;
4530         }
4531         if (mrioc->pel_seqnum_virt) {
4532                 dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4533                     mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4534                 mrioc->pel_seqnum_virt = NULL;
4535         }
4536
4537         kfree(mrioc->throttle_groups);
4538         mrioc->throttle_groups = NULL;
4539
4540         kfree(mrioc->logdata_buf);
4541         mrioc->logdata_buf = NULL;
4542
4543 }
4544
4545 /**
4546  * mpi3mr_issue_ioc_shutdown - shutdown controller
4547  * @mrioc: Adapter instance reference
4548  *
4549  * Send a shutdown notification to the controller and wait up to
4550  * the shutdown timeout for it to complete.
4551  *
4552  * Return: Nothing.
4553  */
4554 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
4555 {
4556         u32 ioc_config, ioc_status;
4557         u8 retval = 1;
4558         u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
4559
4560         ioc_info(mrioc, "Issuing shutdown Notification\n");
4561         if (mrioc->unrecoverable) {
4562                 ioc_warn(mrioc,
4563                     "IOC is unrecoverable, shutdown is not issued\n");
4564                 return;
4565         }
4566         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4567         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4568             == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
4569                 ioc_info(mrioc, "shutdown already in progress\n");
4570                 return;
4571         }
4572
4573         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4574         ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
4575         ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
4576
4577         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
4578
4579         if (mrioc->facts.shutdown_timeout)
4580                 timeout = mrioc->facts.shutdown_timeout * 10;
4581
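             /*
              * Poll every 100 ms for the controller to report shutdown
              * complete; 'timeout' above is therefore expressed in 100 ms
              * units.
              */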
4582         do {
4583                 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4584                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4585                     == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
4586                         retval = 0;
4587                         break;
4588                 }
4589                 msleep(100);
4590         } while (--timeout);
4591
4592         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4593         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4594
4595         if (retval) {
4596                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4597                     == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
4598                         ioc_warn(mrioc,
4599                             "shutdown still in progress after timeout\n");
4600         }
4601
4602         ioc_info(mrioc,
4603             "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
4604             (!retval) ? "successful" : "failed", ioc_status,
4605             ioc_config);
4606 }
4607
4608 /**
4609  * mpi3mr_cleanup_ioc - Cleanup controller
4610  * @mrioc: Adapter instance reference
4611  *
4612  * Controller cleanup handler: a message unit reset or soft reset
4613  * and a shutdown notification are issued to the controller.
4614  *
4615  * Return: Nothing.
4616  */
4617 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
4618 {
4619         enum mpi3mr_iocstate ioc_state;
4620
4621         dprint_exit(mrioc, "cleaning up the controller\n");
4622         mpi3mr_ioc_disable_intr(mrioc);
4623
4624         ioc_state = mpi3mr_get_iocstate(mrioc);
4625
4626         if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
4627             (ioc_state == MRIOC_STATE_READY)) {
4628                 if (mpi3mr_issue_and_process_mur(mrioc,
4629                     MPI3MR_RESET_FROM_CTLR_CLEANUP))
4630                         mpi3mr_issue_reset(mrioc,
4631                             MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
4632                             MPI3MR_RESET_FROM_MUR_FAILURE);
4633                 mpi3mr_issue_ioc_shutdown(mrioc);
4634         }
4635         dprint_exit(mrioc, "controller cleanup completed\n");
4636 }
4637
4638 /**
4639  * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
4640  * @mrioc: Adapter instance reference
4641  * @cmdptr: Internal command tracker
4642  *
4643  * Complete an internal driver command with its state indicating
4644  * that it was completed due to reset.
4645  *
4646  * Return: Nothing.
4647  */
4648 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
4649         struct mpi3mr_drv_cmd *cmdptr)
4650 {
4651         if (cmdptr->state & MPI3MR_CMD_PENDING) {
4652                 cmdptr->state |= MPI3MR_CMD_RESET;
4653                 cmdptr->state &= ~MPI3MR_CMD_PENDING;
4654                 if (cmdptr->is_waiting) {
4655                         complete(&cmdptr->done);
4656                         cmdptr->is_waiting = 0;
4657                 } else if (cmdptr->callback)
4658                         cmdptr->callback(mrioc, cmdptr);
4659         }
4660 }
4661
4662 /**
4663  * mpi3mr_flush_drv_cmds - Flush internal driver commands
4664  * @mrioc: Adapter instance reference
4665  *
4666  * Flush all internal driver commands post reset
4667  *
4668  * Return: Nothing.
4669  */
4670 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
4671 {
4672         struct mpi3mr_drv_cmd *cmdptr;
4673         u8 i;
4674
4675         cmdptr = &mrioc->init_cmds;
4676         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4677
4678         cmdptr = &mrioc->cfg_cmds;
4679         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4680
4681         cmdptr = &mrioc->bsg_cmds;
4682         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4683         cmdptr = &mrioc->host_tm_cmds;
4684         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4685
4686         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4687                 cmdptr = &mrioc->dev_rmhs_cmds[i];
4688                 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4689         }
4690
4691         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4692                 cmdptr = &mrioc->evtack_cmds[i];
4693                 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4694         }
4695
4696         cmdptr = &mrioc->pel_cmds;
4697         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4698
4699         cmdptr = &mrioc->pel_abort_cmd;
4700         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4701
4702         cmdptr = &mrioc->transport_cmds;
4703         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4704 }
4705
4706 /**
4707  * mpi3mr_pel_wait_post - Issue PEL Wait
4708  * @mrioc: Adapter instance reference
4709  * @drv_cmd: Internal command tracker
4710  *
4711  * Issue PEL Wait MPI request through admin queue and return.
4712  *
4713  * Return: Nothing.
4714  */
4715 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
4716         struct mpi3mr_drv_cmd *drv_cmd)
4717 {
4718         struct mpi3_pel_req_action_wait pel_wait;
4719
4720         mrioc->pel_abort_requested = false;
4721
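             /*
              * Post an infinite-wait PEL request; the firmware completes
              * it only when a new persistent event log entry is available
              * or the request is aborted.
              */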
4722         memset(&pel_wait, 0, sizeof(pel_wait));
4723         drv_cmd->state = MPI3MR_CMD_PENDING;
4724         drv_cmd->is_waiting = 0;
4725         drv_cmd->callback = mpi3mr_pel_wait_complete;
4726         drv_cmd->ioc_status = 0;
4727         drv_cmd->ioc_loginfo = 0;
4728         pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4729         pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4730         pel_wait.action = MPI3_PEL_ACTION_WAIT;
4731         pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
4732         pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
4733         pel_wait.class = cpu_to_le16(mrioc->pel_class);
4734         pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
4735         dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
4736             mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
4737
4738         if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
4739                 dprint_bsg_err(mrioc,
4740                             "Issuing PELWait: Admin post failed\n");
4741                 drv_cmd->state = MPI3MR_CMD_NOTUSED;
4742                 drv_cmd->callback = NULL;
4743                 drv_cmd->retry_count = 0;
4744                 mrioc->pel_enabled = false;
4745         }
4746 }
4747
4748 /**
4749  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
4750  * @mrioc: Adapter instance reference
4751  * @drv_cmd: Internal command tracker
4752  *
4753  * Issue PEL get sequence number MPI request through admin queue
4754  * and return.
4755  *
4756  * Return: 0 on success, non-zero on failure.
4757  */
4758 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
4759         struct mpi3mr_drv_cmd *drv_cmd)
4760 {
4761         struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
4762         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
4763         int retval = 0;
4764
4765         memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
4766         mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
4767         mrioc->pel_cmds.is_waiting = 0;
4768         mrioc->pel_cmds.ioc_status = 0;
4769         mrioc->pel_cmds.ioc_loginfo = 0;
4770         mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
4771         pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4772         pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4773         pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
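             /*
              * The sequence numbers are returned in the pre-allocated
              * pel_seqnum DMA buffer referenced by the SGE below.
              */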
4774         mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
4775             mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
4776
4777         retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
4778                         sizeof(pel_getseq_req), 0);
4779         if (retval) {
4780                 if (drv_cmd) {
4781                         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4782                         drv_cmd->callback = NULL;
4783                         drv_cmd->retry_count = 0;
4784                 }
4785                 mrioc->pel_enabled = false;
4786         }
4787
4788         return retval;
4789 }
4790
4791 /**
4792  * mpi3mr_pel_wait_complete - PELWait Completion callback
4793  * @mrioc: Adapter instance reference
4794  * @drv_cmd: Internal command tracker
4795  *
4796  * This is the callback handler for the PELWait request. The
4797  * firmware completes a PELWait request when it is aborted or when
4798  * a new PEL entry is available. This sends an AEN to the
4799  * application and, if the PELWait completion is not due to a
4800  * PELAbort, sends a request for the new PEL sequence number.
4801  *
4802  * Return: Nothing.
4803  */
4804 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
4805         struct mpi3mr_drv_cmd *drv_cmd)
4806 {
4807         struct mpi3_pel_reply *pel_reply = NULL;
4808         u16 ioc_status, pe_log_status;
4809         bool do_retry = false;
4810
4811         if (drv_cmd->state & MPI3MR_CMD_RESET)
4812                 goto cleanup_drv_cmd;
4813
4814         ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4815         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
4816                 ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
4817                         __func__, ioc_status, drv_cmd->ioc_loginfo);
4818                 dprint_bsg_err(mrioc,
4819                     "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
4820                     ioc_status, drv_cmd->ioc_loginfo);
4821                 do_retry = true;
4822         }
4823
4824         if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
4825                 pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
4826
4827         if (!pel_reply) {
4828                 dprint_bsg_err(mrioc,
4829                     "pel_wait: failed due to no reply\n");
4830                 goto out_failed;
4831         }
4832
4833         pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
4834         if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
4835             (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
4836                 ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
4837                         __func__, pe_log_status);
4838                 dprint_bsg_err(mrioc,
4839                     "pel_wait: failed due to pel_log_status(0x%04x)\n",
4840                     pe_log_status);
4841                 do_retry = true;
4842         }
4843
4844         if (do_retry) {
4845                 if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
4846                         drv_cmd->retry_count++;
4847                         dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
4848                             drv_cmd->retry_count);
4849                         mpi3mr_pel_wait_post(mrioc, drv_cmd);
4850                         return;
4851                 }
4852                 dprint_bsg_err(mrioc,
4853                     "pel_wait: failed after all retries(%d)\n",
4854                     drv_cmd->retry_count);
4855                 goto out_failed;
4856         }
4857         atomic64_inc(&event_counter);
4858         if (!mrioc->pel_abort_requested) {
4859                 mrioc->pel_cmds.retry_count = 0;
4860                 mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
4861         }
4862
4863         return;
4864 out_failed:
4865         mrioc->pel_enabled = false;
4866 cleanup_drv_cmd:
4867         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4868         drv_cmd->callback = NULL;
4869         drv_cmd->retry_count = 0;
4870 }
4871
4872 /**
4873  * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
4874  * @mrioc: Adapter instance reference
4875  * @drv_cmd: Internal command tracker
4876  *
4877  * This is the callback handler for the PEL get sequence number
4878  * request; a new PEL wait request is issued to the firmware from
4879  * here.
4880  *
4881  * Return: Nothing.
4882  */
4883 void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
4884         struct mpi3mr_drv_cmd *drv_cmd)
4885 {
4886         struct mpi3_pel_reply *pel_reply = NULL;
4887         struct mpi3_pel_seq *pel_seqnum_virt;
4888         u16 ioc_status;
4889         bool do_retry = false;
4890
4891         pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
4892
4893         if (drv_cmd->state & MPI3MR_CMD_RESET)
4894                 goto cleanup_drv_cmd;
4895
4896         ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4897         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
4898                 dprint_bsg_err(mrioc,
4899                     "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
4900                     ioc_status, drv_cmd->ioc_loginfo);
4901                 do_retry = true;
4902         }
4903
4904         if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
4905                 pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
4906         if (!pel_reply) {
4907                 dprint_bsg_err(mrioc,
4908                     "pel_get_seqnum: failed due to no reply\n");
4909                 goto out_failed;
4910         }
4911
4912         if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
4913                 dprint_bsg_err(mrioc,
4914                     "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
4915                     le16_to_cpu(pel_reply->pe_log_status));
4916                 do_retry = true;
4917         }
4918
4919         if (do_retry) {
4920                 if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
4921                         drv_cmd->retry_count++;
4922                         dprint_bsg_err(mrioc,
4923                             "pel_get_seqnum: retrying(%d)\n",
4924                             drv_cmd->retry_count);
4925                         mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
4926                         return;
4927                 }
4928
4929                 dprint_bsg_err(mrioc,
4930                     "pel_get_seqnum: failed after all retries(%d)\n",
4931                     drv_cmd->retry_count);
4932                 goto out_failed;
4933         }
4934         mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
4935         drv_cmd->retry_count = 0;
4936         mpi3mr_pel_wait_post(mrioc, drv_cmd);
4937
4938         return;
4939 out_failed:
4940         mrioc->pel_enabled = false;
4941 cleanup_drv_cmd:
4942         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4943         drv_cmd->callback = NULL;
4944         drv_cmd->retry_count = 0;
4945 }
4946
4947 /**
4948  * mpi3mr_soft_reset_handler - Reset the controller
4949  * @mrioc: Adapter instance reference
4950  * @reset_reason: Reset reason code
4951  * @snapdump: Flag to generate snapdump in firmware or not
4952  *
4953  * This is a handler for recovering the controller by issuing a
4954  * soft reset or a diag fault reset. This is a blocking function;
4955  * while one reset is being executed any other reset requests are
4956  * blocked. All BSG requests are blocked during the reset. If the
4957  * controller reset is successful then the controller is
4958  * reinitialized, otherwise the controller is marked as
4959  * unrecoverable.
4960  *
4961  * If the snapdump bit is set, the controller is issued a diag
4962  * fault reset so that the firmware can create a snapdump; after
4963  * that the firmware raises an F000 fault and the driver issues a
4964  * soft reset to recover from it.
4965  *
4966  * Return: 0 on success, non-zero on failure.
4967  */
4968 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
4969         u32 reset_reason, u8 snapdump)
4970 {
4971         int retval = 0, i;
4972         unsigned long flags;
4973         u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
4974
4975         /* Block the reset handler while a diag save is in progress */
4976         dprint_reset(mrioc,
4977             "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
4978             mrioc->diagsave_timeout);
4979         while (mrioc->diagsave_timeout)
4980                 ssleep(1);
4981         /*
4982          * Block new resets until the currently executing one is finished and
4983          * return the status of the existing reset for all blocked resets
4984          */
4985         dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
4986         if (!mutex_trylock(&mrioc->reset_mutex)) {
4987                 ioc_info(mrioc,
4988                     "controller reset triggered by %s is blocked due to another reset in progress\n",
4989                     mpi3mr_reset_rc_name(reset_reason));
4990                 do {
4991                         ssleep(1);
4992                 } while (mrioc->reset_in_progress == 1);
4993                 ioc_info(mrioc,
4994                     "returning previous reset result(%d) for the reset triggered by %s\n",
4995                     mrioc->prev_reset_result,
4996                     mpi3mr_reset_rc_name(reset_reason));
4997                 return mrioc->prev_reset_result;
4998         }
4999         ioc_info(mrioc, "controller reset is triggered by %s\n",
5000             mpi3mr_reset_rc_name(reset_reason));
5001
5002         mrioc->device_refresh_on = 0;
5003         mrioc->reset_in_progress = 1;
5004         mrioc->stop_bsgs = 1;
5005         mrioc->prev_reset_result = -1;
5006
5007         if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
5008             (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
5009             (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
5010                 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5011                         mrioc->event_masks[i] = -1;
5012
5013                 dprint_reset(mrioc, "soft_reset_handler: masking events\n");
5014                 mpi3mr_issue_event_notification(mrioc);
5015         }
5016
5017         mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
5018
5019         mpi3mr_ioc_disable_intr(mrioc);
5020
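             /*
              * When a snapdump is requested, fault the controller first so
              * the firmware can save its diagnostic data, then poll until
              * the diag-save-in-progress bit clears before issuing the
              * soft reset below.
              */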
5021         if (snapdump) {
5022                 mpi3mr_set_diagsave(mrioc);
5023                 retval = mpi3mr_issue_reset(mrioc,
5024                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
5025                 if (!retval) {
5026                         do {
5027                                 host_diagnostic =
5028                                     readl(&mrioc->sysif_regs->host_diagnostic);
5029                                 if (!(host_diagnostic &
5030                                     MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
5031                                         break;
5032                                 msleep(100);
5033                         } while (--timeout);
5034                 }
5035         }
5036
5037         retval = mpi3mr_issue_reset(mrioc,
5038             MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
5039         if (retval) {
5040                 ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
5041                 goto out;
5042         }
5043         if (mrioc->num_io_throttle_group !=
5044             mrioc->facts.max_io_throttle_group) {
5045                 ioc_err(mrioc,
5046                     "max io throttle group doesn't match old(%d), new(%d)\n",
5047                     mrioc->num_io_throttle_group,
5048                     mrioc->facts.max_io_throttle_group);
5049                 retval = -EPERM;
5050                 goto out;
5051         }
5052
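             /*
              * The reset was issued successfully: flush all outstanding
              * driver commands, host I/O and firmware events, clear the
              * tracking bitmaps and invalidate the cached device handles
              * before reinitializing the controller.
              */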
5053         mpi3mr_flush_delayed_cmd_lists(mrioc);
5054         mpi3mr_flush_drv_cmds(mrioc);
5055         bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
5056         bitmap_clear(mrioc->removepend_bitmap, 0,
5057                      mrioc->dev_handle_bitmap_bits);
5058         bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
5059         mpi3mr_flush_host_io(mrioc);
5060         mpi3mr_cleanup_fwevt_list(mrioc);
5061         mpi3mr_invalidate_devhandles(mrioc);
5062         mpi3mr_free_enclosure_list(mrioc);
5063
5064         if (mrioc->prepare_for_reset) {
5065                 mrioc->prepare_for_reset = 0;
5066                 mrioc->prepare_for_reset_timeout_counter = 0;
5067         }
5068         mpi3mr_memset_buffers(mrioc);
5069         retval = mpi3mr_reinit_ioc(mrioc, 0);
5070         if (retval) {
5071                 pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
5072                     mrioc->name, reset_reason);
5073                 goto out;
5074         }
5075         ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
5076
5077 out:
5078         if (!retval) {
5079                 mrioc->diagsave_timeout = 0;
5080                 mrioc->reset_in_progress = 0;
5081                 mrioc->pel_abort_requested = 0;
5082                 if (mrioc->pel_enabled) {
5083                         mrioc->pel_cmds.retry_count = 0;
5084                         mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
5085                 }
5086
5087                 mrioc->device_refresh_on = 0;
5088
5089                 mrioc->ts_update_counter = 0;
5090                 spin_lock_irqsave(&mrioc->watchdog_lock, flags);
5091                 if (mrioc->watchdog_work_q)
5092                         queue_delayed_work(mrioc->watchdog_work_q,
5093                             &mrioc->watchdog_work,
5094                             msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
5095                 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
5096                 mrioc->stop_bsgs = 0;
5097                 if (mrioc->pel_enabled)
5098                         atomic64_inc(&event_counter);
5099         } else {
5100                 mpi3mr_issue_reset(mrioc,
5101                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
5102                 mrioc->device_refresh_on = 0;
5103                 mrioc->unrecoverable = 1;
5104                 mrioc->reset_in_progress = 0;
5105                 retval = -1;
5106                 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
5107         }
5108         mrioc->prev_reset_result = retval;
5109         mutex_unlock(&mrioc->reset_mutex);
5110         ioc_info(mrioc, "controller reset is %s\n",
5111             ((retval == 0) ? "successful" : "failed"));
5112         return retval;
5113 }
5114
5115
5116 /**
5117  * mpi3mr_free_config_dma_memory - free memory for config page
5118  * @mrioc: Adapter instance reference
5119  * @mem_desc: memory descriptor structure
5120  *
5121  * Check whether the size of the buffer specified by the memory
5122  * descriptor is greater than the default config page size; if so,
5123  * free the memory pointed to by the descriptor.
5124  *
5125  * Return: Nothing.
5126  */
5127 static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
5128         struct dma_memory_desc *mem_desc)
5129 {
5130         if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
5131                 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
5132                     mem_desc->addr, mem_desc->dma_addr);
5133                 mem_desc->addr = NULL;
5134         }
5135 }
5136
5137 /**
5138  * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
5139  * @mrioc: Adapter instance reference
5140  * @mem_desc: Memory descriptor to hold dma memory info
5141  *
5142  * This function allocates new dmaable memory or provides the
5143  * default config page dmaable memory based on the memory size
5144  * described by the descriptor.
5145  *
5146  * Return: 0 on success, non-zero on failure.
5147  */
5148 static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
5149         struct dma_memory_desc *mem_desc)
5150 {
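             /*
              * Requests larger than the default config page get a fresh
              * coherent allocation; smaller requests reuse the
              * pre-allocated cfg_page buffer after zeroing it.
              */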
5151         if (mem_desc->size > mrioc->cfg_page_sz) {
5152                 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
5153                     mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
5154                 if (!mem_desc->addr)
5155                         return -ENOMEM;
5156         } else {
5157                 mem_desc->addr = mrioc->cfg_page;
5158                 mem_desc->dma_addr = mrioc->cfg_page_dma;
5159                 memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
5160         }
5161         return 0;
5162 }
5163
5164 /**
5165  * mpi3mr_post_cfg_req - Issue config requests and wait
5166  * @mrioc: Adapter instance reference
5167  * @cfg_req: Configuration request
5168  * @timeout: Timeout in seconds
5169  * @ioc_status: Pointer to return ioc status
5170  *
5171  * A generic function for posting an MPI3 configuration request to
5172  * the firmware. This blocks for the completion of the request for
5173  * timeout seconds, and if the request times out this function
5174  * faults the controller with the proper reason code.
5175  *
5176  * On successful completion of the request this function returns
5177  * appropriate ioc status from the firmware back to the caller.
5178  *
5179  * Return: 0 on success, non-zero on failure.
5180  */
5181 static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
5182         struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
5183 {
5184         int retval = 0;
5185
5186         mutex_lock(&mrioc->cfg_cmds.mutex);
5187         if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
5188                 retval = -1;
5189                 ioc_err(mrioc, "sending config request failed due to command in use\n");
5190                 mutex_unlock(&mrioc->cfg_cmds.mutex);
5191                 goto out;
5192         }
5193         mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
5194         mrioc->cfg_cmds.is_waiting = 1;
5195         mrioc->cfg_cmds.callback = NULL;
5196         mrioc->cfg_cmds.ioc_status = 0;
5197         mrioc->cfg_cmds.ioc_loginfo = 0;
5198
5199         cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
5200         cfg_req->function = MPI3_FUNCTION_CONFIG;
5201
5202         init_completion(&mrioc->cfg_cmds.done);
5203         dprint_cfg_info(mrioc, "posting config request\n");
5204         if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5205                 dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
5206                     "mpi3_cfg_req");
5207         retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
5208         if (retval) {
5209                 ioc_err(mrioc, "posting config request failed\n");
5210                 goto out_unlock;
5211         }
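             /*
              * Block for up to 'timeout' seconds; if the request does not
              * complete, fault the controller with the config request
              * timeout reason code.
              */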
5212         wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
5213         if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
5214                 mpi3mr_check_rh_fault_ioc(mrioc,
5215                     MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
5216                 ioc_err(mrioc, "config request timed out\n");
5217                 retval = -1;
5218                 goto out_unlock;
5219         }
5220         *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5221         if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
5222                 dprint_cfg_err(mrioc,
5223                     "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
5224                     *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
5225
5226 out_unlock:
5227         mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
5228         mutex_unlock(&mrioc->cfg_cmds.mutex);
5229
5230 out:
5231         return retval;
5232 }
5233
5234 /**
5235  * mpi3mr_process_cfg_req - config page request processor
5236  * @mrioc: Adapter instance reference
5237  * @cfg_req: Configuration request
5238  * @cfg_hdr: Configuration page header
5239  * @timeout: Timeout in seconds
5240  * @ioc_status: Pointer to return ioc status
5241  * @cfg_buf: Memory pointer to copy config page or header
5242  * @cfg_buf_sz: Size of the memory to get config page or header
5243  *
5244  * This is the handler for config page read, write and config page
5245  * header read operations.
5246  *
5247  * This function expects the cfg_req to be populated with page
5248  * type, page number, action for the header read and with page
5249  * address for all other operations.
5250  *
5251  * The cfg_hdr can be passed as NULL when reading the header; for
5252  * read/write page operations the cfg_hdr should point to a valid
5253  * configuration page header.
5254  *
5255  * This allocates dmaable memory based on the size of the config
5256  * buffer and sets the SGE of the cfg_req.
5257  *
5258  * For write actions, the config page data has to be passed in
5259  * the cfg_buf and size of the data has to be mentioned in the
5260  * cfg_buf_sz.
5261  *
5262  * For read/header actions, on successful completion of the
5263  * request with a successful ioc_status, the data is copied into
5264  * the cfg_buf, limited to the minimum of the actual page size
5265  * and cfg_buf_sz.
5266  *
5267  *
5268  * Return: 0 on success, non-zero on failure.
5269  */
5270 static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
5271         struct mpi3_config_request *cfg_req,
5272         struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
5273         void *cfg_buf, u32 cfg_buf_sz)
5274 {
5275         struct dma_memory_desc mem_desc;
5276         int retval = -1;
5277         u8 invalid_action = 0;
5278         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5279
5280         memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
5281
5282         if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
5283                 mem_desc.size = sizeof(struct mpi3_config_page_header);
5284         else {
5285                 if (!cfg_hdr) {
5286                         ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
5287                             cfg_req->action, cfg_req->page_type,
5288                             cfg_req->page_number);
5289                         goto out;
5290                 }
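                     /*
                      * Validate the requested action against the page
                      * attribute from the header: read-only pages permit
                      * only READ_CURRENT, changeable pages reject the
                      * persistent read/write actions.
                      */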
5291                 switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
5292                 case MPI3_CONFIG_PAGEATTR_READ_ONLY:
5293                         if (cfg_req->action
5294                             != MPI3_CONFIG_ACTION_READ_CURRENT)
5295                                 invalid_action = 1;
5296                         break;
5297                 case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
5298                         if ((cfg_req->action ==
5299                              MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
5300                             (cfg_req->action ==
5301                              MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
5302                                 invalid_action = 1;
5303                         break;
5304                 case MPI3_CONFIG_PAGEATTR_PERSISTENT:
5305                 default:
5306                         break;
5307                 }
5308                 if (invalid_action) {
5309                         ioc_err(mrioc,
5310                             "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
5311                             cfg_req->action, cfg_req->page_type,
5312                             cfg_req->page_number, cfg_hdr->page_attribute);
5313                         goto out;
5314                 }
5315                 mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
5316                 cfg_req->page_length = cfg_hdr->page_length;
5317                 cfg_req->page_version = cfg_hdr->page_version;
5318         }
5319         if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
5320                 goto out;
5321
5322         mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
5323             mem_desc.dma_addr);
5324
5325         if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
5326             (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5327                 memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
5328                     cfg_buf_sz));
5329                 dprint_cfg_info(mrioc, "config buffer to be written\n");
5330                 if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5331                         dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5332         }
5333
5334         if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
5335                 goto out;
5336
5337         retval = 0;
5338         if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
5339             (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
5340             (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5341                 memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
5342                     cfg_buf_sz));
5343                 dprint_cfg_info(mrioc, "config buffer read\n");
5344                 if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5345                         dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5346         }
5347
5348 out:
5349         mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
5350         return retval;
5351 }
5352
5353 /**
5354  * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5355  * @mrioc: Adapter instance reference
5356  * @ioc_status: Pointer to return ioc status
5357  * @dev_pg0: Pointer to return device page 0
5358  * @pg_sz: Size of the memory allocated to the page pointer
5359  * @form: The form to be used for addressing the page
5360  * @form_spec: Form specific information like device handle
5361  *
5362  * This is the handler for a config page read of a specific device
5363  * page0. The ioc_status carries the controller-returned ioc_status.
5364  * This routine doesn't check ioc_status to decide whether the
5365  * page read succeeded or not; that is the caller's
5366  * responsibility.
5367  *
5368  * Return: 0 on success, non-zero on failure.
5369  */
5370 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5371         struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
5372 {
5373         struct mpi3_config_page_header cfg_hdr;
5374         struct mpi3_config_request cfg_req;
5375         u32 page_address;
5376
5377         memset(dev_pg0, 0, pg_sz);
5378         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5379         memset(&cfg_req, 0, sizeof(cfg_req));
5380
5381         cfg_req.function = MPI3_FUNCTION_CONFIG;
5382         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5383         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
5384         cfg_req.page_number = 0;
5385         cfg_req.page_address = 0;
5386
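             /*
              * Two-step read: fetch the device page0 header first, then
              * issue READ_CURRENT with the page address built from the
              * form and the form specific device handle.
              */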
5387         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5388             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5389                 ioc_err(mrioc, "device page0 header read failed\n");
5390                 goto out_failed;
5391         }
5392         if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5393                 ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
5394                     *ioc_status);
5395                 goto out_failed;
5396         }
5397         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5398         page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
5399             (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
5400         cfg_req.page_address = cpu_to_le32(page_address);
5401         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5402             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
5403                 ioc_err(mrioc, "device page0 read failed\n");
5404                 goto out_failed;
5405         }
5406         return 0;
5407 out_failed:
5408         return -1;
5409 }
5410
5411
5412 /**
5413  * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5414  * @mrioc: Adapter instance reference
5415  * @ioc_status: Pointer to return ioc status
5416  * @phy_pg0: Pointer to return SAS Phy page 0
5417  * @pg_sz: Size of the memory allocated to the page pointer
5418  * @form: The form to be used for addressing the page
5419  * @form_spec: Form specific information like phy number
5420  *
5421  * This is the handler for a config page read of a specific SAS Phy
5422  * page0. The ioc_status carries the controller-returned ioc_status.
5423  * This routine doesn't check ioc_status to decide whether the
5424  * page read succeeded or not; that is the caller's
5425  * responsibility.
5426  *
5427  * Return: 0 on success, non-zero on failure.
5428  */
5429 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5430         struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
5431         u32 form_spec)
5432 {
5433         struct mpi3_config_page_header cfg_hdr;
5434         struct mpi3_config_request cfg_req;
5435         u32 page_address;
5436
5437         memset(phy_pg0, 0, pg_sz);
5438         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5439         memset(&cfg_req, 0, sizeof(cfg_req));
5440
5441         cfg_req.function = MPI3_FUNCTION_CONFIG;
5442         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5443         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5444         cfg_req.page_number = 0;
5445         cfg_req.page_address = 0;
5446
5447         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5448             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5449                 ioc_err(mrioc, "sas phy page0 header read failed\n");
5450                 goto out_failed;
5451         }
5452         if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5453                 ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
5454                     *ioc_status);
5455                 goto out_failed;
5456         }
5457         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5458         page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5459             (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5460         cfg_req.page_address = cpu_to_le32(page_address);
5461         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5462             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
5463                 ioc_err(mrioc, "sas phy page0 read failed\n");
5464                 goto out_failed;
5465         }
5466         return 0;
5467 out_failed:
5468         return -1;
5469 }
5470
5471 /**
5472  * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
5473  * @mrioc: Adapter instance reference
5474  * @ioc_status: Pointer to return ioc status
5475  * @phy_pg1: Pointer to return SAS Phy page 1
5476  * @pg_sz: Size of the memory allocated to the page pointer
5477  * @form: The form to be used for addressing the page
5478  * @form_spec: Form specific information like phy number
5479  *
5480  * This is the handler for a config page read of a specific SAS Phy
5481  * page1. The ioc_status holds the controller-returned ioc_status.
5482  * This routine does not check ioc_status to decide whether the
5483  * page read succeeded; that is the caller's
5484  * responsibility.
5485  *
5486  * Return: 0 on success, non-zero on failure.
5487  */
5488 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5489         struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
5490         u32 form_spec)
5491 {
5492         struct mpi3_config_page_header cfg_hdr;
5493         struct mpi3_config_request cfg_req;
5494         u32 page_address;
5495
5496         memset(phy_pg1, 0, pg_sz);
5497         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5498         memset(&cfg_req, 0, sizeof(cfg_req));
5499
5500         cfg_req.function = MPI3_FUNCTION_CONFIG;
5501         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5502         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5503         cfg_req.page_number = 1;
5504         cfg_req.page_address = 0;
5505
5506         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5507             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5508                 ioc_err(mrioc, "sas phy page1 header read failed\n");
5509                 goto out_failed;
5510         }
5511         if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5512                 ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
5513                     *ioc_status);
5514                 goto out_failed;
5515         }
5516         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5517         page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5518             (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5519         cfg_req.page_address = cpu_to_le32(page_address);
5520         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5521             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
5522                 ioc_err(mrioc, "sas phy page1 read failed\n");
5523                 goto out_failed;
5524         }
5525         return 0;
5526 out_failed:
5527         return -1;
5528 }
5529
5530
5531 /**
5532  * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
5533  * @mrioc: Adapter instance reference
5534  * @ioc_status: Pointer to return ioc status
5535  * @exp_pg0: Pointer to return SAS Expander page 0
5536  * @pg_sz: Size of the memory allocated to the page pointer
5537  * @form: The form to be used for addressing the page
5538  * @form_spec: Form specific information like device handle
5539  *
5540  * This is the handler for a config page read of a specific SAS
5541  * Expander page0. The ioc_status holds the controller-returned
5542  * ioc_status. This routine does not check ioc_status to decide
5543  * whether the page read succeeded; that is the caller's
5544  * responsibility.
5545  *
5546  * Return: 0 on success, non-zero on failure.
5547  */
5548 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5549         struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
5550         u32 form_spec)
5551 {
5552         struct mpi3_config_page_header cfg_hdr;
5553         struct mpi3_config_request cfg_req;
5554         u32 page_address;
5555
5556         memset(exp_pg0, 0, pg_sz);
5557         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5558         memset(&cfg_req, 0, sizeof(cfg_req));
5559
5560         cfg_req.function = MPI3_FUNCTION_CONFIG;
5561         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5562         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
5563         cfg_req.page_number = 0;
5564         cfg_req.page_address = 0;
5565
5566         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5567             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5568                 ioc_err(mrioc, "expander page0 header read failed\n");
5569                 goto out_failed;
5570         }
5571         if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5572                 ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
5573                     *ioc_status);
5574                 goto out_failed;
5575         }
5576         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5577         page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
5578             (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
5579             MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
5580         cfg_req.page_address = cpu_to_le32(page_address);
5581         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5582             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
5583                 ioc_err(mrioc, "expander page0 read failed\n");
5584                 goto out_failed;
5585         }
5586         return 0;
5587 out_failed:
5588         return -1;
5589 }
5590
5591 /**
5592  * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
5593  * @mrioc: Adapter instance reference
5594  * @ioc_status: Pointer to return ioc status
5595  * @exp_pg1: Pointer to return SAS Expander page 1
5596  * @pg_sz: Size of the memory allocated to the page pointer
5597  * @form: The form to be used for addressing the page
5598  * @form_spec: Form specific information like phy number
5599  *
5600  * This is the handler for a config page read of a specific SAS
5601  * Expander page1. The ioc_status holds the controller-returned
5602  * ioc_status. This routine does not check ioc_status to decide
5603  * whether the page read succeeded; that is the caller's
5604  * responsibility.
5605  *
5606  * Return: 0 on success, non-zero on failure.
5607  */
5608 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5609         struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
5610         u32 form_spec)
5611 {
5612         struct mpi3_config_page_header cfg_hdr;
5613         struct mpi3_config_request cfg_req;
5614         u32 page_address;
5615
5616         memset(exp_pg1, 0, pg_sz);
5617         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5618         memset(&cfg_req, 0, sizeof(cfg_req));
5619
5620         cfg_req.function = MPI3_FUNCTION_CONFIG;
5621         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5622         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
5623         cfg_req.page_number = 1;
5624         cfg_req.page_address = 0;
5625
5626         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5627             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5628                 ioc_err(mrioc, "expander page1 header read failed\n");
5629                 goto out_failed;
5630         }
5631         if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5632                 ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
5633                     *ioc_status);
5634                 goto out_failed;
5635         }
5636         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5637         page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
5638             (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
5639             MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
5640         cfg_req.page_address = cpu_to_le32(page_address);
5641         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5642             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
5643                 ioc_err(mrioc, "expander page1 read failed\n");
5644                 goto out_failed;
5645         }
5646         return 0;
5647 out_failed:
5648         return -1;
5649 }
5650
5651 /**
5652  * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
5653  * @mrioc: Adapter instance reference
5654  * @ioc_status: Pointer to return ioc status
5655  * @encl_pg0: Pointer to return Enclosure page 0
5656  * @pg_sz: Size of the memory allocated to the page pointer
5657  * @form: The form to be used for addressing the page
5658  * @form_spec: Form specific information like device handle
5659  *
5660  * This is the handler for a config page read of a specific Enclosure
5661  * page0. The ioc_status holds the controller-returned ioc_status.
5662  * This routine does not check ioc_status to decide whether the
5663  * page read succeeded; that is the caller's
5664  * responsibility.
5665  *
5666  * Return: 0 on success, non-zero on failure.
5667  */
5668 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5669         struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
5670         u32 form_spec)
5671 {
5672         struct mpi3_config_page_header cfg_hdr;
5673         struct mpi3_config_request cfg_req;
5674         u32 page_address;
5675
5676         memset(encl_pg0, 0, pg_sz);
5677         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5678         memset(&cfg_req, 0, sizeof(cfg_req));
5679
5680         cfg_req.function = MPI3_FUNCTION_CONFIG;
5681         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5682         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
5683         cfg_req.page_number = 0;
5684         cfg_req.page_address = 0;
5685
5686         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5687             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5688                 ioc_err(mrioc, "enclosure page0 header read failed\n");
5689                 goto out_failed;
5690         }
5691         if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5692                 ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
5693                     *ioc_status);
5694                 goto out_failed;
5695         }
5696         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5697         page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
5698             (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
5699         cfg_req.page_address = cpu_to_le32(page_address);
5700         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5701             MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
5702                 ioc_err(mrioc, "enclosure page0 read failed\n");
5703                 goto out_failed;
5704         }
5705         return 0;
5706 out_failed:
5707         return -1;
5708 }
5709
5710
5711 /**
5712  * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
5713  * @mrioc: Adapter instance reference
5714  * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
5715  * @pg_sz: Size of the memory allocated to the page pointer
5716  *
5717  * This is the handler for a config page read of the SAS IO Unit
5718  * page0. This routine checks ioc_status to decide whether the
5719  * page read succeeded.
5720  *
5721  * Return: 0 on success, non-zero on failure.
5722  */
5723 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
5724         struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
5725 {
5726         struct mpi3_config_page_header cfg_hdr;
5727         struct mpi3_config_request cfg_req;
5728         u16 ioc_status = 0;
5729
5730         memset(sas_io_unit_pg0, 0, pg_sz);
5731         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5732         memset(&cfg_req, 0, sizeof(cfg_req));
5733
5734         cfg_req.function = MPI3_FUNCTION_CONFIG;
5735         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5736         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
5737         cfg_req.page_number = 0;
5738         cfg_req.page_address = 0;
5739
5740         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5741             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5742                 ioc_err(mrioc, "sas io unit page0 header read failed\n");
5743                 goto out_failed;
5744         }
5745         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5746                 ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
5747                     ioc_status);
5748                 goto out_failed;
5749         }
5750         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5751
5752         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5753             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
5754                 ioc_err(mrioc, "sas io unit page0 read failed\n");
5755                 goto out_failed;
5756         }
5757         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5758                 ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
5759                     ioc_status);
5760                 goto out_failed;
5761         }
5762         return 0;
5763 out_failed:
5764         return -1;
5765 }
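
/*
 * A minimal sketch, not part of the driver proper, of the usual two-step
 * pattern for reading SAS IO Unit page0, whose length depends on the
 * number of phys: read a small copy first to learn the phy count, then
 * allocate the full page and read it again. The num_phys and phy_data
 * field names are assumed from the MPI3 configuration headers.
 */
static struct mpi3_sas_io_unit_page0 * __maybe_unused
mpi3mr_example_read_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc)
{
        struct mpi3_sas_io_unit_page0 tmp_pg0, *sas_io_unit_pg0;
        u16 pg_sz;

        /* fixed-size read to discover the phy count */
        if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, &tmp_pg0, sizeof(tmp_pg0)))
                return NULL;

        /* size the full page to cover the per-phy entries */
        pg_sz = struct_size(sas_io_unit_pg0, phy_data, tmp_pg0.num_phys);
        sas_io_unit_pg0 = kzalloc(pg_sz, GFP_KERNEL);
        if (!sas_io_unit_pg0)
                return NULL;

        if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, pg_sz)) {
                kfree(sas_io_unit_pg0);
                return NULL;
        }
        return sas_io_unit_pg0;
}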
5766
5767 /**
5768  * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
5769  * @mrioc: Adapter instance reference
5770  * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
5771  * @pg_sz: Size of the memory allocated to the page pointer
5772  *
5773  * This is the handler for a config page read of the SAS IO Unit
5774  * page1. This routine checks ioc_status to decide whether the
5775  * page read succeeded.
5776  *
5777  * Return: 0 on success, non-zero on failure.
5778  */
5779 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
5780         struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
5781 {
5782         struct mpi3_config_page_header cfg_hdr;
5783         struct mpi3_config_request cfg_req;
5784         u16 ioc_status = 0;
5785
5786         memset(sas_io_unit_pg1, 0, pg_sz);
5787         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5788         memset(&cfg_req, 0, sizeof(cfg_req));
5789
5790         cfg_req.function = MPI3_FUNCTION_CONFIG;
5791         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5792         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
5793         cfg_req.page_number = 1;
5794         cfg_req.page_address = 0;
5795
5796         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5797             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5798                 ioc_err(mrioc, "sas io unit page1 header read failed\n");
5799                 goto out_failed;
5800         }
5801         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5802                 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
5803                     ioc_status);
5804                 goto out_failed;
5805         }
5806         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5807
5808         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5809             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
5810                 ioc_err(mrioc, "sas io unit page1 read failed\n");
5811                 goto out_failed;
5812         }
5813         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5814                 ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
5815                     ioc_status);
5816                 goto out_failed;
5817         }
5818         return 0;
5819 out_failed:
5820         return -1;
5821 }
5822
5823 /**
5824  * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
5825  * @mrioc: Adapter instance reference
5826  * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
5827  * @pg_sz: Size of the memory allocated to the page pointer
5828  *
5829  * This is the handler for a config page write of the SAS IO Unit
5830  * page1. This routine checks ioc_status to decide whether the
5831  * page write succeeded. This modifies both the current
5832  * and the persistent page.
5833  *
5834  * Return: 0 on success, non-zero on failure.
5835  */
5836 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
5837         struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
5838 {
5839         struct mpi3_config_page_header cfg_hdr;
5840         struct mpi3_config_request cfg_req;
5841         u16 ioc_status = 0;
5842
5843         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5844         memset(&cfg_req, 0, sizeof(cfg_req));
5845
5846         cfg_req.function = MPI3_FUNCTION_CONFIG;
5847         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5848         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
5849         cfg_req.page_number = 1;
5850         cfg_req.page_address = 0;
5851
5852         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5853             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5854                 ioc_err(mrioc, "sas io unit page1 header read failed\n");
5855                 goto out_failed;
5856         }
5857         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5858                 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
5859                     ioc_status);
5860                 goto out_failed;
5861         }
5862         cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
5863
5864         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5865             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
5866                 ioc_err(mrioc, "sas io unit page1 write current failed\n");
5867                 goto out_failed;
5868         }
5869         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5870                 ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
5871                     ioc_status);
5872                 goto out_failed;
5873         }
5874
5875         cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
5876
5877         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5878             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
5879                 ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
5880                 goto out_failed;
5881         }
5882         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5883                 ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
5884                     ioc_status);
5885                 goto out_failed;
5886         }
5887         return 0;
5888 out_failed:
5889         return -1;
5890 }
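
/*
 * A minimal sketch, not part of the driver proper, of the read-modify-write
 * pattern these routines support: fetch the current SAS IO Unit page1,
 * adjust the desired fields, and write it back so that both the current
 * and the persistent copies are updated. pg_sz is assumed to have been
 * sized by the caller to cover the per-phy data; the modification step is
 * left as a placeholder.
 */
static int __maybe_unused mpi3mr_example_update_sas_io_unit_pg1(
        struct mpi3mr_ioc *mrioc, u16 pg_sz)
{
        struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1;
        int ret = -1;

        sas_io_unit_pg1 = kzalloc(pg_sz, GFP_KERNEL);
        if (!sas_io_unit_pg1)
                return -1;

        if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, pg_sz))
                goto out;

        /* ... adjust phy flags or other fields in sas_io_unit_pg1 here ... */

        ret = mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, pg_sz);
out:
        kfree(sas_io_unit_pg1);
        return ret;
}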
5891
5892 /**
5893  * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
5894  * @mrioc: Adapter instance reference
5895  * @driver_pg1: Pointer to return Driver page 1
5896  * @pg_sz: Size of the memory allocated to the page pointer
5897  *
5898  * This is the handler for a config page read of the Driver page1.
5899  * This routine checks ioc_status to decide whether the page
5900  * read succeeded.
5901  *
5902  * Return: 0 on success, non-zero on failure.
5903  */
5904 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
5905         struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
5906 {
5907         struct mpi3_config_page_header cfg_hdr;
5908         struct mpi3_config_request cfg_req;
5909         u16 ioc_status = 0;
5910
5911         memset(driver_pg1, 0, pg_sz);
5912         memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5913         memset(&cfg_req, 0, sizeof(cfg_req));
5914
5915         cfg_req.function = MPI3_FUNCTION_CONFIG;
5916         cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5917         cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
5918         cfg_req.page_number = 1;
5919         cfg_req.page_address = 0;
5920
5921         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5922             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5923                 ioc_err(mrioc, "driver page1 header read failed\n");
5924                 goto out_failed;
5925         }
5926         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5927                 ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
5928                     ioc_status);
5929                 goto out_failed;
5930         }
5931         cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5932
5933         if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5934             MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
5935                 ioc_err(mrioc, "driver page1 read failed\n");
5936                 goto out_failed;
5937         }
5938         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5939                 ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
5940                     ioc_status);
5941                 goto out_failed;
5942         }
5943         return 0;
5944 out_failed:
5945         return -1;
5946 }