// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION          "2.1.22-040"
#define DRIVER_MAJOR            2
#define DRIVER_MINOR            1
#define DRIVER_RELEASE          22
#define DRIVER_REVISION         40

#define DRIVER_NAME             "Microchip SmartPQI Driver (v" \
                                DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT       "smartpqi"

#define PQI_EXTRA_SGL_MEMORY    (12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS                       5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS      10

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
        DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

struct pqi_cmd_priv {
        int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
        return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
        enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
        struct pqi_queue_group *queue_group, enum pqi_io_path path,
        struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
        struct pqi_iu_header *request, unsigned int flags,
        struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
        unsigned int cdb_length, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
        struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
        struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
        NONE,
        REBOOT,
        PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
        enum pqi_lockup_action  action;
        char                    *name;
} pqi_lockup_actions[] = {
        {
                .action = NONE,
                .name = "none",
        },
        {
                .action = REBOOT,
                .name = "reboot",
        },
        {
                .action = PANIC,
                .name = "panic",
        },
};

static unsigned int pqi_supported_event_types[] = {
        PQI_EVENT_TYPE_HOTPLUG,
        PQI_EVENT_TYPE_HARDWARE,
        PQI_EVENT_TYPE_PHYSICAL_DEVICE,
        PQI_EVENT_TYPE_LOGICAL_DEVICE,
        PQI_EVENT_TYPE_OFA,
        PQI_EVENT_TYPE_AIO_STATE_CHANGE,
        PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
        pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
        "Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
        pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
        "Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
        pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
        "Disable controller shutdown when controller locked up.");

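/*
 * Example usage (illustrative): "modprobe smartpqi lockup_action=reboot".
 * The accepted strings are the .name fields of pqi_lockup_actions[] above.
 */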
static char *pqi_lockup_action_param;
module_param_named(lockup_action,
        pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
        "\t\tSupported: none, reboot, panic\n"
        "\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
        pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
        pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
        pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
        "Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
        pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
        "Timeout in seconds for driver to wait for controller ready.");

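/*
 * Strings indexed by the SA_RAID_* level values defined below; the order
 * here must match those values (see pqi_raid_level_to_string()).
 */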
static char *raid_levels[] = {
        "RAID-0",
        "RAID-4",
        "RAID-1(1+0)",
        "RAID-5",
        "RAID-5+1",
        "RAID-6",
        "RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
        if (raid_level < ARRAY_SIZE(raid_levels))
                return raid_levels[raid_level];

        return "RAID UNKNOWN";
}

#define SA_RAID_0               0
#define SA_RAID_4               1
#define SA_RAID_1               2       /* also used for RAID 10 */
#define SA_RAID_5               3       /* also used for RAID 50 */
#define SA_RAID_51              4
#define SA_RAID_6               5       /* also used for RAID 60 */
#define SA_RAID_TRIPLE          6       /* also used for RAID 1+0 Triple */
#define SA_RAID_MAX             SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN         0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
        pqi_prep_for_scsi_done(scmd);
        scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
        sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
        return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
        return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
        return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
        return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
        if (ctrl_info->controller_online)
                if (!sis_is_firmware_running(ctrl_info))
                        pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
        return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

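/*
 * Bits kept in the SIS driver scratch register. The scratch register
 * persists across driver restarts, which lets a newly loaded driver
 * discover whether the controller was left in PQI or SIS mode and
 * whether firmware triage is supported.
 */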
#define PQI_DRIVER_SCRATCH_PQI_MODE                     0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED          0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
        return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
        enum pqi_ctrl_mode mode)
{
        u32 driver_scratch;

        driver_scratch = sis_read_driver_scratch(ctrl_info);

        if (mode == PQI_MODE)
                driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
        else
                driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

        sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
        return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
        u32 driver_scratch;

        driver_scratch = sis_read_driver_scratch(ctrl_info);

        if (is_supported)
                driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
        else
                driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

        sis_write_driver_scratch(ctrl_info, driver_scratch);
}

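/*
 * Scan blocking: pqi_ctrl_block_scan() sets the scan_blocked flag and then
 * takes scan_mutex, so a scan that is already in progress completes before
 * the caller proceeds; pqi_ctrl_unblock_scan() reverses both steps.
 */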
static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
        ctrl_info->scan_blocked = true;
        mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
        ctrl_info->scan_blocked = false;
        mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
        return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
        mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
        mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
        struct Scsi_Host *shost;
        unsigned int num_loops;
        int msecs_sleep;

        shost = ctrl_info->scsi_host;

        scsi_block_requests(shost);

        num_loops = 0;
        msecs_sleep = 20;
        while (scsi_host_busy(shost)) {
                num_loops++;
                if (num_loops == 10)
                        msecs_sleep = 500;
                msleep(msecs_sleep);
        }
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
        scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
        atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
        atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
        return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
        ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
        ctrl_info->block_requests = false;
        wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
        if (!pqi_ctrl_blocked(ctrl_info))
                return;

        atomic_inc(&ctrl_info->num_blocked_threads);
        wait_event(ctrl_info->block_requests_wait,
                !pqi_ctrl_blocked(ctrl_info));
        atomic_dec(&ctrl_info->num_blocked_threads);
}

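/*
 * The controller is considered quiesced once every thread still marked
 * busy (pqi_ctrl_busy()) is accounted for by a thread blocked in
 * pqi_wait_if_ctrl_blocked(); i.e. num_busy_threads no longer exceeds
 * num_blocked_threads.
 */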
#define PQI_QUIESCE_WARNING_TIMEOUT_SECS                10

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
        unsigned long start_jiffies;
        unsigned long warning_timeout;
        bool displayed_warning;

        displayed_warning = false;
        start_jiffies = jiffies;
        warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

        while (atomic_read(&ctrl_info->num_busy_threads) >
                atomic_read(&ctrl_info->num_blocked_threads)) {
                if (time_after(jiffies, warning_timeout)) {
                        dev_warn(&ctrl_info->pci_dev->dev,
                                "waiting %u seconds for driver activity to quiesce\n",
                                jiffies_to_msecs(jiffies - start_jiffies) / 1000);
                        displayed_warning = true;
                        warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
                }
                usleep_range(1000, 2000);
        }

        if (displayed_warning)
                dev_warn(&ctrl_info->pci_dev->dev,
                        "driver activity quiesced after waiting for %u seconds\n",
                        jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
        return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
        mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
        mutex_unlock(&ctrl_info->ofa_mutex);
}

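/*
 * The lock/unlock pair below is intentional: taking ofa_mutex blocks the
 * caller until any in-progress Online Firmware Activation (which holds the
 * mutex via pqi_ctrl_ofa_start()) has finished, and releasing it
 * immediately afterwards leaves no state behind.
 */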
static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
        mutex_lock(&ctrl_info->ofa_mutex);
        mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
        return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
        device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
        return device->in_remove;
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
        int index;

        for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
                if (event_type == pqi_supported_event_types[index])
                        return index;

        return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
        return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
        unsigned long delay)
{
        if (pqi_ctrl_offline(ctrl_info))
                return;

        schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
        pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY   (10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
        pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
        cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
        if (!ctrl_info->heartbeat_counter)
                return 0;

        return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
        return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
        u8 status;

        status = pqi_read_soft_reset_status(ctrl_info);
        status &= ~PQI_SOFT_RESET_ABORT;
        writeb(status, ctrl_info->soft_reset_status);
}

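/*
 * Map the block layer's real-time I/O priority class onto the device's NCQ
 * priority feature: when ncq_prio_enable is set, read/write commands issued
 * with IOPRIO_CLASS_RT are flagged as high-priority AIO requests.
 */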
static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
        bool io_high_prio;
        int priority_class;

        io_high_prio = false;

        if (device->ncq_prio_enable) {
                priority_class =
                        IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
                if (priority_class == IOPRIO_CLASS_RT) {
                        /* Set NCQ priority for read/write commands. */
                        switch (scmd->cmnd[0]) {
                        case WRITE_16:
                        case READ_16:
                        case WRITE_12:
                        case READ_12:
                        case WRITE_10:
                        case READ_10:
                        case WRITE_6:
                        case READ_6:
                                io_high_prio = true;
                                break;
                        }
                }
        }

        return io_high_prio;
}

static int pqi_map_single(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *sg_descriptor, void *buffer,
        size_t buffer_length, enum dma_data_direction data_direction)
{
        dma_addr_t bus_address;

        if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
                return 0;

        bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
                data_direction);
        if (dma_mapping_error(&pci_dev->dev, bus_address))
                return -ENOMEM;

        put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
        put_unaligned_le32(buffer_length, &sg_descriptor->length);
        put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

        return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *descriptors, int num_descriptors,
        enum dma_data_direction data_direction)
{
        int i;

        if (data_direction == DMA_NONE)
                return;

        for (i = 0; i < num_descriptors; i++)
                dma_unmap_single(&pci_dev->dev,
                        (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
                        get_unaligned_le32(&descriptors[i].length),
                        data_direction);
}

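/*
 * Build a RAID path request for the given BMIC/CISS command and DMA-map the
 * data buffer. On success the buffer is left mapped; the caller must call
 * pqi_pci_unmap() once the request completes (see
 * pqi_send_scsi_raid_request() for the canonical sequence).
 */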
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
        struct pqi_raid_path_request *request, u8 cmd,
        u8 *scsi3addr, void *buffer, size_t buffer_length,
        u16 vpd_page, enum dma_data_direction *dir)
{
        u8 *cdb;
        size_t cdb_length = buffer_length;

        memset(request, 0, sizeof(*request));

        request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
        put_unaligned_le16(offsetof(struct pqi_raid_path_request,
                sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
                &request->header.iu_length);
        put_unaligned_le32(buffer_length, &request->buffer_length);
        memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
        request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

        cdb = request->cdb;

        switch (cmd) {
        case INQUIRY:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = INQUIRY;
                if (vpd_page & VPD_PAGE) {
                        cdb[1] = 0x1;
                        cdb[2] = (u8)vpd_page;
                }
                cdb[4] = (u8)cdb_length;
                break;
        case CISS_REPORT_LOG:
        case CISS_REPORT_PHYS:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = cmd;
                if (cmd == CISS_REPORT_PHYS) {
                        if (ctrl_info->rpl_extended_format_4_5_supported)
                                cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
                        else
                                cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
                } else {
                        cdb[1] = ctrl_info->ciss_report_log_flags;
                }
                put_unaligned_be32(cdb_length, &cdb[6]);
                break;
        case CISS_GET_RAID_MAP:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = CISS_READ;
                cdb[1] = CISS_GET_RAID_MAP;
                put_unaligned_be32(cdb_length, &cdb[6]);
                break;
        case SA_FLUSH_CACHE:
                request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
                request->data_direction = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = BMIC_FLUSH_CACHE;
                put_unaligned_be16(cdb_length, &cdb[7]);
                break;
        case BMIC_SENSE_DIAG_OPTIONS:
                cdb_length = 0;
                fallthrough;
        case BMIC_IDENTIFY_CONTROLLER:
        case BMIC_IDENTIFY_PHYSICAL_DEVICE:
        case BMIC_SENSE_SUBSYSTEM_INFORMATION:
        case BMIC_SENSE_FEATURE:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = BMIC_READ;
                cdb[6] = cmd;
                put_unaligned_be16(cdb_length, &cdb[7]);
                break;
        case BMIC_SET_DIAG_OPTIONS:
                cdb_length = 0;
                fallthrough;
        case BMIC_WRITE_HOST_WELLNESS:
                request->data_direction = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = cmd;
                put_unaligned_be16(cdb_length, &cdb[7]);
                break;
        case BMIC_CSMI_PASSTHRU:
                request->data_direction = SOP_BIDIRECTIONAL;
                cdb[0] = BMIC_WRITE;
                cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
                cdb[6] = cmd;
                put_unaligned_be16(cdb_length, &cdb[7]);
                break;
        default:
                dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
                break;
        }

        switch (request->data_direction) {
        case SOP_READ_FLAG:
                *dir = DMA_FROM_DEVICE;
                break;
        case SOP_WRITE_FLAG:
                *dir = DMA_TO_DEVICE;
                break;
        case SOP_NO_DIRECTION_FLAG:
                *dir = DMA_NONE;
                break;
        default:
                *dir = DMA_BIDIRECTIONAL;
                break;
        }

        return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
                buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
        io_request->scmd = NULL;
        io_request->status = 0;
        io_request->error_info = NULL;
        io_request->raid_bypass = false;
}

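/*
 * I/O request slot allocation: requests coming from the SCSI midlayer reuse
 * the block layer tag as an index into io_request_pool[], so no searching is
 * needed; internal/IOCTL requests share PQI_RESERVED_IO_SLOTS slots starting
 * at index scsi_ml_can_queue and spin until one is free.
 */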
static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
        struct pqi_io_request *io_request;
        u16 i;

        if (scmd) { /* SML I/O request */
                u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

                i = blk_mq_unique_tag_to_tag(blk_tag);
                io_request = &ctrl_info->io_request_pool[i];
                if (atomic_inc_return(&io_request->refcount) > 1) {
                        atomic_dec(&io_request->refcount);
                        return NULL;
                }
        } else { /* IOCTL or driver internal request */
                /*
                 * benignly racy - may have to wait for an open slot.
                 * command slot range is scsi_ml_can_queue -
                 *         [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
                 */
                i = 0;
                while (1) {
                        io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
                        if (atomic_inc_return(&io_request->refcount) == 1)
                                break;
                        atomic_dec(&io_request->refcount);
                        i = (i + 1) % PQI_RESERVED_IO_SLOTS;
                }
        }

        if (io_request)
                pqi_reinit_io_request(io_request);

        return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
        atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
        u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
        struct pqi_raid_error_info *error_info)
{
        int rc;
        struct pqi_raid_path_request request;
        enum dma_data_direction dir;

        rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
                buffer, buffer_length, vpd_page, &dir);
        if (rc)
                return rc;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

        return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
        u8 cmd, void *buffer, size_t buffer_length)
{
        return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
                buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
        u8 cmd, void *buffer, size_t buffer_length,
        struct pqi_raid_error_info *error_info)
{
        return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
                buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
        struct bmic_identify_controller *buffer)
{
        return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
                buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
        struct bmic_sense_subsystem_info *sense_info)
{
        return pqi_send_ctrl_raid_request(ctrl_info,
                BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
                sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
        u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
        return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
                buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device,
        struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
        int rc;
        enum dma_data_direction dir;
        u16 bmic_device_index;
        struct pqi_raid_path_request request;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
                buffer_length, 0, &dir);
        if (rc)
                return rc;

        bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
        request.cdb[2] = (u8)bmic_device_index;
        request.cdb[9] = (u8)(bmic_device_index >> 8);

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

        return rc;
}

static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
        u32 bytes;

        bytes = get_unaligned_le16(limit);
        if (bytes == 0)
                bytes = ~0;
        else
                bytes *= 1024;

        return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
        struct bmic_sense_feature_buffer_header header;
        struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

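/*
 * Minimum acceptable lengths for the sense-feature AIO subpage: the firmware
 * response is only trusted if it is long enough to contain every field up to
 * and including max_write_raid_1_10_3drive, hence the offsetofend() based
 * definitions below.
 */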
#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH       \
        offsetofend(struct bmic_sense_feature_buffer, \
                aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH      \
        (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
                max_write_raid_1_10_3drive) - \
                sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        enum dma_data_direction dir;
        struct pqi_raid_path_request request;
        struct bmic_sense_feature_buffer *buffer;

        buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
                buffer, sizeof(*buffer), 0, &dir);
        if (rc)
                goto error;

        request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
        request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

        if (rc)
                goto error;

        if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
                buffer->header.subpage_code !=
                        BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
                get_unaligned_le16(&buffer->header.buffer_length) <
                        MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
                buffer->aio_subpage.header.page_code !=
                        BMIC_SENSE_FEATURE_IO_PAGE ||
                buffer->aio_subpage.header.subpage_code !=
                        BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
                get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
                        MINIMUM_AIO_SUBPAGE_LENGTH) {
                goto error;
        }

        ctrl_info->max_transfer_encrypted_sas_sata =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_transfer_encrypted_sas_sata);

        ctrl_info->max_transfer_encrypted_nvme =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_transfer_encrypted_nvme);

        ctrl_info->max_write_raid_5_6 =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_write_raid_5_6);

        ctrl_info->max_write_raid_1_10_2drive =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_write_raid_1_10_2drive);

        ctrl_info->max_write_raid_1_10_3drive =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
        kfree(buffer);

        return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
        enum bmic_flush_cache_shutdown_event shutdown_event)
{
        int rc;
        struct bmic_flush_cache *flush_cache;

        flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
        if (!flush_cache)
                return -ENOMEM;

        flush_cache->shutdown_event = shutdown_event;

        rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
                sizeof(*flush_cache));

        kfree(flush_cache);

        return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
        struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
        struct pqi_raid_error_info *error_info)
{
        return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
                buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA           (1U << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_diag_options *diag;

        diag = kzalloc(sizeof(*diag), GFP_KERNEL);
        if (!diag)
                return -ENOMEM;

        rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
                diag, sizeof(*diag));
        if (rc)
                goto out;

        diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

        rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
                sizeof(*diag));

out:
        kfree(diag);

        return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
        void *buffer, size_t buffer_length)
{
        return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
                buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
        u8      start_tag[4];
        u8      driver_version_tag[2];
        __le16  driver_version_length;
        char    driver_version[32];
        u8      dont_write_tag[2];
        u8      end_tag[2];
};

#pragma pack()

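/*
 * Host wellness entries are TLV-style blobs: a "<HW>" start tag, a
 * two-character field tag ("DV" for driver version, "TD" for time/date)
 * followed by a little-endian length and the payload, then "DW"
 * (don't write) and a "ZZ" end tag.
 */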
static int pqi_write_driver_version_to_host_wellness(
        struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_host_wellness_driver_version *buffer;
        size_t buffer_length;

        buffer_length = sizeof(*buffer);

        buffer = kmalloc(buffer_length, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        buffer->start_tag[0] = '<';
        buffer->start_tag[1] = 'H';
        buffer->start_tag[2] = 'W';
        buffer->start_tag[3] = '>';
        buffer->driver_version_tag[0] = 'D';
        buffer->driver_version_tag[1] = 'V';
        put_unaligned_le16(sizeof(buffer->driver_version),
                &buffer->driver_version_length);
        strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
                sizeof(buffer->driver_version) - 1);
        buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
        buffer->dont_write_tag[0] = 'D';
        buffer->dont_write_tag[1] = 'W';
        buffer->end_tag[0] = 'Z';
        buffer->end_tag[1] = 'Z';

        rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

        kfree(buffer);

        return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
        u8      start_tag[4];
        u8      time_tag[2];
        __le16  time_length;
        u8      time[8];
        u8      dont_write_tag[2];
        u8      end_tag[2];
};

#pragma pack()

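/*
 * The 8-byte wellness time field is BCD encoded:
 * time[0..2] = hour/minute/second, time[3] = 0 (unused),
 * time[4..5] = month/day, time[6..7] = century/year within century.
 */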
static int pqi_write_current_time_to_host_wellness(
        struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_host_wellness_time *buffer;
        size_t buffer_length;
        time64_t local_time;
        unsigned int year;
        struct tm tm;

        buffer_length = sizeof(*buffer);

        buffer = kmalloc(buffer_length, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        buffer->start_tag[0] = '<';
        buffer->start_tag[1] = 'H';
        buffer->start_tag[2] = 'W';
        buffer->start_tag[3] = '>';
        buffer->time_tag[0] = 'T';
        buffer->time_tag[1] = 'D';
        put_unaligned_le16(sizeof(buffer->time),
                &buffer->time_length);

        local_time = ktime_get_real_seconds();
        time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
        year = tm.tm_year + 1900;

        buffer->time[0] = bin2bcd(tm.tm_hour);
        buffer->time[1] = bin2bcd(tm.tm_min);
        buffer->time[2] = bin2bcd(tm.tm_sec);
        buffer->time[3] = 0;
        buffer->time[4] = bin2bcd(tm.tm_mon + 1);
        buffer->time[5] = bin2bcd(tm.tm_mday);
        buffer->time[6] = bin2bcd(year / 100);
        buffer->time[7] = bin2bcd(year % 100);

        buffer->dont_write_tag[0] = 'D';
        buffer->dont_write_tag[1] = 'W';
        buffer->end_tag[0] = 'Z';
        buffer->end_tag[1] = 'Z';

        rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

        kfree(buffer);

        return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
        int rc;
        struct pqi_ctrl_info *ctrl_info;

        ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
                update_time_work);

        rc = pqi_write_current_time_to_host_wellness(ctrl_info);
        if (rc)
                dev_warn(&ctrl_info->pci_dev->dev,
                        "error updating time on controller\n");

        schedule_delayed_work(&ctrl_info->update_time_work,
                PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
        schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
        cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
        size_t buffer_length)
{
        return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
        int rc;
        size_t lun_list_length;
        size_t lun_data_length;
        size_t new_lun_list_length;
        void *lun_data = NULL;
        struct report_lun_header *report_lun_header;

        report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
        if (!report_lun_header) {
                rc = -ENOMEM;
                goto out;
        }

        rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
        if (rc)
                goto out;

        lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

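        /*
         * The list may grow between the header-only probe above and the
         * full read below; if the controller reports a longer list than
         * was allocated for, retry with the larger size.
         */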
again:
        lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

        lun_data = kmalloc(lun_data_length, GFP_KERNEL);
        if (!lun_data) {
                rc = -ENOMEM;
                goto out;
        }

        if (lun_list_length == 0) {
                memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
                goto out;
        }

        rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
        if (rc)
                goto out;

        new_lun_list_length =
                get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

        if (new_lun_list_length > lun_list_length) {
                lun_list_length = new_lun_list_length;
                kfree(lun_data);
                goto again;
        }

out:
        kfree(report_lun_header);

        if (rc) {
                kfree(lun_data);
                lun_data = NULL;
        }

        *buffer = lun_data;

        return rc;
}

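/*
 * Report physical LUNs. If the controller only returns the older extended
 * format 2 (8-byte WWIDs), convert the list to the extended format 4 layout
 * (16-byte WWIDs, zero-padded) so that callers only ever see one format.
 */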
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
        int rc;
        unsigned int i;
        u8 rpl_response_format;
        u32 num_physicals;
        void *rpl_list;
        struct report_lun_header *rpl_header;
        struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
        struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

        rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
        if (rc)
                return rc;

        if (ctrl_info->rpl_extended_format_4_5_supported) {
                rpl_header = rpl_list;
                rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
                if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
                        *buffer = rpl_list;
                        return 0;
                } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
                        dev_err(&ctrl_info->pci_dev->dev,
                                "RPL returned unsupported data format %u\n",
                                rpl_response_format);
                        return -EINVAL;
                } else {
                        dev_warn(&ctrl_info->pci_dev->dev,
                                "RPL returned extended format 2 instead of 4\n");
                }
        }

        rpl_8byte_wwid_list = rpl_list;
        num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) /
                sizeof(rpl_8byte_wwid_list->lun_entries[0]);

        rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
                                                   num_physicals), GFP_KERNEL);
        if (!rpl_16byte_wwid_list)
                return -ENOMEM;

        put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
                &rpl_16byte_wwid_list->header.list_length);
        rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

        for (i = 0; i < num_physicals; i++) {
                memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid,
                        &rpl_8byte_wwid_list->lun_entries[i].lunid,
                        sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
                memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0],
                        &rpl_8byte_wwid_list->lun_entries[i].wwid,
                        sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
                memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
                rpl_16byte_wwid_list->lun_entries[i].device_type =
                        rpl_8byte_wwid_list->lun_entries[i].device_type;
                rpl_16byte_wwid_list->lun_entries[i].device_flags =
                        rpl_8byte_wwid_list->lun_entries[i].device_flags;
                rpl_16byte_wwid_list->lun_entries[i].lun_count =
                        rpl_8byte_wwid_list->lun_entries[i].lun_count;
                rpl_16byte_wwid_list->lun_entries[i].redundant_paths =
                        rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
                rpl_16byte_wwid_list->lun_entries[i].aio_handle =
                        rpl_8byte_wwid_list->lun_entries[i].aio_handle;
        }

        kfree(rpl_8byte_wwid_list);
        *buffer = rpl_16byte_wwid_list;

        return 0;
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
        return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
        struct report_phys_lun_16byte_wwid_list **physdev_list,
        struct report_log_lun_list **logdev_list)
{
        int rc;
        size_t logdev_list_length;
        size_t logdev_data_length;
        struct report_log_lun_list *internal_logdev_list;
        struct report_log_lun_list *logdev_data;
        struct report_lun_header report_lun_header;

        rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "report physical LUNs failed\n");

        rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "report logical LUNs failed\n");

        /*
         * Tack the controller itself onto the end of the logical device list
         * by adding a list entry that is all zeros.
         */

        logdev_data = *logdev_list;

        if (logdev_data) {
                logdev_list_length =
                        get_unaligned_be32(&logdev_data->header.list_length);
        } else {
                memset(&report_lun_header, 0, sizeof(report_lun_header));
                logdev_data =
                        (struct report_log_lun_list *)&report_lun_header;
                logdev_list_length = 0;
        }

        logdev_data_length = sizeof(struct report_lun_header) +
                logdev_list_length;

        internal_logdev_list = kmalloc(logdev_data_length +
                sizeof(struct report_log_lun), GFP_KERNEL);
        if (!internal_logdev_list) {
                kfree(*logdev_list);
                *logdev_list = NULL;
                return -ENOMEM;
        }

        memcpy(internal_logdev_list, logdev_data, logdev_data_length);
        memset((u8 *)internal_logdev_list + logdev_data_length, 0,
                sizeof(struct report_log_lun));
        put_unaligned_be32(logdev_list_length +
                sizeof(struct report_log_lun),
                &internal_logdev_list->header.list_length);

        kfree(*logdev_list);
        *logdev_list = internal_logdev_list;

        return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
        int bus, int target, int lun)
{
        device->bus = bus;
        device->target = target;
        device->lun = lun;
}

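/*
 * Derive bus/target/lun from the first 4 bytes of the SCSI address:
 * external RAID volumes encode the target in bits 16-29 and the LUN in the
 * low byte, while internal logical volumes use the low 14 bits as the LUN
 * with target 0.
 */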
1336 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1337 {
1338         u8 *scsi3addr;
1339         u32 lunid;
1340         int bus;
1341         int target;
1342         int lun;
1343
1344         scsi3addr = device->scsi3addr;
1345         lunid = get_unaligned_le32(scsi3addr);
1346
1347         if (pqi_is_hba_lunid(scsi3addr)) {
1348                 /* The specified device is the controller. */
1349                 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1350                 device->target_lun_valid = true;
1351                 return;
1352         }
1353
1354         if (pqi_is_logical_device(device)) {
1355                 if (device->is_external_raid_device) {
1356                         bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1357                         target = (lunid >> 16) & 0x3fff;
1358                         lun = lunid & 0xff;
1359                 } else {
1360                         bus = PQI_RAID_VOLUME_BUS;
1361                         target = 0;
1362                         lun = lunid & 0x3fff;
1363                 }
1364                 pqi_set_bus_target_lun(device, bus, target, lun);
1365                 device->target_lun_valid = true;
1366                 return;
1367         }
1368
1369         /*
1370          * Defer target and LUN assignment for non-controller physical devices
1371          * because the SAS transport layer will make these assignments later.
1372          */
1373         pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1374 }
1375
1376 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1377         struct pqi_scsi_dev *device)
1378 {
1379         int rc;
1380         u8 raid_level;
1381         u8 *buffer;
1382
1383         raid_level = SA_RAID_UNKNOWN;
1384
1385         buffer = kmalloc(64, GFP_KERNEL);
1386         if (buffer) {
1387                 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1388                         VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1389                 if (rc == 0) {
1390                         raid_level = buffer[8];
1391                         if (raid_level > SA_RAID_MAX)
1392                                 raid_level = SA_RAID_UNKNOWN;
1393                 }
1394                 kfree(buffer);
1395         }
1396
1397         device->raid_level = raid_level;
1398 }
1399
1400 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1401         struct pqi_scsi_dev *device, struct raid_map *raid_map)
1402 {
1403         char *err_msg;
1404         u32 raid_map_size;
1405         u32 r5or6_blocks_per_row;
1406
1407         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1408
1409         if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1410                 err_msg = "RAID map too small";
1411                 goto bad_raid_map;
1412         }
1413
1414         if (device->raid_level == SA_RAID_1) {
1415                 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1416                         err_msg = "invalid RAID-1 map";
1417                         goto bad_raid_map;
1418                 }
1419         } else if (device->raid_level == SA_RAID_TRIPLE) {
1420                 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1421                         err_msg = "invalid RAID-1(Triple) map";
1422                         goto bad_raid_map;
1423                 }
1424         } else if ((device->raid_level == SA_RAID_5 ||
1425                 device->raid_level == SA_RAID_6) &&
1426                 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1427                 /* RAID 50/60 */
1428                 r5or6_blocks_per_row =
1429                         get_unaligned_le16(&raid_map->strip_size) *
1430                         get_unaligned_le16(&raid_map->data_disks_per_row);
1431                 if (r5or6_blocks_per_row == 0) {
1432                         err_msg = "invalid RAID-5 or RAID-6 map";
1433                         goto bad_raid_map;
1434                 }
1435         }
1436
1437         return 0;
1438
1439 bad_raid_map:
1440         dev_warn(&ctrl_info->pci_dev->dev,
1441                 "logical device %08x%08x %s\n",
1442                 *((u32 *)&device->scsi3addr),
1443                 *((u32 *)&device->scsi3addr[4]), err_msg);
1444
1445         return -EINVAL;
1446 }
1447
1448 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1449         struct pqi_scsi_dev *device)
1450 {
1451         int rc;
1452         u32 raid_map_size;
1453         struct raid_map *raid_map;
1454
1455         raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1456         if (!raid_map)
1457                 return -ENOMEM;
1458
1459         rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1460                 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
1461         if (rc)
1462                 goto error;
1463
1464         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1465
1466         if (raid_map_size > sizeof(*raid_map)) {
1467
1468                 kfree(raid_map);
1469
1470                 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1471                 if (!raid_map)
1472                         return -ENOMEM;
1473
1474                 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1475                         device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1476                 if (rc)
1477                         goto error;
1478
1479                 if (get_unaligned_le32(&raid_map->structure_size)
1480                         != raid_map_size) {
1481                         dev_warn(&ctrl_info->pci_dev->dev,
1482                                 "requested %u bytes, received %u bytes\n",
1483                                 raid_map_size,
1484                                 get_unaligned_le32(&raid_map->structure_size));
1485                         rc = -EINVAL;
1486                         goto error;
1487                 }
1488         }
1489
1490         rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1491         if (rc)
1492                 goto error;
1493
1494         device->raid_map = raid_map;
1495
1496         return 0;
1497
1498 error:
1499         kfree(raid_map);
1500
1501         return rc;
1502 }
1503
1504 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1505         struct pqi_scsi_dev *device)
1506 {
1507         if (!ctrl_info->lv_drive_type_mix_valid) {
1508                 device->max_transfer_encrypted = ~0;
1509                 return;
1510         }
1511
1512         switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1513         case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1514         case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1515         case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1516         case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1517         case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1518         case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1519         case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1520                 device->max_transfer_encrypted =
1521                         ctrl_info->max_transfer_encrypted_sas_sata;
1522                 break;
1523         case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1524                 device->max_transfer_encrypted =
1525                         ctrl_info->max_transfer_encrypted_nvme;
1526                 break;
1527         case LV_DRIVE_TYPE_MIX_UNKNOWN:
1528         case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1529         default:
1530                 device->max_transfer_encrypted =
1531                         min(ctrl_info->max_transfer_encrypted_sas_sata,
1532                                 ctrl_info->max_transfer_encrypted_nvme);
1533                 break;
1534         }
1535 }
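/*
 * Note on the default case above: when the drive-type mix is unknown or
 * unrestricted, the smaller of the SAS/SATA and NVMe encrypted-transfer
 * limits is chosen so the cap is safe for any drive type in the volume.
 */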
1536
1537 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1538         struct pqi_scsi_dev *device)
1539 {
1540         int rc;
1541         u8 *buffer;
1542         u8 bypass_status;
1543
1544         buffer = kmalloc(64, GFP_KERNEL);
1545         if (!buffer)
1546                 return;
1547
1548         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1549                 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1550         if (rc)
1551                 goto out;
1552
1553 #define RAID_BYPASS_STATUS              4
1554 #define RAID_BYPASS_CONFIGURED          0x1
1555 #define RAID_BYPASS_ENABLED             0x2
1556
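        /*
         * Illustrative example (not part of the driver): a bypass_status
         * byte of 0x03 has both RAID_BYPASS_CONFIGURED (0x1) and
         * RAID_BYPASS_ENABLED (0x2) set, so the RAID map is fetched and,
         * if that succeeds, raid_bypass_enabled is set below.
         */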
1557         bypass_status = buffer[RAID_BYPASS_STATUS];
1558         device->raid_bypass_configured =
1559                 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1560         if (device->raid_bypass_configured &&
1561                 (bypass_status & RAID_BYPASS_ENABLED) &&
1562                 pqi_get_raid_map(ctrl_info, device) == 0) {
1563                 device->raid_bypass_enabled = true;
1564                 if (get_unaligned_le16(&device->raid_map->flags) &
1565                         RAID_MAP_ENCRYPTION_ENABLED)
1566                         pqi_set_max_transfer_encrypted(ctrl_info, device);
1567         }
1568
1569 out:
1570         kfree(buffer);
1571 }
1572
1573 /*
1574  * Use vendor-specific VPD to determine online/offline status of a volume.
1575  */
1576
1577 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1578         struct pqi_scsi_dev *device)
1579 {
1580         int rc;
1581         size_t page_length;
1582         u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1583         bool volume_offline = true;
1584         u32 volume_flags;
1585         struct ciss_vpd_logical_volume_status *vpd;
1586
1587         vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1588         if (!vpd)
1589                 goto no_buffer;
1590
1591         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1592                 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1593         if (rc)
1594                 goto out;
1595
1596         if (vpd->page_code != CISS_VPD_LV_STATUS)
1597                 goto out;
1598
1599         page_length = offsetof(struct ciss_vpd_logical_volume_status,
1600                 volume_status) + vpd->page_length;
1601         if (page_length < sizeof(*vpd))
1602                 goto out;
1603
1604         volume_status = vpd->volume_status;
1605         volume_flags = get_unaligned_be32(&vpd->flags);
1606         volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1607
1608 out:
1609         kfree(vpd);
1610 no_buffer:
1611         device->volume_status = volume_status;
1612         device->volume_offline = volume_offline;
1613 }
1614
1615 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED   0x01
1616 #define PQI_DEVICE_PHY_MAP_SUPPORTED    0x10
1617 #define PQI_DEVICE_ERASE_IN_PROGRESS    0x10
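/*
 * Note: PQI_DEVICE_PHY_MAP_SUPPORTED and PQI_DEVICE_ERASE_IN_PROGRESS
 * share the value 0x10, but they are tested against different flag
 * words (even_more_flags vs. extra_physical_drive_flags), so the two
 * uses never collide.
 */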
1618
1619 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1620         struct pqi_scsi_dev *device,
1621         struct bmic_identify_physical_device *id_phys)
1622 {
1623         int rc;
1624
1625         memset(id_phys, 0, sizeof(*id_phys));
1626
1627         rc = pqi_identify_physical_device(ctrl_info, device,
1628                 id_phys, sizeof(*id_phys));
1629         if (rc) {
1630                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1631                 return rc;
1632         }
1633
1634         scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1635         scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1636
1637         memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1638         memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1639
1640         device->box_index = id_phys->box_index;
1641         device->phys_box_on_bus = id_phys->phys_box_on_bus;
1642         device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1643         device->queue_depth =
1644                 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1645         device->active_path_index = id_phys->active_path_number;
1646         device->path_map = id_phys->redundant_path_present_map;
1647         memcpy(&device->box,
1648                 &id_phys->alternate_paths_phys_box_on_port,
1649                 sizeof(device->box));
1650         memcpy(&device->phys_connector,
1651                 &id_phys->alternate_paths_phys_connector,
1652                 sizeof(device->phys_connector));
1653         device->bay = id_phys->phys_bay_in_box;
1654         device->lun_count = id_phys->multi_lun_device_lun_count;
1655         if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1656                 id_phys->phy_count)
1657                 device->phy_id =
1658                         id_phys->phy_to_phy_map[device->active_path_index];
1659         else
1660                 device->phy_id = 0xFF;
1661
1662         device->ncq_prio_support =
1663                 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1664                 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1665
1666         device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
1667
1668         return 0;
1669 }
1670
1671 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1672         struct pqi_scsi_dev *device)
1673 {
1674         int rc;
1675         u8 *buffer;
1676
1677         buffer = kmalloc(64, GFP_KERNEL);
1678         if (!buffer)
1679                 return -ENOMEM;
1680
1681         /* Send an inquiry to the device to see what it is. */
1682         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1683         if (rc)
1684                 goto out;
1685
1686         scsi_sanitize_inquiry_string(&buffer[8], 8);
1687         scsi_sanitize_inquiry_string(&buffer[16], 16);
1688
1689         device->devtype = buffer[0] & 0x1f;
1690         memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1691         memcpy(device->model, &buffer[16], sizeof(device->model));
1692
1693         if (device->devtype == TYPE_DISK) {
1694                 if (device->is_external_raid_device) {
1695                         device->raid_level = SA_RAID_UNKNOWN;
1696                         device->volume_status = CISS_LV_OK;
1697                         device->volume_offline = false;
1698                 } else {
1699                         pqi_get_raid_level(ctrl_info, device);
1700                         pqi_get_raid_bypass_status(ctrl_info, device);
1701                         pqi_get_volume_status(ctrl_info, device);
1702                 }
1703         }
1704
1705 out:
1706         kfree(buffer);
1707
1708         return rc;
1709 }
1710
1711 /*
1712  * Prevent adding a drive to the OS in some corner cases, such as a drive
1713  * undergoing a sanitize (erase) operation. Some OSes will continue to poll
1714  * the drive until the sanitize completes, which can take hours,
1715  * resulting in long boot delays. Commands such as TUR and READ_CAP
1716  * are allowed, but READ/WRITE return a check condition, so the OS
1717  * cannot check/read the partition table.
1718  * Note: devices that have completed a sanitize must be re-enabled
1719  *       using the management utility.
1720  */
1721 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
1722 {
1723         return device->erase_in_progress;
1724 }
1725
1726 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
1727         struct pqi_scsi_dev *device,
1728         struct bmic_identify_physical_device *id_phys)
1729 {
1730         int rc;
1731
1732         if (device->is_expander_smp_device)
1733                 return 0;
1734
1735         if (pqi_is_logical_device(device))
1736                 rc = pqi_get_logical_device_info(ctrl_info, device);
1737         else
1738                 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1739
1740         return rc;
1741 }
1742
1743 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1744         struct pqi_scsi_dev *device,
1745         struct bmic_identify_physical_device *id_phys)
1746 {
1747         int rc;
1748
1749         rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
1750
1751         if (rc == 0 && device->lun_count == 0)
1752                 device->lun_count = 1;
1753
1754         return rc;
1755 }
1756
1757 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1758         struct pqi_scsi_dev *device)
1759 {
1760         char *status;
1761         static const char unknown_state_str[] =
1762                 "Volume is in an unknown state (%u)";
1763         char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1764
1765         switch (device->volume_status) {
1766         case CISS_LV_OK:
1767                 status = "Volume online";
1768                 break;
1769         case CISS_LV_FAILED:
1770                 status = "Volume failed";
1771                 break;
1772         case CISS_LV_NOT_CONFIGURED:
1773                 status = "Volume not configured";
1774                 break;
1775         case CISS_LV_DEGRADED:
1776                 status = "Volume degraded";
1777                 break;
1778         case CISS_LV_READY_FOR_RECOVERY:
1779                 status = "Volume ready for recovery operation";
1780                 break;
1781         case CISS_LV_UNDERGOING_RECOVERY:
1782                 status = "Volume undergoing recovery";
1783                 break;
1784         case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1785                 status = "Wrong physical drive was replaced";
1786                 break;
1787         case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1788                 status = "A physical drive not properly connected";
1789                 break;
1790         case CISS_LV_HARDWARE_OVERHEATING:
1791                 status = "Hardware is overheating";
1792                 break;
1793         case CISS_LV_HARDWARE_HAS_OVERHEATED:
1794                 status = "Hardware has overheated";
1795                 break;
1796         case CISS_LV_UNDERGOING_EXPANSION:
1797                 status = "Volume undergoing expansion";
1798                 break;
1799         case CISS_LV_NOT_AVAILABLE:
1800                 status = "Volume waiting for transforming volume";
1801                 break;
1802         case CISS_LV_QUEUED_FOR_EXPANSION:
1803                 status = "Volume queued for expansion";
1804                 break;
1805         case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1806                 status = "Volume disabled due to SCSI ID conflict";
1807                 break;
1808         case CISS_LV_EJECTED:
1809                 status = "Volume has been ejected";
1810                 break;
1811         case CISS_LV_UNDERGOING_ERASE:
1812                 status = "Volume undergoing background erase";
1813                 break;
1814         case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1815                 status = "Volume ready for predictive spare rebuild";
1816                 break;
1817         case CISS_LV_UNDERGOING_RPI:
1818                 status = "Volume undergoing rapid parity initialization";
1819                 break;
1820         case CISS_LV_PENDING_RPI:
1821                 status = "Volume queued for rapid parity initialization";
1822                 break;
1823         case CISS_LV_ENCRYPTED_NO_KEY:
1824                 status = "Encrypted volume inaccessible - key not present";
1825                 break;
1826         case CISS_LV_UNDERGOING_ENCRYPTION:
1827                 status = "Volume undergoing encryption process";
1828                 break;
1829         case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1830                 status = "Volume undergoing encryption re-keying process";
1831                 break;
1832         case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1833                 status = "Volume encrypted but encryption is disabled";
1834                 break;
1835         case CISS_LV_PENDING_ENCRYPTION:
1836                 status = "Volume pending migration to encrypted state";
1837                 break;
1838         case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1839                 status = "Volume pending encryption rekeying";
1840                 break;
1841         case CISS_LV_NOT_SUPPORTED:
1842                 status = "Volume not supported on this controller";
1843                 break;
1844         case CISS_LV_STATUS_UNAVAILABLE:
1845                 status = "Volume status not available";
1846                 break;
1847         default:
1848                 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1849                         unknown_state_str, device->volume_status);
1850                 status = unknown_state_buffer;
1851                 break;
1852         }
1853
1854         dev_info(&ctrl_info->pci_dev->dev,
1855                 "scsi %d:%d:%d:%d %s\n",
1856                 ctrl_info->scsi_host->host_no,
1857                 device->bus, device->target, device->lun, status);
1858 }
1859
1860 static void pqi_rescan_worker(struct work_struct *work)
1861 {
1862         struct pqi_ctrl_info *ctrl_info;
1863
1864         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1865                 rescan_work);
1866
1867         pqi_scan_scsi_devices(ctrl_info);
1868 }
1869
1870 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1871         struct pqi_scsi_dev *device)
1872 {
1873         int rc;
1874
1875         if (pqi_is_logical_device(device))
1876                 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1877                         device->target, device->lun);
1878         else
1879                 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1880
1881         return rc;
1882 }
1883
1884 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS      (20 * 1000)
1885
1886 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1887 {
1888         int rc;
1889         int lun;
1890
1891         for (lun = 0; lun < device->lun_count; lun++) {
1892                 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1893                         PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1894                 if (rc)
1895                         dev_err(&ctrl_info->pci_dev->dev,
1896                                 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1897                                 ctrl_info->scsi_host->host_no, device->bus,
1898                                 device->target, lun,
1899                                 atomic_read(&device->scsi_cmds_outstanding[lun]));
1900         }
1901
1902         if (pqi_is_logical_device(device))
1903                 scsi_remove_device(device->sdev);
1904         else
1905                 pqi_remove_sas_device(device);
1906
1907         pqi_device_remove_start(device);
1908 }
1909
1910 /* Assumes the SCSI device list lock is held. */
1911
1912 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1913         int bus, int target, int lun)
1914 {
1915         struct pqi_scsi_dev *device;
1916
1917         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1918                 if (device->bus == bus && device->target == target && device->lun == lun)
1919                         return device;
1920
1921         return NULL;
1922 }
1923
1924 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1925 {
1926         if (dev1->is_physical_device != dev2->is_physical_device)
1927                 return false;
1928
1929         if (dev1->is_physical_device)
1930                 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1931
1932         return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1933 }
1934
1935 enum pqi_find_result {
1936         DEVICE_NOT_FOUND,
1937         DEVICE_CHANGED,
1938         DEVICE_SAME,
1939 };
1940
1941 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1942         struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1943 {
1944         struct pqi_scsi_dev *device;
1945
1946         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1947                 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1948                         *matching_device = device;
1949                         if (pqi_device_equal(device_to_find, device)) {
1950                                 if (device_to_find->volume_offline)
1951                                         return DEVICE_CHANGED;
1952                                 return DEVICE_SAME;
1953                         }
1954                         return DEVICE_CHANGED;
1955                 }
1956         }
1957
1958         return DEVICE_NOT_FOUND;
1959 }
1960
1961 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1962 {
1963         if (device->is_expander_smp_device)
1964                 return "Enclosure SMP    ";
1965
1966         return scsi_device_type(device->devtype);
1967 }
1968
1969 #define PQI_DEV_INFO_BUFFER_LENGTH      128
1970
1971 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1972         char *action, struct pqi_scsi_dev *device)
1973 {
1974         ssize_t count;
1975         char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1976
1977         count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1978                 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1979
1980         if (device->target_lun_valid)
1981                 count += scnprintf(buffer + count,
1982                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1983                         "%d:%d",
1984                         device->target,
1985                         device->lun);
1986         else
1987                 count += scnprintf(buffer + count,
1988                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1989                         "-:-");
1990
1991         if (pqi_is_logical_device(device))
1992                 count += scnprintf(buffer + count,
1993                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1994                         " %08x%08x",
1995                         *((u32 *)&device->scsi3addr),
1996                         *((u32 *)&device->scsi3addr[4]));
1997         else
1998                 count += scnprintf(buffer + count,
1999                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2000                         " %016llx%016llx",
2001                         get_unaligned_be64(&device->wwid[0]),
2002                         get_unaligned_be64(&device->wwid[8]));
2003
2004         count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
2005                 " %s %.8s %.16s ",
2006                 pqi_device_type(device),
2007                 device->vendor,
2008                 device->model);
2009
2010         if (pqi_is_logical_device(device)) {
2011                 if (device->devtype == TYPE_DISK)
2012                         count += scnprintf(buffer + count,
2013                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2014                                 "SSDSmartPathCap%c En%c %-12s",
2015                                 device->raid_bypass_configured ? '+' : '-',
2016                                 device->raid_bypass_enabled ? '+' : '-',
2017                                 pqi_raid_level_to_string(device->raid_level));
2018         } else {
2019                 count += scnprintf(buffer + count,
2020                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2021                         "AIO%c", device->aio_enabled ? '+' : '-');
2022                 if (device->devtype == TYPE_DISK ||
2023                         device->devtype == TYPE_ZBC)
2024                         count += scnprintf(buffer + count,
2025                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2026                                 " qd=%-6d", device->queue_depth);
2027         }
2028
2029         dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2030 }
2031
2032 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2033 {
2034         u32 raid_map1_size;
2035         u32 raid_map2_size;
2036
2037         if (raid_map1 == NULL || raid_map2 == NULL)
2038                 return raid_map1 == raid_map2;
2039
2040         raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2041         raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2042
2043         if (raid_map1_size != raid_map2_size)
2044                 return false;
2045
2046         return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2047 }
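/*
 * Note: the structure sizes are compared before memcmp() so two maps of
 * different lengths are reported unequal without byte-comparing past
 * the shorter buffer.
 */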
2048
2049 /* Assumes the SCSI device list lock is held. */
2050
2051 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2052         struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2053 {
2054         existing_device->device_type = new_device->device_type;
2055         existing_device->bus = new_device->bus;
2056         if (new_device->target_lun_valid) {
2057                 existing_device->target = new_device->target;
2058                 existing_device->lun = new_device->lun;
2059                 existing_device->target_lun_valid = true;
2060         }
2061
2062         /* By definition, the scsi3addr and wwid fields are already the same. */
2063
2064         existing_device->is_physical_device = new_device->is_physical_device;
2065         memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2066         memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
2067         existing_device->sas_address = new_device->sas_address;
2068         existing_device->queue_depth = new_device->queue_depth;
2069         existing_device->device_offline = false;
2070         existing_device->lun_count = new_device->lun_count;
2071
2072         if (pqi_is_logical_device(existing_device)) {
2073                 existing_device->is_external_raid_device = new_device->is_external_raid_device;
2074
2075                 if (existing_device->devtype == TYPE_DISK) {
2076                         existing_device->raid_level = new_device->raid_level;
2077                         existing_device->volume_status = new_device->volume_status;
2078                         if (ctrl_info->logical_volume_rescan_needed)
2079                                 existing_device->rescan = true;
2080                         memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2081                         if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2082                                 kfree(existing_device->raid_map);
2083                                 existing_device->raid_map = new_device->raid_map;
2084                                 /* To prevent this from being freed later. */
2085                                 new_device->raid_map = NULL;
2086                         }
2087                         existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2088                         existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2089                 }
2090         } else {
2091                 existing_device->aio_enabled = new_device->aio_enabled;
2092                 existing_device->aio_handle = new_device->aio_handle;
2093                 existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2094                 existing_device->active_path_index = new_device->active_path_index;
2095                 existing_device->phy_id = new_device->phy_id;
2096                 existing_device->path_map = new_device->path_map;
2097                 existing_device->bay = new_device->bay;
2098                 existing_device->box_index = new_device->box_index;
2099                 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2100                 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2101                 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2102                 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2103         }
2104 }
2105
2106 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2107 {
2108         if (device) {
2109                 kfree(device->raid_map);
2110                 kfree(device);
2111         }
2112 }
2113
2114 /*
2115  * Called when exposing a new device to the OS fails in order to re-adjust
2116  * our internal SCSI device list to match the SCSI ML's view.
2117  */
2118
2119 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2120         struct pqi_scsi_dev *device)
2121 {
2122         unsigned long flags;
2123
2124         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2125         list_del(&device->scsi_device_list_entry);
2126         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2127
2128         /* Allow the device structure to be freed later. */
2129         device->keep_device = false;
2130 }
2131
2132 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2133 {
2134         if (device->is_expander_smp_device)
2135                 return device->sas_port != NULL;
2136
2137         return device->sdev != NULL;
2138 }
2139
2140 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2141         struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2142 {
2143         int rc;
2144         unsigned int i;
2145         unsigned long flags;
2146         enum pqi_find_result find_result;
2147         struct pqi_scsi_dev *device;
2148         struct pqi_scsi_dev *next;
2149         struct pqi_scsi_dev *matching_device;
2150         LIST_HEAD(add_list);
2151         LIST_HEAD(delete_list);
2152
2153         /*
2154          * The idea here is to do as little work as possible while holding the
2155          * spinlock.  That's why we go to great pains to defer anything other
2156          * than updating the internal device list until after we release the
2157          * spinlock.
2158          */
2159
2160         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2161
2162         /* Assume that all devices in the existing list have gone away. */
2163         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2164                 device->device_gone = true;
2165
2166         for (i = 0; i < num_new_devices; i++) {
2167                 device = new_device_list[i];
2168
2169                 find_result = pqi_scsi_find_entry(ctrl_info, device,
2170                         &matching_device);
2171
2172                 switch (find_result) {
2173                 case DEVICE_SAME:
2174                         /*
2175                          * The newly found device is already in the existing
2176                          * device list.
2177                          */
2178                         device->new_device = false;
2179                         matching_device->device_gone = false;
2180                         pqi_scsi_update_device(ctrl_info, matching_device, device);
2181                         break;
2182                 case DEVICE_NOT_FOUND:
2183                         /*
2184                          * The newly found device is NOT in the existing device
2185                          * list.
2186                          */
2187                         device->new_device = true;
2188                         break;
2189                 case DEVICE_CHANGED:
2190                         /*
2191                          * The original device has gone away and we need to add
2192                          * the new device.
2193                          */
2194                         device->new_device = true;
2195                         break;
2196                 }
2197         }
2198
2199         /* Process all devices that have gone away. */
2200         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2201                 scsi_device_list_entry) {
2202                 if (device->device_gone) {
2203                         list_del(&device->scsi_device_list_entry);
2204                         list_add_tail(&device->delete_list_entry, &delete_list);
2205                 }
2206         }
2207
2208         /* Process all new devices. */
2209         for (i = 0; i < num_new_devices; i++) {
2210                 device = new_device_list[i];
2211                 if (!device->new_device)
2212                         continue;
2213                 if (device->volume_offline)
2214                         continue;
2215                 list_add_tail(&device->scsi_device_list_entry,
2216                         &ctrl_info->scsi_device_list);
2217                 list_add_tail(&device->add_list_entry, &add_list);
2218                 /* To prevent this device structure from being freed later. */
2219                 device->keep_device = true;
2220         }
2221
2222         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2223
2224         /*
2225          * If OFA is in progress and there are devices that need to be deleted,
2226          * allow any pending reset operations to continue and unblock any SCSI
2227          * requests before removal.
2228          */
2229         if (pqi_ofa_in_progress(ctrl_info)) {
2230                 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2231                         if (pqi_is_device_added(device))
2232                                 pqi_device_remove_start(device);
2233                 pqi_ctrl_unblock_device_reset(ctrl_info);
2234                 pqi_scsi_unblock_requests(ctrl_info);
2235         }
2236
2237         /* Remove all devices that have gone away. */
2238         list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2239                 if (device->volume_offline) {
2240                         pqi_dev_info(ctrl_info, "offline", device);
2241                         pqi_show_volume_status(ctrl_info, device);
2242                 } else {
2243                         pqi_dev_info(ctrl_info, "removed", device);
2244                 }
2245                 if (pqi_is_device_added(device))
2246                         pqi_remove_device(ctrl_info, device);
2247                 list_del(&device->delete_list_entry);
2248                 pqi_free_device(device);
2249         }
2250
2251         /*
2252          * Notify the SML of any existing device changes, such as
2253          * queue depth or device size.
2254          */
2255         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2256                 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2257                         device->advertised_queue_depth = device->queue_depth;
2258                         scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2259                 }
2260                 if (device->sdev && device->rescan) {
2261                         scsi_rescan_device(&device->sdev->sdev_gendev);
2262                         device->rescan = false;
2263                 }
2264         }
2265
2266         /* Expose any new devices. */
2267         list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2268                 if (!pqi_is_device_added(device)) {
2269                         rc = pqi_add_device(ctrl_info, device);
2270                         if (rc == 0) {
2271                                 pqi_dev_info(ctrl_info, "added", device);
2272                         } else {
2273                                 dev_warn(&ctrl_info->pci_dev->dev,
2274                                         "scsi %d:%d:%d:%d addition failed, device not added\n",
2275                                         ctrl_info->scsi_host->host_no,
2276                                         device->bus, device->target,
2277                                         device->lun);
2278                                 pqi_fixup_botched_add(ctrl_info, device);
2279                         }
2280                 }
2281         }
2282
2283         ctrl_info->logical_volume_rescan_needed = false;
2284
2285 }
2286
2287 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2288 {
2289         /*
2290          * Only support the HBA controller itself as a RAID
2291          * controller.  If it's a RAID controller other than
2292          * the HBA itself (an external RAID controller, for
2293          * example), we don't support it.
2294          */
2295         if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2296                 !pqi_is_hba_lunid(device->scsi3addr))
2297                 return false;
2298
2299         return true;
2300 }
2301
2302 static inline bool pqi_skip_device(u8 *scsi3addr)
2303 {
2304         /* Ignore all masked devices. */
2305         if (MASKED_DEVICE(scsi3addr))
2306                 return true;
2307
2308         return false;
2309 }
2310
2311 static inline void pqi_mask_device(u8 *scsi3addr)
2312 {
2313         scsi3addr[3] |= 0xc0;
2314 }
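/*
 * Illustrative note: setting the top two bits of byte 3 of the LUN
 * address marks the device as masked, so MASKED_DEVICE() (used by
 * pqi_skip_device()) will subsequently hide it from the OS; this is how
 * VSEP devices are hidden during discovery below.
 */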
2315
2316 static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2317 {
2318         if (pqi_is_logical_device(device))
2319                 return false;
2320
2321         return (device->path_map & (device->path_map - 1)) != 0;
2322 }
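/*
 * Illustrative note: path_map carries one bit per active path, so
 * (path_map & (path_map - 1)) is non-zero only when two or more bits
 * are set. For example, path_map = 0x3 gives 0x3 & 0x2 = 0x2
 * (multipath), while path_map = 0x4 gives 0x4 & 0x3 = 0 (single path).
 */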
2323
2324 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2325 {
2326         return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2327 }
2328
2329 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2330 {
2331         int i;
2332         int rc;
2333         LIST_HEAD(new_device_list_head);
2334         struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2335         struct report_log_lun_list *logdev_list = NULL;
2336         struct report_phys_lun_16byte_wwid *phys_lun;
2337         struct report_log_lun *log_lun;
2338         struct bmic_identify_physical_device *id_phys = NULL;
2339         u32 num_physicals;
2340         u32 num_logicals;
2341         struct pqi_scsi_dev **new_device_list = NULL;
2342         struct pqi_scsi_dev *device;
2343         struct pqi_scsi_dev *next;
2344         unsigned int num_new_devices;
2345         unsigned int num_valid_devices;
2346         bool is_physical_device;
2347         u8 *scsi3addr;
2348         unsigned int physical_index;
2349         unsigned int logical_index;
2350         static char *out_of_memory_msg =
2351                 "failed to allocate memory, device discovery stopped";
2352
2353         rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2354         if (rc)
2355                 goto out;
2356
2357         if (physdev_list)
2358                 num_physicals =
2359                         get_unaligned_be32(&physdev_list->header.list_length)
2360                                 / sizeof(physdev_list->lun_entries[0]);
2361         else
2362                 num_physicals = 0;
2363
2364         if (logdev_list)
2365                 num_logicals =
2366                         get_unaligned_be32(&logdev_list->header.list_length)
2367                                 / sizeof(logdev_list->lun_entries[0]);
2368         else
2369                 num_logicals = 0;
2370
2371         if (num_physicals) {
2372                 /*
2373                  * We need this buffer for calls to pqi_get_device_info()
2374                  * below.  We allocate it here instead of inside
2375                  * pqi_get_device_info() because it's a fairly large
2376                  * buffer.
2377                  */
2378                 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2379                 if (!id_phys) {
2380                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2381                                 out_of_memory_msg);
2382                         rc = -ENOMEM;
2383                         goto out;
2384                 }
2385
2386                 if (pqi_hide_vsep) {
2387                         for (i = num_physicals - 1; i >= 0; i--) {
2388                                 phys_lun = &physdev_list->lun_entries[i];
2389                                 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2390                                         pqi_mask_device(phys_lun->lunid);
2391                                         break;
2392                                 }
2393                         }
2394                 }
2395         }
2396
2397         if (num_logicals &&
2398                 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2399                 ctrl_info->lv_drive_type_mix_valid = true;
2400
2401         num_new_devices = num_physicals + num_logicals;
2402
2403         new_device_list = kmalloc_array(num_new_devices,
2404                                         sizeof(*new_device_list),
2405                                         GFP_KERNEL);
2406         if (!new_device_list) {
2407                 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2408                 rc = -ENOMEM;
2409                 goto out;
2410         }
2411
2412         for (i = 0; i < num_new_devices; i++) {
2413                 device = kzalloc(sizeof(*device), GFP_KERNEL);
2414                 if (!device) {
2415                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2416                                 out_of_memory_msg);
2417                         rc = -ENOMEM;
2418                         goto out;
2419                 }
2420                 list_add_tail(&device->new_device_list_entry,
2421                         &new_device_list_head);
2422         }
2423
2424         device = NULL;
2425         num_valid_devices = 0;
2426         physical_index = 0;
2427         logical_index = 0;
2428
2429         for (i = 0; i < num_new_devices; i++) {
2430
2431                 if ((!pqi_expose_ld_first && i < num_physicals) ||
2432                         (pqi_expose_ld_first && i >= num_logicals)) {
2433                         is_physical_device = true;
2434                         phys_lun = &physdev_list->lun_entries[physical_index++];
2435                         log_lun = NULL;
2436                         scsi3addr = phys_lun->lunid;
2437                 } else {
2438                         is_physical_device = false;
2439                         phys_lun = NULL;
2440                         log_lun = &logdev_list->lun_entries[logical_index++];
2441                         scsi3addr = log_lun->lunid;
2442                 }
2443
2444                 if (is_physical_device && pqi_skip_device(scsi3addr))
2445                         continue;
2446
2447                 if (device)
2448                         device = list_next_entry(device, new_device_list_entry);
2449                 else
2450                         device = list_first_entry(&new_device_list_head,
2451                                 struct pqi_scsi_dev, new_device_list_entry);
2452
2453                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2454                 device->is_physical_device = is_physical_device;
2455                 if (is_physical_device) {
2456                         device->device_type = phys_lun->device_type;
2457                         if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2458                                 device->is_expander_smp_device = true;
2459                 } else {
2460                         device->is_external_raid_device =
2461                                 pqi_is_external_raid_addr(scsi3addr);
2462                 }
2463
2464                 if (!pqi_is_supported_device(device))
2465                         continue;
2466
2467                 /* Gather information about the device. */
2468                 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2469                 if (rc == -ENOMEM) {
2470                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2471                                 out_of_memory_msg);
2472                         goto out;
2473                 }
2474                 if (rc) {
2475                         if (device->is_physical_device)
2476                                 dev_warn(&ctrl_info->pci_dev->dev,
2477                                         "obtaining device info failed, skipping physical device %016llx%016llx\n",
2478                                         get_unaligned_be64(&phys_lun->wwid[0]),
2479                                         get_unaligned_be64(&phys_lun->wwid[8]));
2480                         else
2481                                 dev_warn(&ctrl_info->pci_dev->dev,
2482                                         "obtaining device info failed, skipping logical device %08x%08x\n",
2483                                         *((u32 *)&device->scsi3addr),
2484                                         *((u32 *)&device->scsi3addr[4]));
2485                         rc = 0;
2486                         continue;
2487                 }
2488
2489                 /* Do not present disks that the OS cannot fully probe. */
2490                 if (pqi_keep_device_offline(device))
2491                         continue;
2492
2493                 pqi_assign_bus_target_lun(device);
2494
2495                 if (device->is_physical_device) {
2496                         memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2497                         if ((phys_lun->device_flags &
2498                                 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2499                                 phys_lun->aio_handle) {
2500                                 device->aio_enabled = true;
2501                                 device->aio_handle =
2502                                         phys_lun->aio_handle;
2503                         }
2504                 } else {
2505                         memcpy(device->volume_id, log_lun->volume_id,
2506                                 sizeof(device->volume_id));
2507                 }
2508
2509                 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2510
2511                 new_device_list[num_valid_devices++] = device;
2512         }
2513
2514         pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2515
2516 out:
2517         list_for_each_entry_safe(device, next, &new_device_list_head,
2518                 new_device_list_entry) {
2519                 if (device->keep_device)
2520                         continue;
2521                 list_del(&device->new_device_list_entry);
2522                 pqi_free_device(device);
2523         }
2524
2525         kfree(new_device_list);
2526         kfree(physdev_list);
2527         kfree(logdev_list);
2528         kfree(id_phys);
2529
2530         return rc;
2531 }
2532
2533 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2534 {
2535         int rc;
2536         int mutex_acquired;
2537
2538         if (pqi_ctrl_offline(ctrl_info))
2539                 return -ENXIO;
2540
2541         mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2542
2543         if (!mutex_acquired) {
2544                 if (pqi_ctrl_scan_blocked(ctrl_info))
2545                         return -EBUSY;
2546                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2547                 return -EINPROGRESS;
2548         }
2549
2550         rc = pqi_update_scsi_devices(ctrl_info);
2551         if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2552                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2553
2554         mutex_unlock(&ctrl_info->scan_mutex);
2555
2556         return rc;
2557 }
2558
2559 static void pqi_scan_start(struct Scsi_Host *shost)
2560 {
2561         struct pqi_ctrl_info *ctrl_info;
2562
2563         ctrl_info = shost_to_hba(shost);
2564
2565         pqi_scan_scsi_devices(ctrl_info);
2566 }
2567
2568 /* Returns TRUE if scan is finished. */
2569
2570 static int pqi_scan_finished(struct Scsi_Host *shost,
2571         unsigned long elapsed_time)
2572 {
2573         struct pqi_ctrl_info *ctrl_info;
2574
2575         ctrl_info = shost_priv(shost);
2576
2577         return !mutex_is_locked(&ctrl_info->scan_mutex);
2578 }
2579
2580 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2581         struct raid_map *raid_map, u64 first_block)
2582 {
2583         u32 volume_blk_size;
2584
2585         /*
2586          * Set the encryption tweak values based on logical block address.
2587          * If the block size is 512, the tweak value is equal to the LBA.
2588          * For other block sizes, tweak value is (LBA * block size) / 512.
2589          */
2590         volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2591         if (volume_blk_size != 512)
2592                 first_block = (first_block * volume_blk_size) / 512;
2593
2594         encryption_info->data_encryption_key_index =
2595                 get_unaligned_le16(&raid_map->data_encryption_key_index);
2596         encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2597         encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2598 }
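/*
 * Worked example (hypothetical values): for a volume with a 4096-byte
 * block size, an I/O starting at LBA 100 produces a tweak of
 * (100 * 4096) / 512 = 800, so encrypt_tweak_lower = 800 and
 * encrypt_tweak_upper = 0.
 */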
2599
2600 /*
2601  * Attempt to perform RAID bypass mapping for a logical volume I/O.
2602  */
2603
2604 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2605         struct pqi_scsi_dev_raid_map_data *rmd)
2606 {
2607         bool is_supported = true;
2608
2609         switch (rmd->raid_level) {
2610         case SA_RAID_0:
2611                 break;
2612         case SA_RAID_1:
2613                 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2614                         rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2615                         is_supported = false;
2616                 break;
2617         case SA_RAID_TRIPLE:
2618                 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2619                         rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2620                         is_supported = false;
2621                 break;
2622         case SA_RAID_5:
2623                 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2624                         rmd->data_length > ctrl_info->max_write_raid_5_6))
2625                         is_supported = false;
2626                 break;
2627         case SA_RAID_6:
2628                 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2629                         rmd->data_length > ctrl_info->max_write_raid_5_6))
2630                         is_supported = false;
2631                 break;
2632         default:
2633                 is_supported = false;
2634                 break;
2635         }
2636
2637         return is_supported;
2638 }
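/*
 * Summary of the checks above: reads are bypass-eligible on every
 * supported RAID level, RAID 0 writes are always eligible, and writes
 * to RAID 1/Triple/5/6 are eligible only when the matching
 * enable_r*_writes flag is set and the transfer length does not exceed
 * the per-level maximum in ctrl_info.
 */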
2639
2640 #define PQI_RAID_BYPASS_INELIGIBLE      1
2641
2642 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2643         struct pqi_scsi_dev_raid_map_data *rmd)
2644 {
2645         /* Check for valid opcode, get LBA and block count. */
2646         switch (scmd->cmnd[0]) {
2647         case WRITE_6:
2648                 rmd->is_write = true;
2649                 fallthrough;
2650         case READ_6:
2651                 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2652                         (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2653                 rmd->block_cnt = (u32)scmd->cmnd[4];
2654                 if (rmd->block_cnt == 0)
2655                         rmd->block_cnt = 256;
2656                 break;
2657         case WRITE_10:
2658                 rmd->is_write = true;
2659                 fallthrough;
2660         case READ_10:
2661                 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2662                 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2663                 break;
2664         case WRITE_12:
2665                 rmd->is_write = true;
2666                 fallthrough;
2667         case READ_12:
2668                 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2669                 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2670                 break;
2671         case WRITE_16:
2672                 rmd->is_write = true;
2673                 fallthrough;
2674         case READ_16:
2675                 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2676                 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2677                 break;
2678         default:
2679                 /* Process via normal I/O path. */
2680                 return PQI_RAID_BYPASS_INELIGIBLE;
2681         }
2682
2683         put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2684
2685         return 0;
2686 }
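/*
 * Illustrative example: a READ_10 CDB of
 * 28 00 00 00 10 00 00 00 08 00 (hex) decodes to
 * first_block = 0x1000 (bytes 2-5, big-endian) and block_cnt = 8
 * (bytes 7-8, big-endian); is_write remains false.
 */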
2687
2688 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2689         struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2690 {
2691 #if BITS_PER_LONG == 32
2692         u64 tmpdiv;
2693 #endif
2694
2695         rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2696
2697         /* Check for invalid block or wraparound. */
2698         if (rmd->last_block >=
2699                 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2700                 rmd->last_block < rmd->first_block)
2701                 return PQI_RAID_BYPASS_INELIGIBLE;
2702
2703         rmd->data_disks_per_row =
2704                 get_unaligned_le16(&raid_map->data_disks_per_row);
2705         rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2706         rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2707
2708         /* Calculate stripe information for the request. */
2709         rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2710         if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2711                 return PQI_RAID_BYPASS_INELIGIBLE;
2712 #if BITS_PER_LONG == 32
2713         tmpdiv = rmd->first_block;
2714         do_div(tmpdiv, rmd->blocks_per_row);
2715         rmd->first_row = tmpdiv;
2716         tmpdiv = rmd->last_block;
2717         do_div(tmpdiv, rmd->blocks_per_row);
2718         rmd->last_row = tmpdiv;
2719         rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2720         rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2721         tmpdiv = rmd->first_row_offset;
2722         do_div(tmpdiv, rmd->strip_size);
2723         rmd->first_column = tmpdiv;
2724         tmpdiv = rmd->last_row_offset;
2725         do_div(tmpdiv, rmd->strip_size);
2726         rmd->last_column = tmpdiv;
2727 #else
2728         rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2729         rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2730         rmd->first_row_offset = (u32)(rmd->first_block -
2731                 (rmd->first_row * rmd->blocks_per_row));
2732         rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2733                 rmd->blocks_per_row));
2734         rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2735         rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2736 #endif
2737
2738         /* If this isn't a single row/column then give to the controller. */
2739         if (rmd->first_row != rmd->last_row ||
2740                 rmd->first_column != rmd->last_column)
2741                 return PQI_RAID_BYPASS_INELIGIBLE;
2742
2743         /* Proceeding with driver mapping. */
2744         rmd->total_disks_per_row = rmd->data_disks_per_row +
2745                 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2746         rmd->map_row = ((u32)(rmd->first_row >>
2747                 raid_map->parity_rotation_shift)) %
2748                 get_unaligned_le16(&raid_map->row_cnt);
2749         rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2750                 rmd->first_column;
2751
2752         return 0;
2753 }
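/*
 * Worked example (hypothetical RAID map): with strip_size = 128 and
 * data_disks_per_row = 4, blocks_per_row = 512. An 8-block request at
 * first_block = 1000 gives first_row = last_row = 1, first_row_offset =
 * 488, and first_column = last_column = 3 (488 / 128), so the request
 * falls within a single row and column and stays bypass-eligible.
 */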
2754
2755 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2756         struct raid_map *raid_map)
2757 {
2758 #if BITS_PER_LONG == 32
2759         u64 tmpdiv;
2760 #endif
2761
2762         if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2763                 return PQI_RAID_BYPASS_INELIGIBLE;
2764
2765         /* RAID 50/60 */
2766         /* Verify first and last block are in same RAID group. */
2767         rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2768 #if BITS_PER_LONG == 32
2769         tmpdiv = rmd->first_block;
2770         rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2771         tmpdiv = rmd->first_group;
2772         do_div(tmpdiv, rmd->blocks_per_row);
2773         rmd->first_group = tmpdiv;
2774         tmpdiv = rmd->last_block;
2775         rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2776         tmpdiv = rmd->last_group;
2777         do_div(tmpdiv, rmd->blocks_per_row);
2778         rmd->last_group = tmpdiv;
2779 #else
2780         rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2781         rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2782 #endif
2783         if (rmd->first_group != rmd->last_group)
2784                 return PQI_RAID_BYPASS_INELIGIBLE;
2785
2786         /* Verify request is in a single row of RAID 5/6. */
2787 #if BITS_PER_LONG == 32
2788         tmpdiv = rmd->first_block;
2789         do_div(tmpdiv, rmd->stripesize);
2790         rmd->first_row = tmpdiv;
2791         rmd->r5or6_first_row = tmpdiv;
2792         tmpdiv = rmd->last_block;
2793         do_div(tmpdiv, rmd->stripesize);
2794         rmd->r5or6_last_row = tmpdiv;
2795 #else
2796         rmd->first_row = rmd->r5or6_first_row =
2797                 rmd->first_block / rmd->stripesize;
2798         rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2799 #endif
2800         if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2801                 return PQI_RAID_BYPASS_INELIGIBLE;
2802
2803         /* Verify request is in a single column. */
2804 #if BITS_PER_LONG == 32
2805         tmpdiv = rmd->first_block;
2806         rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2807         tmpdiv = rmd->first_row_offset;
2808         rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2809         rmd->r5or6_first_row_offset = rmd->first_row_offset;
2810         tmpdiv = rmd->last_block;
2811         rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2812         tmpdiv = rmd->r5or6_last_row_offset;
2813         rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2814         tmpdiv = rmd->r5or6_first_row_offset;
2815         do_div(tmpdiv, rmd->strip_size);
2816         rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2817         tmpdiv = rmd->r5or6_last_row_offset;
2818         do_div(tmpdiv, rmd->strip_size);
2819         rmd->r5or6_last_column = tmpdiv;
2820 #else
2821         rmd->first_row_offset = rmd->r5or6_first_row_offset =
2822                 (u32)((rmd->first_block % rmd->stripesize) %
2823                 rmd->blocks_per_row);
2824
2825         rmd->r5or6_last_row_offset =
2826                 (u32)((rmd->last_block % rmd->stripesize) %
2827                 rmd->blocks_per_row);
2828
2829         rmd->first_column =
2830                 rmd->r5or6_first_row_offset / rmd->strip_size;
2831         rmd->r5or6_first_column = rmd->first_column;
2832         rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2833 #endif
2834         if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2835                 return PQI_RAID_BYPASS_INELIGIBLE;
2836
2837         /* Request is eligible. */
2838         rmd->map_row =
2839                 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2840                 get_unaligned_le16(&raid_map->row_cnt);
2841
2842         rmd->map_index = (rmd->first_group *
2843                 (get_unaligned_le16(&raid_map->row_cnt) *
2844                 rmd->total_disks_per_row)) +
2845                 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2846
2847         if (rmd->is_write) {
2848                 u32 index;
2849
2850                 /*
		 * p_parity_it_nexus and q_parity_it_nexus are the AIO handles
		 * of the parity drives, taken from the parity entries inside
		 * the device's raid_map.
		 *
		 * A device's RAID map is bounded by the number of RAID disks
		 * squared.
		 *
		 * The device's RAID map size is checked during device
		 * initialization.
2858                  */
2859                 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2860                 index *= rmd->total_disks_per_row;
2861                 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2862
2863                 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2864                 if (rmd->raid_level == SA_RAID_6) {
2865                         rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2866                         rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2867                 }
2868 #if BITS_PER_LONG == 32
2869                 tmpdiv = rmd->first_block;
2870                 do_div(tmpdiv, rmd->blocks_per_row);
2871                 rmd->row = tmpdiv;
2872 #else
2873                 rmd->row = rmd->first_block / rmd->blocks_per_row;
2874 #endif
2875         }
2876
2877         return 0;
2878 }
2879
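/*
 * Choose between a 10-byte and a 16-byte CDB for the physical drive I/O:
 * READ(16)/WRITE(16) when the starting LBA does not fit in 32 bits,
 * READ(10)/WRITE(10) otherwise.
 */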
2880 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2881 {
2882         /* Build the new CDB for the physical disk I/O. */
2883         if (rmd->disk_block > 0xffffffff) {
2884                 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2885                 rmd->cdb[1] = 0;
2886                 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2887                 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2888                 rmd->cdb[14] = 0;
2889                 rmd->cdb[15] = 0;
2890                 rmd->cdb_length = 16;
2891         } else {
2892                 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2893                 rmd->cdb[1] = 0;
2894                 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2895                 rmd->cdb[6] = 0;
2896                 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2897                 rmd->cdb[9] = 0;
2898                 rmd->cdb_length = 10;
2899         }
2900 }
2901
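/*
 * For RAID 1/Triple writes, gather the AIO handle (IT nexus) of the
 * corresponding data drive in each mirror group so that the write can be
 * submitted to every copy of the data.
 */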
2902 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2903         struct pqi_scsi_dev_raid_map_data *rmd)
2904 {
2905         u32 index;
2906         u32 group;
2907
2908         group = rmd->map_index / rmd->data_disks_per_row;
2909
2910         index = rmd->map_index - (group * rmd->data_disks_per_row);
2911         rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2912         index += rmd->data_disks_per_row;
2913         rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2914         if (rmd->layout_map_count > 2) {
2915                 index += rmd->data_disks_per_row;
2916                 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2917         }
2918
2919         rmd->num_it_nexus_entries = rmd->layout_map_count;
2920 }
2921
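/*
 * Try to bypass the RAID engine by sending a logical-volume request
 * straight to a physical drive.  Any condition that cannot be mapped
 * (unsupported RAID level, zero-length transfer, request spanning RAID
 * boundaries, oversized encrypted transfer, etc.) returns
 * PQI_RAID_BYPASS_INELIGIBLE so the caller can fall back to the RAID
 * path.  RAID 1/Triple reads are spread round-robin across the mirror
 * groups via next_bypass_group[].
 */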
2922 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2923         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2924         struct pqi_queue_group *queue_group)
2925 {
2926         int rc;
2927         struct raid_map *raid_map;
2928         u32 group;
2929         u32 next_bypass_group;
2930         struct pqi_encryption_info *encryption_info_ptr;
2931         struct pqi_encryption_info encryption_info;
2932         struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2933
2934         rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2935         if (rc)
2936                 return PQI_RAID_BYPASS_INELIGIBLE;
2937
2938         rmd.raid_level = device->raid_level;
2939
2940         if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2941                 return PQI_RAID_BYPASS_INELIGIBLE;
2942
2943         if (unlikely(rmd.block_cnt == 0))
2944                 return PQI_RAID_BYPASS_INELIGIBLE;
2945
2946         raid_map = device->raid_map;
2947
	rc = pqi_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2949         if (rc)
2950                 return PQI_RAID_BYPASS_INELIGIBLE;
2951
2952         if (device->raid_level == SA_RAID_1 ||
2953                 device->raid_level == SA_RAID_TRIPLE) {
2954                 if (rmd.is_write) {
2955                         pqi_calc_aio_r1_nexus(raid_map, &rmd);
2956                 } else {
2957                         group = device->next_bypass_group[rmd.map_index];
2958                         next_bypass_group = group + 1;
2959                         if (next_bypass_group >= rmd.layout_map_count)
2960                                 next_bypass_group = 0;
2961                         device->next_bypass_group[rmd.map_index] = next_bypass_group;
2962                         rmd.map_index += group * rmd.data_disks_per_row;
2963                 }
2964         } else if ((device->raid_level == SA_RAID_5 ||
2965                 device->raid_level == SA_RAID_6) &&
2966                 (rmd.layout_map_count > 1 || rmd.is_write)) {
2967                 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2968                 if (rc)
2969                         return PQI_RAID_BYPASS_INELIGIBLE;
2970         }
2971
2972         if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2973                 return PQI_RAID_BYPASS_INELIGIBLE;
2974
2975         rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2976         rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2977                 rmd.first_row * rmd.strip_size +
2978                 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2979         rmd.disk_block_cnt = rmd.block_cnt;
2980
2981         /* Handle differing logical/physical block sizes. */
2982         if (raid_map->phys_blk_shift) {
2983                 rmd.disk_block <<= raid_map->phys_blk_shift;
2984                 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2985         }
2986
2987         if (unlikely(rmd.disk_block_cnt > 0xffff))
2988                 return PQI_RAID_BYPASS_INELIGIBLE;
2989
2990         pqi_set_aio_cdb(&rmd);
2991
2992         if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2993                 if (rmd.data_length > device->max_transfer_encrypted)
2994                         return PQI_RAID_BYPASS_INELIGIBLE;
2995                 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2996                 encryption_info_ptr = &encryption_info;
2997         } else {
2998                 encryption_info_ptr = NULL;
2999         }
3000
3001         if (rmd.is_write) {
3002                 switch (device->raid_level) {
3003                 case SA_RAID_1:
3004                 case SA_RAID_TRIPLE:
3005                         return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3006                                 encryption_info_ptr, device, &rmd);
3007                 case SA_RAID_5:
3008                 case SA_RAID_6:
3009                         return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3010                                 encryption_info_ptr, device, &rmd);
3011                 }
3012         }
3013
3014         return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3015                 rmd.cdb, rmd.cdb_length, queue_group,
3016                 encryption_info_ptr, true, false);
3017 }
3018
3019 #define PQI_STATUS_IDLE         0x0
3020
3021 #define PQI_CREATE_ADMIN_QUEUE_PAIR     1
3022 #define PQI_DELETE_ADMIN_QUEUE_PAIR     2
3023
3024 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET             0x0
3025 #define PQI_DEVICE_STATE_STATUS_AVAILABLE               0x1
3026 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY            0x2
3027 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY         0x3
3028 #define PQI_DEVICE_STATE_ERROR                          0x4
3029
3030 #define PQI_MODE_READY_TIMEOUT_SECS             30
3031 #define PQI_MODE_READY_POLL_INTERVAL_MSECS      1
3032
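/*
 * Wait for the controller to come up in PQI mode: the PQI signature must
 * appear, the function and status code must report IDLE, and the device
 * status must reach "all registers ready".  A single deadline of
 * PQI_MODE_READY_TIMEOUT_SECS covers all three stages.
 */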
3033 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3034 {
3035         struct pqi_device_registers __iomem *pqi_registers;
3036         unsigned long timeout;
3037         u64 signature;
3038         u8 status;
3039
3040         pqi_registers = ctrl_info->pqi_registers;
3041         timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3042
3043         while (1) {
3044                 signature = readq(&pqi_registers->signature);
3045                 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3046                         sizeof(signature)) == 0)
3047                         break;
3048                 if (time_after(jiffies, timeout)) {
3049                         dev_err(&ctrl_info->pci_dev->dev,
3050                                 "timed out waiting for PQI signature\n");
3051                         return -ETIMEDOUT;
3052                 }
3053                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3054         }
3055
3056         while (1) {
3057                 status = readb(&pqi_registers->function_and_status_code);
3058                 if (status == PQI_STATUS_IDLE)
3059                         break;
3060                 if (time_after(jiffies, timeout)) {
3061                         dev_err(&ctrl_info->pci_dev->dev,
3062                                 "timed out waiting for PQI IDLE\n");
3063                         return -ETIMEDOUT;
3064                 }
3065                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3066         }
3067
3068         while (1) {
3069                 if (readl(&pqi_registers->device_status) ==
3070                         PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3071                         break;
3072                 if (time_after(jiffies, timeout)) {
3073                         dev_err(&ctrl_info->pci_dev->dev,
3074                                 "timed out waiting for PQI all registers ready\n");
3075                         return -ETIMEDOUT;
3076                 }
3077                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3078         }
3079
3080         return 0;
3081 }
3082
3083 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3084 {
3085         struct pqi_scsi_dev *device;
3086
3087         device = io_request->scmd->device->hostdata;
3088         device->raid_bypass_enabled = false;
3089         device->aio_enabled = false;
3090 }
3091
3092 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3093 {
3094         struct pqi_ctrl_info *ctrl_info;
3095         struct pqi_scsi_dev *device;
3096
3097         device = sdev->hostdata;
3098         if (device->device_offline)
3099                 return;
3100
3101         device->device_offline = true;
3102         ctrl_info = shost_to_hba(sdev->host);
3103         pqi_schedule_rescan_worker(ctrl_info);
3104         dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3105                 path, ctrl_info->scsi_host->host_no, device->bus,
3106                 device->target, device->lun);
3107 }
3108
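/*
 * Convert a RAID path error IU into SCSI midlayer status: derive the host
 * byte from the data_out_result, copy any sense/response data into the
 * sense buffer, and take the device offline when the sense data reports
 * HARDWARE ERROR with ASC 0x3e, ASCQ 0x1 (logical unit failure).
 */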
3109 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3110 {
3111         u8 scsi_status;
3112         u8 host_byte;
3113         struct scsi_cmnd *scmd;
3114         struct pqi_raid_error_info *error_info;
3115         size_t sense_data_length;
3116         int residual_count;
3117         int xfer_count;
3118         struct scsi_sense_hdr sshdr;
3119
3120         scmd = io_request->scmd;
3121         if (!scmd)
3122                 return;
3123
3124         error_info = io_request->error_info;
3125         scsi_status = error_info->status;
3126         host_byte = DID_OK;
3127
3128         switch (error_info->data_out_result) {
3129         case PQI_DATA_IN_OUT_GOOD:
3130                 break;
3131         case PQI_DATA_IN_OUT_UNDERFLOW:
3132                 xfer_count =
3133                         get_unaligned_le32(&error_info->data_out_transferred);
3134                 residual_count = scsi_bufflen(scmd) - xfer_count;
3135                 scsi_set_resid(scmd, residual_count);
3136                 if (xfer_count < scmd->underflow)
3137                         host_byte = DID_SOFT_ERROR;
3138                 break;
3139         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3140         case PQI_DATA_IN_OUT_ABORTED:
3141                 host_byte = DID_ABORT;
3142                 break;
3143         case PQI_DATA_IN_OUT_TIMEOUT:
3144                 host_byte = DID_TIME_OUT;
3145                 break;
3146         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3147         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3148         case PQI_DATA_IN_OUT_BUFFER_ERROR:
3149         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3150         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3151         case PQI_DATA_IN_OUT_ERROR:
3152         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3153         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3154         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3155         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3156         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3157         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3158         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3159         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3160         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3161         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3162         default:
3163                 host_byte = DID_ERROR;
3164                 break;
3165         }
3166
3167         sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3168         if (sense_data_length == 0)
3169                 sense_data_length =
3170                         get_unaligned_le16(&error_info->response_data_length);
3171         if (sense_data_length) {
3172                 if (sense_data_length > sizeof(error_info->data))
3173                         sense_data_length = sizeof(error_info->data);
3174
3175                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3176                         scsi_normalize_sense(error_info->data,
3177                                 sense_data_length, &sshdr) &&
3178                                 sshdr.sense_key == HARDWARE_ERROR &&
3179                                 sshdr.asc == 0x3e) {
3180                         struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3181                         struct pqi_scsi_dev *device = scmd->device->hostdata;
3182
3183                         switch (sshdr.ascq) {
3184                         case 0x1: /* LOGICAL UNIT FAILURE */
3185                                 if (printk_ratelimit())
3186                                         scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3187                                                 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3188                                 pqi_take_device_offline(scmd->device, "RAID");
3189                                 host_byte = DID_NO_CONNECT;
3190                                 break;
3191
3192                         default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3193                                 if (printk_ratelimit())
3194                                         scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3195                                                 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3196                                 break;
3197                         }
3198                 }
3199
3200                 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3201                         sense_data_length = SCSI_SENSE_BUFFERSIZE;
3202                 memcpy(scmd->sense_buffer, error_info->data,
3203                         sense_data_length);
3204         }
3205
3206         scmd->result = scsi_status;
3207         set_host_byte(scmd, host_byte);
3208 }
3209
3210 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3211 {
3212         u8 scsi_status;
3213         u8 host_byte;
3214         struct scsi_cmnd *scmd;
3215         struct pqi_aio_error_info *error_info;
3216         size_t sense_data_length;
3217         int residual_count;
3218         int xfer_count;
3219         bool device_offline;
3220         struct pqi_scsi_dev *device;
3221
3222         scmd = io_request->scmd;
3223         error_info = io_request->error_info;
3224         host_byte = DID_OK;
3225         sense_data_length = 0;
3226         device_offline = false;
3227         device = scmd->device->hostdata;
3228
3229         switch (error_info->service_response) {
3230         case PQI_AIO_SERV_RESPONSE_COMPLETE:
3231                 scsi_status = error_info->status;
3232                 break;
3233         case PQI_AIO_SERV_RESPONSE_FAILURE:
3234                 switch (error_info->status) {
3235                 case PQI_AIO_STATUS_IO_ABORTED:
3236                         scsi_status = SAM_STAT_TASK_ABORTED;
3237                         break;
3238                 case PQI_AIO_STATUS_UNDERRUN:
3239                         scsi_status = SAM_STAT_GOOD;
3240                         residual_count = get_unaligned_le32(
3241                                                 &error_info->residual_count);
3242                         scsi_set_resid(scmd, residual_count);
3243                         xfer_count = scsi_bufflen(scmd) - residual_count;
3244                         if (xfer_count < scmd->underflow)
3245                                 host_byte = DID_SOFT_ERROR;
3246                         break;
3247                 case PQI_AIO_STATUS_OVERRUN:
3248                         scsi_status = SAM_STAT_GOOD;
3249                         break;
3250                 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3251                         pqi_aio_path_disabled(io_request);
3252                         if (pqi_is_multipath_device(device)) {
3253                                 pqi_device_remove_start(device);
3254                                 host_byte = DID_NO_CONNECT;
3255                                 scsi_status = SAM_STAT_CHECK_CONDITION;
3256                         } else {
3257                                 scsi_status = SAM_STAT_GOOD;
3258                                 io_request->status = -EAGAIN;
3259                         }
3260                         break;
3261                 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3262                 case PQI_AIO_STATUS_INVALID_DEVICE:
3263                         if (!io_request->raid_bypass) {
3264                                 device_offline = true;
3265                                 pqi_take_device_offline(scmd->device, "AIO");
3266                                 host_byte = DID_NO_CONNECT;
3267                         }
3268                         scsi_status = SAM_STAT_CHECK_CONDITION;
3269                         break;
3270                 case PQI_AIO_STATUS_IO_ERROR:
3271                 default:
3272                         scsi_status = SAM_STAT_CHECK_CONDITION;
3273                         break;
3274                 }
3275                 break;
3276         case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3277         case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3278                 scsi_status = SAM_STAT_GOOD;
3279                 break;
3280         case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3281         case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3282         default:
3283                 scsi_status = SAM_STAT_CHECK_CONDITION;
3284                 break;
3285         }
3286
3287         if (error_info->data_present) {
3288                 sense_data_length =
3289                         get_unaligned_le16(&error_info->data_length);
3290                 if (sense_data_length) {
3291                         if (sense_data_length > sizeof(error_info->data))
3292                                 sense_data_length = sizeof(error_info->data);
3293                         if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3294                                 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3295                         memcpy(scmd->sense_buffer, error_info->data,
3296                                 sense_data_length);
3297                 }
3298         }
3299
3300         if (device_offline && sense_data_length == 0)
3301                 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3302
3303         scmd->result = scsi_status;
3304         set_host_byte(scmd, host_byte);
3305 }
3306
3307 static void pqi_process_io_error(unsigned int iu_type,
3308         struct pqi_io_request *io_request)
3309 {
3310         switch (iu_type) {
3311         case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3312                 pqi_process_raid_io_error(io_request);
3313                 break;
3314         case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3315                 pqi_process_aio_io_error(io_request);
3316                 break;
3317         }
3318 }
3319
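/*
 * Map a SOP task management response code to an errno: 0 for
 * complete/succeeded, -EAGAIN for rejected, -ENODEV for an incorrect
 * logical unit, -EIO for anything else.
 */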
3320 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3321         struct pqi_task_management_response *response)
3322 {
3323         int rc;
3324
3325         switch (response->response_code) {
3326         case SOP_TMF_COMPLETE:
3327         case SOP_TMF_FUNCTION_SUCCEEDED:
3328                 rc = 0;
3329                 break;
3330         case SOP_TMF_REJECTED:
3331                 rc = -EAGAIN;
3332                 break;
3333         case SOP_RC_INCORRECT_LOGICAL_UNIT:
3334                 rc = -ENODEV;
3335                 break;
3336         default:
3337                 rc = -EIO;
3338                 break;
3339         }
3340
3341         if (rc)
3342                 dev_err(&ctrl_info->pci_dev->dev,
3343                         "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3344
3345         return rc;
3346 }
3347
3348 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3349         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3350 {
3351         pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3352 }
3353
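/*
 * Drain one queue group's outbound (response) queue, matching each
 * response to its pqi_io_request by request ID and invoking the request's
 * completion callback.  Returns the number of responses handled, or -1
 * after taking the controller offline if the producer index, request ID,
 * or IU type is invalid.
 */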
3354 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3355 {
3356         int num_responses;
3357         pqi_index_t oq_pi;
3358         pqi_index_t oq_ci;
3359         struct pqi_io_request *io_request;
3360         struct pqi_io_response *response;
3361         u16 request_id;
3362
3363         num_responses = 0;
3364         oq_ci = queue_group->oq_ci_copy;
3365
3366         while (1) {
3367                 oq_pi = readl(queue_group->oq_pi);
3368                 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3369                         pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3370                         dev_err(&ctrl_info->pci_dev->dev,
3371                                 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3372                                 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3373                         return -1;
3374                 }
3375                 if (oq_pi == oq_ci)
3376                         break;
3377
3378                 num_responses++;
3379                 response = queue_group->oq_element_array +
3380                         (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3381
3382                 request_id = get_unaligned_le16(&response->request_id);
3383                 if (request_id >= ctrl_info->max_io_slots) {
3384                         pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3385                         dev_err(&ctrl_info->pci_dev->dev,
3386                                 "request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
3387                                 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3388                         return -1;
3389                 }
3390
3391                 io_request = &ctrl_info->io_request_pool[request_id];
3392                 if (atomic_read(&io_request->refcount) == 0) {
3393                         pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3394                         dev_err(&ctrl_info->pci_dev->dev,
3395                                 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
3396                                 request_id, oq_pi, oq_ci);
3397                         return -1;
3398                 }
3399
3400                 switch (response->header.iu_type) {
3401                 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3402                 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3403                         if (io_request->scmd)
3404                                 io_request->scmd->result = 0;
3405                         fallthrough;
3406                 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3407                         break;
3408                 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3409                         io_request->status =
3410                                 get_unaligned_le16(
3411                                 &((struct pqi_vendor_general_response *)response)->status);
3412                         break;
3413                 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3414                         io_request->status = pqi_interpret_task_management_response(ctrl_info,
3415                                 (void *)response);
3416                         break;
3417                 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3418                         pqi_aio_path_disabled(io_request);
3419                         io_request->status = -EAGAIN;
3420                         break;
3421                 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3422                 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3423                         io_request->error_info = ctrl_info->error_buffer +
3424                                 (get_unaligned_le16(&response->error_index) *
3425                                 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3426                         pqi_process_io_error(response->header.iu_type, io_request);
3427                         break;
3428                 default:
3429                         pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3430                         dev_err(&ctrl_info->pci_dev->dev,
3431                                 "unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
3432                                 response->header.iu_type, oq_pi, oq_ci);
3433                         return -1;
3434                 }
3435
3436                 io_request->io_complete_callback(io_request, io_request->context);
3437
3438                 /*
3439                  * Note that the I/O request structure CANNOT BE TOUCHED after
3440                  * returning from the I/O completion callback!
3441                  */
3442                 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3443         }
3444
3445         if (num_responses) {
3446                 queue_group->oq_ci_copy = oq_ci;
3447                 writel(oq_ci, queue_group->oq_ci);
3448         }
3449
3450         return num_responses;
3451 }
3452
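/*
 * Number of free elements in a circular queue with producer index pi and
 * consumer index ci.  One element is always left unused so that a full
 * queue can be distinguished from an empty one (pi == ci).  For example,
 * with 8 elements, pi == 5 and ci == 2, three elements are in use and
 * 8 - 3 - 1 = 4 are free.
 */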
3453 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3454         unsigned int ci, unsigned int elements_in_queue)
3455 {
3456         unsigned int num_elements_used;
3457
3458         if (pi >= ci)
3459                 num_elements_used = pi - ci;
3460         else
3461                 num_elements_used = elements_in_queue - ci + pi;
3462
3463         return elements_in_queue - num_elements_used - 1;
3464 }
3465
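/*
 * Post an event acknowledgment IU on the RAID path of the default queue
 * group, spinning until an inbound queue element frees up or the
 * controller goes offline.
 */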
3466 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3467         struct pqi_event_acknowledge_request *iu, size_t iu_length)
3468 {
3469         pqi_index_t iq_pi;
3470         pqi_index_t iq_ci;
3471         unsigned long flags;
3472         void *next_element;
3473         struct pqi_queue_group *queue_group;
3474
3475         queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3476         put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3477
3478         while (1) {
3479                 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3480
3481                 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3482                 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3483
3484                 if (pqi_num_elements_free(iq_pi, iq_ci,
3485                         ctrl_info->num_elements_per_iq))
3486                         break;
3487
3488                 spin_unlock_irqrestore(
3489                         &queue_group->submit_lock[RAID_PATH], flags);
3490
3491                 if (pqi_ctrl_offline(ctrl_info))
3492                         return;
3493         }
3494
3495         next_element = queue_group->iq_element_array[RAID_PATH] +
3496                 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3497
3498         memcpy(next_element, iu, iu_length);
3499
3500         iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3501         queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3502
3503         /*
3504          * This write notifies the controller that an IU is available to be
3505          * processed.
3506          */
3507         writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3508
3509         spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3510 }
3511
3512 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3513         struct pqi_event *event)
3514 {
3515         struct pqi_event_acknowledge_request request;
3516
3517         memset(&request, 0, sizeof(request));
3518
3519         request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3520         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3521                 &request.header.iu_length);
3522         request.event_type = event->event_type;
3523         put_unaligned_le16(event->event_id, &request.event_id);
3524         put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3525
3526         pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3527 }
3528
3529 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS              30
3530 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS        1
3531
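/*
 * During OFA, poll the soft reset status register until the controller
 * asks the driver to initiate the reset, aborts the reset, stops running
 * firmware, or the poll times out.
 */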
3532 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3533         struct pqi_ctrl_info *ctrl_info)
3534 {
3535         u8 status;
3536         unsigned long timeout;
3537
3538         timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3539
3540         while (1) {
3541                 status = pqi_read_soft_reset_status(ctrl_info);
3542                 if (status & PQI_SOFT_RESET_INITIATE)
3543                         return RESET_INITIATE_DRIVER;
3544
3545                 if (status & PQI_SOFT_RESET_ABORT)
3546                         return RESET_ABORT;
3547
3548                 if (!sis_is_firmware_running(ctrl_info))
3549                         return RESET_NORESPONSE;
3550
3551                 if (time_after(jiffies, timeout)) {
3552                         dev_warn(&ctrl_info->pci_dev->dev,
3553                                 "timed out waiting for soft reset status\n");
3554                         return RESET_TIMEDOUT;
3555                 }
3556
3557                 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3558         }
3559 }
3560
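/*
 * Finish an Online Firmware Activation based on the soft reset status:
 * issue a driver-initiated SIS soft reset if requested, restart the
 * controller from SIS mode on a firmware-initiated reset, or clean up the
 * OFA host buffer and unquiesce on abort; an unexpected status takes the
 * controller offline.
 */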
3561 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3562 {
3563         int rc;
3564         unsigned int delay_secs;
3565         enum pqi_soft_reset_status reset_status;
3566
3567         if (ctrl_info->soft_reset_handshake_supported)
3568                 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3569         else
3570                 reset_status = RESET_INITIATE_FIRMWARE;
3571
3572         delay_secs = PQI_POST_RESET_DELAY_SECS;
3573
3574         switch (reset_status) {
3575         case RESET_TIMEDOUT:
3576                 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3577                 fallthrough;
3578         case RESET_INITIATE_DRIVER:
3579                 dev_info(&ctrl_info->pci_dev->dev,
3580                                 "Online Firmware Activation: resetting controller\n");
3581                 sis_soft_reset(ctrl_info);
3582                 fallthrough;
3583         case RESET_INITIATE_FIRMWARE:
3584                 ctrl_info->pqi_mode_enabled = false;
3585                 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3586                 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3587                 pqi_ofa_free_host_buffer(ctrl_info);
3588                 pqi_ctrl_ofa_done(ctrl_info);
3589                 dev_info(&ctrl_info->pci_dev->dev,
3590                                 "Online Firmware Activation: %s\n",
3591                                 rc == 0 ? "SUCCESS" : "FAILED");
3592                 break;
3593         case RESET_ABORT:
3594                 dev_info(&ctrl_info->pci_dev->dev,
3595                                 "Online Firmware Activation ABORTED\n");
3596                 if (ctrl_info->soft_reset_handshake_supported)
3597                         pqi_clear_soft_reset_status(ctrl_info);
3598                 pqi_ofa_free_host_buffer(ctrl_info);
3599                 pqi_ctrl_ofa_done(ctrl_info);
3600                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3601                 break;
3602         case RESET_NORESPONSE:
3603                 fallthrough;
3604         default:
3605                 dev_err(&ctrl_info->pci_dev->dev,
3606                         "unexpected Online Firmware Activation reset status: 0x%x\n",
3607                         reset_status);
3608                 pqi_ofa_free_host_buffer(ctrl_info);
3609                 pqi_ctrl_ofa_done(ctrl_info);
3610                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3611                 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3612                 break;
3613         }
3614 }
3615
3616 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3617 {
3618         struct pqi_ctrl_info *ctrl_info;
3619
3620         ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3621
3622         pqi_ctrl_ofa_start(ctrl_info);
3623         pqi_ofa_setup_host_buffer(ctrl_info);
3624         pqi_ofa_host_memory_update(ctrl_info);
3625 }
3626
3627 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3628 {
3629         struct pqi_ctrl_info *ctrl_info;
3630         struct pqi_event *event;
3631
3632         ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3633
3634         event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3635
3636         pqi_ofa_ctrl_quiesce(ctrl_info);
3637         pqi_acknowledge_event(ctrl_info, event);
3638         pqi_process_soft_reset(ctrl_info);
3639 }
3640
3641 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3642         struct pqi_event *event)
3643 {
3644         bool ack_event;
3645
3646         ack_event = true;
3647
3648         switch (event->event_id) {
3649         case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3650                 dev_info(&ctrl_info->pci_dev->dev,
3651                         "received Online Firmware Activation memory allocation request\n");
3652                 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3653                 break;
3654         case PQI_EVENT_OFA_QUIESCE:
3655                 dev_info(&ctrl_info->pci_dev->dev,
3656                         "received Online Firmware Activation quiesce request\n");
3657                 schedule_work(&ctrl_info->ofa_quiesce_work);
3658                 ack_event = false;
3659                 break;
3660         case PQI_EVENT_OFA_CANCELED:
3661                 dev_info(&ctrl_info->pci_dev->dev,
3662                         "received Online Firmware Activation cancel request: reason: %u\n",
3663                         ctrl_info->ofa_cancel_reason);
3664                 pqi_ofa_free_host_buffer(ctrl_info);
3665                 pqi_ctrl_ofa_done(ctrl_info);
3666                 break;
3667         default:
3668                 dev_err(&ctrl_info->pci_dev->dev,
3669                         "received unknown Online Firmware Activation request: event ID: %u\n",
3670                         event->event_id);
3671                 break;
3672         }
3673
3674         return ack_event;
3675 }
3676
3677 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3678 {
3679         unsigned long flags;
3680         struct pqi_scsi_dev *device;
3681
3682         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3683
3684         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3685                 if (device->raid_bypass_enabled)
3686                         device->raid_bypass_enabled = false;
3687
3688         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3689 }
3690
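/*
 * Worker that handles pending controller events: OFA events get dedicated
 * processing, logical-device events flag a logical-volume rescan, AIO
 * state changes disable RAID bypass, and any non-OFA event schedules a
 * delayed rescan.  Events are acknowledged unless the OFA handler defers
 * the acknowledgment (quiesce).
 */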
3691 static void pqi_event_worker(struct work_struct *work)
3692 {
3693         unsigned int i;
3694         bool rescan_needed;
3695         struct pqi_ctrl_info *ctrl_info;
3696         struct pqi_event *event;
3697         bool ack_event;
3698
3699         ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3700
3701         pqi_ctrl_busy(ctrl_info);
3702         pqi_wait_if_ctrl_blocked(ctrl_info);
3703         if (pqi_ctrl_offline(ctrl_info))
3704                 goto out;
3705
3706         rescan_needed = false;
3707         event = ctrl_info->events;
3708         for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3709                 if (event->pending) {
3710                         event->pending = false;
3711                         if (event->event_type == PQI_EVENT_TYPE_OFA) {
3712                                 ack_event = pqi_ofa_process_event(ctrl_info, event);
3713                         } else {
3714                                 ack_event = true;
3715                                 rescan_needed = true;
3716                                 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3717                                         ctrl_info->logical_volume_rescan_needed = true;
3718                                 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3719                                         pqi_disable_raid_bypass(ctrl_info);
3720                         }
3721                         if (ack_event)
3722                                 pqi_acknowledge_event(ctrl_info, event);
3723                 }
3724                 event++;
3725         }
3726
3727 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY         (5 * HZ)
3728
3729         if (rescan_needed)
3730                 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3731                         PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3732
3733 out:
3734         pqi_ctrl_unbusy(ctrl_info);
3735 }
3736
3737 #define PQI_HEARTBEAT_TIMER_INTERVAL    (10 * HZ)
3738
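/*
 * Periodic health check: if neither the interrupt count nor the firmware
 * heartbeat counter has advanced since the last interval, the controller
 * is assumed hung and is taken offline.
 */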
3739 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3740 {
3741         int num_interrupts;
3742         u32 heartbeat_count;
3743         struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3744
3745         pqi_check_ctrl_health(ctrl_info);
3746         if (pqi_ctrl_offline(ctrl_info))
3747                 return;
3748
3749         num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3750         heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3751
3752         if (num_interrupts == ctrl_info->previous_num_interrupts) {
3753                 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3754                         dev_err(&ctrl_info->pci_dev->dev,
3755                                 "no heartbeat detected - last heartbeat count: %u\n",
3756                                 heartbeat_count);
3757                         pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3758                         return;
3759                 }
3760         } else {
3761                 ctrl_info->previous_num_interrupts = num_interrupts;
3762         }
3763
3764         ctrl_info->previous_heartbeat_count = heartbeat_count;
3765         mod_timer(&ctrl_info->heartbeat_timer,
3766                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3767 }
3768
3769 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3770 {
3771         if (!ctrl_info->heartbeat_counter)
3772                 return;
3773
3774         ctrl_info->previous_num_interrupts =
3775                 atomic_read(&ctrl_info->num_interrupts);
3776         ctrl_info->previous_heartbeat_count =
3777                 pqi_read_heartbeat_counter(ctrl_info);
3778
3779         ctrl_info->heartbeat_timer.expires =
3780                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3781         add_timer(&ctrl_info->heartbeat_timer);
3782 }
3783
3784 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3785 {
3786         del_timer_sync(&ctrl_info->heartbeat_timer);
3787 }
3788
3789 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3790         struct pqi_event *event, struct pqi_event_response *response)
3791 {
3792         switch (event->event_id) {
3793         case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3794                 ctrl_info->ofa_bytes_requested =
3795                         get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3796                 break;
3797         case PQI_EVENT_OFA_CANCELED:
3798                 ctrl_info->ofa_cancel_reason =
3799                         get_unaligned_le16(&response->data.ofa_cancelled.reason);
3800                 break;
3801         }
3802 }
3803
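/*
 * Drain the event queue, latching each event that requests acknowledgment
 * into ctrl_info->events[] and scheduling the event worker.  Returns the
 * number of events consumed, or -1 if the producer index is out of range.
 */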
3804 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3805 {
3806         int num_events;
3807         pqi_index_t oq_pi;
3808         pqi_index_t oq_ci;
3809         struct pqi_event_queue *event_queue;
3810         struct pqi_event_response *response;
3811         struct pqi_event *event;
3812         int event_index;
3813
3814         event_queue = &ctrl_info->event_queue;
3815         num_events = 0;
3816         oq_ci = event_queue->oq_ci_copy;
3817
3818         while (1) {
3819                 oq_pi = readl(event_queue->oq_pi);
3820                 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3821                         pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3822                         dev_err(&ctrl_info->pci_dev->dev,
3823                                 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3824                                 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3825                         return -1;
3826                 }
3827
3828                 if (oq_pi == oq_ci)
3829                         break;
3830
3831                 num_events++;
3832                 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3833
3834                 event_index = pqi_event_type_to_event_index(response->event_type);
3835
3836                 if (event_index >= 0 && response->request_acknowledge) {
3837                         event = &ctrl_info->events[event_index];
3838                         event->pending = true;
3839                         event->event_type = response->event_type;
3840                         event->event_id = get_unaligned_le16(&response->event_id);
3841                         event->additional_event_id =
3842                                 get_unaligned_le32(&response->additional_event_id);
3843                         if (event->event_type == PQI_EVENT_TYPE_OFA)
3844                                 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3845                 }
3846
3847                 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3848         }
3849
3850         if (num_events) {
3851                 event_queue->oq_ci_copy = oq_ci;
3852                 writel(oq_ci, event_queue->oq_ci);
3853                 schedule_work(&ctrl_info->event_work);
3854         }
3855
3856         return num_events;
3857 }
3858
3859 #define PQI_LEGACY_INTX_MASK    0x1
3860
3861 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3862 {
3863         u32 intx_mask;
3864         struct pqi_device_registers __iomem *pqi_registers;
3865         volatile void __iomem *register_addr;
3866
3867         pqi_registers = ctrl_info->pqi_registers;
3868
3869         if (enable_intx)
3870                 register_addr = &pqi_registers->legacy_intx_mask_clear;
3871         else
3872                 register_addr = &pqi_registers->legacy_intx_mask_set;
3873
3874         intx_mask = readl(register_addr);
3875         intx_mask |= PQI_LEGACY_INTX_MASK;
3876         writel(intx_mask, register_addr);
3877 }
3878
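/*
 * Transition between MSI-X, legacy INTx, and no-interrupt operation,
 * keeping the PQI legacy INTx mask and the SIS interrupt mode register in
 * step with the new mode.
 */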
3879 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3880         enum pqi_irq_mode new_mode)
3881 {
3882         switch (ctrl_info->irq_mode) {
3883         case IRQ_MODE_MSIX:
3884                 switch (new_mode) {
3885                 case IRQ_MODE_MSIX:
3886                         break;
3887                 case IRQ_MODE_INTX:
3888                         pqi_configure_legacy_intx(ctrl_info, true);
3889                         sis_enable_intx(ctrl_info);
3890                         break;
3891                 case IRQ_MODE_NONE:
3892                         break;
3893                 }
3894                 break;
3895         case IRQ_MODE_INTX:
3896                 switch (new_mode) {
3897                 case IRQ_MODE_MSIX:
3898                         pqi_configure_legacy_intx(ctrl_info, false);
3899                         sis_enable_msix(ctrl_info);
3900                         break;
3901                 case IRQ_MODE_INTX:
3902                         break;
3903                 case IRQ_MODE_NONE:
3904                         pqi_configure_legacy_intx(ctrl_info, false);
3905                         break;
3906                 }
3907                 break;
3908         case IRQ_MODE_NONE:
3909                 switch (new_mode) {
3910                 case IRQ_MODE_MSIX:
3911                         sis_enable_msix(ctrl_info);
3912                         break;
3913                 case IRQ_MODE_INTX:
3914                         pqi_configure_legacy_intx(ctrl_info, true);
3915                         sis_enable_intx(ctrl_info);
3916                         break;
3917                 case IRQ_MODE_NONE:
3918                         break;
3919                 }
3920                 break;
3921         }
3922
3923         ctrl_info->irq_mode = new_mode;
3924 }
3925
3926 #define PQI_LEGACY_INTX_PENDING         0x1
3927
3928 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3929 {
3930         bool valid_irq;
3931         u32 intx_status;
3932
3933         switch (ctrl_info->irq_mode) {
3934         case IRQ_MODE_MSIX:
3935                 valid_irq = true;
3936                 break;
3937         case IRQ_MODE_INTX:
3938                 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3939                 if (intx_status & PQI_LEGACY_INTX_PENDING)
3940                         valid_irq = true;
3941                 else
3942                         valid_irq = false;
3943                 break;
3944         case IRQ_MODE_NONE:
3945         default:
3946                 valid_irq = false;
3947                 break;
3948         }
3949
3950         return valid_irq;
3951 }
3952
3953 static irqreturn_t pqi_irq_handler(int irq, void *data)
3954 {
3955         struct pqi_ctrl_info *ctrl_info;
3956         struct pqi_queue_group *queue_group;
3957         int num_io_responses_handled;
3958         int num_events_handled;
3959
3960         queue_group = data;
3961         ctrl_info = queue_group->ctrl_info;
3962
3963         if (!pqi_is_valid_irq(ctrl_info))
3964                 return IRQ_NONE;
3965
3966         num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3967         if (num_io_responses_handled < 0)
3968                 goto out;
3969
3970         if (irq == ctrl_info->event_irq) {
3971                 num_events_handled = pqi_process_event_intr(ctrl_info);
3972                 if (num_events_handled < 0)
3973                         goto out;
3974         } else {
3975                 num_events_handled = 0;
3976         }
3977
3978         if (num_io_responses_handled + num_events_handled > 0)
3979                 atomic_inc(&ctrl_info->num_interrupts);
3980
3981         pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3982         pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3983
3984 out:
3985         return IRQ_HANDLED;
3986 }
3987
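/*
 * Request one IRQ per enabled MSI-X vector; vector 0 also serves as the
 * event interrupt.
 */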
3988 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3989 {
3990         struct pci_dev *pci_dev = ctrl_info->pci_dev;
3991         int i;
3992         int rc;
3993
3994         ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3995
3996         for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3997                 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3998                         DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3999                 if (rc) {
4000                         dev_err(&pci_dev->dev,
4001                                 "irq %u init failed with error %d\n",
4002                                 pci_irq_vector(pci_dev, i), rc);
4003                         return rc;
4004                 }
4005                 ctrl_info->num_msix_vectors_initialized++;
4006         }
4007
4008         return 0;
4009 }
4010
4011 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4012 {
4013         int i;
4014
4015         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4016                 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4017                         &ctrl_info->queue_groups[i]);
4018
4019         ctrl_info->num_msix_vectors_initialized = 0;
4020 }
4021
4022 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4023 {
4024         int num_vectors_enabled;
4025         unsigned int flags = PCI_IRQ_MSIX;
4026
4027         if (!pqi_disable_managed_interrupts)
4028                 flags |= PCI_IRQ_AFFINITY;
4029
4030         num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4031                         PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4032                         flags);
4033         if (num_vectors_enabled < 0) {
4034                 dev_err(&ctrl_info->pci_dev->dev,
4035                         "MSI-X init failed with error %d\n",
4036                         num_vectors_enabled);
4037                 return num_vectors_enabled;
4038         }
4039
4040         ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4041         ctrl_info->irq_mode = IRQ_MODE_MSIX;
4042         return 0;
4043 }
4044
4045 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4046 {
4047         if (ctrl_info->num_msix_vectors_enabled) {
4048                 pci_free_irq_vectors(ctrl_info->pci_dev);
4049                 ctrl_info->num_msix_vectors_enabled = 0;
4050         }
4051 }
4052
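/*
 * All operational queue element arrays and queue indexes share a single
 * DMA-coherent allocation.  The first pass below sizes the allocation,
 * accounting for per-array alignment padding; the remaining passes carve
 * it into per-queue-group inbound/outbound element arrays, the event
 * queue, and the aligned CI/PI index words.
 */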
4053 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4054 {
4055         unsigned int i;
4056         size_t alloc_length;
4057         size_t element_array_length_per_iq;
4058         size_t element_array_length_per_oq;
4059         void *element_array;
4060         void __iomem *next_queue_index;
4061         void *aligned_pointer;
4062         unsigned int num_inbound_queues;
4063         unsigned int num_outbound_queues;
4064         unsigned int num_queue_indexes;
4065         struct pqi_queue_group *queue_group;
4066
4067         element_array_length_per_iq =
4068                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4069                 ctrl_info->num_elements_per_iq;
4070         element_array_length_per_oq =
4071                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4072                 ctrl_info->num_elements_per_oq;
4073         num_inbound_queues = ctrl_info->num_queue_groups * 2;
4074         num_outbound_queues = ctrl_info->num_queue_groups;
4075         num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4076
4077         aligned_pointer = NULL;
4078
4079         for (i = 0; i < num_inbound_queues; i++) {
4080                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4081                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4082                 aligned_pointer += element_array_length_per_iq;
4083         }
4084
4085         for (i = 0; i < num_outbound_queues; i++) {
4086                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4087                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4088                 aligned_pointer += element_array_length_per_oq;
4089         }
4090
4091         aligned_pointer = PTR_ALIGN(aligned_pointer,
4092                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4093         aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4094                 PQI_EVENT_OQ_ELEMENT_LENGTH;
4095
4096         for (i = 0; i < num_queue_indexes; i++) {
4097                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4098                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4099                 aligned_pointer += sizeof(pqi_index_t);
4100         }
4101
4102         alloc_length = (size_t)aligned_pointer +
4103                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4104
4105         alloc_length += PQI_EXTRA_SGL_MEMORY;
4106
4107         ctrl_info->queue_memory_base =
4108                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4109                                    &ctrl_info->queue_memory_base_dma_handle,
4110                                    GFP_KERNEL);
4111
4112         if (!ctrl_info->queue_memory_base)
4113                 return -ENOMEM;
4114
4115         ctrl_info->queue_memory_length = alloc_length;
4116
4117         element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4118                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4119
4120         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4121                 queue_group = &ctrl_info->queue_groups[i];
4122                 queue_group->iq_element_array[RAID_PATH] = element_array;
4123                 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4124                         ctrl_info->queue_memory_base_dma_handle +
4125                                 (element_array - ctrl_info->queue_memory_base);
4126                 element_array += element_array_length_per_iq;
4127                 element_array = PTR_ALIGN(element_array,
4128                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4129                 queue_group->iq_element_array[AIO_PATH] = element_array;
4130                 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4131                         ctrl_info->queue_memory_base_dma_handle +
4132                         (element_array - ctrl_info->queue_memory_base);
4133                 element_array += element_array_length_per_iq;
4134                 element_array = PTR_ALIGN(element_array,
4135                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4136         }
4137
4138         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4139                 queue_group = &ctrl_info->queue_groups[i];
4140                 queue_group->oq_element_array = element_array;
4141                 queue_group->oq_element_array_bus_addr =
4142                         ctrl_info->queue_memory_base_dma_handle +
4143                         (element_array - ctrl_info->queue_memory_base);
4144                 element_array += element_array_length_per_oq;
4145                 element_array = PTR_ALIGN(element_array,
4146                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4147         }
4148
4149         ctrl_info->event_queue.oq_element_array = element_array;
4150         ctrl_info->event_queue.oq_element_array_bus_addr =
4151                 ctrl_info->queue_memory_base_dma_handle +
4152                 (element_array - ctrl_info->queue_memory_base);
4153         element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4154                 PQI_EVENT_OQ_ELEMENT_LENGTH;
4155
4156         next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4157                 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4158
4159         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4160                 queue_group = &ctrl_info->queue_groups[i];
4161                 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4162                 queue_group->iq_ci_bus_addr[RAID_PATH] =
4163                         ctrl_info->queue_memory_base_dma_handle +
4164                         (next_queue_index -
4165                         (void __iomem *)ctrl_info->queue_memory_base);
4166                 next_queue_index += sizeof(pqi_index_t);
4167                 next_queue_index = PTR_ALIGN(next_queue_index,
4168                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4169                 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4170                 queue_group->iq_ci_bus_addr[AIO_PATH] =
4171                         ctrl_info->queue_memory_base_dma_handle +
4172                         (next_queue_index -
4173                         (void __iomem *)ctrl_info->queue_memory_base);
4174                 next_queue_index += sizeof(pqi_index_t);
4175                 next_queue_index = PTR_ALIGN(next_queue_index,
4176                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4177                 queue_group->oq_pi = next_queue_index;
4178                 queue_group->oq_pi_bus_addr =
4179                         ctrl_info->queue_memory_base_dma_handle +
4180                         (next_queue_index -
4181                         (void __iomem *)ctrl_info->queue_memory_base);
4182                 next_queue_index += sizeof(pqi_index_t);
4183                 next_queue_index = PTR_ALIGN(next_queue_index,
4184                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4185         }
4186
4187         ctrl_info->event_queue.oq_pi = next_queue_index;
4188         ctrl_info->event_queue.oq_pi_bus_addr =
4189                 ctrl_info->queue_memory_base_dma_handle +
4190                 (next_queue_index -
4191                 (void __iomem *)ctrl_info->queue_memory_base);
4192
4193         return 0;
4194 }
4195
4196 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4197 {
4198         unsigned int i;
4199         u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4200         u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4201
4202         /*
4203          * Initialize the backpointers to the controller structure in
4204          * each operational queue group structure.
4205          */
4206         for (i = 0; i < ctrl_info->num_queue_groups; i++)
4207                 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4208
4209         /*
4210          * Assign IDs to all operational queues.  Note that the IDs
4211          * assigned to operational IQs are independent of the IDs
4212          * assigned to operational OQs.
4213          */
4214         ctrl_info->event_queue.oq_id = next_oq_id++;
4215         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4216                 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4217                 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4218                 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4219         }
4220
4221         /*
4222          * Assign MSI-X table entry indexes to all queues.  Note that the
4223          * interrupt for the event queue is shared with the first queue group.
4224          */
4225         ctrl_info->event_queue.int_msg_num = 0;
4226         for (i = 0; i < ctrl_info->num_queue_groups; i++)
4227                 ctrl_info->queue_groups[i].int_msg_num = i;
4228
4229         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4230                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4231                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4232                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4233                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4234         }
4235 }
4236
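/*
 * Allocate one coherent DMA buffer large enough for the admin queue pair
 * plus alignment slack, then derive the CPU and bus address of each piece
 * from its offset within the buffer.
 */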
4237 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4238 {
4239         size_t alloc_length;
4240         struct pqi_admin_queues_aligned *admin_queues_aligned;
4241         struct pqi_admin_queues *admin_queues;
4242
4243         alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4244                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4245
4246         ctrl_info->admin_queue_memory_base =
4247                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4248                                    &ctrl_info->admin_queue_memory_base_dma_handle,
4249                                    GFP_KERNEL);
4250
4251         if (!ctrl_info->admin_queue_memory_base)
4252                 return -ENOMEM;
4253
4254         ctrl_info->admin_queue_memory_length = alloc_length;
4255
4256         admin_queues = &ctrl_info->admin_queues;
4257         admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4258                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4259         admin_queues->iq_element_array =
4260                 &admin_queues_aligned->iq_element_array;
4261         admin_queues->oq_element_array =
4262                 &admin_queues_aligned->oq_element_array;
4263         admin_queues->iq_ci =
4264                 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4265         admin_queues->oq_pi =
4266                 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4267
4268         admin_queues->iq_element_array_bus_addr =
4269                 ctrl_info->admin_queue_memory_base_dma_handle +
4270                 (admin_queues->iq_element_array -
4271                 ctrl_info->admin_queue_memory_base);
4272         admin_queues->oq_element_array_bus_addr =
4273                 ctrl_info->admin_queue_memory_base_dma_handle +
4274                 (admin_queues->oq_element_array -
4275                 ctrl_info->admin_queue_memory_base);
4276         admin_queues->iq_ci_bus_addr =
4277                 ctrl_info->admin_queue_memory_base_dma_handle +
4278                 ((void __iomem *)admin_queues->iq_ci -
4279                 (void __iomem *)ctrl_info->admin_queue_memory_base);
4280         admin_queues->oq_pi_bus_addr =
4281                 ctrl_info->admin_queue_memory_base_dma_handle +
4282                 ((void __iomem *)admin_queues->oq_pi -
4283                 (void __iomem *)ctrl_info->admin_queue_memory_base);
4284
4285         return 0;
4286 }
4287
4288 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES          HZ
4289 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS      1
4290
4291 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4292 {
4293         struct pqi_device_registers __iomem *pqi_registers;
4294         struct pqi_admin_queues *admin_queues;
4295         unsigned long timeout;
4296         u8 status;
4297         u32 reg;
4298
4299         pqi_registers = ctrl_info->pqi_registers;
4300         admin_queues = &ctrl_info->admin_queues;
4301
4302         writeq((u64)admin_queues->iq_element_array_bus_addr,
4303                 &pqi_registers->admin_iq_element_array_addr);
4304         writeq((u64)admin_queues->oq_element_array_bus_addr,
4305                 &pqi_registers->admin_oq_element_array_addr);
4306         writeq((u64)admin_queues->iq_ci_bus_addr,
4307                 &pqi_registers->admin_iq_ci_addr);
4308         writeq((u64)admin_queues->oq_pi_bus_addr,
4309                 &pqi_registers->admin_oq_pi_addr);
4310
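        /*
         * Pack the admin queue parameters into a single register: IQ
         * element count in bits 7:0, OQ element count in bits 15:8, and
         * the MSI-X vector for the admin OQ starting at bit 16.  (Field
         * positions are inferred from the shifts below; see the PQI spec
         * for the authoritative register definition.)
         */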
4311         reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4312                 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4313                 (admin_queues->int_msg_num << 16);
4314         writel(reg, &pqi_registers->admin_iq_num_elements);
4315
4316         writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4317                 &pqi_registers->function_and_status_code);
4318
4319         timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4320         while (1) {
4321                 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4322                 status = readb(&pqi_registers->function_and_status_code);
4323                 if (status == PQI_STATUS_IDLE)
4324                         break;
4325                 if (time_after(jiffies, timeout))
4326                         return -ETIMEDOUT;
4327         }
4328
4329         /*
4330          * The offset registers are not initialized to the correct
4331          * offsets until *after* the create admin queue pair command
4332          * completes successfully.
4333          */
4334         admin_queues->iq_pi = ctrl_info->iomem_base +
4335                 PQI_DEVICE_REGISTERS_OFFSET +
4336                 readq(&pqi_registers->admin_iq_pi_offset);
4337         admin_queues->oq_ci = ctrl_info->iomem_base +
4338                 PQI_DEVICE_REGISTERS_OFFSET +
4339                 readq(&pqi_registers->admin_oq_ci_offset);
4340
4341         return 0;
4342 }
4343
4344 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4345         struct pqi_general_admin_request *request)
4346 {
4347         struct pqi_admin_queues *admin_queues;
4348         void *next_element;
4349         pqi_index_t iq_pi;
4350
4351         admin_queues = &ctrl_info->admin_queues;
4352         iq_pi = admin_queues->iq_pi_copy;
4353
4354         next_element = admin_queues->iq_element_array +
4355                 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4356
4357         memcpy(next_element, request, sizeof(*request));
4358
4359         iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4360         admin_queues->iq_pi_copy = iq_pi;
4361
4362         /*
4363          * This write notifies the controller that an IU is available to be
4364          * processed.
4365          */
4366         writel(iq_pi, admin_queues->iq_pi);
4367 }
4368
4369 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS  60
4370
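/*
 * Poll the admin OQ until the firmware advances the producer index past
 * our cached consumer index, then copy out the response element and ring
 * the OQ consumer-index doorbell.  Bails out early if the firmware stops
 * running or the timeout above expires.
 */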
4371 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4372         struct pqi_general_admin_response *response)
4373 {
4374         struct pqi_admin_queues *admin_queues;
4375         pqi_index_t oq_pi;
4376         pqi_index_t oq_ci;
4377         unsigned long timeout;
4378
4379         admin_queues = &ctrl_info->admin_queues;
4380         oq_ci = admin_queues->oq_ci_copy;
4381
4382         timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4383
4384         while (1) {
4385                 oq_pi = readl(admin_queues->oq_pi);
4386                 if (oq_pi != oq_ci)
4387                         break;
4388                 if (time_after(jiffies, timeout)) {
4389                         dev_err(&ctrl_info->pci_dev->dev,
4390                                 "timed out waiting for admin response\n");
4391                         return -ETIMEDOUT;
4392                 }
4393                 if (!sis_is_firmware_running(ctrl_info))
4394                         return -ENXIO;
4395                 usleep_range(1000, 2000);
4396         }
4397
4398         memcpy(response, admin_queues->oq_element_array +
4399                 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4400
4401         oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4402         admin_queues->oq_ci_copy = oq_ci;
4403         writel(oq_ci, admin_queues->oq_ci);
4404
4405         return 0;
4406 }
4407
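/*
 * Queue as many pending IUs as will fit on the chosen inbound queue.
 * Free space is computed from the producer/consumer index pair by
 * pqi_num_elements_free() (defined earlier in this file), with one
 * element always left unusable so that pi == ci unambiguously means
 * "empty" (see the sizing comment in pqi_calculate_queue_resources()
 * below).  Worked example with a hypothetical 32-element queue:
 * pi == 30 and ci == 2 means 28 elements are in flight and
 * 32 - 28 - 1 = 3 are free.
 */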
4408 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4409         struct pqi_queue_group *queue_group, enum pqi_io_path path,
4410         struct pqi_io_request *io_request)
4411 {
4412         struct pqi_io_request *next;
4413         void *next_element;
4414         pqi_index_t iq_pi;
4415         pqi_index_t iq_ci;
4416         size_t iu_length;
4417         unsigned long flags;
4418         unsigned int num_elements_needed;
4419         unsigned int num_elements_to_end_of_queue;
4420         size_t copy_count;
4421         struct pqi_iu_header *request;
4422
4423         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4424
4425         if (io_request) {
4426                 io_request->queue_group = queue_group;
4427                 list_add_tail(&io_request->request_list_entry,
4428                         &queue_group->request_list[path]);
4429         }
4430
4431         iq_pi = queue_group->iq_pi_copy[path];
4432
4433         list_for_each_entry_safe(io_request, next,
4434                 &queue_group->request_list[path], request_list_entry) {
4435
4436                 request = io_request->iu;
4437
4438                 iu_length = get_unaligned_le16(&request->iu_length) +
4439                         PQI_REQUEST_HEADER_LENGTH;
4440                 num_elements_needed =
4441                         DIV_ROUND_UP(iu_length,
4442                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4443
4444                 iq_ci = readl(queue_group->iq_ci[path]);
4445
4446                 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4447                         ctrl_info->num_elements_per_iq))
4448                         break;
4449
4450                 put_unaligned_le16(queue_group->oq_id,
4451                         &request->response_queue_id);
4452
4453                 next_element = queue_group->iq_element_array[path] +
4454                         (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4455
4456                 num_elements_to_end_of_queue =
4457                         ctrl_info->num_elements_per_iq - iq_pi;
4458
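                /*
                 * A spanned IU may wrap past the end of the element
                 * array: copy what fits up to the end, then continue
                 * from element 0.
                 */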
4459                 if (num_elements_needed <= num_elements_to_end_of_queue) {
4460                         memcpy(next_element, request, iu_length);
4461                 } else {
4462                         copy_count = num_elements_to_end_of_queue *
4463                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4464                         memcpy(next_element, request, copy_count);
4465                         memcpy(queue_group->iq_element_array[path],
4466                                 (u8 *)request + copy_count,
4467                                 iu_length - copy_count);
4468                 }
4469
4470                 iq_pi = (iq_pi + num_elements_needed) %
4471                         ctrl_info->num_elements_per_iq;
4472
4473                 list_del(&io_request->request_list_entry);
4474         }
4475
4476         if (iq_pi != queue_group->iq_pi_copy[path]) {
4477                 queue_group->iq_pi_copy[path] = iq_pi;
4478                 /*
4479                  * This write notifies the controller that one or more IUs are
4480                  * available to be processed.
4481                  */
4482                 writel(iq_pi, queue_group->iq_pi[path]);
4483         }
4484
4485         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4486 }
4487
4488 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS         10
4489
4490 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4491         struct completion *wait)
4492 {
4493         int rc;
4494
4495         while (1) {
4496                 if (wait_for_completion_io_timeout(wait,
4497                         PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4498                         rc = 0;
4499                         break;
4500                 }
4501
4502                 pqi_check_ctrl_health(ctrl_info);
4503                 if (pqi_ctrl_offline(ctrl_info)) {
4504                         rc = -ENXIO;
4505                         break;
4506                 }
4507         }
4508
4509         return rc;
4510 }
4511
4512 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4513         void *context)
4514 {
4515         struct completion *waiting = context;
4516
4517         complete(waiting);
4518 }
4519
4520 static int pqi_process_raid_io_error_synchronous(
4521         struct pqi_raid_error_info *error_info)
4522 {
4523         int rc = -EIO;
4524
4525         switch (error_info->data_out_result) {
4526         case PQI_DATA_IN_OUT_GOOD:
4527                 if (error_info->status == SAM_STAT_GOOD)
4528                         rc = 0;
4529                 break;
4530         case PQI_DATA_IN_OUT_UNDERFLOW:
4531                 if (error_info->status == SAM_STAT_GOOD ||
4532                         error_info->status == SAM_STAT_CHECK_CONDITION)
4533                         rc = 0;
4534                 break;
4535         case PQI_DATA_IN_OUT_ABORTED:
4536                 rc = PQI_CMD_STATUS_ABORTED;
4537                 break;
4538         }
4539
4540         return rc;
4541 }
4542
4543 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4544 {
4545         return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4546 }
4547
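/*
 * Send a RAID-path request and wait for it to complete.  A semaphore
 * serializes callers, and every synchronous request goes through the
 * default queue group's RAID path.  If the caller passes an error_info
 * buffer it receives the raw error data; otherwise common error results
 * are folded into the return code.
 */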
4548 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4549         struct pqi_iu_header *request, unsigned int flags,
4550         struct pqi_raid_error_info *error_info)
4551 {
4552         int rc = 0;
4553         struct pqi_io_request *io_request;
4554         size_t iu_length;
4555         DECLARE_COMPLETION_ONSTACK(wait);
4556
4557         if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4558                 if (down_interruptible(&ctrl_info->sync_request_sem))
4559                         return -ERESTARTSYS;
4560         } else {
4561                 down(&ctrl_info->sync_request_sem);
4562         }
4563
4564         pqi_ctrl_busy(ctrl_info);
4565         /*
4566          * Wait for other admin queue updates such as:
4567          * config table changes, OFA memory updates, ...
4568          */
4569         if (pqi_is_blockable_request(request))
4570                 pqi_wait_if_ctrl_blocked(ctrl_info);
4571
4572         if (pqi_ctrl_offline(ctrl_info)) {
4573                 rc = -ENXIO;
4574                 goto out;
4575         }
4576
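        /*
         * Internal requests pass a NULL scmd; for those,
         * pqi_alloc_io_request() (defined earlier in this file) waits for
         * a reserved slot rather than failing, so no NULL check is needed
         * here.
         */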
4577         io_request = pqi_alloc_io_request(ctrl_info, NULL);
4578
4579         put_unaligned_le16(io_request->index,
4580                 &(((struct pqi_raid_path_request *)request)->request_id));
4581
4582         if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4583                 ((struct pqi_raid_path_request *)request)->error_index =
4584                         ((struct pqi_raid_path_request *)request)->request_id;
4585
4586         iu_length = get_unaligned_le16(&request->iu_length) +
4587                 PQI_REQUEST_HEADER_LENGTH;
4588         memcpy(io_request->iu, request, iu_length);
4589
4590         io_request->io_complete_callback = pqi_raid_synchronous_complete;
4591         io_request->context = &wait;
4592
4593         pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4594                 io_request);
4595
4596         pqi_wait_for_completion_io(ctrl_info, &wait);
4597
4598         if (error_info) {
4599                 if (io_request->error_info)
4600                         memcpy(error_info, io_request->error_info, sizeof(*error_info));
4601                 else
4602                         memset(error_info, 0, sizeof(*error_info));
4603         } else if (rc == 0 && io_request->error_info) {
4604                 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4605         }
4606
4607         pqi_free_io_request(io_request);
4608
4609 out:
4610         pqi_ctrl_unbusy(ctrl_info);
4611         up(&ctrl_info->sync_request_sem);
4612
4613         return rc;
4614 }
4615
4616 static int pqi_validate_admin_response(
4617         struct pqi_general_admin_response *response, u8 expected_function_code)
4618 {
4619         if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4620                 return -EINVAL;
4621
4622         if (get_unaligned_le16(&response->header.iu_length) !=
4623                 PQI_GENERAL_ADMIN_IU_LENGTH)
4624                 return -EINVAL;
4625
4626         if (response->function_code != expected_function_code)
4627                 return -EINVAL;
4628
4629         if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4630                 return -EINVAL;
4631
4632         return 0;
4633 }
4634
4635 static int pqi_submit_admin_request_synchronous(
4636         struct pqi_ctrl_info *ctrl_info,
4637         struct pqi_general_admin_request *request,
4638         struct pqi_general_admin_response *response)
4639 {
4640         int rc;
4641
4642         pqi_submit_admin_request(ctrl_info, request);
4643
4644         rc = pqi_poll_for_admin_response(ctrl_info, response);
4645
4646         if (rc == 0)
4647                 rc = pqi_validate_admin_response(response, request->function_code);
4648
4649         return rc;
4650 }
4651
4652 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4653 {
4654         int rc;
4655         struct pqi_general_admin_request request;
4656         struct pqi_general_admin_response response;
4657         struct pqi_device_capability *capability;
4658         struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4659
4660         capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4661         if (!capability)
4662                 return -ENOMEM;
4663
4664         memset(&request, 0, sizeof(request));
4665
4666         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4667         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4668                 &request.header.iu_length);
4669         request.function_code =
4670                 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4671         put_unaligned_le32(sizeof(*capability),
4672                 &request.data.report_device_capability.buffer_length);
4673
4674         rc = pqi_map_single(ctrl_info->pci_dev,
4675                 &request.data.report_device_capability.sg_descriptor,
4676                 capability, sizeof(*capability),
4677                 DMA_FROM_DEVICE);
4678         if (rc)
4679                 goto out;
4680
4681         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4682
4683         pqi_pci_unmap(ctrl_info->pci_dev,
4684                 &request.data.report_device_capability.sg_descriptor, 1,
4685                 DMA_FROM_DEVICE);
4686
4687         if (rc)
4688                 goto out;
4689
4690         if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4691                 rc = -EIO;
4692                 goto out;
4693         }
4694
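        /*
         * Queue element lengths are reported by the firmware in 16-byte
         * units; convert them to bytes here.  (The create-queue requests
         * later divide by 16 to convert back.)
         */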
4695         ctrl_info->max_inbound_queues =
4696                 get_unaligned_le16(&capability->max_inbound_queues);
4697         ctrl_info->max_elements_per_iq =
4698                 get_unaligned_le16(&capability->max_elements_per_iq);
4699         ctrl_info->max_iq_element_length =
4700                 get_unaligned_le16(&capability->max_iq_element_length)
4701                 * 16;
4702         ctrl_info->max_outbound_queues =
4703                 get_unaligned_le16(&capability->max_outbound_queues);
4704         ctrl_info->max_elements_per_oq =
4705                 get_unaligned_le16(&capability->max_elements_per_oq);
4706         ctrl_info->max_oq_element_length =
4707                 get_unaligned_le16(&capability->max_oq_element_length)
4708                 * 16;
4709
4710         sop_iu_layer_descriptor =
4711                 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4712
4713         ctrl_info->max_inbound_iu_length_per_firmware =
4714                 get_unaligned_le16(
4715                         &sop_iu_layer_descriptor->max_inbound_iu_length);
4716         ctrl_info->inbound_spanning_supported =
4717                 sop_iu_layer_descriptor->inbound_spanning_supported;
4718         ctrl_info->outbound_spanning_supported =
4719                 sop_iu_layer_descriptor->outbound_spanning_supported;
4720
4721 out:
4722         kfree(capability);
4723
4724         return rc;
4725 }
4726
4727 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4728 {
4729         if (ctrl_info->max_iq_element_length <
4730                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4731                 dev_err(&ctrl_info->pci_dev->dev,
4732                         "max. inbound queue element length of %d is less than the required length of %d\n",
4733                         ctrl_info->max_iq_element_length,
4734                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4735                 return -EINVAL;
4736         }
4737
4738         if (ctrl_info->max_oq_element_length <
4739                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4740                 dev_err(&ctrl_info->pci_dev->dev,
4741                         "max. outbound queue element length of %d is less than the required length of %d\n",
4742                         ctrl_info->max_oq_element_length,
4743                         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4744                 return -EINVAL;
4745         }
4746
4747         if (ctrl_info->max_inbound_iu_length_per_firmware <
4748                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4749                 dev_err(&ctrl_info->pci_dev->dev,
4750                         "max. inbound IU length of %u is less than the min. required length of %d\n",
4751                         ctrl_info->max_inbound_iu_length_per_firmware,
4752                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4753                 return -EINVAL;
4754         }
4755
4756         if (!ctrl_info->inbound_spanning_supported) {
4757                 dev_err(&ctrl_info->pci_dev->dev,
4758                         "the controller does not support inbound spanning\n");
4759                 return -EINVAL;
4760         }
4761
4762         if (ctrl_info->outbound_spanning_supported) {
4763                 dev_err(&ctrl_info->pci_dev->dev,
4764                         "the controller supports outbound spanning but this driver does not\n");
4765                 return -EINVAL;
4766         }
4767
4768         return 0;
4769 }
4770
4771 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4772 {
4773         int rc;
4774         struct pqi_event_queue *event_queue;
4775         struct pqi_general_admin_request request;
4776         struct pqi_general_admin_response response;
4777
4778         event_queue = &ctrl_info->event_queue;
4779
4780         /*
4781          * Create OQ (Outbound Queue - device to host queue) dedicated
4782          * to events.
4783          */
4784         memset(&request, 0, sizeof(request));
4785         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4786         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4787                 &request.header.iu_length);
4788         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4789         put_unaligned_le16(event_queue->oq_id,
4790                 &request.data.create_operational_oq.queue_id);
4791         put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4792                 &request.data.create_operational_oq.element_array_addr);
4793         put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4794                 &request.data.create_operational_oq.pi_addr);
4795         put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4796                 &request.data.create_operational_oq.num_elements);
4797         put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4798                 &request.data.create_operational_oq.element_length);
4799         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4800         put_unaligned_le16(event_queue->int_msg_num,
4801                 &request.data.create_operational_oq.int_msg_num);
4802
4803         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4804                 &response);
4805         if (rc)
4806                 return rc;
4807
4808         event_queue->oq_ci = ctrl_info->iomem_base +
4809                 PQI_DEVICE_REGISTERS_OFFSET +
4810                 get_unaligned_le64(
4811                         &response.data.create_operational_oq.oq_ci_offset);
4812
4813         return 0;
4814 }
4815
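/*
 * Bring up one queue group with four admin commands: create the RAID-path
 * IQ, create the AIO-path IQ, change the second IQ's property so the
 * firmware treats it as an AIO queue, and create the single OQ that both
 * paths share for completions.
 */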
4816 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4817         unsigned int group_number)
4818 {
4819         int rc;
4820         struct pqi_queue_group *queue_group;
4821         struct pqi_general_admin_request request;
4822         struct pqi_general_admin_response response;
4823
4824         queue_group = &ctrl_info->queue_groups[group_number];
4825
4826         /*
4827          * Create IQ (Inbound Queue - host to device queue) for
4828          * RAID path.
4829          */
4830         memset(&request, 0, sizeof(request));
4831         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4832         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4833                 &request.header.iu_length);
4834         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4835         put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4836                 &request.data.create_operational_iq.queue_id);
4837         put_unaligned_le64(
4838                 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4839                 &request.data.create_operational_iq.element_array_addr);
4840         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4841                 &request.data.create_operational_iq.ci_addr);
4842         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4843                 &request.data.create_operational_iq.num_elements);
4844         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4845                 &request.data.create_operational_iq.element_length);
4846         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4847
4848         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4849                 &response);
4850         if (rc) {
4851                 dev_err(&ctrl_info->pci_dev->dev,
4852                         "error creating inbound RAID queue\n");
4853                 return rc;
4854         }
4855
4856         queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4857                 PQI_DEVICE_REGISTERS_OFFSET +
4858                 get_unaligned_le64(
4859                         &response.data.create_operational_iq.iq_pi_offset);
4860
4861         /*
4862          * Create IQ (Inbound Queue - host to device queue) for
4863          * Advanced I/O (AIO) path.
4864          */
4865         memset(&request, 0, sizeof(request));
4866         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4867         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4868                 &request.header.iu_length);
4869         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4870         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4871                 &request.data.create_operational_iq.queue_id);
4872         put_unaligned_le64(
4873                 (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
4874                 &request.data.create_operational_iq.element_array_addr);
4875         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4876                 &request.data.create_operational_iq.ci_addr);
4877         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4878                 &request.data.create_operational_iq.num_elements);
4879         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4880                 &request.data.create_operational_iq.element_length);
4881         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4882
4883         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4884                 &response);
4885         if (rc) {
4886                 dev_err(&ctrl_info->pci_dev->dev,
4887                         "error creating inbound AIO queue\n");
4888                 return rc;
4889         }
4890
4891         queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4892                 PQI_DEVICE_REGISTERS_OFFSET +
4893                 get_unaligned_le64(
4894                         &response.data.create_operational_iq.iq_pi_offset);
4895
4896         /*
4897          * Designate the 2nd IQ as the AIO path.  By default, all IQs are
4898          * assumed to be for RAID path I/O unless we change the queue's
4899          * property.
4900          */
4901         memset(&request, 0, sizeof(request));
4902         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4903         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4904                 &request.header.iu_length);
4905         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4906         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4907                 &request.data.change_operational_iq_properties.queue_id);
4908         put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4909                 &request.data.change_operational_iq_properties.vendor_specific);
4910
4911         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4912                 &response);
4913         if (rc) {
4914                 dev_err(&ctrl_info->pci_dev->dev,
4915                         "error changing queue property\n");
4916                 return rc;
4917         }
4918
4919         /*
4920          * Create OQ (Outbound Queue - device to host queue).
4921          */
4922         memset(&request, 0, sizeof(request));
4923         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4924         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4925                 &request.header.iu_length);
4926         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4927         put_unaligned_le16(queue_group->oq_id,
4928                 &request.data.create_operational_oq.queue_id);
4929         put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4930                 &request.data.create_operational_oq.element_array_addr);
4931         put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4932                 &request.data.create_operational_oq.pi_addr);
4933         put_unaligned_le16(ctrl_info->num_elements_per_oq,
4934                 &request.data.create_operational_oq.num_elements);
4935         put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4936                 &request.data.create_operational_oq.element_length);
4937         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4938         put_unaligned_le16(queue_group->int_msg_num,
4939                 &request.data.create_operational_oq.int_msg_num);
4940
4941         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4942                 &response);
4943         if (rc) {
4944                 dev_err(&ctrl_info->pci_dev->dev,
4945                         "error creating outbound queue\n");
4946                 return rc;
4947         }
4948
4949         queue_group->oq_ci = ctrl_info->iomem_base +
4950                 PQI_DEVICE_REGISTERS_OFFSET +
4951                 get_unaligned_le64(
4952                         &response.data.create_operational_oq.oq_ci_offset);
4953
4954         return 0;
4955 }
4956
4957 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4958 {
4959         int rc;
4960         unsigned int i;
4961
4962         rc = pqi_create_event_queue(ctrl_info);
4963         if (rc) {
4964                 dev_err(&ctrl_info->pci_dev->dev,
4965                         "error creating event queue\n");
4966                 return rc;
4967         }
4968
4969         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4970                 rc = pqi_create_queue_group(ctrl_info, i);
4971                 if (rc) {
4972                         dev_err(&ctrl_info->pci_dev->dev,
4973                                 "error creating queue group number %u/%u\n",
4974                                 i, ctrl_info->num_queue_groups);
4975                         return rc;
4976                 }
4977         }
4978
4979         return 0;
4980 }
4981
4982 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
4983         struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
4984
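/*
 * Event configuration is a read-modify-write cycle: fetch the full event
 * descriptor table from the firmware, point each supported event type at
 * our event OQ (or at queue 0 to disable it), and write the table back.
 */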
4985 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4986         bool enable_events)
4987 {
4988         int rc;
4989         unsigned int i;
4990         struct pqi_event_config *event_config;
4991         struct pqi_event_descriptor *event_descriptor;
4992         struct pqi_general_management_request request;
4993
4994         event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4995                 GFP_KERNEL);
4996         if (!event_config)
4997                 return -ENOMEM;
4998
4999         memset(&request, 0, sizeof(request));
5000
5001         request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5002         put_unaligned_le16(offsetof(struct pqi_general_management_request,
5003                 data.report_event_configuration.sg_descriptors[1]) -
5004                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5005         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5006                 &request.data.report_event_configuration.buffer_length);
5007
5008         rc = pqi_map_single(ctrl_info->pci_dev,
5009                 request.data.report_event_configuration.sg_descriptors,
5010                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5011                 DMA_FROM_DEVICE);
5012         if (rc)
5013                 goto out;
5014
5015         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5016
5017         pqi_pci_unmap(ctrl_info->pci_dev,
5018                 request.data.report_event_configuration.sg_descriptors, 1,
5019                 DMA_FROM_DEVICE);
5020
5021         if (rc)
5022                 goto out;
5023
5024         for (i = 0; i < event_config->num_event_descriptors; i++) {
5025                 event_descriptor = &event_config->descriptors[i];
5026                 if (enable_events &&
5027                         pqi_is_supported_event(event_descriptor->event_type))
5028                         put_unaligned_le16(ctrl_info->event_queue.oq_id,
5029                                 &event_descriptor->oq_id);
5030                 else
5031                         put_unaligned_le16(0, &event_descriptor->oq_id);
5032         }
5033
5034         memset(&request, 0, sizeof(request));
5035
5036         request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5037         put_unaligned_le16(offsetof(struct pqi_general_management_request,
5038                 data.report_event_configuration.sg_descriptors[1]) -
5039                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5040         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5041                 &request.data.report_event_configuration.buffer_length);
5042
5043         rc = pqi_map_single(ctrl_info->pci_dev,
5044                 request.data.report_event_configuration.sg_descriptors,
5045                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5046                 DMA_TO_DEVICE);
5047         if (rc)
5048                 goto out;
5049
5050         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5051
5052         pqi_pci_unmap(ctrl_info->pci_dev,
5053                 request.data.report_event_configuration.sg_descriptors, 1,
5054                 DMA_TO_DEVICE);
5055
5056 out:
5057         kfree(event_config);
5058
5059         return rc;
5060 }
5061
5062 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5063 {
5064         return pqi_configure_events(ctrl_info, true);
5065 }
5066
5067 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5068 {
5069         unsigned int i;
5070         struct device *dev;
5071         size_t sg_chain_buffer_length;
5072         struct pqi_io_request *io_request;
5073
5074         if (!ctrl_info->io_request_pool)
5075                 return;
5076
5077         dev = &ctrl_info->pci_dev->dev;
5078         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5079         io_request = ctrl_info->io_request_pool;
5080
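        /*
         * The pool is allocated with kcalloc(), so on a partially
         * initialized pool the first NULL sg_chain_buffer marks where
         * setup stopped and it is safe to break out early.
         */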
5081         for (i = 0; i < ctrl_info->max_io_slots; i++) {
5082                 kfree(io_request->iu);
5083                 if (!io_request->sg_chain_buffer)
5084                         break;
5085                 dma_free_coherent(dev, sg_chain_buffer_length,
5086                         io_request->sg_chain_buffer,
5087                         io_request->sg_chain_buffer_dma_handle);
5088                 io_request++;
5089         }
5090
5091         kfree(ctrl_info->io_request_pool);
5092         ctrl_info->io_request_pool = NULL;
5093 }
5094
5095 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5096 {
5097         ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5098                                      ctrl_info->error_buffer_length,
5099                                      &ctrl_info->error_buffer_dma_handle,
5100                                      GFP_KERNEL);
5101         if (!ctrl_info->error_buffer)
5102                 return -ENOMEM;
5103
5104         return 0;
5105 }
5106
5107 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5108 {
5109         unsigned int i;
5110         void *sg_chain_buffer;
5111         size_t sg_chain_buffer_length;
5112         dma_addr_t sg_chain_buffer_dma_handle;
5113         struct device *dev;
5114         struct pqi_io_request *io_request;
5115
5116         ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5117                 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5118
5119         if (!ctrl_info->io_request_pool) {
5120                 dev_err(&ctrl_info->pci_dev->dev,
5121                         "failed to allocate I/O request pool\n");
5122                 goto error;
5123         }
5124
5125         dev = &ctrl_info->pci_dev->dev;
5126         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5127         io_request = ctrl_info->io_request_pool;
5128
5129         for (i = 0; i < ctrl_info->max_io_slots; i++) {
5130                 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5131
5132                 if (!io_request->iu) {
5133                         dev_err(&ctrl_info->pci_dev->dev,
5134                                 "failed to allocate IU buffers\n");
5135                         goto error;
5136                 }
5137
5138                 sg_chain_buffer = dma_alloc_coherent(dev,
5139                         sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5140                         GFP_KERNEL);
5141
5142                 if (!sg_chain_buffer) {
5143                         dev_err(&ctrl_info->pci_dev->dev,
5144                                 "failed to allocate PQI scatter-gather chain buffers\n");
5145                         goto error;
5146                 }
5147
5148                 io_request->index = i;
5149                 io_request->sg_chain_buffer = sg_chain_buffer;
5150                 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5151                 io_request++;
5152         }
5153
5154         return 0;
5155
5156 error:
5157         pqi_free_all_io_requests(ctrl_info);
5158
5159         return -ENOMEM;
5160 }
5161
5162 /*
5163  * Calculate required resources that are sized based on max. outstanding
5164  * requests and max. transfer size.
5165  */
5166
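/*
 * Worked example with hypothetical numbers: with 4 KiB pages and a 1 MiB
 * max transfer, max_sg_entries starts at 256, plus 1 for a buffer that is
 * not page-aligned, giving 257.  After any clamping, the transfer size is
 * recomputed as (max_sg_entries - 1) * PAGE_SIZE so that it is always
 * achievable with the final entry count.
 */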
5167 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5168 {
5169         u32 max_transfer_size;
5170         u32 max_sg_entries;
5171
5172         ctrl_info->scsi_ml_can_queue =
5173                 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5174         ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5175
5176         ctrl_info->error_buffer_length =
5177                 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5178
5179         if (reset_devices)
5180                 max_transfer_size = min(ctrl_info->max_transfer_size,
5181                         PQI_MAX_TRANSFER_SIZE_KDUMP);
5182         else
5183                 max_transfer_size = min(ctrl_info->max_transfer_size,
5184                         PQI_MAX_TRANSFER_SIZE);
5185
5186         max_sg_entries = max_transfer_size / PAGE_SIZE;
5187
5188         /* +1 to cover when the buffer is not page-aligned. */
5189         max_sg_entries++;
5190
5191         max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5192
5193         max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5194
5195         ctrl_info->sg_chain_buffer_length =
5196                 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5197                 PQI_EXTRA_SGL_MEMORY;
5198         ctrl_info->sg_tablesize = max_sg_entries;
5199         ctrl_info->max_sectors = max_transfer_size / 512;
5200 }
5201
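/*
 * Element math example (assuming the usual 128-byte operational IQ
 * element): if the firmware reports a 1000-byte max inbound IU, it is
 * rounded down to 896 bytes (7 elements), and the queue must then hold at
 * least 7 + 1 = 8 elements because one element per queue is unusable.
 */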
5202 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5203 {
5204         int num_queue_groups;
5205         u16 num_elements_per_iq;
5206         u16 num_elements_per_oq;
5207
5208         if (reset_devices) {
5209                 num_queue_groups = 1;
5210         } else {
5211                 int num_cpus;
5212                 int max_queue_groups;
5213
5214                 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5215                         ctrl_info->max_outbound_queues - 1);
5216                 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5217
5218                 num_cpus = num_online_cpus();
5219                 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5220                 num_queue_groups = min(num_queue_groups, max_queue_groups);
5221         }
5222
5223         ctrl_info->num_queue_groups = num_queue_groups;
5224
5225         /*
5226          * Make sure that the max. inbound IU length is an even multiple
5227          * of our inbound element length.
5228          */
5229         ctrl_info->max_inbound_iu_length =
5230                 (ctrl_info->max_inbound_iu_length_per_firmware /
5231                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5232                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5233
5234         num_elements_per_iq =
5235                 (ctrl_info->max_inbound_iu_length /
5236                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5237
5238         /* Add one because one element in each queue is unusable. */
5239         num_elements_per_iq++;
5240
5241         num_elements_per_iq = min(num_elements_per_iq,
5242                 ctrl_info->max_elements_per_iq);
5243
5244         num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5245         num_elements_per_oq = min(num_elements_per_oq,
5246                 ctrl_info->max_elements_per_oq);
5247
5248         ctrl_info->num_elements_per_iq = num_elements_per_iq;
5249         ctrl_info->num_elements_per_oq = num_elements_per_oq;
5250
5251         ctrl_info->max_sg_per_iu =
5252                 ((ctrl_info->max_inbound_iu_length -
5253                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5254                 sizeof(struct pqi_sg_descriptor)) +
5255                 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5256
5257         ctrl_info->max_sg_per_r56_iu =
5258                 ((ctrl_info->max_inbound_iu_length -
5259                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5260                 sizeof(struct pqi_sg_descriptor)) +
5261                 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5262 }
5263
5264 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5265         struct scatterlist *sg)
5266 {
5267         u64 address = (u64)sg_dma_address(sg);
5268         unsigned int length = sg_dma_len(sg);
5269
5270         put_unaligned_le64(address, &sg_descriptor->address);
5271         put_unaligned_le32(length, &sg_descriptor->length);
5272         put_unaligned_le32(0, &sg_descriptor->flags);
5273 }
5274
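/*
 * Fill in the IU's embedded SG descriptors, reserving the last embedded
 * slot for a chain marker.  If the list overflows, the marker points at
 * the request's DMA-able sg_chain_buffer and the remaining descriptors
 * continue there.  The returned count covers only descriptors that occupy
 * space in the IU itself (including the chain marker), which is why
 * chained entries stop incrementing num_sg_in_iu.
 */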
5275 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5276         struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5277         int max_sg_per_iu, bool *chained)
5278 {
5279         int i;
5280         unsigned int num_sg_in_iu;
5281
5282         *chained = false;
5283         i = 0;
5284         num_sg_in_iu = 0;
5285         max_sg_per_iu--;        /* Subtract 1 to leave room for chain marker. */
5286
5287         while (1) {
5288                 pqi_set_sg_descriptor(sg_descriptor, sg);
5289                 if (!*chained)
5290                         num_sg_in_iu++;
5291                 i++;
5292                 if (i == sg_count)
5293                         break;
5294                 sg_descriptor++;
5295                 if (i == max_sg_per_iu) {
5296                         put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5297                                 &sg_descriptor->address);
5298                         put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5299                                 &sg_descriptor->length);
5300                         put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5301                         *chained = true;
5302                         num_sg_in_iu++;
5303                         sg_descriptor = io_request->sg_chain_buffer;
5304                 }
5305                 sg = sg_next(sg);
5306         }
5307
5308         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5309
5310         return num_sg_in_iu;
5311 }
5312
5313 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5314         struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5315         struct pqi_io_request *io_request)
5316 {
5317         u16 iu_length;
5318         int sg_count;
5319         bool chained;
5320         unsigned int num_sg_in_iu;
5321         struct scatterlist *sg;
5322         struct pqi_sg_descriptor *sg_descriptor;
5323
5324         sg_count = scsi_dma_map(scmd);
5325         if (sg_count < 0)
5326                 return sg_count;
5327
5328         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5329                 PQI_REQUEST_HEADER_LENGTH;
5330
5331         if (sg_count == 0)
5332                 goto out;
5333
5334         sg = scsi_sglist(scmd);
5335         sg_descriptor = request->sg_descriptors;
5336
5337         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5338                 ctrl_info->max_sg_per_iu, &chained);
5339
5340         request->partial = chained;
5341         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5342
5343 out:
5344         put_unaligned_le16(iu_length, &request->header.iu_length);
5345
5346         return 0;
5347 }
5348
5349 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5350         struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5351         struct pqi_io_request *io_request)
5352 {
5353         u16 iu_length;
5354         int sg_count;
5355         bool chained;
5356         unsigned int num_sg_in_iu;
5357         struct scatterlist *sg;
5358         struct pqi_sg_descriptor *sg_descriptor;
5359
5360         sg_count = scsi_dma_map(scmd);
5361         if (sg_count < 0)
5362                 return sg_count;
5363
5364         iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5365                 PQI_REQUEST_HEADER_LENGTH;
5366         num_sg_in_iu = 0;
5367
5368         if (sg_count == 0)
5369                 goto out;
5370
5371         sg = scsi_sglist(scmd);
5372         sg_descriptor = request->sg_descriptors;
5373
5374         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5375                 ctrl_info->max_sg_per_iu, &chained);
5376
5377         request->partial = chained;
5378         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5379
5380 out:
5381         put_unaligned_le16(iu_length, &request->header.iu_length);
5382         request->num_sg_descriptors = num_sg_in_iu;
5383
5384         return 0;
5385 }
5386
5387 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5388         struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5389         struct pqi_io_request *io_request)
5390 {
5391         u16 iu_length;
5392         int sg_count;
5393         bool chained;
5394         unsigned int num_sg_in_iu;
5395         struct scatterlist *sg;
5396         struct pqi_sg_descriptor *sg_descriptor;
5397
5398         sg_count = scsi_dma_map(scmd);
5399         if (sg_count < 0)
5400                 return sg_count;
5401
5402         iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5403                 PQI_REQUEST_HEADER_LENGTH;
5404         num_sg_in_iu = 0;
5405
5406         if (sg_count != 0) {
5407                 sg = scsi_sglist(scmd);
5408                 sg_descriptor = request->sg_descriptors;
5409
5410                 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5411                         ctrl_info->max_sg_per_r56_iu, &chained);
5412
5413                 request->partial = chained;
5414                 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5415         }
5416
5417         put_unaligned_le16(iu_length, &request->header.iu_length);
5418         request->num_sg_descriptors = num_sg_in_iu;
5419
5420         return 0;
5421 }
5422
5423 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5424         struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5425         struct pqi_io_request *io_request)
5426 {
5427         u16 iu_length;
5428         int sg_count;
5429         bool chained;
5430         unsigned int num_sg_in_iu;
5431         struct scatterlist *sg;
5432         struct pqi_sg_descriptor *sg_descriptor;
5433
5434         sg_count = scsi_dma_map(scmd);
5435         if (sg_count < 0)
5436                 return sg_count;
5437
5438         iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5439                 PQI_REQUEST_HEADER_LENGTH;
5440         num_sg_in_iu = 0;
5441
5442         if (sg_count == 0)
5443                 goto out;
5444
5445         sg = scsi_sglist(scmd);
5446         sg_descriptor = request->sg_descriptors;
5447
5448         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5449                 ctrl_info->max_sg_per_iu, &chained);
5450
5451         request->partial = chained;
5452         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5453
5454 out:
5455         put_unaligned_le16(iu_length, &request->header.iu_length);
5456         request->num_sg_descriptors = num_sg_in_iu;
5457
5458         return 0;
5459 }
5460
5461 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5462         void *context)
5463 {
5464         struct scsi_cmnd *scmd;
5465
5466         scmd = io_request->scmd;
5467         pqi_free_io_request(io_request);
5468         scsi_dma_unmap(scmd);
5469         pqi_scsi_done(scmd);
5470 }
5471
5472 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5473         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5474         struct pqi_queue_group *queue_group, bool io_high_prio)
5475 {
5476         int rc;
5477         size_t cdb_length;
5478         struct pqi_io_request *io_request;
5479         struct pqi_raid_path_request *request;
5480
5481         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5482         if (!io_request)
5483                 return SCSI_MLQUEUE_HOST_BUSY;
5484
5485         io_request->io_complete_callback = pqi_raid_io_complete;
5486         io_request->scmd = scmd;
5487
5488         request = io_request->iu;
5489         memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5490
5491         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5492         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5493         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5494         request->command_priority = io_high_prio;
5495         put_unaligned_le16(io_request->index, &request->request_id);
5496         request->error_index = request->request_id;
5497         memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5498         request->ml_device_lun_number = (u8)scmd->device->lun;
5499
5500         cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5501         memcpy(request->cdb, scmd->cmnd, cdb_length);
5502
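        /*
         * The RAID-path IU embeds a 16-byte CDB; longer CDBs consume
         * additional IU bytes in 4-byte steps, encoded below.
         */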
5503         switch (cdb_length) {
5504         case 6:
5505         case 10:
5506         case 12:
5507         case 16:
5508                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5509                 break;
5510         case 20:
5511                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5512                 break;
5513         case 24:
5514                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5515                 break;
5516         case 28:
5517                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5518                 break;
5519         case 32:
5520         default:
5521                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5522                 break;
5523         }
5524
5525         switch (scmd->sc_data_direction) {
5526         case DMA_FROM_DEVICE:
5527                 request->data_direction = SOP_READ_FLAG;
5528                 break;
5529         case DMA_TO_DEVICE:
5530                 request->data_direction = SOP_WRITE_FLAG;
5531                 break;
5532         case DMA_NONE:
5533                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5534                 break;
5535         case DMA_BIDIRECTIONAL:
5536                 request->data_direction = SOP_BIDIRECTIONAL;
5537                 break;
5538         default:
5539                 dev_err(&ctrl_info->pci_dev->dev,
5540                         "unknown data direction: %d\n",
5541                         scmd->sc_data_direction);
5542                 break;
5543         }
5544
5545         rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5546         if (rc) {
5547                 pqi_free_io_request(io_request);
5548                 return SCSI_MLQUEUE_HOST_BUSY;
5549         }
5550
5551         pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5552
5553         return 0;
5554 }
5555
5556 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5557         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5558         struct pqi_queue_group *queue_group)
5559 {
5560         bool io_high_prio;
5561
5562         io_high_prio = pqi_is_io_high_priority(device, scmd);
5563
5564         return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5565 }
5566
5567 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5568 {
5569         struct scsi_cmnd *scmd;
5570         struct pqi_scsi_dev *device;
5571         struct pqi_ctrl_info *ctrl_info;
5572
5573         if (!io_request->raid_bypass)
5574                 return false;
5575
5576         scmd = io_request->scmd;
5577         if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5578                 return false;
5579         if (host_byte(scmd->result) == DID_NO_CONNECT)
5580                 return false;
5581
5582         device = scmd->device->hostdata;
5583         if (pqi_device_offline(device) || pqi_device_in_remove(device))
5584                 return false;
5585
5586         ctrl_info = shost_to_hba(scmd->device->host);
5587         if (pqi_ctrl_offline(ctrl_info))
5588                 return false;
5589
5590         return true;
5591 }
5592
5593 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5594         void *context)
5595 {
5596         struct scsi_cmnd *scmd;
5597
5598         scmd = io_request->scmd;
5599         scsi_dma_unmap(scmd);
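        /*
         * A failed RAID-bypass request is retried immediately via the SML;
         * bumping this_residual marks the command so that the retry is no
         * longer bypass-eligible (see pqi_is_bypass_eligible_request()).
         */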
5600         if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5601                 set_host_byte(scmd, DID_IMM_RETRY);
5602                 pqi_cmd_priv(scmd)->this_residual++;
5603         }
5604
5605         pqi_free_io_request(io_request);
5606         pqi_scsi_done(scmd);
5607 }
5608
5609 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5610         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5611         struct pqi_queue_group *queue_group)
5612 {
5613         bool io_high_prio;
5614
5615         io_high_prio = pqi_is_io_high_priority(device, scmd);
5616
5617         return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5618                 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5619                 false, io_high_prio);
5620 }
5621
5622 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5623         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5624         unsigned int cdb_length, struct pqi_queue_group *queue_group,
5625         struct pqi_encryption_info *encryption_info, bool raid_bypass,
5626         bool io_high_prio)
5627 {
5628         int rc;
5629         struct pqi_io_request *io_request;
5630         struct pqi_aio_path_request *request;
5631         struct pqi_scsi_dev *device;
5632
5633         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5634         if (!io_request)
5635                 return SCSI_MLQUEUE_HOST_BUSY;
5636
5637         io_request->io_complete_callback = pqi_aio_io_complete;
5638         io_request->scmd = scmd;
5639         io_request->raid_bypass = raid_bypass;
5640
5641         request = io_request->iu;
5642         memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5643
5644         request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5645         put_unaligned_le32(aio_handle, &request->nexus_id);
5646         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5647         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5648         request->command_priority = io_high_prio;
5649         put_unaligned_le16(io_request->index, &request->request_id);
5650         request->error_index = request->request_id;
5651         device = scmd->device->hostdata;
5652         if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
5653                 put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
5654         if (cdb_length > sizeof(request->cdb))
5655                 cdb_length = sizeof(request->cdb);
5656         request->cdb_length = cdb_length;
5657         memcpy(request->cdb, cdb, cdb_length);
5658
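        /*
         * Note: the AIO path encodes the SOP direction flags as the inverse
         * of the host DMA direction (DMA_TO_DEVICE -> SOP_READ_FLAG,
         * DMA_FROM_DEVICE -> SOP_WRITE_FLAG); the flags appear to be
         * controller-relative on this path.
         */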
5659         switch (scmd->sc_data_direction) {
5660         case DMA_TO_DEVICE:
5661                 request->data_direction = SOP_READ_FLAG;
5662                 break;
5663         case DMA_FROM_DEVICE:
5664                 request->data_direction = SOP_WRITE_FLAG;
5665                 break;
5666         case DMA_NONE:
5667                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5668                 break;
5669         case DMA_BIDIRECTIONAL:
5670                 request->data_direction = SOP_BIDIRECTIONAL;
5671                 break;
5672         default:
5673                 dev_err(&ctrl_info->pci_dev->dev,
5674                         "unknown data direction: %d\n",
5675                         scmd->sc_data_direction);
5676                 break;
5677         }
5678
5679         if (encryption_info) {
5680                 request->encryption_enable = true;
5681                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5682                         &request->data_encryption_key_index);
5683                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5684                         &request->encrypt_tweak_lower);
5685                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5686                         &request->encrypt_tweak_upper);
5687         }
5688
5689         rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5690         if (rc) {
5691                 pqi_free_io_request(io_request);
5692                 return SCSI_MLQUEUE_HOST_BUSY;
5693         }
5694
5695         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5696
5697         return 0;
5698 }
5699
5700 static  int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5701         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5702         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5703         struct pqi_scsi_dev_raid_map_data *rmd)
5704 {
5705         int rc;
5706         struct pqi_io_request *io_request;
5707         struct pqi_aio_r1_path_request *r1_request;
5708
5709         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5710         if (!io_request)
5711                 return SCSI_MLQUEUE_HOST_BUSY;
5712
5713         io_request->io_complete_callback = pqi_aio_io_complete;
5714         io_request->scmd = scmd;
5715         io_request->raid_bypass = true;
5716
5717         r1_request = io_request->iu;
5718         memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5719
5720         r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5721         put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5722         r1_request->num_drives = rmd->num_it_nexus_entries;
5723         put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5724         put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5725         if (rmd->num_it_nexus_entries == 3)
5726                 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5727
5728         put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5729         r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5730         put_unaligned_le16(io_request->index, &r1_request->request_id);
5731         r1_request->error_index = r1_request->request_id;
5732         if (rmd->cdb_length > sizeof(r1_request->cdb))
5733                 rmd->cdb_length = sizeof(r1_request->cdb);
5734         r1_request->cdb_length = rmd->cdb_length;
5735         memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5736
5737         /* The direction is always write (posted as SOP_READ_FLAG; see pqi_aio_submit_io()). */
5738         r1_request->data_direction = SOP_READ_FLAG;
5739
5740         if (encryption_info) {
5741                 r1_request->encryption_enable = true;
5742                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5743                                 &r1_request->data_encryption_key_index);
5744                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5745                                 &r1_request->encrypt_tweak_lower);
5746                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5747                                 &r1_request->encrypt_tweak_upper);
5748         }
5749
5750         rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5751         if (rc) {
5752                 pqi_free_io_request(io_request);
5753                 return SCSI_MLQUEUE_HOST_BUSY;
5754         }
5755
5756         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5757
5758         return 0;
5759 }
5760
5761 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5762         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5763         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5764         struct pqi_scsi_dev_raid_map_data *rmd)
5765 {
5766         int rc;
5767         struct pqi_io_request *io_request;
5768         struct pqi_aio_r56_path_request *r56_request;
5769
5770         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5771         if (!io_request)
5772                 return SCSI_MLQUEUE_HOST_BUSY;
5773         io_request->io_complete_callback = pqi_aio_io_complete;
5774         io_request->scmd = scmd;
5775         io_request->raid_bypass = true;
5776
5777         r56_request = io_request->iu;
5778         memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5779
5780         if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5781                 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5782         else
5783                 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5784
5785         put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5786         put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5787         put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5788         if (rmd->raid_level == SA_RAID_6) {
5789                 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5790                 r56_request->xor_multiplier = rmd->xor_mult;
5791         }
5792         put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5793         r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5794         put_unaligned_le64(rmd->row, &r56_request->row);
5795
5796         put_unaligned_le16(io_request->index, &r56_request->request_id);
5797         r56_request->error_index = r56_request->request_id;
5798
5799         if (rmd->cdb_length > sizeof(r56_request->cdb))
5800                 rmd->cdb_length = sizeof(r56_request->cdb);
5801         r56_request->cdb_length = rmd->cdb_length;
5802         memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5803
5804         /* The direction is always write (posted as SOP_READ_FLAG; see pqi_aio_submit_io()). */
5805         r56_request->data_direction = SOP_READ_FLAG;
5806
5807         if (encryption_info) {
5808                 r56_request->encryption_enable = true;
5809                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5810                                 &r56_request->data_encryption_key_index);
5811                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5812                                 &r56_request->encrypt_tweak_lower);
5813                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5814                                 &r56_request->encrypt_tweak_upper);
5815         }
5816
5817         rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5818         if (rc) {
5819                 pqi_free_io_request(io_request);
5820                 return SCSI_MLQUEUE_HOST_BUSY;
5821         }
5822
5823         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5824
5825         return 0;
5826 }
5827
5828 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5829         struct scsi_cmnd *scmd)
5830 {
5831         /*
5832          * We are setting host_tagset = 1 during init, so the block-layer
              * tag is unique across all hw queues and
              * blk_mq_unique_tag_to_hwq() can recover the queue index from
              * its upper bits.
5833          */
5834         return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5835 }
5836
5837 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5838 {
5839         if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5840                 return false;
5841
5842         return pqi_cmd_priv(scmd)->this_residual == 0;
5843 }
5844
5845 /*
5846  * This function gets called just before we hand the completed SCSI request
5847  * back to the SML.
5848  */
5849
5850 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5851 {
5852         struct pqi_scsi_dev *device;
5853
5854         if (!scmd->device) {
5855                 set_host_byte(scmd, DID_NO_CONNECT);
5856                 return;
5857         }
5858
5859         device = scmd->device->hostdata;
5860         if (!device) {
5861                 set_host_byte(scmd, DID_NO_CONNECT);
5862                 return;
5863         }
5864
5865         atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5866 }
5867
5868 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5869         struct scsi_cmnd *scmd)
5870 {
5871         u32 oldest_jiffies;
5872         u8 lru_index;
5873         int i;
5874         int rc;
5875         struct pqi_scsi_dev *device;
5876         struct pqi_stream_data *pqi_stream_data;
5877         struct pqi_scsi_dev_raid_map_data rmd;
5878
5879         if (!ctrl_info->enable_stream_detection)
5880                 return false;
5881
5882         rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5883         if (rc)
5884                 return false;
5885
5886         /* Check writes only. */
5887         if (!rmd.is_write)
5888                 return false;
5889
5890         device = scmd->device->hostdata;
5891
5892         /* Check for RAID 5/6 streams. */
5893         if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5894                 return false;
5895
5896         /*
5897          * If the controller does not support AIO RAID{5,6} writes, send the
5898          * request down the non-AIO path (returning true skips the RAID bypass).
5899          */
5900         if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5901                 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5902                 return true;
5903
5904         lru_index = 0;
5905         oldest_jiffies = INT_MAX;
5906         for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5907                 pqi_stream_data = &device->stream_data[i];
5908                 /*
5909                  * Check whether this request is adjacent to, or falls within,
5910                  * the previous request (first_block in [next_lba, next_lba + block_cnt]).
5911                  */
5912                 if ((pqi_stream_data->next_lba &&
5913                         rmd.first_block >= pqi_stream_data->next_lba) &&
5914                         rmd.first_block <= pqi_stream_data->next_lba +
5915                                 rmd.block_cnt) {
5916                         pqi_stream_data->next_lba = rmd.first_block +
5917                                 rmd.block_cnt;
5918                         pqi_stream_data->last_accessed = jiffies;
5919                         return true;
5920                 }
5921
5922                 /* unused entry */
5923                 if (pqi_stream_data->last_accessed == 0) {
5924                         lru_index = i;
5925                         break;
5926                 }
5927
5928                 /* Find entry with oldest last accessed time. */
5929                 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5930                         oldest_jiffies = pqi_stream_data->last_accessed;
5931                         lru_index = i;
5932                 }
5933         }
5934
5935         /* Set LRU entry. */
5936         pqi_stream_data = &device->stream_data[lru_index];
5937         pqi_stream_data->last_accessed = jiffies;
5938         pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5939
5940         return false;
5941 }
5942
5943 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5944 {
5945         int rc;
5946         struct pqi_ctrl_info *ctrl_info;
5947         struct pqi_scsi_dev *device;
5948         u16 hw_queue;
5949         struct pqi_queue_group *queue_group;
5950         bool raid_bypassed;
5951
5952         device = scmd->device->hostdata;
5953
5954         if (!device) {
5955                 set_host_byte(scmd, DID_NO_CONNECT);
5956                 pqi_scsi_done(scmd);
5957                 return 0;
5958         }
5959
5960         atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
5961
5962         ctrl_info = shost_to_hba(shost);
5963
5964         if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5965                 set_host_byte(scmd, DID_NO_CONNECT);
5966                 pqi_scsi_done(scmd);
5967                 return 0;
5968         }
5969
5970         if (pqi_ctrl_blocked(ctrl_info)) {
5971                 rc = SCSI_MLQUEUE_HOST_BUSY;
5972                 goto out;
5973         }
5974
5975         /*
5976          * This is necessary because the SML doesn't zero out this field during
5977          * error recovery.
5978          */
5979         scmd->result = 0;
5980
5981         hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5982         queue_group = &ctrl_info->queue_groups[hw_queue];
5983
5984         if (pqi_is_logical_device(device)) {
5985                 raid_bypassed = false;
5986                 if (device->raid_bypass_enabled &&
5987                         pqi_is_bypass_eligible_request(scmd) &&
5988                         !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5989                         rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
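                        /*
                         * Both success and HOST_BUSY count as bypassed: on
                         * HOST_BUSY the SML requeues the command, so it must
                         * not also be sent down the RAID path below.
                         */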
5990                         if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5991                                 raid_bypassed = true;
5992                                 device->raid_bypass_cnt++;
5993                         }
5994                 }
5995                 if (!raid_bypassed)
5996                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5997         } else {
5998                 if (device->aio_enabled)
5999                         rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6000                 else
6001                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6002         }
6003
6004 out:
6005         if (rc)
6006                 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
6007
6008         return rc;
6009 }
6010
6011 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6012 {
6013         unsigned int i;
6014         unsigned int path;
6015         unsigned long flags;
6016         unsigned int queued_io_count;
6017         struct pqi_queue_group *queue_group;
6018         struct pqi_io_request *io_request;
6019
6020         queued_io_count = 0;
6021
6022         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6023                 queue_group = &ctrl_info->queue_groups[i];
6024                 for (path = 0; path < 2; path++) {
6025                         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6026                         list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6027                                 queued_io_count++;
6028                         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6029                 }
6030         }
6031
6032         return queued_io_count;
6033 }
6034
6035 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6036 {
6037         unsigned int i;
6038         unsigned int path;
6039         unsigned int nonempty_inbound_queue_count;
6040         struct pqi_queue_group *queue_group;
6041         pqi_index_t iq_pi;
6042         pqi_index_t iq_ci;
6043
6044         nonempty_inbound_queue_count = 0;
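        /*
         * An inbound queue still holds work if the producer index we last
         * wrote (iq_pi_copy) has not yet been consumed (iq_ci) by the
         * controller.
         */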
6045
6046         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6047                 queue_group = &ctrl_info->queue_groups[i];
6048                 for (path = 0; path < 2; path++) {
6049                         iq_pi = queue_group->iq_pi_copy[path];
6050                         iq_ci = readl(queue_group->iq_ci[path]);
6051                         if (iq_ci != iq_pi)
6052                                 nonempty_inbound_queue_count++;
6053                 }
6054         }
6055
6056         return nonempty_inbound_queue_count;
6057 }
6058
6059 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS        10
6060
6061 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6062 {
6063         unsigned long start_jiffies;
6064         unsigned long warning_timeout;
6065         unsigned int queued_io_count;
6066         unsigned int nonempty_inbound_queue_count;
6067         bool displayed_warning;
6068
6069         displayed_warning = false;
6070         start_jiffies = jiffies;
6071         warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6072
6073         while (1) {
6074                 queued_io_count = pqi_queued_io_count(ctrl_info);
6075                 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6076                 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6077                         break;
6078                 pqi_check_ctrl_health(ctrl_info);
6079                 if (pqi_ctrl_offline(ctrl_info))
6080                         return -ENXIO;
6081                 if (time_after(jiffies, warning_timeout)) {
6082                         dev_warn(&ctrl_info->pci_dev->dev,
6083                                 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6084                                 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6085                         displayed_warning = true;
6086                         warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6087                 }
6088                 usleep_range(1000, 2000);
6089         }
6090
6091         if (displayed_warning)
6092                 dev_warn(&ctrl_info->pci_dev->dev,
6093                         "queued I/O drained after waiting for %u seconds\n",
6094                         jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6095
6096         return 0;
6097 }
6098
6099 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6100         struct pqi_scsi_dev *device)
6101 {
6102         unsigned int i;
6103         unsigned int path;
6104         struct pqi_queue_group *queue_group;
6105         unsigned long flags;
6106         struct pqi_io_request *io_request;
6107         struct pqi_io_request *next;
6108         struct scsi_cmnd *scmd;
6109         struct pqi_scsi_dev *scsi_device;
6110
6111         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6112                 queue_group = &ctrl_info->queue_groups[i];
6113
6114                 for (path = 0; path < 2; path++) {
6115                         spin_lock_irqsave(
6116                                 &queue_group->submit_lock[path], flags);
6117
6118                         list_for_each_entry_safe(io_request, next,
6119                                 &queue_group->request_list[path],
6120                                 request_list_entry) {
6121
6122                                 scmd = io_request->scmd;
6123                                 if (!scmd)
6124                                         continue;
6125
6126                                 scsi_device = scmd->device->hostdata;
6127                                 if (scsi_device != device)
6128                                         continue;
6129
6130                                 list_del(&io_request->request_list_entry);
6131                                 set_host_byte(scmd, DID_RESET);
6132                                 pqi_free_io_request(io_request);
6133                                 scsi_dma_unmap(scmd);
6134                                 pqi_scsi_done(scmd);
6135                         }
6136
6137                         spin_unlock_irqrestore(
6138                                 &queue_group->submit_lock[path], flags);
6139                 }
6140         }
6141 }
6142
6143 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS     10
6144
6145 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6146         struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6147 {
6148         int cmds_outstanding;
6149         unsigned long start_jiffies;
6150         unsigned long warning_timeout;
6151         unsigned long msecs_waiting;
6152
6153         start_jiffies = jiffies;
6154         warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6155
6156         while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6157                 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6158                         pqi_check_ctrl_health(ctrl_info);
6159                         if (pqi_ctrl_offline(ctrl_info))
6160                                 return -ENXIO;
6161                 }
6162                 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6163                 if (msecs_waiting >= timeout_msecs) {
6164                         dev_err(&ctrl_info->pci_dev->dev,
6165                                 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6166                                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6167                                 lun, msecs_waiting / 1000, cmds_outstanding);
6168                         return -ETIMEDOUT;
6169                 }
6170                 if (time_after(jiffies, warning_timeout)) {
6171                         dev_warn(&ctrl_info->pci_dev->dev,
6172                                 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6173                                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6174                                 lun, msecs_waiting / 1000, cmds_outstanding);
6175                         warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6176                 }
6177                 usleep_range(1000, 2000);
6178         }
6179
6180         return 0;
6181 }
6182
6183 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6184         void *context)
6185 {
6186         struct completion *waiting = context;
6187
6188         complete(waiting);
6189 }
6190
6191 #define PQI_LUN_RESET_POLL_COMPLETION_SECS      10
6192
6193 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6194         struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6195 {
6196         int rc;
6197         unsigned int wait_secs;
6198         int cmds_outstanding;
6199
6200         wait_secs = 0;
6201
6202         while (1) {
6203                 if (wait_for_completion_io_timeout(wait,
6204                         PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6205                         rc = 0;
6206                         break;
6207                 }
6208
6209                 pqi_check_ctrl_health(ctrl_info);
6210                 if (pqi_ctrl_offline(ctrl_info)) {
6211                         rc = -ENXIO;
6212                         break;
6213                 }
6214
6215                 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6216                 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6217                 dev_warn(&ctrl_info->pci_dev->dev,
6218                         "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6219                         ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6220         }
6221
6222         return rc;
6223 }
6224
6225 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS     30
6226
6227 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6228 {
6229         int rc;
6230         struct pqi_io_request *io_request;
6231         DECLARE_COMPLETION_ONSTACK(wait);
6232         struct pqi_task_management_request *request;
6233         struct pqi_scsi_dev *device;
6234
6235         device = scmd->device->hostdata;
6236         io_request = pqi_alloc_io_request(ctrl_info, NULL);
6237         io_request->io_complete_callback = pqi_lun_reset_complete;
6238         io_request->context = &wait;
6239
6240         request = io_request->iu;
6241         memset(request, 0, sizeof(*request));
6242
6243         request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6244         put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6245                 &request->header.iu_length);
6246         put_unaligned_le16(io_request->index, &request->request_id);
6247         memcpy(request->lun_number, device->scsi3addr,
6248                 sizeof(request->lun_number));
6249         if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6250                 request->ml_device_lun_number = (u8)scmd->device->lun;
6251         request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6252         if (ctrl_info->tmf_iu_timeout_supported)
6253                 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6254
6255         pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6256                 io_request);
6257
6258         rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
6259         if (rc == 0)
6260                 rc = io_request->status;
6261
6262         pqi_free_io_request(io_request);
6263
6264         return rc;
6265 }
6266
6267 #define PQI_LUN_RESET_RETRIES                           3
6268 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS              (10 * 1000)
6269 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS          (10 * 60 * 1000)
6270 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS   (2 * 60 * 1000)
6271
6272 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6273 {
6274         int reset_rc;
6275         int wait_rc;
6276         unsigned int retries;
6277         unsigned long timeout_msecs;
6278         struct pqi_scsi_dev *device;
6279
6280         device = scmd->device->hostdata;
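        /*
         * Retry the reset up to PQI_LUN_RESET_RETRIES times, pausing 10
         * seconds between attempts, then wait for outstanding I/O to drain:
         * up to 10 minutes after a successful reset, 2 minutes after a
         * failed one.
         */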
6281         for (retries = 0;;) {
6282                 reset_rc = pqi_lun_reset(ctrl_info, scmd);
6283                 if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
6284                         break;
6285                 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6286         }
6287
6288         timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6289                 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6290
6291         wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
6292         if (wait_rc && reset_rc == 0)
6293                 reset_rc = wait_rc;
6294
6295         return reset_rc == 0 ? SUCCESS : FAILED;
6296 }
6297
6298 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6299 {
6300         int rc;
6301         struct pqi_scsi_dev *device;
6302
6303         device = scmd->device->hostdata;
6304         pqi_ctrl_block_requests(ctrl_info);
6305         pqi_ctrl_wait_until_quiesced(ctrl_info);
6306         pqi_fail_io_queued_for_device(ctrl_info, device);
6307         rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6308         if (rc)
6309                 rc = FAILED;
6310         else
6311                 rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
6312         pqi_ctrl_unblock_requests(ctrl_info);
6313
6314         return rc;
6315 }
6316
6317 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6318 {
6319         int rc;
6320         struct Scsi_Host *shost;
6321         struct pqi_ctrl_info *ctrl_info;
6322         struct pqi_scsi_dev *device;
6323
6324         shost = scmd->device->host;
6325         ctrl_info = shost_to_hba(shost);
6326         device = scmd->device->hostdata;
6327
6328         mutex_lock(&ctrl_info->lun_reset_mutex);
6329
6330         dev_err(&ctrl_info->pci_dev->dev,
6331                 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6332                 shost->host_no,
6333                 device->bus, device->target, (u32)scmd->device->lun,
6334                 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6335
6336         pqi_check_ctrl_health(ctrl_info);
6337         if (pqi_ctrl_offline(ctrl_info))
6338                 rc = FAILED;
6339         else
6340                 rc = pqi_device_reset(ctrl_info, scmd);
6341
6342         dev_err(&ctrl_info->pci_dev->dev,
6343                 "reset of scsi %d:%d:%d:%d: %s\n",
6344                 shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
6345                 rc == SUCCESS ? "SUCCESS" : "FAILED");
6346
6347         mutex_unlock(&ctrl_info->lun_reset_mutex);
6348
6349         return rc;
6350 }
6351
6352 static int pqi_slave_alloc(struct scsi_device *sdev)
6353 {
6354         struct pqi_scsi_dev *device;
6355         unsigned long flags;
6356         struct pqi_ctrl_info *ctrl_info;
6357         struct scsi_target *starget;
6358         struct sas_rphy *rphy;
6359
6360         ctrl_info = shost_to_hba(sdev->host);
6361
6362         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6363
6364         if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6365                 starget = scsi_target(sdev);
6366                 rphy = target_to_rphy(starget);
6367                 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6368                 if (device) {
6369                         if (device->target_lun_valid) {
6370                                 device->ignore_device = true;
6371                         } else {
6372                                 device->target = sdev_id(sdev);
6373                                 device->lun = sdev->lun;
6374                                 device->target_lun_valid = true;
6375                         }
6376                 }
6377         } else {
6378                 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6379                         sdev_id(sdev), sdev->lun);
6380         }
6381
6382         if (device) {
6383                 sdev->hostdata = device;
6384                 device->sdev = sdev;
6385                 if (device->queue_depth) {
6386                         device->advertised_queue_depth = device->queue_depth;
6387                         scsi_change_queue_depth(sdev,
6388                                 device->advertised_queue_depth);
6389                 }
6390                 if (pqi_is_logical_device(device)) {
6391                         pqi_disable_write_same(sdev);
6392                 } else {
6393                         sdev->allow_restart = 1;
6394                         if (device->device_type == SA_DEVICE_TYPE_NVME)
6395                                 pqi_disable_write_same(sdev);
6396                 }
6397         }
6398
6399         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6400
6401         return 0;
6402 }
6403
6404 static void pqi_map_queues(struct Scsi_Host *shost)
6405 {
6406         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6407
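        /* Derive the CPU-to-hw-queue mapping from the controller's MSI-X IRQ affinity. */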
6408         blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6409                               ctrl_info->pci_dev, 0);
6410 }
6411
6412 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6413 {
6414         return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6415 }
6416
6417 static int pqi_slave_configure(struct scsi_device *sdev)
6418 {
6419         int rc = 0;
6420         struct pqi_scsi_dev *device;
6421
6422         device = sdev->hostdata;
6423         device->devtype = sdev->type;
6424
6425         if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6426                 rc = -ENXIO;
6427                 device->ignore_device = false;
6428         }
6429
6430         return rc;
6431 }
6432
6433 static void pqi_slave_destroy(struct scsi_device *sdev)
6434 {
6435         struct pqi_ctrl_info *ctrl_info;
6436         struct pqi_scsi_dev *device;
6437         int mutex_acquired;
6438         unsigned long flags;
6439
6440         ctrl_info = shost_to_hba(sdev->host);
6441
6442         mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6443         if (!mutex_acquired)
6444                 return;
6445
6446         device = sdev->hostdata;
6447         if (!device) {
6448                 mutex_unlock(&ctrl_info->scan_mutex);
6449                 return;
6450         }
6451
6452         device->lun_count--;
6453         if (device->lun_count > 0) {
6454                 mutex_unlock(&ctrl_info->scan_mutex);
6455                 return;
6456         }
6457
6458         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6459         list_del(&device->scsi_device_list_entry);
6460         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6461
6462         mutex_unlock(&ctrl_info->scan_mutex);
6463
6464         pqi_dev_info(ctrl_info, "removed", device);
6465         pqi_free_device(device);
6466 }
6467
6468 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6469 {
6470         struct pci_dev *pci_dev;
6471         u32 subsystem_vendor;
6472         u32 subsystem_device;
6473         cciss_pci_info_struct pciinfo;
6474
6475         if (!arg)
6476                 return -EINVAL;
6477
6478         pci_dev = ctrl_info->pci_dev;
6479
6480         pciinfo.domain = pci_domain_nr(pci_dev->bus);
6481         pciinfo.bus = pci_dev->bus->number;
6482         pciinfo.dev_fn = pci_dev->devfn;
6483         subsystem_vendor = pci_dev->subsystem_vendor;
6484         subsystem_device = pci_dev->subsystem_device;
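        /*
         * Example: subsystem vendor 0x9005 and subsystem device 0x028f
         * (values illustrative) pack into board_id 0x028f9005, with the
         * device ID in the upper 16 bits.
         */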
6485         pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6486
6487         if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6488                 return -EFAULT;
6489
6490         return 0;
6491 }
6492
6493 static int pqi_getdrivver_ioctl(void __user *arg)
6494 {
6495         u32 version;
6496
6497         if (!arg)
6498                 return -EINVAL;
6499
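        /*
         * Example: this driver (2.1.22-040) reports
         * (2 << 28) | (1 << 24) | (22 << 16) | 40 = 0x21160028.
         */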
6500         version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6501                 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6502
6503         if (copy_to_user(arg, &version, sizeof(version)))
6504                 return -EFAULT;
6505
6506         return 0;
6507 }
6508
6509 struct ciss_error_info {
6510         u8      scsi_status;
6511         int     command_status;
6512         size_t  sense_data_length;
6513 };
6514
6515 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6516         struct ciss_error_info *ciss_error_info)
6517 {
6518         int ciss_cmd_status;
6519         size_t sense_data_length;
6520
6521         switch (pqi_error_info->data_out_result) {
6522         case PQI_DATA_IN_OUT_GOOD:
6523                 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6524                 break;
6525         case PQI_DATA_IN_OUT_UNDERFLOW:
6526                 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6527                 break;
6528         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6529                 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6530                 break;
6531         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6532         case PQI_DATA_IN_OUT_BUFFER_ERROR:
6533         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6534         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6535         case PQI_DATA_IN_OUT_ERROR:
6536                 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6537                 break;
6538         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6539         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6540         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6541         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6542         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6543         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6544         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6545         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6546         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6547         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6548                 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6549                 break;
6550         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6551                 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6552                 break;
6553         case PQI_DATA_IN_OUT_ABORTED:
6554                 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6555                 break;
6556         case PQI_DATA_IN_OUT_TIMEOUT:
6557                 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6558                 break;
6559         default:
6560                 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6561                 break;
6562         }
6563
6564         sense_data_length =
6565                 get_unaligned_le16(&pqi_error_info->sense_data_length);
6566         if (sense_data_length == 0)
6567                 sense_data_length =
6568                 get_unaligned_le16(&pqi_error_info->response_data_length);
6569         if (sense_data_length)
6570                 if (sense_data_length > sizeof(pqi_error_info->data))
6571                         sense_data_length = sizeof(pqi_error_info->data);
6572
6573         ciss_error_info->scsi_status = pqi_error_info->status;
6574         ciss_error_info->command_status = ciss_cmd_status;
6575         ciss_error_info->sense_data_length = sense_data_length;
6576 }
6577
6578 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6579 {
6580         int rc;
6581         char *kernel_buffer = NULL;
6582         u16 iu_length;
6583         size_t sense_data_length;
6584         IOCTL_Command_struct iocommand;
6585         struct pqi_raid_path_request request;
6586         struct pqi_raid_error_info pqi_error_info;
6587         struct ciss_error_info ciss_error_info;
6588
6589         if (pqi_ctrl_offline(ctrl_info))
6590                 return -ENXIO;
6591         if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6592                 return -EBUSY;
6593         if (!arg)
6594                 return -EINVAL;
6595         if (!capable(CAP_SYS_RAWIO))
6596                 return -EPERM;
6597         if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6598                 return -EFAULT;
6599         if (iocommand.buf_size < 1 &&
6600                 iocommand.Request.Type.Direction != XFER_NONE)
6601                 return -EINVAL;
6602         if (iocommand.Request.CDBLen > sizeof(request.cdb))
6603                 return -EINVAL;
6604         if (iocommand.Request.Type.Type != TYPE_CMD)
6605                 return -EINVAL;
6606
6607         switch (iocommand.Request.Type.Direction) {
6608         case XFER_NONE:
6609         case XFER_WRITE:
6610         case XFER_READ:
6611         case XFER_READ | XFER_WRITE:
6612                 break;
6613         default:
6614                 return -EINVAL;
6615         }
6616
6617         if (iocommand.buf_size > 0) {
6618                 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6619                 if (!kernel_buffer)
6620                         return -ENOMEM;
6621                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6622                         if (copy_from_user(kernel_buffer, iocommand.buf,
6623                                 iocommand.buf_size)) {
6624                                 rc = -EFAULT;
6625                                 goto out;
6626                         }
6627                 } else {
6628                         memset(kernel_buffer, 0, iocommand.buf_size);
6629                 }
6630         }
6631
6632         memset(&request, 0, sizeof(request));
6633
6634         request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6635         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6636                 PQI_REQUEST_HEADER_LENGTH;
6637         memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6638                 sizeof(request.lun_number));
6639         memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6640         request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6641
6642         switch (iocommand.Request.Type.Direction) {
6643         case XFER_NONE:
6644                 request.data_direction = SOP_NO_DIRECTION_FLAG;
6645                 break;
6646         case XFER_WRITE:
6647                 request.data_direction = SOP_WRITE_FLAG;
6648                 break;
6649         case XFER_READ:
6650                 request.data_direction = SOP_READ_FLAG;
6651                 break;
6652         case XFER_READ | XFER_WRITE:
6653                 request.data_direction = SOP_BIDIRECTIONAL;
6654                 break;
6655         }
6656
6657         request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6658
6659         if (iocommand.buf_size > 0) {
6660                 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6661
6662                 rc = pqi_map_single(ctrl_info->pci_dev,
6663                         &request.sg_descriptors[0], kernel_buffer,
6664                         iocommand.buf_size, DMA_BIDIRECTIONAL);
6665                 if (rc)
6666                         goto out;
6667
6668                 iu_length += sizeof(request.sg_descriptors[0]);
6669         }
6670
6671         put_unaligned_le16(iu_length, &request.header.iu_length);
6672
6673         if (ctrl_info->raid_iu_timeout_supported)
6674                 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6675
6676         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6677                 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6678
6679         if (iocommand.buf_size > 0)
6680                 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6681                         DMA_BIDIRECTIONAL);
6682
6683         memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6684
6685         if (rc == 0) {
6686                 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6687                 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6688                 iocommand.error_info.CommandStatus =
6689                         ciss_error_info.command_status;
6690                 sense_data_length = ciss_error_info.sense_data_length;
6691                 if (sense_data_length) {
6692                         if (sense_data_length >
6693                                 sizeof(iocommand.error_info.SenseInfo))
6694                                 sense_data_length =
6695                                         sizeof(iocommand.error_info.SenseInfo);
6696                         memcpy(iocommand.error_info.SenseInfo,
6697                                 pqi_error_info.data, sense_data_length);
6698                         iocommand.error_info.SenseLen = sense_data_length;
6699                 }
6700         }
6701
6702         if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6703                 rc = -EFAULT;
6704                 goto out;
6705         }
6706
6707         if (rc == 0 && iocommand.buf_size > 0 &&
6708                 (iocommand.Request.Type.Direction & XFER_READ)) {
6709                 if (copy_to_user(iocommand.buf, kernel_buffer,
6710                         iocommand.buf_size)) {
6711                         rc = -EFAULT;
6712                 }
6713         }
6714
6715 out:
6716         kfree(kernel_buffer);
6717
6718         return rc;
6719 }
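
/*
 * Illustrative user-space sketch of CCISS_PASSTHRU (device node, CDB and
 * buffer size are hypothetical examples):
 *
 *	IOCTL_Command_struct ioc = { 0 };
 *	unsigned char buf[96];
 *
 *	ioc.Request.CDBLen = 6;
 *	ioc.Request.CDB[0] = 0x12;		// INQUIRY
 *	ioc.Request.CDB[4] = sizeof(buf);	// allocation length
 *	ioc.Request.Type.Type = TYPE_CMD;
 *	ioc.Request.Type.Direction = XFER_READ;
 *	ioc.buf_size = sizeof(buf);
 *	ioc.buf = buf;
 *	rc = ioctl(fd, CCISS_PASSTHRU, &ioc);	// fd: e.g. open("/dev/sda")
 */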
6720
6721 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6722                      void __user *arg)
6723 {
6724         int rc;
6725         struct pqi_ctrl_info *ctrl_info;
6726
6727         ctrl_info = shost_to_hba(sdev->host);
6728
6729         switch (cmd) {
6730         case CCISS_DEREGDISK:
6731         case CCISS_REGNEWDISK:
6732         case CCISS_REGNEWD:
6733                 rc = pqi_scan_scsi_devices(ctrl_info);
6734                 break;
6735         case CCISS_GETPCIINFO:
6736                 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6737                 break;
6738         case CCISS_GETDRIVVER:
6739                 rc = pqi_getdrivver_ioctl(arg);
6740                 break;
6741         case CCISS_PASSTHRU:
6742                 rc = pqi_passthru_ioctl(ctrl_info, arg);
6743                 break;
6744         default:
6745                 rc = -EINVAL;
6746                 break;
6747         }
6748
6749         return rc;
6750 }
6751
6752 static ssize_t pqi_firmware_version_show(struct device *dev,
6753         struct device_attribute *attr, char *buffer)
6754 {
6755         struct Scsi_Host *shost;
6756         struct pqi_ctrl_info *ctrl_info;
6757
6758         shost = class_to_shost(dev);
6759         ctrl_info = shost_to_hba(shost);
6760
6761         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6762 }
6763
6764 static ssize_t pqi_driver_version_show(struct device *dev,
6765         struct device_attribute *attr, char *buffer)
6766 {
6767         return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6768 }
6769
6770 static ssize_t pqi_serial_number_show(struct device *dev,
6771         struct device_attribute *attr, char *buffer)
6772 {
6773         struct Scsi_Host *shost;
6774         struct pqi_ctrl_info *ctrl_info;
6775
6776         shost = class_to_shost(dev);
6777         ctrl_info = shost_to_hba(shost);
6778
6779         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6780 }
6781
6782 static ssize_t pqi_model_show(struct device *dev,
6783         struct device_attribute *attr, char *buffer)
6784 {
6785         struct Scsi_Host *shost;
6786         struct pqi_ctrl_info *ctrl_info;
6787
6788         shost = class_to_shost(dev);
6789         ctrl_info = shost_to_hba(shost);
6790
6791         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6792 }
6793
6794 static ssize_t pqi_vendor_show(struct device *dev,
6795         struct device_attribute *attr, char *buffer)
6796 {
6797         struct Scsi_Host *shost;
6798         struct pqi_ctrl_info *ctrl_info;
6799
6800         shost = class_to_shost(dev);
6801         ctrl_info = shost_to_hba(shost);
6802
6803         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6804 }
6805
6806 static ssize_t pqi_host_rescan_store(struct device *dev,
6807         struct device_attribute *attr, const char *buffer, size_t count)
6808 {
6809         struct Scsi_Host *shost = class_to_shost(dev);
6810
6811         pqi_scan_start(shost);
6812
6813         return count;
6814 }
6815
6816 static ssize_t pqi_lockup_action_show(struct device *dev,
6817         struct device_attribute *attr, char *buffer)
6818 {
6819         int count = 0;
6820         unsigned int i;
6821
6822         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6823                 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6824                         count += scnprintf(buffer + count, PAGE_SIZE - count,
6825                                 "[%s] ", pqi_lockup_actions[i].name);
6826                 else
6827                         count += scnprintf(buffer + count, PAGE_SIZE - count,
6828                                 "%s ", pqi_lockup_actions[i].name);
6829         }
6830
6831         count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6832
6833         return count;
6834 }
6835
6836 static ssize_t pqi_lockup_action_store(struct device *dev,
6837         struct device_attribute *attr, const char *buffer, size_t count)
6838 {
6839         unsigned int i;
6840         char *action_name;
6841         char action_name_buffer[32];
6842
6843         strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6844         action_name = strstrip(action_name_buffer);
6845
6846         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6847                 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6848                         pqi_lockup_action = pqi_lockup_actions[i].action;
6849                         return count;
6850                 }
6851         }
6852
6853         return -EINVAL;
6854 }
6855
6856 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6857         struct device_attribute *attr, char *buffer)
6858 {
6859         struct Scsi_Host *shost = class_to_shost(dev);
6860         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6861
6862         return scnprintf(buffer, 10, "%x\n",
6863                         ctrl_info->enable_stream_detection);
6864 }
6865
6866 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6867         struct device_attribute *attr, const char *buffer, size_t count)
6868 {
6869         struct Scsi_Host *shost = class_to_shost(dev);
6870         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6871         u8 set_stream_detection = 0;
6872
6873         if (kstrtou8(buffer, 0, &set_stream_detection))
6874                 return -EINVAL;
6875
6876         if (set_stream_detection > 0)
6877                 set_stream_detection = 1;
6878
6879         ctrl_info->enable_stream_detection = set_stream_detection;
6880
6881         return count;
6882 }
6883
6884 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6885         struct device_attribute *attr, char *buffer)
6886 {
6887         struct Scsi_Host *shost = class_to_shost(dev);
6888         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6889
6890         return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6891 }
6892
6893 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6894         struct device_attribute *attr, const char *buffer, size_t count)
6895 {
6896         struct Scsi_Host *shost = class_to_shost(dev);
6897         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6898         u8 set_r5_writes = 0;
6899
6900         if (kstrtou8(buffer, 0, &set_r5_writes))
6901                 return -EINVAL;
6902
6903         if (set_r5_writes > 0)
6904                 set_r5_writes = 1;
6905
6906         ctrl_info->enable_r5_writes = set_r5_writes;
6907
6908         return count;
6909 }
6910
6911 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6912         struct device_attribute *attr, char *buffer)
6913 {
6914         struct Scsi_Host *shost = class_to_shost(dev);
6915         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6916
6917         return scnprintf(buffer, PAGE_SIZE, "%x\n", ctrl_info->enable_r6_writes);
6918 }
6919
6920 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6921         struct device_attribute *attr, const char *buffer, size_t count)
6922 {
6923         struct Scsi_Host *shost = class_to_shost(dev);
6924         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6925         u8 set_r6_writes = 0;
6926
6927         if (kstrtou8(buffer, 0, &set_r6_writes))
6928                 return -EINVAL;
6929
6930         if (set_r6_writes > 0)
6931                 set_r6_writes = 1;
6932
6933         ctrl_info->enable_r6_writes = set_r6_writes;
6934
6935         return count;
6936 }
6937
6938 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6939 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6940 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6941 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6942 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6943 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6944 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6945         pqi_lockup_action_store);
6946 static DEVICE_ATTR(enable_stream_detection, 0644,
6947         pqi_host_enable_stream_detection_show,
6948         pqi_host_enable_stream_detection_store);
6949 static DEVICE_ATTR(enable_r5_writes, 0644,
6950         pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6951 static DEVICE_ATTR(enable_r6_writes, 0644,
6952         pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6953
6954 static struct attribute *pqi_shost_attrs[] = {
6955         &dev_attr_driver_version.attr,
6956         &dev_attr_firmware_version.attr,
6957         &dev_attr_model.attr,
6958         &dev_attr_serial_number.attr,
6959         &dev_attr_vendor.attr,
6960         &dev_attr_rescan.attr,
6961         &dev_attr_lockup_action.attr,
6962         &dev_attr_enable_stream_detection.attr,
6963         &dev_attr_enable_r5_writes.attr,
6964         &dev_attr_enable_r6_writes.attr,
6965         NULL
6966 };
6967
6968 ATTRIBUTE_GROUPS(pqi_shost);
6969
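/*
 * Report the device's 16-byte unique ID: the WWID for physical
 * devices, the volume ID for logical devices. The ID is copied under
 * the device list lock so a concurrent rescan cannot free the device
 * while it is being read.
 */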
6970 static ssize_t pqi_unique_id_show(struct device *dev,
6971         struct device_attribute *attr, char *buffer)
6972 {
6973         struct pqi_ctrl_info *ctrl_info;
6974         struct scsi_device *sdev;
6975         struct pqi_scsi_dev *device;
6976         unsigned long flags;
6977         u8 unique_id[16];
6978
6979         sdev = to_scsi_device(dev);
6980         ctrl_info = shost_to_hba(sdev->host);
6981
6982         if (pqi_ctrl_offline(ctrl_info))
6983                 return -ENODEV;
6984
6985         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6986
6987         device = sdev->hostdata;
6988         if (!device) {
6989                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6990                 return -ENODEV;
6991         }
6992
6993         if (device->is_physical_device)
6994                 memcpy(unique_id, device->wwid, sizeof(device->wwid));
6995         else
6996                 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6997
6998         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6999
7000         return scnprintf(buffer, PAGE_SIZE,
7001                 "%02X%02X%02X%02X%02X%02X%02X%02X"
7002                 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7003                 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7004                 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7005                 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7006                 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7007 }
7008
7009 static ssize_t pqi_lunid_show(struct device *dev,
7010         struct device_attribute *attr, char *buffer)
7011 {
7012         struct pqi_ctrl_info *ctrl_info;
7013         struct scsi_device *sdev;
7014         struct pqi_scsi_dev *device;
7015         unsigned long flags;
7016         u8 lunid[8];
7017
7018         sdev = to_scsi_device(dev);
7019         ctrl_info = shost_to_hba(sdev->host);
7020
7021         if (pqi_ctrl_offline(ctrl_info))
7022                 return -ENODEV;
7023
7024         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7025
7026         device = sdev->hostdata;
7027         if (!device) {
7028                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7029                 return -ENODEV;
7030         }
7031
7032         memcpy(lunid, device->scsi3addr, sizeof(lunid));
7033
7034         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7035
7036         return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7037 }
7038
7039 #define MAX_PATHS       8
7040
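/*
 * Emit one line per path to the device: the active path plus every
 * inactive path present in the path_map bitmask. Physical disks also
 * get their port connector, box, and bay; RAID and logical devices
 * get only the H:B:T:L prefix and the Active/Inactive tag.
 */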
7041 static ssize_t pqi_path_info_show(struct device *dev,
7042         struct device_attribute *attr, char *buf)
7043 {
7044         struct pqi_ctrl_info *ctrl_info;
7045         struct scsi_device *sdev;
7046         struct pqi_scsi_dev *device;
7047         unsigned long flags;
7048         int i;
7049         int output_len = 0;
7050         u8 box;
7051         u8 bay;
7052         u8 path_map_index;
7053         char *active;
7054         u8 phys_connector[2];
7055
7056         sdev = to_scsi_device(dev);
7057         ctrl_info = shost_to_hba(sdev->host);
7058
7059         if (pqi_ctrl_offline(ctrl_info))
7060                 return -ENODEV;
7061
7062         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7063
7064         device = sdev->hostdata;
7065         if (!device) {
7066                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7067                 return -ENODEV;
7068         }
7069
7070         bay = device->bay;
7071         for (i = 0; i < MAX_PATHS; i++) {
7072                 path_map_index = 1 << i;
7073                 if (i == device->active_path_index)
7074                         active = "Active";
7075                 else if (device->path_map & path_map_index)
7076                         active = "Inactive";
7077                 else
7078                         continue;
7079
7080                 output_len += scnprintf(buf + output_len,
7081                                         PAGE_SIZE - output_len,
7082                                         "[%d:%d:%d:%d] %20.20s ",
7083                                         ctrl_info->scsi_host->host_no,
7084                                         device->bus, device->target,
7085                                         device->lun,
7086                                         scsi_device_type(device->devtype));
7087
7088                 if (device->devtype == TYPE_RAID ||
7089                         pqi_is_logical_device(device))
7090                         goto end_buffer;
7091
7092                 memcpy(&phys_connector, &device->phys_connector[i],
7093                         sizeof(phys_connector));
7094                 if (phys_connector[0] < '0')
7095                         phys_connector[0] = '0';
7096                 if (phys_connector[1] < '0')
7097                         phys_connector[1] = '0';
7098
7099                 output_len += scnprintf(buf + output_len,
7100                                         PAGE_SIZE - output_len,
7101                                         "PORT: %.2s ", phys_connector);
7102
7103                 box = device->box[i];
7104                 if (box != 0 && box != 0xFF)
7105                         output_len += scnprintf(buf + output_len,
7106                                                 PAGE_SIZE - output_len,
7107                                                 "BOX: %hhu ", box);
7108
7109                 if ((device->devtype == TYPE_DISK ||
7110                         device->devtype == TYPE_ZBC) &&
7111                         pqi_expose_device(device))
7112                         output_len += scnprintf(buf + output_len,
7113                                                 PAGE_SIZE - output_len,
7114                                                 "BAY: %hhu ", bay);
7115
7116 end_buffer:
7117                 output_len += scnprintf(buf + output_len,
7118                                         PAGE_SIZE - output_len,
7119                                         "%s\n", active);
7120         }
7121
7122         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7123
7124         return output_len;
7125 }
7126
7127 static ssize_t pqi_sas_address_show(struct device *dev,
7128         struct device_attribute *attr, char *buffer)
7129 {
7130         struct pqi_ctrl_info *ctrl_info;
7131         struct scsi_device *sdev;
7132         struct pqi_scsi_dev *device;
7133         unsigned long flags;
7134         u64 sas_address;
7135
7136         sdev = to_scsi_device(dev);
7137         ctrl_info = shost_to_hba(sdev->host);
7138
7139         if (pqi_ctrl_offline(ctrl_info))
7140                 return -ENODEV;
7141
7142         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7143
7144         device = sdev->hostdata;
7145         if (!device) {
7146                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7147                 return -ENODEV;
7148         }
7149
7150         sas_address = device->sas_address;
7151
7152         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7153
7154         return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7155 }
7156
7157 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7158         struct device_attribute *attr, char *buffer)
7159 {
7160         struct pqi_ctrl_info *ctrl_info;
7161         struct scsi_device *sdev;
7162         struct pqi_scsi_dev *device;
7163         unsigned long flags;
7164
7165         sdev = to_scsi_device(dev);
7166         ctrl_info = shost_to_hba(sdev->host);
7167
7168         if (pqi_ctrl_offline(ctrl_info))
7169                 return -ENODEV;
7170
7171         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7172
7173         device = sdev->hostdata;
7174         if (!device) {
7175                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7176                 return -ENODEV;
7177         }
7178
7179         buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7180         buffer[1] = '\n';
7181         buffer[2] = '\0';
7182
7183         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7184
7185         return 2;
7186 }
7187
7188 static ssize_t pqi_raid_level_show(struct device *dev,
7189         struct device_attribute *attr, char *buffer)
7190 {
7191         struct pqi_ctrl_info *ctrl_info;
7192         struct scsi_device *sdev;
7193         struct pqi_scsi_dev *device;
7194         unsigned long flags;
7195         char *raid_level;
7196
7197         sdev = to_scsi_device(dev);
7198         ctrl_info = shost_to_hba(sdev->host);
7199
7200         if (pqi_ctrl_offline(ctrl_info))
7201                 return -ENODEV;
7202
7203         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7204
7205         device = sdev->hostdata;
7206         if (!device) {
7207                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7208                 return -ENODEV;
7209         }
7210
7211         if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7212                 raid_level = pqi_raid_level_to_string(device->raid_level);
7213         else
7214                 raid_level = "N/A";
7215
7216         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7217
7218         return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7219 }
7220
7221 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7222         struct device_attribute *attr, char *buffer)
7223 {
7224         struct pqi_ctrl_info *ctrl_info;
7225         struct scsi_device *sdev;
7226         struct pqi_scsi_dev *device;
7227         unsigned long flags;
7228         unsigned int raid_bypass_cnt;
7229
7230         sdev = to_scsi_device(dev);
7231         ctrl_info = shost_to_hba(sdev->host);
7232
7233         if (pqi_ctrl_offline(ctrl_info))
7234                 return -ENODEV;
7235
7236         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7237
7238         device = sdev->hostdata;
7239         if (!device) {
7240                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7241                 return -ENODEV;
7242         }
7243
7244         raid_bypass_cnt = device->raid_bypass_cnt;
7245
7246         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7247
7248         return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
7249 }
7250
7251 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7252                 struct device_attribute *attr, char *buf)
7253 {
7254         struct pqi_ctrl_info *ctrl_info;
7255         struct scsi_device *sdev;
7256         struct pqi_scsi_dev *device;
7257         unsigned long flags;
7258         int output_len = 0;
7259
7260         sdev = to_scsi_device(dev);
7261         ctrl_info = shost_to_hba(sdev->host);
7262
7263         if (pqi_ctrl_offline(ctrl_info))
7264                 return -ENODEV;
7265
7266         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7267
7268         device = sdev->hostdata;
7269         if (!device) {
7270                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7271                 return -ENODEV;
7272         }
7273
7274         output_len = scnprintf(buf, PAGE_SIZE, "%d\n",
7275                                 device->ncq_prio_enable);
7276         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7277
7278         return output_len;
7279 }
7280
7281 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7282                         struct device_attribute *attr,
7283                         const char *buf, size_t count)
7284 {
7285         struct pqi_ctrl_info *ctrl_info;
7286         struct scsi_device *sdev;
7287         struct pqi_scsi_dev *device;
7288         unsigned long flags;
7289         u8 ncq_prio_enable = 0;
7290
7291         if (kstrtou8(buf, 0, &ncq_prio_enable))
7292                 return -EINVAL;
7293
7294         sdev = to_scsi_device(dev);
7295         ctrl_info = shost_to_hba(sdev->host);
7296
7297         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7298
7299         device = sdev->hostdata;
7300
7301         if (!device) {
7302                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7303                 return -ENODEV;
7304         }
7305
7306         if (!device->ncq_prio_support) {
7307                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7308                 return -EINVAL;
7309         }
7310
7311         device->ncq_prio_enable = ncq_prio_enable;
7312
7313         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7314
7315         return count;
7316 }
7317
7318 static ssize_t pqi_numa_node_show(struct device *dev,
7319         struct device_attribute *attr, char *buffer)
7320 {
7321         struct scsi_device *sdev;
7322         struct pqi_ctrl_info *ctrl_info;
7323
7324         sdev = to_scsi_device(dev);
7325         ctrl_info = shost_to_hba(sdev->host);
7326
7327         return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7328 }
7329
7330 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7331 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7332 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7333 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7334 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7335 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7336 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7337 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7338                 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7339 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7340
7341 static struct attribute *pqi_sdev_attrs[] = {
7342         &dev_attr_lunid.attr,
7343         &dev_attr_unique_id.attr,
7344         &dev_attr_path_info.attr,
7345         &dev_attr_sas_address.attr,
7346         &dev_attr_ssd_smart_path_enabled.attr,
7347         &dev_attr_raid_level.attr,
7348         &dev_attr_raid_bypass_cnt.attr,
7349         &dev_attr_sas_ncq_prio_enable.attr,
7350         &dev_attr_numa_node.attr,
7351         NULL
7352 };
7353
7354 ATTRIBUTE_GROUPS(pqi_sdev);
7355
7356 static const struct scsi_host_template pqi_driver_template = {
7357         .module = THIS_MODULE,
7358         .name = DRIVER_NAME_SHORT,
7359         .proc_name = DRIVER_NAME_SHORT,
7360         .queuecommand = pqi_scsi_queue_command,
7361         .scan_start = pqi_scan_start,
7362         .scan_finished = pqi_scan_finished,
7363         .this_id = -1,
7364         .eh_device_reset_handler = pqi_eh_device_reset_handler,
7365         .ioctl = pqi_ioctl,
7366         .slave_alloc = pqi_slave_alloc,
7367         .slave_configure = pqi_slave_configure,
7368         .slave_destroy = pqi_slave_destroy,
7369         .map_queues = pqi_map_queues,
7370         .sdev_groups = pqi_sdev_groups,
7371         .shost_groups = pqi_shost_groups,
7372         .cmd_size = sizeof(struct pqi_cmd_priv),
7373 };
7374
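/*
 * Allocate and register the SCSI midlayer host. Note that the host's
 * private data holds only a pointer to the pqi_ctrl_info (hence the
 * pointer-sized sizeof(ctrl_info) passed to scsi_host_alloc()), not a
 * copy of the structure itself.
 */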
7375 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7376 {
7377         int rc;
7378         struct Scsi_Host *shost;
7379
7380         shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7381         if (!shost) {
7382                 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7383                 return -ENOMEM;
7384         }
7385
7386         shost->io_port = 0;
7387         shost->n_io_port = 0;
7388         shost->this_id = -1;
7389         shost->max_channel = PQI_MAX_BUS;
7390         shost->max_cmd_len = MAX_COMMAND_SIZE;
7391         shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7392         shost->max_id = ~0;
7393         shost->max_sectors = ctrl_info->max_sectors;
7394         shost->can_queue = ctrl_info->scsi_ml_can_queue;
7395         shost->cmd_per_lun = shost->can_queue;
7396         shost->sg_tablesize = ctrl_info->sg_tablesize;
7397         shost->transportt = pqi_sas_transport_template;
7398         shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7399         shost->unique_id = shost->irq;
7400         shost->nr_hw_queues = ctrl_info->num_queue_groups;
7401         shost->host_tagset = 1;
7402         shost->hostdata[0] = (unsigned long)ctrl_info;
7403
7404         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7405         if (rc) {
7406                 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7407                 goto free_host;
7408         }
7409
7410         rc = pqi_add_sas_host(shost, ctrl_info);
7411         if (rc) {
7412                 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7413                 goto remove_host;
7414         }
7415
7416         ctrl_info->scsi_host = shost;
7417
7418         return 0;
7419
7420 remove_host:
7421         scsi_remove_host(shost);
7422 free_host:
7423         scsi_host_put(shost);
7424
7425         return rc;
7426 }
7427
7428 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7429 {
7430         struct Scsi_Host *shost;
7431
7432         pqi_delete_sas_host(ctrl_info);
7433
7434         shost = ctrl_info->scsi_host;
7435         if (!shost)
7436                 return;
7437
7438         scsi_remove_host(shost);
7439         scsi_host_put(shost);
7440 }
7441
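/*
 * Poll the PQI device reset register until the reset action reads
 * back as completed. Bail out early if SIS firmware stops running or
 * once the controller-advertised timeout expires (the *100 conversion
 * below suggests max_reset_timeout is expressed in 100 ms units).
 */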
7442 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7443 {
7444         int rc = 0;
7445         struct pqi_device_registers __iomem *pqi_registers;
7446         unsigned long timeout;
7447         unsigned int timeout_msecs;
7448         union pqi_reset_register reset_reg;
7449
7450         pqi_registers = ctrl_info->pqi_registers;
7451         timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7452         timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7453
7454         while (1) {
7455                 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7456                 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7457                 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7458                         break;
7459                 if (!sis_is_firmware_running(ctrl_info)) {
7460                         rc = -ENXIO;
7461                         break;
7462                 }
7463                 if (time_after(jiffies, timeout)) {
7464                         rc = -ETIMEDOUT;
7465                         break;
7466                 }
7467         }
7468
7469         return rc;
7470 }
7471
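/*
 * Perform a PQI hard reset: quiesce through the SIS interface first
 * when the firmware supports it, then write the reset request to the
 * device reset register and wait for it to complete.
 */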
7472 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7473 {
7474         int rc;
7475         union pqi_reset_register reset_reg;
7476
7477         if (ctrl_info->pqi_reset_quiesce_supported) {
7478                 rc = sis_pqi_reset_quiesce(ctrl_info);
7479                 if (rc) {
7480                         dev_err(&ctrl_info->pci_dev->dev,
7481                                 "PQI reset failed during quiesce with error %d\n", rc);
7482                         return rc;
7483                 }
7484         }
7485
7486         reset_reg.all_bits = 0;
7487         reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7488         reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7489
7490         writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7491
7492         rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7493         if (rc)
7494                 dev_err(&ctrl_info->pci_dev->dev,
7495                         "PQI reset failed with error %d\n", rc);
7496
7497         return rc;
7498 }
7499
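/*
 * Fetch the controller serial number via a BMIC "sense subsystem
 * information" request and cache a NUL-terminated copy in ctrl_info.
 */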
7500 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7501 {
7502         int rc;
7503         struct bmic_sense_subsystem_info *sense_info;
7504
7505         sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7506         if (!sense_info)
7507                 return -ENOMEM;
7508
7509         rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7510         if (rc)
7511                 goto out;
7512
7513         memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7514                 sizeof(sense_info->ctrl_serial_number));
7515         ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7516
7517 out:
7518         kfree(sense_info);
7519
7520         return rc;
7521 }
7522
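/*
 * Cache the firmware version, model, and vendor strings from a BMIC
 * "identify controller" response. Firmware that supports long version
 * strings reports the version directly; otherwise the short version
 * string is suffixed with "-<build number>".
 */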
7523 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7524 {
7525         int rc;
7526         struct bmic_identify_controller *identify;
7527
7528         identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7529         if (!identify)
7530                 return -ENOMEM;
7531
7532         rc = pqi_identify_controller(ctrl_info, identify);
7533         if (rc)
7534                 goto out;
7535
7536         if (get_unaligned_le32(&identify->extra_controller_flags) &
7537                 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7538                 memcpy(ctrl_info->firmware_version,
7539                         identify->firmware_version_long,
7540                         sizeof(identify->firmware_version_long));
7541         } else {
7542                 memcpy(ctrl_info->firmware_version,
7543                         identify->firmware_version_short,
7544                         sizeof(identify->firmware_version_short));
7545                 ctrl_info->firmware_version
7546                         [sizeof(identify->firmware_version_short)] = '\0';
7547                 snprintf(ctrl_info->firmware_version +
7548                         strlen(ctrl_info->firmware_version),
7549                         sizeof(ctrl_info->firmware_version) -
7550                         sizeof(identify->firmware_version_short),
7551                         "-%u",
7552                         get_unaligned_le16(&identify->firmware_build_number));
7553         }
7554
7555         memcpy(ctrl_info->model, identify->product_id,
7556                 sizeof(identify->product_id));
7557         ctrl_info->model[sizeof(identify->product_id)] = '\0';
7558
7559         memcpy(ctrl_info->vendor, identify->vendor_id,
7560                 sizeof(identify->vendor_id));
7561         ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7562
7563         dev_info(&ctrl_info->pci_dev->dev,
7564                 "Firmware version: %s\n", ctrl_info->firmware_version);
7565
7566 out:
7567         kfree(identify);
7568
7569         return rc;
7570 }
7571
7572 struct pqi_config_table_section_info {
7573         struct pqi_ctrl_info *ctrl_info;
7574         void            *section;
7575         u32             section_offset;
7576         void __iomem    *section_iomem_addr;
7577 };
7578
7579 static inline bool pqi_is_firmware_feature_supported(
7580         struct pqi_config_table_firmware_features *firmware_features,
7581         unsigned int bit_position)
7582 {
7583         unsigned int byte_index;
7584
7585         byte_index = bit_position / BITS_PER_BYTE;
7586
7587         if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7588                 return false;
7589
7590         return firmware_features->features_supported[byte_index] &
7591                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7592 }
7593
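/*
 * The byte_index arithmetic here and in pqi_request_firmware_feature()
 * implies that the firmware-features section carries three consecutive
 * arrays of num_elements bytes each, all addressed relative to
 * features_supported[]: the features the firmware supports, the
 * features the host requests, and the features currently enabled.
 */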
7594 static inline bool pqi_is_firmware_feature_enabled(
7595         struct pqi_config_table_firmware_features *firmware_features,
7596         void __iomem *firmware_features_iomem_addr,
7597         unsigned int bit_position)
7598 {
7599         unsigned int byte_index;
7600         u8 __iomem *features_enabled_iomem_addr;
7601
7602         byte_index = (bit_position / BITS_PER_BYTE) +
7603                 (le16_to_cpu(firmware_features->num_elements) * 2);
7604
7605         features_enabled_iomem_addr = firmware_features_iomem_addr +
7606                 offsetof(struct pqi_config_table_firmware_features,
7607                         features_supported) + byte_index;
7608
7609         return readb(features_enabled_iomem_addr) &
7610                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7611 }
7612
7613 static inline void pqi_request_firmware_feature(
7614         struct pqi_config_table_firmware_features *firmware_features,
7615         unsigned int bit_position)
7616 {
7617         unsigned int byte_index;
7618
7619         byte_index = (bit_position / BITS_PER_BYTE) +
7620                 le16_to_cpu(firmware_features->num_elements);
7621
7622         firmware_features->features_supported[byte_index] |=
7623                 (1 << (bit_position % BITS_PER_BYTE));
7624 }
7625
7626 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7627         u16 first_section, u16 last_section)
7628 {
7629         struct pqi_vendor_general_request request;
7630
7631         memset(&request, 0, sizeof(request));
7632
7633         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7634         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7635                 &request.header.iu_length);
7636         put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7637                 &request.function_code);
7638         put_unaligned_le16(first_section,
7639                 &request.data.config_table_update.first_section);
7640         put_unaligned_le16(last_section,
7641                 &request.data.config_table_update.last_section);
7642
7643         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7644 }
7645
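/*
 * Copy the host-requested feature bytes back out to the controller's
 * configuration table, advertise the highest feature bit this driver
 * knows about (two writeb()s forming a little-endian 16-bit value),
 * then ask the firmware to re-evaluate the firmware-features section.
 */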
7646 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7647         struct pqi_config_table_firmware_features *firmware_features,
7648         void __iomem *firmware_features_iomem_addr)
7649 {
7650         void *features_requested;
7651         void __iomem *features_requested_iomem_addr;
7652         void __iomem *host_max_known_feature_iomem_addr;
7653
7654         features_requested = firmware_features->features_supported +
7655                 le16_to_cpu(firmware_features->num_elements);
7656
7657         features_requested_iomem_addr = firmware_features_iomem_addr +
7658                 (features_requested - (void *)firmware_features);
7659
7660         memcpy_toio(features_requested_iomem_addr, features_requested,
7661                 le16_to_cpu(firmware_features->num_elements));
7662
7663         if (pqi_is_firmware_feature_supported(firmware_features,
7664                 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7665                 host_max_known_feature_iomem_addr =
7666                         features_requested_iomem_addr +
7667                         (le16_to_cpu(firmware_features->num_elements) * 2) +
7668                         sizeof(__le16);
7669                 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7670                 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7671         }
7672
7673         return pqi_config_table_update(ctrl_info,
7674                 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7675                 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7676 }
7677
7678 struct pqi_firmware_feature {
7679         char            *feature_name;
7680         unsigned int    feature_bit;
7681         bool            supported;
7682         bool            enabled;
7683         void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7684                 struct pqi_firmware_feature *firmware_feature);
7685 };
7686
7687 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7688         struct pqi_firmware_feature *firmware_feature)
7689 {
7690         if (!firmware_feature->supported) {
7691                 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7692                         firmware_feature->feature_name);
7693                 return;
7694         }
7695
7696         if (firmware_feature->enabled) {
7697                 dev_info(&ctrl_info->pci_dev->dev,
7698                         "%s enabled\n", firmware_feature->feature_name);
7699                 return;
7700         }
7701
7702         dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7703                 firmware_feature->feature_name);
7704 }
7705
7706 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7707         struct pqi_firmware_feature *firmware_feature)
7708 {
7709         switch (firmware_feature->feature_bit) {
7710         case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7711                 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7712                 break;
7713         case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7714                 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7715                 break;
7716         case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7717                 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7718                 break;
7719         case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7720                 ctrl_info->soft_reset_handshake_supported =
7721                         firmware_feature->enabled &&
7722                         pqi_read_soft_reset_status(ctrl_info);
7723                 break;
7724         case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7725                 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7726                 break;
7727         case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7728                 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7729                 break;
7730         case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7731                 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7732                 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7733                 break;
7734         case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7735                 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7736                 break;
7737         case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7738                 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7739                 break;
7740         }
7741
7742         pqi_firmware_feature_status(ctrl_info, firmware_feature);
7743 }
7744
7745 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7746         struct pqi_firmware_feature *firmware_feature)
7747 {
7748         if (firmware_feature->feature_status)
7749                 firmware_feature->feature_status(ctrl_info, firmware_feature);
7750 }
7751
7752 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7753
7754 static struct pqi_firmware_feature pqi_firmware_features[] = {
7755         {
7756                 .feature_name = "Online Firmware Activation",
7757                 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7758                 .feature_status = pqi_firmware_feature_status,
7759         },
7760         {
7761                 .feature_name = "Serial Management Protocol",
7762                 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7763                 .feature_status = pqi_firmware_feature_status,
7764         },
7765         {
7766                 .feature_name = "Maximum Known Feature",
7767                 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7768                 .feature_status = pqi_firmware_feature_status,
7769         },
7770         {
7771                 .feature_name = "RAID 0 Read Bypass",
7772                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7773                 .feature_status = pqi_firmware_feature_status,
7774         },
7775         {
7776                 .feature_name = "RAID 1 Read Bypass",
7777                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7778                 .feature_status = pqi_firmware_feature_status,
7779         },
7780         {
7781                 .feature_name = "RAID 5 Read Bypass",
7782                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7783                 .feature_status = pqi_firmware_feature_status,
7784         },
7785         {
7786                 .feature_name = "RAID 6 Read Bypass",
7787                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7788                 .feature_status = pqi_firmware_feature_status,
7789         },
7790         {
7791                 .feature_name = "RAID 0 Write Bypass",
7792                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7793                 .feature_status = pqi_firmware_feature_status,
7794         },
7795         {
7796                 .feature_name = "RAID 1 Write Bypass",
7797                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7798                 .feature_status = pqi_ctrl_update_feature_flags,
7799         },
7800         {
7801                 .feature_name = "RAID 5 Write Bypass",
7802                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7803                 .feature_status = pqi_ctrl_update_feature_flags,
7804         },
7805         {
7806                 .feature_name = "RAID 6 Write Bypass",
7807                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7808                 .feature_status = pqi_ctrl_update_feature_flags,
7809         },
7810         {
7811                 .feature_name = "New Soft Reset Handshake",
7812                 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7813                 .feature_status = pqi_ctrl_update_feature_flags,
7814         },
7815         {
7816                 .feature_name = "RAID IU Timeout",
7817                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7818                 .feature_status = pqi_ctrl_update_feature_flags,
7819         },
7820         {
7821                 .feature_name = "TMF IU Timeout",
7822                 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7823                 .feature_status = pqi_ctrl_update_feature_flags,
7824         },
7825         {
7826                 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7827                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7828                 .feature_status = pqi_firmware_feature_status,
7829         },
7830         {
7831                 .feature_name = "Firmware Triage",
7832                 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7833                 .feature_status = pqi_ctrl_update_feature_flags,
7834         },
7835         {
7836                 .feature_name = "RPL Extended Formats 4 and 5",
7837                 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7838                 .feature_status = pqi_ctrl_update_feature_flags,
7839         },
7840         {
7841                 .feature_name = "Multi-LUN Target",
7842                 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
7843                 .feature_status = pqi_ctrl_update_feature_flags,
7844         },
7845 };
7846
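/*
 * Negotiate firmware features in three passes over the feature table:
 * mark which entries the firmware supports (reporting the unsupported
 * ones), request every supported feature, then read back what the
 * firmware actually enabled and invoke each feature's status callback.
 */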
7847 static void pqi_process_firmware_features(
7848         struct pqi_config_table_section_info *section_info)
7849 {
7850         int rc;
7851         struct pqi_ctrl_info *ctrl_info;
7852         struct pqi_config_table_firmware_features *firmware_features;
7853         void __iomem *firmware_features_iomem_addr;
7854         unsigned int i;
7855         unsigned int num_features_supported;
7856
7857         ctrl_info = section_info->ctrl_info;
7858         firmware_features = section_info->section;
7859         firmware_features_iomem_addr = section_info->section_iomem_addr;
7860
7861         for (i = 0, num_features_supported = 0;
7862                 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7863                 if (pqi_is_firmware_feature_supported(firmware_features,
7864                         pqi_firmware_features[i].feature_bit)) {
7865                         pqi_firmware_features[i].supported = true;
7866                         num_features_supported++;
7867                 } else {
7868                         pqi_firmware_feature_update(ctrl_info,
7869                                 &pqi_firmware_features[i]);
7870                 }
7871         }
7872
7873         if (num_features_supported == 0)
7874                 return;
7875
7876         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7877                 if (!pqi_firmware_features[i].supported)
7878                         continue;
7879                 pqi_request_firmware_feature(firmware_features,
7880                         pqi_firmware_features[i].feature_bit);
7881         }
7882
7883         rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7884                 firmware_features_iomem_addr);
7885         if (rc) {
7886                 dev_err(&ctrl_info->pci_dev->dev,
7887                         "failed to enable firmware features in PQI configuration table\n");
7888                 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7889                         if (!pqi_firmware_features[i].supported)
7890                                 continue;
7891                         pqi_firmware_feature_update(ctrl_info,
7892                                 &pqi_firmware_features[i]);
7893                 }
7894                 return;
7895         }
7896
7897         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7898                 if (!pqi_firmware_features[i].supported)
7899                         continue;
7900                 if (pqi_is_firmware_feature_enabled(firmware_features,
7901                         firmware_features_iomem_addr,
7902                         pqi_firmware_features[i].feature_bit)) {
7903                         pqi_firmware_features[i].enabled = true;
7904                 }
7905                 pqi_firmware_feature_update(ctrl_info,
7906                         &pqi_firmware_features[i]);
7907         }
7908 }
7909
7910 static void pqi_init_firmware_features(void)
7911 {
7912         unsigned int i;
7913
7914         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7915                 pqi_firmware_features[i].supported = false;
7916                 pqi_firmware_features[i].enabled = false;
7917         }
7918 }
7919
7920 static void pqi_process_firmware_features_section(
7921         struct pqi_config_table_section_info *section_info)
7922 {
7923         mutex_lock(&pqi_firmware_features_mutex);
7924         pqi_init_firmware_features();
7925         pqi_process_firmware_features(section_info);
7926         mutex_unlock(&pqi_firmware_features_mutex);
7927 }
7928
7929 /*
7930  * Reset all controller settings that can be initialized during the processing
7931  * of the PQI Configuration Table.
7932  */
7933
7934 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7935 {
7936         ctrl_info->heartbeat_counter = NULL;
7937         ctrl_info->soft_reset_status = NULL;
7938         ctrl_info->soft_reset_handshake_supported = false;
7939         ctrl_info->enable_r1_writes = false;
7940         ctrl_info->enable_r5_writes = false;
7941         ctrl_info->enable_r6_writes = false;
7942         ctrl_info->raid_iu_timeout_supported = false;
7943         ctrl_info->tmf_iu_timeout_supported = false;
7944         ctrl_info->firmware_triage_supported = false;
7945         ctrl_info->rpl_extended_format_4_5_supported = false;
7946         ctrl_info->multi_lun_device_supported = false;
7947 }
7948
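/*
 * Snapshot the PQI configuration table from BAR memory and walk its
 * chain of sections. The heartbeat counter and soft reset status
 * pointers are computed relative to table_iomem_addr because they must
 * keep referencing live controller memory rather than the temporary
 * copy; the firmware-features section is deferred until all other
 * sections have been seen.
 */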
7949 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7950 {
7951         u32 table_length;
7952         u32 section_offset;
7953         bool firmware_feature_section_present;
7954         void __iomem *table_iomem_addr;
7955         struct pqi_config_table *config_table;
7956         struct pqi_config_table_section_header *section;
7957         struct pqi_config_table_section_info section_info;
7958         struct pqi_config_table_section_info feature_section_info = {0};
7959
7960         table_length = ctrl_info->config_table_length;
7961         if (table_length == 0)
7962                 return 0;
7963
7964         config_table = kmalloc(table_length, GFP_KERNEL);
7965         if (!config_table) {
7966                 dev_err(&ctrl_info->pci_dev->dev,
7967                         "failed to allocate memory for PQI configuration table\n");
7968                 return -ENOMEM;
7969         }
7970
7971         /*
7972          * Copy the config table contents from I/O memory space into the
7973          * temporary buffer.
7974          */
7975         table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7976         memcpy_fromio(config_table, table_iomem_addr, table_length);
7977
7978         firmware_feature_section_present = false;
7979         section_info.ctrl_info = ctrl_info;
7980         section_offset = get_unaligned_le32(&config_table->first_section_offset);
7981
7982         while (section_offset) {
7983                 section = (void *)config_table + section_offset;
7984
7985                 section_info.section = section;
7986                 section_info.section_offset = section_offset;
7987                 section_info.section_iomem_addr = table_iomem_addr + section_offset;
7988
7989                 switch (get_unaligned_le16(&section->section_id)) {
7990                 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7991                         firmware_feature_section_present = true;
7992                         feature_section_info = section_info;
7993                         break;
7994                 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7995                         if (pqi_disable_heartbeat)
7996                                 dev_warn(&ctrl_info->pci_dev->dev,
7997                                 "heartbeat disabled by module parameter\n");
7998                         else
7999                                 ctrl_info->heartbeat_counter =
8000                                         table_iomem_addr +
8001                                         section_offset +
8002                                         offsetof(struct pqi_config_table_heartbeat,
8003                                                 heartbeat_counter);
8004                         break;
8005                 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8006                         ctrl_info->soft_reset_status =
8007                                 table_iomem_addr +
8008                                 section_offset +
8009                                 offsetof(struct pqi_config_table_soft_reset,
8010                                         soft_reset_status);
8011                         break;
8012                 }
8013
8014                 section_offset = get_unaligned_le16(&section->next_section_offset);
8015         }
8016
8017         /*
8018          * We process the firmware feature section after all other sections
8019          * have been processed so that the feature bit callbacks can take
8020          * into account the settings configured by other sections.
8021          */
8022         if (firmware_feature_section_present)
8023                 pqi_process_firmware_features_section(&feature_section_info);
8024
8025         kfree(config_table);
8026
8027         return 0;
8028 }
8029
8030 /* Switches the controller from PQI mode back into SIS mode. */
8031
8032 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8033 {
8034         int rc;
8035
8036         pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8037         rc = pqi_reset(ctrl_info);
8038         if (rc)
8039                 return rc;
8040         rc = sis_reenable_sis_mode(ctrl_info);
8041         if (rc) {
8042                 dev_err(&ctrl_info->pci_dev->dev,
8043                         "re-enabling SIS mode failed with error %d\n", rc);
8044                 return rc;
8045         }
8046         pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8047
8048         return 0;
8049 }
8050
8051 /*
8052  * If the controller isn't already in SIS mode, this function forces it into
8053  * SIS mode.
8054  */
8055
8056 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8057 {
8058         if (!sis_is_firmware_running(ctrl_info))
8059                 return -ENXIO;
8060
8061         if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8062                 return 0;
8063
8064         if (sis_is_kernel_up(ctrl_info)) {
8065                 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8066                 return 0;
8067         }
8068
8069         return pqi_revert_to_sis_mode(ctrl_info);
8070 }
8071
8072 static void pqi_perform_lockup_action(void)
8073 {
8074         switch (pqi_lockup_action) {
8075         case PANIC:
8076                 panic("FATAL: Smart Family Controller lockup detected");
8077                 break;
8078         case REBOOT:
8079                 emergency_restart();
8080                 break;
8081         case NONE:
8082         default:
8083                 break;
8084         }
8085 }
8086
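/*
 * Cold-boot controller initialization: wait for SIS readiness, query
 * the controller's properties and capabilities, size and allocate I/O
 * resources, transition the controller from SIS into PQI mode, bring
 * up the admin and operational queues plus MSI-X interrupts, process
 * the configuration table, enable events, and finally register with
 * the SCSI midlayer and kick off device discovery. In the kdump
 * (reset_devices) case the controller is soft-reset first and the
 * outstanding-request count is clamped.
 */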
8087 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8088 {
8089         int rc;
8090         u32 product_id;
8091
8092         if (reset_devices) {
8093                 if (pqi_is_fw_triage_supported(ctrl_info)) {
8094                         rc = sis_wait_for_fw_triage_completion(ctrl_info);
8095                         if (rc)
8096                                 return rc;
8097                 }
8098                 sis_soft_reset(ctrl_info);
8099                 ssleep(PQI_POST_RESET_DELAY_SECS);
8100         } else {
8101                 rc = pqi_force_sis_mode(ctrl_info);
8102                 if (rc)
8103                         return rc;
8104         }
8105
8106         /*
8107          * Wait until the controller is ready to start accepting SIS
8108          * commands.
8109          */
8110         rc = sis_wait_for_ctrl_ready(ctrl_info);
8111         if (rc) {
8112                 if (reset_devices) {
8113                         dev_err(&ctrl_info->pci_dev->dev,
8114                                 "kdump init failed with error %d\n", rc);
8115                         pqi_lockup_action = REBOOT;
8116                         pqi_perform_lockup_action();
8117                 }
8118                 return rc;
8119         }
8120
8121         /*
8122          * Get the controller properties. This allows us to determine
8123          * whether the controller supports PQI mode.
8124          */
8125         rc = sis_get_ctrl_properties(ctrl_info);
8126         if (rc) {
8127                 dev_err(&ctrl_info->pci_dev->dev,
8128                         "error obtaining controller properties\n");
8129                 return rc;
8130         }
8131
8132         rc = sis_get_pqi_capabilities(ctrl_info);
8133         if (rc) {
8134                 dev_err(&ctrl_info->pci_dev->dev,
8135                         "error obtaining controller capabilities\n");
8136                 return rc;
8137         }
8138
8139         product_id = sis_get_product_id(ctrl_info);
8140         ctrl_info->product_id = (u8)product_id;
8141         ctrl_info->product_revision = (u8)(product_id >> 8);
8142
8143         if (reset_devices) {
8144                 if (ctrl_info->max_outstanding_requests >
8145                         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8146                                 ctrl_info->max_outstanding_requests =
8147                                         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8148         } else {
8149                 if (ctrl_info->max_outstanding_requests >
8150                         PQI_MAX_OUTSTANDING_REQUESTS)
8151                                 ctrl_info->max_outstanding_requests =
8152                                         PQI_MAX_OUTSTANDING_REQUESTS;
8153         }
8154
8155         pqi_calculate_io_resources(ctrl_info);
8156
8157         rc = pqi_alloc_error_buffer(ctrl_info);
8158         if (rc) {
8159                 dev_err(&ctrl_info->pci_dev->dev,
8160                         "failed to allocate PQI error buffer\n");
8161                 return rc;
8162         }
8163
8164         /*
8165          * If the function we are about to call succeeds, the
8166          * controller will transition from legacy SIS mode
8167          * into PQI mode.
8168          */
8169         rc = sis_init_base_struct_addr(ctrl_info);
8170         if (rc) {
8171                 dev_err(&ctrl_info->pci_dev->dev,
8172                         "error initializing PQI mode\n");
8173                 return rc;
8174         }
8175
8176         /* Wait for the controller to complete the SIS -> PQI transition. */
8177         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8178         if (rc) {
8179                 dev_err(&ctrl_info->pci_dev->dev,
8180                         "transition to PQI mode failed\n");
8181                 return rc;
8182         }
8183
8184         /* From here on, we are running in PQI mode. */
8185         ctrl_info->pqi_mode_enabled = true;
8186         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8187
8188         rc = pqi_alloc_admin_queues(ctrl_info);
8189         if (rc) {
8190                 dev_err(&ctrl_info->pci_dev->dev,
8191                         "failed to allocate admin queues\n");
8192                 return rc;
8193         }
8194
8195         rc = pqi_create_admin_queues(ctrl_info);
8196         if (rc) {
8197                 dev_err(&ctrl_info->pci_dev->dev,
8198                         "error creating admin queues\n");
8199                 return rc;
8200         }
8201
8202         rc = pqi_report_device_capability(ctrl_info);
8203         if (rc) {
8204                 dev_err(&ctrl_info->pci_dev->dev,
8205                         "obtaining device capability failed\n");
8206                 return rc;
8207         }
8208
8209         rc = pqi_validate_device_capability(ctrl_info);
8210         if (rc)
8211                 return rc;
8212
8213         pqi_calculate_queue_resources(ctrl_info);
8214
8215         rc = pqi_enable_msix_interrupts(ctrl_info);
8216         if (rc)
8217                 return rc;
8218
8219         if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8220                 ctrl_info->max_msix_vectors =
8221                         ctrl_info->num_msix_vectors_enabled;
8222                 pqi_calculate_queue_resources(ctrl_info);
8223         }
8224
8225         rc = pqi_alloc_io_resources(ctrl_info);
8226         if (rc)
8227                 return rc;
8228
8229         rc = pqi_alloc_operational_queues(ctrl_info);
8230         if (rc) {
8231                 dev_err(&ctrl_info->pci_dev->dev,
8232                         "failed to allocate operational queues\n");
8233                 return rc;
8234         }
8235
8236         pqi_init_operational_queues(ctrl_info);
8237
8238         rc = pqi_create_queues(ctrl_info);
8239         if (rc)
8240                 return rc;
8241
8242         rc = pqi_request_irqs(ctrl_info);
8243         if (rc)
8244                 return rc;
8245
8246         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8247
8248         ctrl_info->controller_online = true;
8249
8250         rc = pqi_process_config_table(ctrl_info);
8251         if (rc)
8252                 return rc;
8253
8254         pqi_start_heartbeat_timer(ctrl_info);
8255
8256         if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8257                 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8258                 if (rc) { /* Supported features not returned correctly. */
8259                         dev_err(&ctrl_info->pci_dev->dev,
8260                                 "error obtaining advanced RAID bypass configuration\n");
8261                         return rc;
8262                 }
8263                 ctrl_info->ciss_report_log_flags |=
8264                         CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8265         }
8266
8267         rc = pqi_enable_events(ctrl_info);
8268         if (rc) {
8269                 dev_err(&ctrl_info->pci_dev->dev,
8270                         "error enabling events\n");
8271                 return rc;
8272         }
8273
8274         /* Register with the SCSI subsystem. */
8275         rc = pqi_register_scsi(ctrl_info);
8276         if (rc)
8277                 return rc;
8278
8279         rc = pqi_get_ctrl_product_details(ctrl_info);
8280         if (rc) {
8281                 dev_err(&ctrl_info->pci_dev->dev,
8282                         "error obtaining product details\n");
8283                 return rc;
8284         }
8285
8286         rc = pqi_get_ctrl_serial_number(ctrl_info);
8287         if (rc) {
8288                 dev_err(&ctrl_info->pci_dev->dev,
8289                         "error obtaining ctrl serial number\n");
8290                 return rc;
8291         }
8292
8293         rc = pqi_set_diag_rescan(ctrl_info);
8294         if (rc) {
8295                 dev_err(&ctrl_info->pci_dev->dev,
8296                         "error enabling multi-lun rescan\n");
8297                 return rc;
8298         }
8299
8300         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8301         if (rc) {
8302                 dev_err(&ctrl_info->pci_dev->dev,
8303                         "error updating host wellness\n");
8304                 return rc;
8305         }
8306
8307         pqi_schedule_update_time_worker(ctrl_info);
8308
8309         pqi_scan_scsi_devices(ctrl_info);
8310
8311         return 0;
8312 }
8313
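/*
 * Reset the driver's cached producer/consumer indices and zero the
 * index locations shared with the controller for the admin, I/O, and
 * event queues before the queues are recreated on resume.
 */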
8314 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8315 {
8316         unsigned int i;
8317         struct pqi_admin_queues *admin_queues;
8318         struct pqi_event_queue *event_queue;
8319
8320         admin_queues = &ctrl_info->admin_queues;
8321         admin_queues->iq_pi_copy = 0;
8322         admin_queues->oq_ci_copy = 0;
8323         writel(0, admin_queues->oq_pi);
8324
8325         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8326                 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8327                 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8328                 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8329
8330                 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8331                 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8332                 writel(0, ctrl_info->queue_groups[i].oq_pi);
8333         }
8334
8335         event_queue = &ctrl_info->event_queue;
8336         writel(0, event_queue->oq_pi);
8337         event_queue->oq_ci_copy = 0;
8338 }
8339
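/*
 * Abbreviated re-initialization used after OFA and on resume: force the
 * controller back into SIS mode, repeat the SIS -> PQI transition, and
 * reuse the previously allocated queues and SCSI host registration
 * instead of rebuilding them from scratch.
 */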
8340 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8341 {
8342         int rc;
8343
8344         rc = pqi_force_sis_mode(ctrl_info);
8345         if (rc)
8346                 return rc;
8347
8348         /*
8349          * Wait until the controller is ready to start accepting SIS
8350          * commands.
8351          */
8352         rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8353         if (rc)
8354                 return rc;
8355
8356         /*
8357          * Get the controller properties.  This allows us to determine
8358          * whether or not it supports PQI mode.
8359          */
8360         rc = sis_get_ctrl_properties(ctrl_info);
8361         if (rc) {
8362                 dev_err(&ctrl_info->pci_dev->dev,
8363                         "error obtaining controller properties\n");
8364                 return rc;
8365         }
8366
8367         rc = sis_get_pqi_capabilities(ctrl_info);
8368         if (rc) {
8369                 dev_err(&ctrl_info->pci_dev->dev,
8370                         "error obtaining controller capabilities\n");
8371                 return rc;
8372         }
8373
8374         /*
8375          * If the function we are about to call succeeds, the
8376          * controller will transition from legacy SIS mode
8377          * into PQI mode.
8378          */
8379         rc = sis_init_base_struct_addr(ctrl_info);
8380         if (rc) {
8381                 dev_err(&ctrl_info->pci_dev->dev,
8382                         "error initializing PQI mode\n");
8383                 return rc;
8384         }
8385
8386         /* Wait for the controller to complete the SIS -> PQI transition. */
8387         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8388         if (rc) {
8389                 dev_err(&ctrl_info->pci_dev->dev,
8390                         "transition to PQI mode failed\n");
8391                 return rc;
8392         }
8393
8394         /* From here on, we are running in PQI mode. */
8395         ctrl_info->pqi_mode_enabled = true;
8396         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8397
8398         pqi_reinit_queues(ctrl_info);
8399
8400         rc = pqi_create_admin_queues(ctrl_info);
8401         if (rc) {
8402                 dev_err(&ctrl_info->pci_dev->dev,
8403                         "error creating admin queues\n");
8404                 return rc;
8405         }
8406
8407         rc = pqi_create_queues(ctrl_info);
8408         if (rc)
8409                 return rc;
8410
8411         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8412
8413         ctrl_info->controller_online = true;
8414         pqi_ctrl_unblock_requests(ctrl_info);
8415
8416         pqi_ctrl_reset_config(ctrl_info);
8417
8418         rc = pqi_process_config_table(ctrl_info);
8419         if (rc)
8420                 return rc;
8421
8422         pqi_start_heartbeat_timer(ctrl_info);
8423
8424         if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8425                 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8426                 if (rc) {
8427                         dev_err(&ctrl_info->pci_dev->dev,
8428                                 "error obtaining advanced RAID bypass configuration\n");
8429                         return rc;
8430                 }
8431                 ctrl_info->ciss_report_log_flags |=
8432                         CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8433         }
8434
8435         rc = pqi_enable_events(ctrl_info);
8436         if (rc) {
8437                 dev_err(&ctrl_info->pci_dev->dev,
8438                         "error enabling events\n");
8439                 return rc;
8440         }
8441
8442         rc = pqi_get_ctrl_product_details(ctrl_info);
8443         if (rc) {
8444                 dev_err(&ctrl_info->pci_dev->dev,
8445                         "error obtaining product details\n");
8446                 return rc;
8447         }
8448
8449         rc = pqi_set_diag_rescan(ctrl_info);
8450         if (rc) {
8451                 dev_err(&ctrl_info->pci_dev->dev,
8452                         "error enabling multi-lun rescan\n");
8453                 return rc;
8454         }
8455
8456         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8457         if (rc) {
8458                 dev_err(&ctrl_info->pci_dev->dev,
8459                         "error updating host wellness\n");
8460                 return rc;
8461         }
8462
8463         if (pqi_ofa_in_progress(ctrl_info))
8464                 pqi_ctrl_unblock_scan(ctrl_info);
8465
8466         pqi_scan_scsi_devices(ctrl_info);
8467
8468         return 0;
8469 }
8470
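/*
 * pcie_capability_clear_and_set_word() returns a PCIBIOS_* status code,
 * so translate it into a standard errno for callers of this helper.
 */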
8471 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8472 {
8473         int rc;
8474
8475         rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8476                 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8477
8478         return pcibios_err_to_errno(rc);
8479 }
8480
8481 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8482 {
8483         int rc;
8484         u64 mask;
8485
8486         rc = pci_enable_device(ctrl_info->pci_dev);
8487         if (rc) {
8488                 dev_err(&ctrl_info->pci_dev->dev,
8489                         "failed to enable PCI device\n");
8490                 return rc;
8491         }
8492
8493         if (sizeof(dma_addr_t) > 4)
8494                 mask = DMA_BIT_MASK(64);
8495         else
8496                 mask = DMA_BIT_MASK(32);
8497
8498         rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8499         if (rc) {
8500                 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8501                 goto disable_device;
8502         }
8503
8504         rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8505         if (rc) {
8506                 dev_err(&ctrl_info->pci_dev->dev,
8507                         "failed to obtain PCI resources\n");
8508                 goto disable_device;
8509         }
8510
8511         ctrl_info->iomem_base = ioremap(pci_resource_start(
8512                 ctrl_info->pci_dev, 0),
8513                 pci_resource_len(ctrl_info->pci_dev, 0));
8514         if (!ctrl_info->iomem_base) {
8515                 dev_err(&ctrl_info->pci_dev->dev,
8516                         "failed to map memory for controller registers\n");
8517                 rc = -ENOMEM;
8518                 goto release_regions;
8519         }
8520
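/*
 * Completion Timeout Value encoding for the Device Control 2 register:
 * 0x6 selects the 65 ms - 210 ms range defined by the PCIe spec.
 */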
8521 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS               0x6
8522
8523         /* Increase the PCIe completion timeout. */
8524         rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8525                 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8526         if (rc) {
8527                 dev_err(&ctrl_info->pci_dev->dev,
8528                         "failed to set PCIe completion timeout\n");
8529                 goto release_regions;
8530         }
8531
8532         /* Enable bus mastering. */
8533         pci_set_master(ctrl_info->pci_dev);
8534
8535         ctrl_info->registers = ctrl_info->iomem_base;
8536         ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8537
8538         pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8539
8540         return 0;
8541
8542 release_regions:
8543         pci_release_regions(ctrl_info->pci_dev);
8544 disable_device:
8545         pci_disable_device(ctrl_info->pci_dev);
8546
8547         return rc;
8548 }
8549
8550 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8551 {
8552         iounmap(ctrl_info->iomem_base);
8553         pci_release_regions(ctrl_info->pci_dev);
8554         if (pci_is_enabled(ctrl_info->pci_dev))
8555                 pci_disable_device(ctrl_info->pci_dev);
8556         pci_set_drvdata(ctrl_info->pci_dev, NULL);
8557 }
8558
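/*
 * Allocate the per-controller state block on the controller's NUMA node
 * and set up everything that must be valid before initialization begins:
 * locks, work items, timers, and conservative feature defaults.
 */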
8559 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8560 {
8561         struct pqi_ctrl_info *ctrl_info;
8562
8563         ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8564                         GFP_KERNEL, numa_node);
8565         if (!ctrl_info)
8566                 return NULL;
8567
8568         mutex_init(&ctrl_info->scan_mutex);
8569         mutex_init(&ctrl_info->lun_reset_mutex);
8570         mutex_init(&ctrl_info->ofa_mutex);
8571
8572         INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8573         spin_lock_init(&ctrl_info->scsi_device_list_lock);
8574
8575         INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8576         atomic_set(&ctrl_info->num_interrupts, 0);
8577
8578         INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8579         INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8580
8581         timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8582         INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8583
8584         INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8585         INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8586
8587         sema_init(&ctrl_info->sync_request_sem,
8588                 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8589         init_waitqueue_head(&ctrl_info->block_requests_wait);
8590
8591         ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8592         ctrl_info->irq_mode = IRQ_MODE_NONE;
8593         ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8594
8595         ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8596         ctrl_info->max_transfer_encrypted_sas_sata =
8597                 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8598         ctrl_info->max_transfer_encrypted_nvme =
8599                 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8600         ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8601         ctrl_info->max_write_raid_1_10_2drive = ~0;
8602         ctrl_info->max_write_raid_1_10_3drive = ~0;
8603         ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
8604
8605         return ctrl_info;
8606 }
8607
8608 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8609 {
8610         kfree(ctrl_info);
8611 }
8612
8613 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8614 {
8615         pqi_free_irqs(ctrl_info);
8616         pqi_disable_msix_interrupts(ctrl_info);
8617 }
8618
8619 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8620 {
8621         pqi_free_interrupts(ctrl_info);
8622         if (ctrl_info->queue_memory_base)
8623                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8624                         ctrl_info->queue_memory_length,
8625                         ctrl_info->queue_memory_base,
8626                         ctrl_info->queue_memory_base_dma_handle);
8627         if (ctrl_info->admin_queue_memory_base)
8628                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8629                         ctrl_info->admin_queue_memory_length,
8630                         ctrl_info->admin_queue_memory_base,
8631                         ctrl_info->admin_queue_memory_base_dma_handle);
8632         pqi_free_all_io_requests(ctrl_info);
8633         if (ctrl_info->error_buffer)
8634                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8635                         ctrl_info->error_buffer_length,
8636                         ctrl_info->error_buffer,
8637                         ctrl_info->error_buffer_dma_handle);
8638         if (ctrl_info->iomem_base)
8639                 pqi_cleanup_pci_init(ctrl_info);
8640         pqi_free_ctrl_info(ctrl_info);
8641 }
8642
8643 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8644 {
8645         ctrl_info->controller_online = false;
8646         pqi_stop_heartbeat_timer(ctrl_info);
8647         pqi_ctrl_block_requests(ctrl_info);
8648         pqi_cancel_rescan_worker(ctrl_info);
8649         pqi_cancel_update_time_worker(ctrl_info);
8650         if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8651                 pqi_fail_all_outstanding_requests(ctrl_info);
8652                 ctrl_info->pqi_mode_enabled = false;
8653         }
8654         pqi_unregister_scsi(ctrl_info);
8655         if (ctrl_info->pqi_mode_enabled)
8656                 pqi_revert_to_sis_mode(ctrl_info);
8657         pqi_free_ctrl_resources(ctrl_info);
8658 }
8659
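/*
 * Quiesce the controller for Online Firmware Activation: block new
 * scans, SCSI requests, device resets, and driver-internal requests,
 * wait for in-flight work to drain, then stop the heartbeat timer.
 * pqi_ofa_ctrl_unquiesce() undoes these steps in reverse order.
 */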
8660 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8661 {
8662         pqi_ctrl_block_scan(ctrl_info);
8663         pqi_scsi_block_requests(ctrl_info);
8664         pqi_ctrl_block_device_reset(ctrl_info);
8665         pqi_ctrl_block_requests(ctrl_info);
8666         pqi_ctrl_wait_until_quiesced(ctrl_info);
8667         pqi_stop_heartbeat_timer(ctrl_info);
8668 }
8669
8670 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8671 {
8672         pqi_start_heartbeat_timer(ctrl_info);
8673         pqi_ctrl_unblock_requests(ctrl_info);
8674         pqi_ctrl_unblock_device_reset(ctrl_info);
8675         pqi_scsi_unblock_requests(ctrl_info);
8676         pqi_ctrl_unblock_scan(ctrl_info);
8677 }
8678
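/*
 * Back the OFA host buffer with sg_count DMA-coherent chunks of
 * chunk_size bytes each and publish them to the controller as SG
 * descriptors; the final descriptor is flagged CISS_SG_LAST.
 */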
8679 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
8680 {
8681         int i;
8682         u32 sg_count;
8683         struct device *dev;
8684         struct pqi_ofa_memory *ofap;
8685         struct pqi_sg_descriptor *mem_descriptor;
8686         dma_addr_t dma_handle;
8687
8688         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8689
8690         sg_count = DIV_ROUND_UP(total_size, chunk_size);
8691         if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
8692                 goto out;
8693
8694         ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8695         if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8696                 goto out;
8697
8698         dev = &ctrl_info->pci_dev->dev;
8699
8700         for (i = 0; i < sg_count; i++) {
8701                 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
8702                         dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8703                 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
8704                         goto out_free_chunks;
8705                 mem_descriptor = &ofap->sg_descriptor[i];
8706                 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8707                 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8708         }
8709
8710         put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8711         put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
8712         put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8713
8714         return 0;
8715
8716 out_free_chunks:
8717         while (--i >= 0) {
8718                 mem_descriptor = &ofap->sg_descriptor[i];
8719                 dma_free_coherent(dev, chunk_size,
8720                         ctrl_info->pqi_ofa_chunk_virt_addr[i],
8721                         get_unaligned_le64(&mem_descriptor->address));
8722         }
8723         kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8724
8725 out:
8726         return -ENOMEM;
8727 }
8728
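/*
 * Satisfy the firmware's OFA memory request with as few chunks as
 * possible: start with one chunk covering the whole request and halve
 * the (page-aligned) chunk size after each failed attempt, down to the
 * smallest size that still fits within the SG descriptor limit.
 */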
8729 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8730 {
8731         u32 total_size;
8732         u32 chunk_size;
8733         u32 min_chunk_size;
8734
8735         if (ctrl_info->ofa_bytes_requested == 0)
8736                 return 0;
8737
8738         total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8739         min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8740         min_chunk_size = PAGE_ALIGN(min_chunk_size);
8741
8742         for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8743                 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
8744                         return 0;
                if (chunk_size == PAGE_SIZE)
                        break; /* PAGE_ALIGN(PAGE_SIZE / 2) == PAGE_SIZE: halving would stall */
8745                 chunk_size /= 2;
8746                 chunk_size = PAGE_ALIGN(chunk_size);
8747         }
8748
8749         return -ENOMEM;
8750 }
8751
8752 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
8753 {
8754         struct device *dev;
8755         struct pqi_ofa_memory *ofap;
8756
8757         dev = &ctrl_info->pci_dev->dev;
8758
8759         ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8760                 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8761         if (!ofap)
8762                 return;
8763
8764         ctrl_info->pqi_ofa_mem_virt_addr = ofap;
8765
8766         if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8767                 dev_err(dev,
8768                         "failed to allocate host buffer for Online Firmware Activation\n");
8769                 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8770                 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8771                 return;
8772         }
8773
8774         put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8775         memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
8776 }
8777
8778 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8779 {
8780         unsigned int i;
8781         struct device *dev;
8782         struct pqi_ofa_memory *ofap;
8783         struct pqi_sg_descriptor *mem_descriptor;
8784         unsigned int num_memory_descriptors;
8785
8786         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8787         if (!ofap)
8788                 return;
8789
8790         dev = &ctrl_info->pci_dev->dev;
8791
8792         if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8793                 goto out;
8794
8795         mem_descriptor = ofap->sg_descriptor;
8796         num_memory_descriptors =
8797                 get_unaligned_le16(&ofap->num_memory_descriptors);
8798
8799         for (i = 0; i < num_memory_descriptors; i++) {
8800                 dma_free_coherent(dev,
8801                         get_unaligned_le32(&mem_descriptor[i].length),
8802                         ctrl_info->pqi_ofa_chunk_virt_addr[i],
8803                         get_unaligned_le64(&mem_descriptor[i].address));
8804         }
8805         kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8806
8807 out:
8808         dma_free_coherent(dev, sizeof(*ofap), ofap,
8809                 ctrl_info->pqi_ofa_mem_dma_handle);
8810         ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8811 }
8812
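/*
 * Report the OFA host buffer to the controller. When no buffer was
 * allocated, the zeroed request (NULL address, zero length) in effect
 * tells the firmware that no host memory is available for OFA.
 */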
8813 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8814 {
8815         u32 buffer_length;
8816         struct pqi_vendor_general_request request;
8817         struct pqi_ofa_memory *ofap;
8818
8819         memset(&request, 0, sizeof(request));
8820
8821         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8822         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8823                 &request.header.iu_length);
8824         put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8825                 &request.function_code);
8826
8827         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8828
8829         if (ofap) {
8830                 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8831                         get_unaligned_le16(&ofap->num_memory_descriptors) *
8832                         sizeof(struct pqi_sg_descriptor);
8833
8834                 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8835                         &request.data.ofa_memory_allocation.buffer_address);
8836                 put_unaligned_le32(buffer_length,
8837                         &request.data.ofa_memory_allocation.buffer_length);
8838         }
8839
8840         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8841 }
8842
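/*
 * The delay presumably gives the newly activated firmware image time to
 * boot before the driver re-runs its resume-style initialization.
 */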
8843 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8844 {
8845         ssleep(delay_secs);
8846
8847         return pqi_ctrl_init_resume(ctrl_info);
8848 }
8849
8850 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8851         .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8852         .status = SAM_STAT_CHECK_CONDITION,
8853 };
8854
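/*
 * Complete every request still holding a pool reference: commands from
 * the SCSI midlayer are failed with DID_NO_CONNECT, while
 * driver-internal requests get -ENXIO plus a canned CHECK CONDITION
 * error block so that any synchronous waiters are woken.
 */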
8855 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8856 {
8857         unsigned int i;
8858         struct pqi_io_request *io_request;
8859         struct scsi_cmnd *scmd;
8860         struct scsi_device *sdev;
8861
8862         for (i = 0; i < ctrl_info->max_io_slots; i++) {
8863                 io_request = &ctrl_info->io_request_pool[i];
8864                 if (atomic_read(&io_request->refcount) == 0)
8865                         continue;
8866
8867                 scmd = io_request->scmd;
8868                 if (scmd) {
8869                         sdev = scmd->device;
8870                         if (!sdev || !scsi_device_online(sdev)) {
8871                                 pqi_free_io_request(io_request);
8872                                 continue;
8873                         }
8874                         set_host_byte(scmd, DID_NO_CONNECT);
8876                 } else {
8877                         io_request->status = -ENXIO;
8878                         io_request->error_info =
8879                                 &pqi_ctrl_offline_raid_error_info;
8880                 }
8881
8882                 io_request->io_complete_callback(io_request,
8883                         io_request->context);
8884         }
8885 }
8886
8887 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
8888 {
8889         pqi_perform_lockup_action();
8890         pqi_stop_heartbeat_timer(ctrl_info);
8891         pqi_free_interrupts(ctrl_info);
8892         pqi_cancel_rescan_worker(ctrl_info);
8893         pqi_cancel_update_time_worker(ctrl_info);
8894         pqi_ctrl_wait_until_quiesced(ctrl_info);
8895         pqi_fail_all_outstanding_requests(ctrl_info);
8896         pqi_ctrl_unblock_requests(ctrl_info);
8897 }
8898
8899 static void pqi_ctrl_offline_worker(struct work_struct *work)
8900 {
8901         struct pqi_ctrl_info *ctrl_info;
8902
8903         ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8904         pqi_take_ctrl_offline_deferred(ctrl_info);
8905 }
8906
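/*
 * First stage of taking a failed controller offline: mark it dead,
 * block new requests, and (unless disabled via pqi_disable_ctrl_shutdown)
 * shut the controller down via SIS with the given reason code. The
 * heavier teardown is deferred to pqi_ctrl_offline_worker, presumably
 * so this path remains callable from atomic context.
 */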
8907 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
8908         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
8909 {
8910         if (!ctrl_info->controller_online)
8911                 return;
8912
8913         ctrl_info->controller_online = false;
8914         ctrl_info->pqi_mode_enabled = false;
8915         pqi_ctrl_block_requests(ctrl_info);
8916         if (!pqi_disable_ctrl_shutdown)
8917                 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
8918         pci_disable_device(ctrl_info->pci_dev);
8919         dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
8920         schedule_work(&ctrl_info->ctrl_offline_work);
8921 }
8922
8923 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
8924         const struct pci_device_id *id)
8925 {
8926         char *ctrl_description;
8927
8928         if (id->driver_data)
8929                 ctrl_description = (char *)id->driver_data;
8930         else
8931                 ctrl_description = "Microchip Smart Family Controller";
8932
8933         dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
8934 }
8935
8936 static int pqi_pci_probe(struct pci_dev *pci_dev,
8937         const struct pci_device_id *id)
8938 {
8939         int rc;
8940         int node;
8941         struct pqi_ctrl_info *ctrl_info;
8942
8943         pqi_print_ctrl_info(pci_dev, id);
8944
8945         if (pqi_disable_device_id_wildcards &&
8946                 id->subvendor == PCI_ANY_ID &&
8947                 id->subdevice == PCI_ANY_ID) {
8948                 dev_warn(&pci_dev->dev,
8949                         "controller not probed because device ID wildcards are disabled\n");
8950                 return -ENODEV;
8951         }
8952
8953         if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
8954                 dev_warn(&pci_dev->dev,
8955                         "controller device ID matched using wildcards\n");
8956
8957         node = dev_to_node(&pci_dev->dev);
8958         if (node == NUMA_NO_NODE) {
8959                 node = cpu_to_node(0);
8960                 if (node == NUMA_NO_NODE)
8961                         node = 0;
8962                 set_dev_node(&pci_dev->dev, node);
8963         }
8964
8965         ctrl_info = pqi_alloc_ctrl_info(node);
8966         if (!ctrl_info) {
8967                 dev_err(&pci_dev->dev,
8968                         "failed to allocate controller info block\n");
8969                 return -ENOMEM;
8970         }
8971         ctrl_info->numa_node = node;
8972
8973         ctrl_info->pci_dev = pci_dev;
8974
8975         rc = pqi_pci_init(ctrl_info);
8976         if (rc)
8977                 goto error;
8978
8979         rc = pqi_ctrl_init(ctrl_info);
8980         if (rc)
8981                 goto error;
8982
8983         return 0;
8984
8985 error:
8986         pqi_remove_ctrl(ctrl_info);
8987
8988         return rc;
8989 }
8990
8991 static void pqi_pci_remove(struct pci_dev *pci_dev)
8992 {
8993         struct pqi_ctrl_info *ctrl_info;
8994         u16 vendor_id;
8995         int rc;
8996
8997         ctrl_info = pci_get_drvdata(pci_dev);
8998         if (!ctrl_info)
8999                 return;
9000
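        /*
         * A subsystem-vendor-ID read of all ones means the device is no
         * longer responding on the bus, i.e. it was surprise-removed, so
         * the cache flush below must be skipped.
         */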
9001         pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9002         if (vendor_id == 0xffff)
9003                 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9004         else
9005                 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9006
9007         if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
9008                 rc = pqi_flush_cache(ctrl_info, RESTART);
9009                 if (rc)
9010                         dev_err(&pci_dev->dev,
9011                                 "unable to flush controller cache during remove\n");
9012         }
9013
9014         pqi_remove_ctrl(ctrl_info);
9015 }
9016
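/*
 * Last-chance leak check before reset/power-off: any I/O slot still
 * holding a reference was never completed. Exactly one of the two
 * WARN_ON()s fires per leaked slot, identifying whether the request
 * came from the SCSI midlayer or from the driver itself.
 */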
9017 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9018 {
9019         unsigned int i;
9020         struct pqi_io_request *io_request;
9021         struct scsi_cmnd *scmd;
9022
9023         for (i = 0; i < ctrl_info->max_io_slots; i++) {
9024                 io_request = &ctrl_info->io_request_pool[i];
9025                 if (atomic_read(&io_request->refcount) == 0)
9026                         continue;
9027                 scmd = io_request->scmd;
9028                 WARN_ON(scmd != NULL); /* I/O command from the SCSI midlayer */
9029                 WARN_ON(scmd == NULL); /* non-I/O or driver-initiated command */
9030         }
9031 }
9032
9033 static void pqi_shutdown(struct pci_dev *pci_dev)
9034 {
9035         int rc;
9036         struct pqi_ctrl_info *ctrl_info;
9037         enum bmic_flush_cache_shutdown_event shutdown_event;
9038
9039         ctrl_info = pci_get_drvdata(pci_dev);
9040         if (!ctrl_info) {
9041                 dev_err(&pci_dev->dev,
9042                         "cache could not be flushed\n");
9043                 return;
9044         }
9045
9046         pqi_wait_until_ofa_finished(ctrl_info);
9047
9048         pqi_scsi_block_requests(ctrl_info);
9049         pqi_ctrl_block_device_reset(ctrl_info);
9050         pqi_ctrl_block_requests(ctrl_info);
9051         pqi_ctrl_wait_until_quiesced(ctrl_info);
9052
9053         if (system_state == SYSTEM_RESTART)
9054                 shutdown_event = RESTART;
9055         else
9056                 shutdown_event = SHUTDOWN;
9057
9058         /*
9059          * Write all data in the controller's battery-backed cache to
9060          * storage.
9061          */
9062         rc = pqi_flush_cache(ctrl_info, shutdown_event);
9063         if (rc)
9064                 dev_err(&pci_dev->dev,
9065                         "unable to flush controller cache\n");
9066
9067         pqi_crash_if_pending_command(ctrl_info);
9068         pqi_reset(ctrl_info);
9069 }
9070
9071 static void pqi_process_lockup_action_param(void)
9072 {
9073         unsigned int i;
9074
9075         if (!pqi_lockup_action_param)
9076                 return;
9077
9078         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9079                 if (strcmp(pqi_lockup_action_param,
9080                         pqi_lockup_actions[i].name) == 0) {
9081                         pqi_lockup_action = pqi_lockup_actions[i].action;
9082                         return;
9083                 }
9084         }
9085
9086         pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9087                 DRIVER_NAME_SHORT, pqi_lockup_action_param);
9088 }
9089
9090 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS           30
9091 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS           (30 * 60)
9092
9093 static void pqi_process_ctrl_ready_timeout_param(void)
9094 {
9095         if (pqi_ctrl_ready_timeout_secs == 0)
9096                 return;
9097
9098         if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9099                 pr_warn("%s: ctrl_ready_timeout parameter of %u seconds is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9100                         DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9101                 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9102         } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9103                 pr_warn("%s: ctrl_ready_timeout parameter of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9104                         DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9105                 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9106         }
9107
9108         sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9109 }
9110
9111 static void pqi_process_module_params(void)
9112 {
9113         pqi_process_lockup_action_param();
9114         pqi_process_ctrl_ready_timeout_param();
9115 }
9116
9117 #if defined(CONFIG_PM)
9118
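/*
 * The Adaptec board with subsystem device ID 0x1304 is special-cased to
 * receive a RESTART flush-cache event on suspend rather than SUSPEND,
 * presumably a requirement of that board's firmware.
 */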
9119 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9120 {
9121         if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9122                 return RESTART;
9123
9124         return SUSPEND;
9125 }
9126
9127 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
9128 {
9129         struct pci_dev *pci_dev;
9130         struct pqi_ctrl_info *ctrl_info;
9131
9132         pci_dev = to_pci_dev(dev);
9133         ctrl_info = pci_get_drvdata(pci_dev);
9134
9135         pqi_wait_until_ofa_finished(ctrl_info);
9136
9137         pqi_ctrl_block_scan(ctrl_info);
9138         pqi_scsi_block_requests(ctrl_info);
9139         pqi_ctrl_block_device_reset(ctrl_info);
9140         pqi_ctrl_block_requests(ctrl_info);
9141         pqi_ctrl_wait_until_quiesced(ctrl_info);
9142
9143         if (suspend) {
9144                 enum bmic_flush_cache_shutdown_event shutdown_event;
9145
9146                 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9147                 pqi_flush_cache(ctrl_info, shutdown_event);
9148         }
9149
9150         pqi_stop_heartbeat_timer(ctrl_info);
9151         pqi_crash_if_pending_command(ctrl_info);
9152         pqi_free_irqs(ctrl_info);
9153
9154         ctrl_info->controller_online = false;
9155         ctrl_info->pqi_mode_enabled = false;
9156
9157         return 0;
9158 }
9159
9160 static __maybe_unused int pqi_suspend(struct device *dev)
9161 {
9162         return pqi_suspend_or_freeze(dev, true);
9163 }
9164
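/*
 * Shared resume/restore handler: reacquire IRQs, lift the gates taken
 * in the suspend path, give the controller a short settling delay, and
 * then re-run the resume-flavored initialization sequence.
 */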
9165 static int pqi_resume_or_restore(struct device *dev)
9166 {
9167         int rc;
9168         struct pci_dev *pci_dev;
9169         struct pqi_ctrl_info *ctrl_info;
9170
9171         pci_dev = to_pci_dev(dev);
9172         ctrl_info = pci_get_drvdata(pci_dev);
9173
9174         rc = pqi_request_irqs(ctrl_info);
9175         if (rc)
9176                 return rc;
9177
9178         pqi_ctrl_unblock_device_reset(ctrl_info);
9179         pqi_ctrl_unblock_requests(ctrl_info);
9180         pqi_scsi_unblock_requests(ctrl_info);
9181         pqi_ctrl_unblock_scan(ctrl_info);
9182
9183         ssleep(PQI_POST_RESET_DELAY_SECS);
9184
9185         return pqi_ctrl_init_resume(ctrl_info);
9186 }
9187
9188 static int pqi_freeze(struct device *dev)
9189 {
9190         return pqi_suspend_or_freeze(dev, false);
9191 }
9192
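/*
 * Thaw is the counterpart of freeze during hibernation: power was never
 * removed, so the controller should still be in PQI mode and only the
 * IRQs and request gating need to be restored.
 */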
9193 static int pqi_thaw(struct device *dev)
9194 {
9195         int rc;
9196         struct pci_dev *pci_dev;
9197         struct pqi_ctrl_info *ctrl_info;
9198
9199         pci_dev = to_pci_dev(dev);
9200         ctrl_info = pci_get_drvdata(pci_dev);
9201
9202         rc = pqi_request_irqs(ctrl_info);
9203         if (rc)
9204                 return rc;
9205
9206         ctrl_info->controller_online = true;
9207         ctrl_info->pqi_mode_enabled = true;
9208
9209         pqi_ctrl_unblock_device_reset(ctrl_info);
9210         pqi_ctrl_unblock_requests(ctrl_info);
9211         pqi_scsi_unblock_requests(ctrl_info);
9212         pqi_ctrl_unblock_scan(ctrl_info);
9213
9214         return 0;
9215 }
9216
9217 static int pqi_poweroff(struct device *dev)
9218 {
9219         struct pci_dev *pci_dev;
9220         struct pqi_ctrl_info *ctrl_info;
9221         enum bmic_flush_cache_shutdown_event shutdown_event;
9222
9223         pci_dev = to_pci_dev(dev);
9224         ctrl_info = pci_get_drvdata(pci_dev);
9225
9226         shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9227         pqi_flush_cache(ctrl_info, shutdown_event);
9228
9229         return 0;
9230 }
9231
9232 static const struct dev_pm_ops pqi_pm_ops = {
9233         .suspend = pqi_suspend,
9234         .resume = pqi_resume_or_restore,
9235         .freeze = pqi_freeze,
9236         .thaw = pqi_thaw,
9237         .poweroff = pqi_poweroff,
9238         .restore = pqi_resume_or_restore,
9239 };
9240
9241 #endif /* CONFIG_PM */
9242
9243 /* Define the PCI IDs for the controllers that we support. */
9244 static const struct pci_device_id pqi_pci_id_table[] = {
9245         {
9246                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9247                                0x105b, 0x1211)
9248         },
9249         {
9250                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9251                                0x105b, 0x1321)
9252         },
9253         {
9254                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9255                                0x152d, 0x8a22)
9256         },
9257         {
9258                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9259                                0x152d, 0x8a23)
9260         },
9261         {
9262                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9263                                0x152d, 0x8a24)
9264         },
9265         {
9266                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9267                                0x152d, 0x8a36)
9268         },
9269         {
9270                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9271                                0x152d, 0x8a37)
9272         },
9273         {
9274                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9275                                0x193d, 0x1104)
9276         },
9277         {
9278                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9279                                0x193d, 0x1105)
9280         },
9281         {
9282                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9283                                0x193d, 0x1106)
9284         },
9285         {
9286                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9287                                0x193d, 0x1107)
9288         },
9289         {
9290                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9291                                0x193d, 0x1108)
9292         },
9293         {
9294                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9295                                0x193d, 0x1109)
9296         },
9297         {
9298                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9299                                0x193d, 0x110b)
9300         },
9301         {
9302                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9303                                0x193d, 0x8460)
9304         },
9305         {
9306                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9307                                0x193d, 0x8461)
9308         },
9309         {
9310                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9311                                0x193d, 0xc460)
9312         },
9313         {
9314                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9315                                0x193d, 0xc461)
9316         },
9317         {
9318                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9319                                0x193d, 0xf460)
9320         },
9321         {
9322                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9323                                0x193d, 0xf461)
9324         },
9325         {
9326                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9327                                0x1bd4, 0x0045)
9328         },
9329         {
9330                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9331                                0x1bd4, 0x0046)
9332         },
9333         {
9334                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9335                                0x1bd4, 0x0047)
9336         },
9337         {
9338                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9339                                0x1bd4, 0x0048)
9340         },
9341         {
9342                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9343                                0x1bd4, 0x004a)
9344         },
9345         {
9346                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9347                                0x1bd4, 0x004b)
9348         },
9349         {
9350                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9351                                0x1bd4, 0x004c)
9352         },
9353         {
9354                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9355                                0x1bd4, 0x004f)
9356         },
9357         {
9358                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9359                                0x1bd4, 0x0051)
9360         },
9361         {
9362                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9363                                0x1bd4, 0x0052)
9364         },
9365         {
9366                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9367                                0x1bd4, 0x0053)
9368         },
9369         {
9370                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9371                                0x1bd4, 0x0054)
9372         },
9373         {
9374                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9375                                0x1bd4, 0x006b)
9376         },
9377         {
9378                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9379                                0x1bd4, 0x006c)
9380         },
9381         {
9382                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9383                                0x1bd4, 0x006d)
9384         },
9385         {
9386                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9387                                0x1bd4, 0x006f)
9388         },
9389         {
9390                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9391                                0x1bd4, 0x0070)
9392         },
9393         {
9394                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9395                                0x1bd4, 0x0071)
9396         },
9397         {
9398                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9399                                0x1bd4, 0x0072)
9400         },
9401         {
9402                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9403                                0x1bd4, 0x0086)
9404         },
9405         {
9406                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9407                                0x1bd4, 0x0087)
9408         },
9409         {
9410                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9411                                0x1bd4, 0x0088)
9412         },
9413         {
9414                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9415                                0x1bd4, 0x0089)
9416         },
9417         {
9418                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9419                                0x19e5, 0xd227)
9420         },
9421         {
9422                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9423                                0x19e5, 0xd228)
9424         },
9425         {
9426                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9427                                0x19e5, 0xd229)
9428         },
9429         {
9430                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9431                                0x19e5, 0xd22a)
9432         },
9433         {
9434                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9435                                0x19e5, 0xd22b)
9436         },
9437         {
9438                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9439                                0x19e5, 0xd22c)
9440         },
9441         {
9442                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9443                                PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9444         },
9445         {
9446                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9447                                PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9448         },
9449         {
9450                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9451                                PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9452         },
9453         {
9454                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9455                                PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9456         },
9457         {
9458                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9459                                PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9460         },
9461         {
9462                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9463                                PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9464         },
9465         {
9466                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9467                                PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9468         },
9469         {
9470                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9471                                PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9472         },
9473         {
9474                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9475                                PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9476         },
9477         {
9478                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9479                                PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9480         },
9481         {
9482                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9483                                PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9484         },
9485         {
9486                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9487                                PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9488         },
9489         {
9490                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9491                                PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9492         },
9493         {
9494                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9495                                PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9496         },
9497         {
9498                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9499                                PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9500         },
9501         {
9502                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9503                                PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9504         },
9505         {
9506                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9507                                PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9508         },
9509         {
9510                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9511                                PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9512         },
9513         {
9514                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9515                                PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9516         },
9517         {
9518                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9519                                PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9520         },
9521         {
9522                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9523                                PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9524         },
9525         {
9526                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9527                                PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9528         },
9529         {
9530                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9531                                PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9532         },
9533         {
9534                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9535                                PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9536         },
9537         {
9538                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9539                                PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9540         },
9541         {
9542                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9543                                PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9544         },
9545         {
9546                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9547                                PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9548         },
9549         {
9550                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9551                                PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9552         },
9553         {
9554                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9555                                PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9556         },
9557         {
9558                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9559                                PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9560         },
9561         {
9562                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9563                                PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9564         },
9565         {
9566                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9567                                PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9568         },
9569         {
9570                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9571                                PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9572         },
9573         {
9574                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9575                                PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9576         },
9577         {
9578                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9579                                PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9580         },
9581         {
9582                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9583                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9584         },
9585         {
9586                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9587                                PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9588         },
9589         {
9590                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9591                                PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9592         },
9593         {
9594                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9595                                PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9596         },
9597         {
9598                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9599                                PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9600         },
9601         {
9602                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9603                                PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9604         },
9605         {
9606                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9607                                PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9608         },
9609         {
9610                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9611                                PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9612         },
9613         {
9614                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9615                                PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9616         },
9617         {
9618                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9619                                PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9620         },
9621         {
9622                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9623                                PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9624         },
9625         {
9626                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9627                                PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9628         },
9629         {
9630                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9631                                PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9632         },
9633         {
9634                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9635                                PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9636         },
9637         {
9638                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9639                                PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9640         },
9641         {
9642                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9643                                PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9644         },
9645         {
9646                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9647                                PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9648         },
9649         {
9650                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9651                                PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9652         },
9653         {
9654                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9655                                PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9656         },
9657         {
9658                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9659                                PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9660         },
9661         {
9662                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9663                                PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9664         },
9665         {
9666                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9667                                PCI_VENDOR_ID_ADAPTEC2, 0x1475)
9668         },
9669         {
9670                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9671                                PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9672         },
9673         {
9674                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9675                                PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9676         },
9677         {
9678                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9679                                PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9680         },
9681         {
9682                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9683                                PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9684         },
9685         {
9686                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9687                                PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9688         },
9689         {
9690                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9691                                PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9692         },
9693         {
9694                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9695                                PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9696         },
9697         {
9698                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9699                                PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
9700         },
9701         {
9702                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9703                                PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
9704         },
9705         {
9706                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9707                                PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9708         },
9709         {
9710                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9711                                PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9712         },
9713         {
9714                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9715                                PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9716         },
9717         {
9718                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9719                                PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9720         },
9721         {
9722                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9723                                PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
9724         },
9725         {
9726                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9727                                PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
9728         },
9729         {
9730                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9731                                PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
9732         },
9733         {
9734                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9735                                PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9736         },
9737         {
9738                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9739                                PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9740         },
9741         {
9742                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9743                                PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9744         },
9745         {
9746                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9747                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
9748         },
9749         {
9750                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9751                                PCI_VENDOR_ID_DELL, 0x1fe0)
9752         },
9753         {
9754                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9755                                PCI_VENDOR_ID_HP, 0x0600)
9756         },
9757         {
9758                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9759                                PCI_VENDOR_ID_HP, 0x0601)
9760         },
9761         {
9762                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9763                                PCI_VENDOR_ID_HP, 0x0602)
9764         },
9765         {
9766                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9767                                PCI_VENDOR_ID_HP, 0x0603)
9768         },
9769         {
9770                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9771                                PCI_VENDOR_ID_HP, 0x0609)
9772         },
9773         {
9774                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9775                                PCI_VENDOR_ID_HP, 0x0650)
9776         },
9777         {
9778                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9779                                PCI_VENDOR_ID_HP, 0x0651)
9780         },
9781         {
9782                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9783                                PCI_VENDOR_ID_HP, 0x0652)
9784         },
9785         {
9786                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9787                                PCI_VENDOR_ID_HP, 0x0653)
9788         },
9789         {
9790                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9791                                PCI_VENDOR_ID_HP, 0x0654)
9792         },
9793         {
9794                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9795                                PCI_VENDOR_ID_HP, 0x0655)
9796         },
9797         {
9798                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9799                                PCI_VENDOR_ID_HP, 0x0700)
9800         },
9801         {
9802                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9803                                PCI_VENDOR_ID_HP, 0x0701)
9804         },
9805         {
9806                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9807                                PCI_VENDOR_ID_HP, 0x1001)
9808         },
9809         {
9810                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9811                                PCI_VENDOR_ID_HP, 0x1002)
9812         },
9813         {
9814                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9815                                PCI_VENDOR_ID_HP, 0x1100)
9816         },
9817         {
9818                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9819                                PCI_VENDOR_ID_HP, 0x1101)
9820         },
9821         {
9822                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9823                                0x1590, 0x0294)
9824         },
9825         {
9826                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9827                                0x1590, 0x02db)
9828         },
9829         {
9830                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9831                                0x1590, 0x02dc)
9832         },
9833         {
9834                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9835                                0x1590, 0x032e)
9836         },
9837         {
9838                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9839                                0x1590, 0x036f)
9840         },
9841         {
9842                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9843                                0x1590, 0x0381)
9844         },
9845         {
9846                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9847                                0x1590, 0x0382)
9848         },
9849         {
9850                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9851                                0x1590, 0x0383)
9852         },
9853         {
9854                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9855                                0x1d8d, 0x0800)
9856         },
9857         {
9858                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9859                                0x1d8d, 0x0908)
9860         },
9861         {
9862                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9863                                0x1d8d, 0x0806)
9864         },
9865         {
9866                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9867                                0x1d8d, 0x0916)
9868         },
9869         {
9870                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9871                                PCI_VENDOR_ID_GIGABYTE, 0x1000)
9872         },
9873         {
9874                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9875                                0x1dfc, 0x3161)
9876         },
9877         {
9878                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9879                                0x1f0c, 0x3161)
9880         },
9881         {
9882                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9883                                0x1cf2, 0x0804)
9884         },
9885         {
9886                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9887                                0x1cf2, 0x0805)
9888         },
9889         {
9890                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9891                                0x1cf2, 0x0806)
9892         },
9893         {
9894                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9895                                0x1cf2, 0x5445)
9896         },
9897         {
9898                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9899                                0x1cf2, 0x5446)
9900         },
9901         {
9902                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9903                                0x1cf2, 0x5447)
9904         },
9905         {
9906                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9907                                0x1cf2, 0x5449)
9908         },
9909         {
9910                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9911                                0x1cf2, 0x544a)
9912         },
9913         {
9914                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9915                                0x1cf2, 0x544b)
9916         },
9917         {
9918                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9919                                0x1cf2, 0x544d)
9920         },
9921         {
9922                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9923                                0x1cf2, 0x544e)
9924         },
9925         {
9926                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9927                                0x1cf2, 0x544f)
9928         },
9929         {
9930                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9931                                0x1cf2, 0x54da)
9932         },
9933         {
9934                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9935                                0x1cf2, 0x54db)
9936         },
9937         {
9938                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9939                                0x1cf2, 0x54dc)
9940         },
9941         {
9942                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9943                                0x1cf2, 0x0b27)
9944         },
9945         {
9946                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9947                                0x1cf2, 0x0b29)
9948         },
9949         {
9950                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9951                                0x1cf2, 0x0b45)
9952         },
9953         {
9954                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9955                                0x1cc4, 0x0101)
9956         },
9957         {
9958                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9959                                0x1cc4, 0x0201)
9960         },
9961         {
9962                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9963                                PCI_VENDOR_ID_LENOVO, 0x0220)
9964         },
9965         {
9966                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9967                                PCI_VENDOR_ID_LENOVO, 0x0221)
9968         },
9969         {
9970                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9971                                PCI_VENDOR_ID_LENOVO, 0x0520)
9972         },
9973         {
9974                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9975                                PCI_VENDOR_ID_LENOVO, 0x0522)
9976         },
9977         {
9978                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9979                                PCI_VENDOR_ID_LENOVO, 0x0620)
9980         },
9981         {
9982                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9983                                PCI_VENDOR_ID_LENOVO, 0x0621)
9984         },
9985         {
9986                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9987                                PCI_VENDOR_ID_LENOVO, 0x0622)
9988         },
9989         {
9990                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9991                                PCI_VENDOR_ID_LENOVO, 0x0623)
9992         },
9993         {
9994                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9995                                0x1014, 0x0718)
9996         },
9997         {
9998                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9999                                0x1e93, 0x1000)
10000         },
10001         {
10002                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10003                                0x1e93, 0x1001)
10004         },
10005         {
10006                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10007                                0x1e93, 0x1002)
10008         },
10009         {
10010                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10011                                0x1e93, 0x1005)
10012         },
10013         {
10014                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10015                                0x1f51, 0x1001)
10016         },
10017         {
10018                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10019                                0x1f51, 0x1002)
10020         },
10021         {
10022                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10023                                0x1f51, 0x1003)
10024         },
10025         {
10026                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10027                                0x1f51, 0x1004)
10028         },
10029         {
10030                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10031                                0x1f51, 0x1005)
10032         },
10033         {
10034                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10035                                0x1f51, 0x1006)
10036         },
10037         {
10038                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10039                                0x1f51, 0x1007)
10040         },
10041         {
10042                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10043                                0x1f51, 0x1008)
10044         },
10045         {
10046                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10047                                0x1f51, 0x1009)
10048         },
10049         {
10050                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10051                                0x1f51, 0x100a)
10052         },
10053         {
10054                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10055                                PCI_ANY_ID, PCI_ANY_ID)
10056         },
10057         { 0 }
10058 };
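/*
 * Note: PCI_DEVICE_SUB() comes from <linux/pci.h> and fills in the
 * vendor/device/subvendor/subdevice fields of a pci_device_id while
 * leaving class matching wide open.  For reference, it expands along
 * the lines of (reproduced from the header, not defined here):
 *
 *	#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
 *		.vendor = (vend), .device = (dev), \
 *		.subvendor = (subvend), .subdevice = (subdev)
 *
 * The PCI_ANY_ID entry above is a catch-all that claims any 9005:028f
 * function regardless of its subsystem IDs, and the zero-filled { 0 }
 * sentinel terminates the table.
 */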
10059
10060 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
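/*
 * MODULE_DEVICE_TABLE() exports the ID table to userspace so udev and
 * modprobe can autoload this module from a device's modalias string;
 * for the catch-all entry the generated alias would look roughly like
 * "pci:v00009005d0000028Fsv*sd*bc*sc*i*" (illustrative format only).
 */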
10061
10062 static struct pci_driver pqi_pci_driver = {
10063         .name = DRIVER_NAME_SHORT,
10064         .id_table = pqi_pci_id_table,
10065         .probe = pqi_pci_probe,
10066         .remove = pqi_pci_remove,
10067         .shutdown = pqi_shutdown,
10068 #if defined(CONFIG_PM)
10069         .driver = {
10070                 .pm = &pqi_pm_ops
10071         },
10072 #endif
10073 };
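/*
 * Standard struct pci_driver glue: probe/remove/shutdown are invoked
 * by the PCI core, and the suspend/resume handlers in pqi_pm_ops are
 * compiled in only when the kernel has power-management support
 * (CONFIG_PM).
 */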
10074
10075 static int __init pqi_init(void)
10076 {
10077         int rc;
10078
10079         pr_info(DRIVER_NAME "\n");
10080         pqi_verify_structures();
10081         sis_verify_structures();
10082
10083         pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
10084         if (!pqi_sas_transport_template)
10085                 return -ENODEV;
10086
10087         pqi_process_module_params();
10088
10089         rc = pci_register_driver(&pqi_pci_driver);
10090         if (rc)
10091                 sas_release_transport(pqi_sas_transport_template);
10092
10093         return rc;
10094 }
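/*
 * Note the unwind ordering in pqi_init() above: the SAS transport
 * template is attached before the PCI driver is registered, so a
 * pci_register_driver() failure must release the transport again
 * before the error is returned.
 */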
10095
10096 static void __exit pqi_cleanup(void)
10097 {
10098         pci_unregister_driver(&pqi_pci_driver);
10099         sas_release_transport(pqi_sas_transport_template);
10100 }
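/*
 * Teardown mirrors pqi_init() in reverse: unregister the PCI driver
 * first, then release the SAS transport template.
 */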
10101
10102 module_init(pqi_init);
10103 module_exit(pqi_cleanup);
10104
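/*
 * Compile-time layout checks.  Each BUILD_BUG_ON() below pins a field
 * offset or structure size that the controller firmware expects on
 * the wire, so any accidental reordering or repacking of the structs
 * in smartpqi.h breaks the build instead of silently corrupting
 * requests.  BUILD_BUG_ON(cond), from <linux/build_bug.h>, behaves
 * roughly like the C11 assertion below (a sketch, not the exact
 * kernel definition):
 *
 *	static_assert(!(cond), "BUILD_BUG_ON failed: " #cond);
 */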
10105 static void pqi_verify_structures(void)
10106 {
10107         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10108                 sis_host_to_ctrl_doorbell) != 0x20);
10109         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10110                 sis_interrupt_mask) != 0x34);
10111         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10112                 sis_ctrl_to_host_doorbell) != 0x9c);
10113         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10114                 sis_ctrl_to_host_doorbell_clear) != 0xa0);
10115         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10116                 sis_driver_scratch) != 0xb0);
10117         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10118                 sis_product_identifier) != 0xb4);
10119         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10120                 sis_firmware_status) != 0xbc);
10121         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10122                 sis_ctrl_shutdown_reason_code) != 0xcc);
10123         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10124                 sis_mailbox) != 0x1000);
10125         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10126                 pqi_registers) != 0x4000);
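/*
 * The checks above cover the legacy SIS register window at the start
 * of the controller's register BAR, with the PQI-mode register set
 * beginning at offset 0x4000; the struct definitions being verified
 * live in smartpqi.h.
 */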
10127
10128         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10129                 iu_type) != 0x0);
10130         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10131                 iu_length) != 0x2);
10132         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10133                 response_queue_id) != 0x4);
10134         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10135                 driver_flags) != 0x6);
10136         BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
10137
10138         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10139                 status) != 0x0);
10140         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10141                 service_response) != 0x1);
10142         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10143                 data_present) != 0x2);
10144         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10145                 reserved) != 0x3);
10146         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10147                 residual_count) != 0x4);
10148         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10149                 data_length) != 0x8);
10150         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10151                 reserved1) != 0xa);
10152         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10153                 data) != 0xc);
10154         BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
10155
10156         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10157                 data_in_result) != 0x0);
10158         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10159                 data_out_result) != 0x1);
10160         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10161                 reserved) != 0x2);
10162         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10163                 status) != 0x5);
10164         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10165                 status_qualifier) != 0x6);
10166         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10167                 sense_data_length) != 0x8);
10168         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10169                 response_data_length) != 0xa);
10170         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10171                 data_in_transferred) != 0xc);
10172         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10173                 data_out_transferred) != 0x10);
10174         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10175                 data) != 0x14);
10176         BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
10177
10178         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10179                 signature) != 0x0);
10180         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10181                 function_and_status_code) != 0x8);
10182         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10183                 max_admin_iq_elements) != 0x10);
10184         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10185                 max_admin_oq_elements) != 0x11);
10186         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10187                 admin_iq_element_length) != 0x12);
10188         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10189                 admin_oq_element_length) != 0x13);
10190         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10191                 max_reset_timeout) != 0x14);
10192         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10193                 legacy_intx_status) != 0x18);
10194         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10195                 legacy_intx_mask_set) != 0x1c);
10196         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10197                 legacy_intx_mask_clear) != 0x20);
10198         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10199                 device_status) != 0x40);
10200         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10201                 admin_iq_pi_offset) != 0x48);
10202         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10203                 admin_oq_ci_offset) != 0x50);
10204         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10205                 admin_iq_element_array_addr) != 0x58);
10206         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10207                 admin_oq_element_array_addr) != 0x60);
10208         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10209                 admin_iq_ci_addr) != 0x68);
10210         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10211                 admin_oq_pi_addr) != 0x70);
10212         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10213                 admin_iq_num_elements) != 0x78);
10214         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10215                 admin_oq_num_elements) != 0x79);
10216         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10217                 admin_queue_int_msg_num) != 0x7a);
10218         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10219                 device_error) != 0x80);
10220         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10221                 error_details) != 0x88);
10222         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10223                 device_reset) != 0x90);
10224         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10225                 power_action) != 0x94);
10226         BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10227
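/*
 * PQI admin queue elements are fixed at 64 bytes, so the admin
 * request and response structures that follow are pinned to
 * sizeof == 64 as well as to their individual field offsets.
 */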
10228         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10229                 header.iu_type) != 0);
10230         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10231                 header.iu_length) != 2);
10232         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10233                 header.driver_flags) != 6);
10234         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10235                 request_id) != 8);
10236         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10237                 function_code) != 10);
10238         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10239                 data.report_device_capability.buffer_length) != 44);
10240         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10241                 data.report_device_capability.sg_descriptor) != 48);
10242         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10243                 data.create_operational_iq.queue_id) != 12);
10244         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10245                 data.create_operational_iq.element_array_addr) != 16);
10246         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10247                 data.create_operational_iq.ci_addr) != 24);
10248         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10249                 data.create_operational_iq.num_elements) != 32);
10250         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10251                 data.create_operational_iq.element_length) != 34);
10252         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10253                 data.create_operational_iq.queue_protocol) != 36);
10254         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10255                 data.create_operational_oq.queue_id) != 12);
10256         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10257                 data.create_operational_oq.element_array_addr) != 16);
10258         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10259                 data.create_operational_oq.pi_addr) != 24);
10260         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10261                 data.create_operational_oq.num_elements) != 32);
10262         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10263                 data.create_operational_oq.element_length) != 34);
10264         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10265                 data.create_operational_oq.queue_protocol) != 36);
10266         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10267                 data.create_operational_oq.int_msg_num) != 40);
10268         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10269                 data.create_operational_oq.coalescing_count) != 42);
10270         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10271                 data.create_operational_oq.min_coalescing_time) != 44);
10272         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10273                 data.create_operational_oq.max_coalescing_time) != 48);
10274         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10275                 data.delete_operational_queue.queue_id) != 12);
10276         BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
10277         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10278                 data.create_operational_iq) != 64 - 11);
10279         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10280                 data.create_operational_oq) != 64 - 11);
10281         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10282                 data.delete_operational_queue) != 64 - 11);
10283
10284         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10285                 header.iu_type) != 0);
10286         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10287                 header.iu_length) != 2);
10288         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10289                 header.driver_flags) != 6);
10290         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10291                 request_id) != 8);
10292         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10293                 function_code) != 10);
10294         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10295                 status) != 11);
10296         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10297                 data.create_operational_iq.status_descriptor) != 12);
10298         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10299                 data.create_operational_iq.iq_pi_offset) != 16);
10300         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10301                 data.create_operational_oq.status_descriptor) != 12);
10302         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10303                 data.create_operational_oq.oq_ci_offset) != 16);
10304         BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10305
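/*
 * Operational (I/O) queue elements are sized by
 * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH instead, so each of the RAID and
 * AIO path request structures below must occupy exactly one inbound
 * queue element.
 */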
10306         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10307                 header.iu_type) != 0);
10308         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10309                 header.iu_length) != 2);
10310         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10311                 header.response_queue_id) != 4);
10312         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10313                 header.driver_flags) != 6);
10314         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10315                 request_id) != 8);
10316         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10317                 nexus_id) != 10);
10318         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10319                 buffer_length) != 12);
10320         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10321                 lun_number) != 16);
10322         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10323                 protocol_specific) != 24);
10324         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10325                 error_index) != 27);
10326         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10327                 cdb) != 32);
10328         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10329                 timeout) != 60);
10330         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10331                 sg_descriptors) != 64);
10332         BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10333                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10334
10335         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10336                 header.iu_type) != 0);
10337         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10338                 header.iu_length) != 2);
10339         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10340                 header.response_queue_id) != 4);
10341         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10342                 header.driver_flags) != 6);
10343         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10344                 request_id) != 8);
10345         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10346                 nexus_id) != 12);
10347         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10348                 buffer_length) != 16);
10349         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10350                 data_encryption_key_index) != 22);
10351         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10352                 encrypt_tweak_lower) != 24);
10353         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10354                 encrypt_tweak_upper) != 28);
10355         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10356                 cdb) != 32);
10357         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10358                 error_index) != 48);
10359         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10360                 num_sg_descriptors) != 50);
10361         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10362                 cdb_length) != 51);
10363         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10364                 lun_number) != 52);
10365         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10366                 sg_descriptors) != 64);
10367         BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10368                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10369
10370         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10371                 header.iu_type) != 0);
10372         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10373                 header.iu_length) != 2);
10374         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10375                 request_id) != 8);
10376         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10377                 error_index) != 10);
10378
10379         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10380                 header.iu_type) != 0);
10381         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10382                 header.iu_length) != 2);
10383         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10384                 header.response_queue_id) != 4);
10385         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10386                 request_id) != 8);
10387         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10388                 data.report_event_configuration.buffer_length) != 12);
10389         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10390                 data.report_event_configuration.sg_descriptors) != 16);
10391         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10392                 data.set_event_configuration.global_event_oq_id) != 10);
10393         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10394                 data.set_event_configuration.buffer_length) != 12);
10395         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10396                 data.set_event_configuration.sg_descriptors) != 16);
10397
10398         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10399                 max_inbound_iu_length) != 6);
10400         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10401                 max_outbound_iu_length) != 14);
10402         BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10403
10404         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10405                 data_length) != 0);
10406         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10407                 iq_arbitration_priority_support_bitmask) != 8);
10408         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10409                 maximum_aw_a) != 9);
10410         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10411                 maximum_aw_b) != 10);
10412         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10413                 maximum_aw_c) != 11);
10414         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10415                 max_inbound_queues) != 16);
10416         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10417                 max_elements_per_iq) != 18);
10418         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10419                 max_iq_element_length) != 24);
10420         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10421                 min_iq_element_length) != 26);
10422         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10423                 max_outbound_queues) != 30);
10424         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10425                 max_elements_per_oq) != 32);
10426         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10427                 intr_coalescing_time_granularity) != 34);
10428         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10429                 max_oq_element_length) != 36);
10430         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10431                 min_oq_element_length) != 38);
10432         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10433                 iu_layer_descriptors) != 64);
10434         BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10435
10436         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10437                 event_type) != 0);
10438         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10439                 oq_id) != 2);
10440         BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10441
10442         BUILD_BUG_ON(offsetof(struct pqi_event_config,
10443                 num_event_descriptors) != 2);
10444         BUILD_BUG_ON(offsetof(struct pqi_event_config,
10445                 descriptors) != 4);
10446
10447         BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10448                 ARRAY_SIZE(pqi_supported_event_types));
10449
10450         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10451                 header.iu_type) != 0);
10452         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10453                 header.iu_length) != 2);
10454         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10455                 event_type) != 8);
10456         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10457                 event_id) != 10);
10458         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10459                 additional_event_id) != 12);
10460         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10461                 data) != 16);
10462         BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10463
10464         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10465                 header.iu_type) != 0);
10466         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10467                 header.iu_length) != 2);
10468         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10469                 event_type) != 8);
10470         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10471                 event_id) != 10);
10472         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10473                 additional_event_id) != 12);
10474         BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10475
10476         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10477                 header.iu_type) != 0);
10478         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10479                 header.iu_length) != 2);
10480         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10481                 request_id) != 8);
10482         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10483                 nexus_id) != 10);
10484         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10485                 timeout) != 14);
10486         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10487                 lun_number) != 16);
10488         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10489                 protocol_specific) != 24);
10490         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10491                 outbound_queue_id_to_manage) != 26);
10492         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10493                 request_id_to_manage) != 28);
10494         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10495                 task_management_function) != 30);
10496         BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10497
10498         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10499                 header.iu_type) != 0);
10500         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10501                 header.iu_length) != 2);
10502         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10503                 request_id) != 8);
10504         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10505                 nexus_id) != 10);
10506         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10507                 additional_response_info) != 12);
10508         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10509                 response_code) != 15);
10510         BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10511
10512         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10513                 configured_logical_drive_count) != 0);
10514         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10515                 configuration_signature) != 1);
10516         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10517                 firmware_version_short) != 5);
10518         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10519                 extended_logical_unit_count) != 154);
10520         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10521                 firmware_build_number) != 190);
10522         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10523                 vendor_id) != 200);
10524         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10525                 product_id) != 208);
10526         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10527                 extra_controller_flags) != 286);
10528         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10529                 controller_mode) != 292);
10530         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10531                 spare_part_number) != 293);
10532         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10533                 firmware_version_long) != 325);
10534
10535         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10536                 phys_bay_in_box) != 115);
10537         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10538                 device_type) != 120);
10539         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10540                 redundant_path_present_map) != 1736);
10541         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10542                 active_path_number) != 1738);
10543         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10544                 alternate_paths_phys_connector) != 1739);
10545         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10546                 alternate_paths_phys_box_on_port) != 1755);
10547         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10548                 current_queue_depth_limit) != 1796);
10549         BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
10550
10551         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
10552         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10553                 page_code) != 0);
10554         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10555                 subpage_code) != 1);
10556         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10557                 buffer_length) != 2);
10558
10559         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
10560         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10561                 page_code) != 0);
10562         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10563                 subpage_code) != 1);
10564         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10565                 page_length) != 2);
10566
10567         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
10568                 != 18);
10569         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10570                 header) != 0);
10571         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10572                 firmware_read_support) != 4);
10573         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10574                 driver_read_support) != 5);
10575         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10576                 firmware_write_support) != 6);
10577         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10578                 driver_write_support) != 7);
10579         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10580                 max_transfer_encrypted_sas_sata) != 8);
10581         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10582                 max_transfer_encrypted_nvme) != 10);
10583         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10584                 max_write_raid_5_6) != 12);
10585         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10586                 max_write_raid_1_10_2drive) != 14);
10587         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10588                 max_write_raid_1_10_3drive) != 16);
10589
10590         BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
10591         BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10592         BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10593                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10594         BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10595                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10596         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10597         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10598                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10599         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10600         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10601                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10602
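/*
 * Finally, reserving PQI_RESERVED_IO_SLOTS internal requests must
 * still leave at least one slot for SCSI I/O, in both normal and
 * kdump configurations.
 */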
10603         BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
10604         BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
10605                 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
10606 }