// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.22-040"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		22
#define DRIVER_REVISION		40

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

struct pqi_cmd_priv {
	int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
	pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
	"Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
	pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
	"Timeout in seconds for driver to wait for controller ready.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0	/* also used for RAID 10 */
#define SA_RAID_4		1	/* also used for RAID 40 */
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

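/*
 * Note: the SIS driver-scratch register read/written above is controller-owned
 * storage, so the two flag bits defined above are presumably intended to
 * survive a driver unload/reload: a later probe can rediscover whether the
 * controller was left in PQI mode and whether firmware triage is supported
 * without re-negotiating either.
 */
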
static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

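/*
 * The busy/blocked counters above drive the quiesce loop below: a thread that
 * called pqi_ctrl_busy() but is now parked in pqi_wait_if_ctrl_blocked() is
 * counted in both num_busy_threads and num_blocked_threads, so the controller
 * is considered quiesced once every busy thread is also accounted for as
 * blocked.
 */
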
#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}

static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	bool io_high_prio;
	int priority_class;

	io_high_prio = false;

	if (device->ncq_prio_enable) {
		priority_class =
			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
		if (priority_class == IOPRIO_CLASS_RT) {
			/* Set NCQ priority for read/write commands. */
			switch (scmd->cmnd[0]) {
			case WRITE_16:
			case READ_16:
			case WRITE_12:
			case READ_12:
			case WRITE_10:
			case READ_10:
			case WRITE_6:
			case READ_6:
				io_high_prio = true;
				break;
			}
		}
	}

	return io_high_prio;
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

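/*
 * pqi_map_single() always builds a single-element scatter/gather list:
 * CISS_SG_LAST in the descriptor flags marks it as the final (and only)
 * descriptor, which is why callers of pqi_pci_unmap() pass
 * num_descriptors = 1 for buffers mapped this way.
 */
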
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

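/*
 * Note the cdb_length = 0 assignments ahead of the fallthrough cases above:
 * BMIC_SENSE_DIAG_OPTIONS and BMIC_SET_DIAG_OPTIONS share the BMIC read/write
 * CDB layout with the cases that follow them but carry no payload length, so
 * zero is encoded in the transfer-length field at bytes 7-8 of their CDBs.
 */
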
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
	struct pqi_io_request *io_request;
	u16 i;

	if (scmd) { /* SML I/O request */
		u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

		i = blk_mq_unique_tag_to_tag(blk_tag);
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) > 1) {
			atomic_dec(&io_request->refcount);
			return NULL;
		}
	} else { /* IOCTL or driver internal request */
		/*
		 * benignly racy - may have to wait for an open slot.
		 * command slot range is scsi_ml_can_queue -
		 *   [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
		 */
		i = 0;
		while (1) {
			io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
			if (atomic_inc_return(&io_request->refcount) == 1)
				break;
			atomic_dec(&io_request->refcount);
			i = (i + 1) % PQI_RESERVED_IO_SLOTS;
		}
	}

	if (io_request)
		pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

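/*
 * Pool layout assumed by pqi_alloc_io_request(): slots [0, scsi_ml_can_queue)
 * map 1:1 to block-layer tags for SML-issued commands, so those never contend
 * with each other; the PQI_RESERVED_IO_SLOTS slots above that range are shared
 * by IOCTL and driver-internal requests, which claim a slot by winning the
 * refcount increment and otherwise advance to the next reserved slot.
 */
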
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

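/*
 * The AIO limits returned by BMIC_SENSE_FEATURE are 16-bit values in units of
 * 1 KiB; a value of zero means "no limit", which the helper above maps to ~0
 * so that callers can compare transfer lengths against it unconditionally.
 */
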
#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
	sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

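/*
 * Both minimums end at max_write_raid_1_10_3drive: the buffer-length check
 * covers the header plus subpage as returned in the sense-feature buffer,
 * while the page-length check covers the subpage payload alone (hence the
 * subtraction of the subpage header). Firmware reporting anything shorter is
 * treated as not providing the advanced RAID bypass limits.
 */
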
static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA		(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

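/*
 * Host wellness buffers are a tagged format: "<HW>" opens the buffer, a
 * two-character tag ("DV" above, "TD" below) introduces each field together
 * with a little-endian length, and "ZZ" terminates the buffer. The "DW"
 * (dont_write_tag) field presumably tells the controller not to persist the
 * entry.
 */
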
#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

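/*
 * time[] is binary-coded decimal, local time: bytes 0-2 hold HH/MM/SS, byte 3
 * is reserved (zero), bytes 4-5 hold month/day, and bytes 6-7 hold century and
 * year-of-century - e.g. 2024-06-05 14:30:09 is encoded as the byte sequence
 * 14 30 09 00 06 05 20 24.
 */
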
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

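/*
 * The "again" loop above handles a race with hotplug: the LUN list can grow
 * between the initial header-only probe and the full read, in which case the
 * firmware reports a larger list_length and the read is retried with a bigger
 * buffer until the returned length fits.
 */
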
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);

	rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
		num_physicals), GFP_KERNEL);
	if (!rpl_16byte_wwid_list)
		return -ENOMEM;

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}

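/*
 * For firmware that only speaks extended format 2 (8-byte WWIDs), the
 * conversion above rewrites the response into the 16-byte WWID layout the
 * rest of the driver expects: the 8-byte WWID is copied into the low half of
 * the field, the upper 8 bytes are zeroed, and the remaining per-LUN fields
 * are carried over unchanged.
 */
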
static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list
	 * by adding a list entry that is all zeros.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

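/*
 * The RAID map is fetched twice when it does not fit in the base structure:
 * the first read reveals the real structure_size, the buffer is reallocated
 * to that size and re-read, and a mismatch between the two reported sizes is
 * treated as an error since the map evidently changed between the reads.
 */
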
static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_DEVICE_NCQ_PRIO_SUPPORTED	0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10
#define PQI_DEVICE_ERASE_IN_PROGRESS	0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
	device->lun_count = id_phys->multi_lun_device_lun_count;
	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
		id_phys->phy_count)
		device->phy_id =
			id_phys->phy_to_phy_map[device->active_path_index];
	else
		device->phy_id = 0xFF;

	device->ncq_prio_support =
		((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
		PQI_DEVICE_NCQ_PRIO_SUPPORTED);

	device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize (erase) operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 *       using the management utility.
 */
static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
{
	return device->erase_in_progress;
}

static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);

	if (rc == 0 && device->lun_count == 0)
		device->lun_count = 1;

	return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;
	int lun;

	for (lun = 0; lun < device->lun_count; lun++) {
		rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
			PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
		if (rc)
			dev_err(&ctrl_info->pci_dev->dev,
				"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
				ctrl_info->scsi_host->host_no, device->bus,
				device->target, lun,
				atomic_read(&device->scsi_cmds_outstanding[lun]));
	}

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);

	pqi_device_remove_start(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;

	return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP    ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx%016llx",
			get_unaligned_be64(&device->wwid[0]),
			get_unaligned_be64(&device->wwid[8]));

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
{
	u32 raid_map1_size;
	u32 raid_map2_size;

	if (raid_map1 == NULL || raid_map2 == NULL)
		return raid_map1 == raid_map2;

	raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
	raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);

	if (raid_map1_size != raid_map2_size)
		return false;

	return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->device_offline = false;
	existing_device->lun_count = new_device->lun_count;

	if (pqi_is_logical_device(existing_device)) {
		existing_device->is_external_raid_device = new_device->is_external_raid_device;

		if (existing_device->devtype == TYPE_DISK) {
			existing_device->raid_level = new_device->raid_level;
			existing_device->volume_status = new_device->volume_status;
			if (ctrl_info->logical_volume_rescan_needed)
				existing_device->rescan = true;
			memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
			if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
				kfree(existing_device->raid_map);
				existing_device->raid_map = new_device->raid_map;
				/* To prevent this from being freed later. */
				new_device->raid_map = NULL;
			}
			existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
			existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
		}
	} else {
		existing_device->aio_enabled = new_device->aio_enabled;
		existing_device->aio_handle = new_device->aio_handle;
		existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
		existing_device->active_path_index = new_device->active_path_index;
		existing_device->phy_id = new_device->phy_id;
		existing_device->path_map = new_device->path_map;
		existing_device->bay = new_device->bay;
		existing_device->box_index = new_device->box_index;
		existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
		existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
		memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
		memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
	}
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}

2140 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2141 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2145 unsigned long flags;
2146 enum pqi_find_result find_result;
2147 struct pqi_scsi_dev *device;
2148 struct pqi_scsi_dev *next;
2149 struct pqi_scsi_dev *matching_device;
2150 LIST_HEAD(add_list);
2151 LIST_HEAD(delete_list);
2154 * The idea here is to do as little work as possible while holding the
2155 * spinlock. That's why we go to great pains to defer anything other
2156 * than updating the internal device list until after we release the
2157 * spinlock.
2158 */
2160 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2162 /* Assume that all devices in the existing list have gone away. */
2163 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2164 device->device_gone = true;
2166 for (i = 0; i < num_new_devices; i++) {
2167 device = new_device_list[i];
2169 find_result = pqi_scsi_find_entry(ctrl_info, device,
2170 &matching_device);
2172 switch (find_result) {
2173 case DEVICE_SAME:
2174 /*
2175 * The newly found device is already in the existing
2176 * device list.
2177 */
2178 device->new_device = false;
2179 matching_device->device_gone = false;
2180 pqi_scsi_update_device(ctrl_info, matching_device, device);
2181 break;
2182 case DEVICE_NOT_FOUND:
2183 /*
2184 * The newly found device is NOT in the existing device
2185 * list.
2186 */
2187 device->new_device = true;
2188 break;
2189 case DEVICE_CHANGED:
2190 /*
2191 * The original device has gone away and we need to add
2192 * the new device.
2193 */
2194 device->new_device = true;
2195 break;
2196 }
2197 }
2199 /* Process all devices that have gone away. */
2200 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2201 scsi_device_list_entry) {
2202 if (device->device_gone) {
2203 list_del(&device->scsi_device_list_entry);
2204 list_add_tail(&device->delete_list_entry, &delete_list);
2208 /* Process all new devices. */
2209 for (i = 0; i < num_new_devices; i++) {
2210 device = new_device_list[i];
2211 if (!device->new_device)
2212 continue;
2213 if (device->volume_offline)
2214 continue;
2215 list_add_tail(&device->scsi_device_list_entry,
2216 &ctrl_info->scsi_device_list);
2217 list_add_tail(&device->add_list_entry, &add_list);
2218 /* To prevent this device structure from being freed later. */
2219 device->keep_device = true;
2222 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2225 * If OFA is in progress and there are devices that need to be deleted,
2226 * allow any pending reset operations to continue and unblock any SCSI
2227 * requests before removal.
2229 if (pqi_ofa_in_progress(ctrl_info)) {
2230 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2231 if (pqi_is_device_added(device))
2232 pqi_device_remove_start(device);
2233 pqi_ctrl_unblock_device_reset(ctrl_info);
2234 pqi_scsi_unblock_requests(ctrl_info);
2237 /* Remove all devices that have gone away. */
2238 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2239 if (device->volume_offline) {
2240 pqi_dev_info(ctrl_info, "offline", device);
2241 pqi_show_volume_status(ctrl_info, device);
2242 } else {
2243 pqi_dev_info(ctrl_info, "removed", device);
2244 }
2245 if (pqi_is_device_added(device))
2246 pqi_remove_device(ctrl_info, device);
2247 list_del(&device->delete_list_entry);
2248 pqi_free_device(device);
2252 * Notify the SML of any existing device changes such as:
2253 * queue depth, device size.
2254 */
2255 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2256 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2257 device->advertised_queue_depth = device->queue_depth;
2258 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2259 if (device->rescan) {
2260 scsi_rescan_device(&device->sdev->sdev_gendev);
2261 device->rescan = false;
2266 /* Expose any new devices. */
2267 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2268 if (!pqi_is_device_added(device)) {
2269 rc = pqi_add_device(ctrl_info, device);
2270 if (rc == 0) {
2271 pqi_dev_info(ctrl_info, "added", device);
2272 } else {
2273 dev_warn(&ctrl_info->pci_dev->dev,
2274 "scsi %d:%d:%d:%d addition failed, device not added\n",
2275 ctrl_info->scsi_host->host_no,
2276 device->bus, device->target,
2277 device->lun);
2278 pqi_fixup_botched_add(ctrl_info, device);
2279 }
2283 ctrl_info->logical_volume_rescan_needed = false;
2287 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2290 * Only support the HBA controller itself as a RAID
2291 * controller. If it's a RAID controller other than
2292 * the HBA itself (an external RAID controller, for
2293 * example), we don't support it.
2295 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2296 !pqi_is_hba_lunid(device->scsi3addr))
2297 return false;
2299 return true;
2300 }
2302 static inline bool pqi_skip_device(u8 *scsi3addr)
2304 /* Ignore all masked devices. */
2305 if (MASKED_DEVICE(scsi3addr))
2306 return true;
2308 return false;
2309 }
2311 static inline void pqi_mask_device(u8 *scsi3addr)
2313 scsi3addr[3] |= 0xc0;
2316 static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2318 if (pqi_is_logical_device(device))
2319 return false;
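/*
 * Note: device->path_map is a bitmap with one bit per reported path, so
 * path_map & (path_map - 1) clears the lowest set bit; a nonzero result
 * therefore means at least two paths, i.e. a multipath device.
 */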
2321 return (device->path_map & (device->path_map - 1)) != 0;
2324 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2326 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2329 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2330 {
2331 int i;
2332 int rc;
2333 LIST_HEAD(new_device_list_head);
2334 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2335 struct report_log_lun_list *logdev_list = NULL;
2336 struct report_phys_lun_16byte_wwid *phys_lun;
2337 struct report_log_lun *log_lun;
2338 struct bmic_identify_physical_device *id_phys = NULL;
2339 u32 num_physicals;
2340 u32 num_logicals;
2341 struct pqi_scsi_dev **new_device_list = NULL;
2342 struct pqi_scsi_dev *device;
2343 struct pqi_scsi_dev *next;
2344 unsigned int num_new_devices;
2345 unsigned int num_valid_devices;
2346 bool is_physical_device;
2347 u8 *scsi3addr;
2348 unsigned int physical_index;
2349 unsigned int logical_index;
2350 static char *out_of_memory_msg =
2351 "failed to allocate memory, device discovery stopped";
2353 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2354 if (rc)
2355 goto out;
2357 if (physdev_list)
2358 num_physicals =
2359 get_unaligned_be32(&physdev_list->header.list_length)
2360 / sizeof(physdev_list->lun_entries[0]);
2361 else
2362 num_physicals = 0;
2364 if (logdev_list)
2365 num_logicals =
2366 get_unaligned_be32(&logdev_list->header.list_length)
2367 / sizeof(logdev_list->lun_entries[0]);
2368 else
2369 num_logicals = 0;
2371 if (num_physicals) {
2373 * We need this buffer for calls to pqi_get_physical_disk_info()
2374 * below. We allocate it here instead of inside
2375 * pqi_get_physical_disk_info() because it's a fairly large
2376 * buffer.
2377 */
2378 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2379 if (!id_phys) {
2380 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2381 out_of_memory_msg);
2382 rc = -ENOMEM;
2383 goto out;
2384 }
2385 }
2386 if (pqi_hide_vsep) {
2387 for (i = num_physicals - 1; i >= 0; i--) {
2388 phys_lun = &physdev_list->lun_entries[i];
2389 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2390 pqi_mask_device(phys_lun->lunid);
2391 break;
2392 }
2393 }
2394 }
2397 if (num_logicals &&
2398 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2399 ctrl_info->lv_drive_type_mix_valid = true;
2401 num_new_devices = num_physicals + num_logicals;
2403 new_device_list = kmalloc_array(num_new_devices,
2404 sizeof(*new_device_list),
2405 GFP_KERNEL);
2406 if (!new_device_list) {
2407 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2408 rc = -ENOMEM;
2409 goto out;
2410 }
2412 for (i = 0; i < num_new_devices; i++) {
2413 device = kzalloc(sizeof(*device), GFP_KERNEL);
2414 if (!device) {
2415 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2416 out_of_memory_msg);
2417 rc = -ENOMEM;
2418 goto out;
2419 }
2420 list_add_tail(&device->new_device_list_entry,
2421 &new_device_list_head);
2425 num_valid_devices = 0;
2426 physical_index = 0;
2427 logical_index = 0;
2429 for (i = 0; i < num_new_devices; i++) {
2431 if ((!pqi_expose_ld_first && i < num_physicals) ||
2432 (pqi_expose_ld_first && i >= num_logicals)) {
2433 is_physical_device = true;
2434 phys_lun = &physdev_list->lun_entries[physical_index++];
2435 log_lun = NULL;
2436 scsi3addr = phys_lun->lunid;
2437 } else {
2438 is_physical_device = false;
2439 phys_lun = NULL;
2440 log_lun = &logdev_list->lun_entries[logical_index++];
2441 scsi3addr = log_lun->lunid;
2444 if (is_physical_device && pqi_skip_device(scsi3addr))
2445 continue;
2447 if (device)
2448 device = list_next_entry(device, new_device_list_entry);
2449 else
2450 device = list_first_entry(&new_device_list_head,
2451 struct pqi_scsi_dev, new_device_list_entry);
2453 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2454 device->is_physical_device = is_physical_device;
2455 if (is_physical_device) {
2456 device->device_type = phys_lun->device_type;
2457 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2458 device->is_expander_smp_device = true;
2459 } else {
2460 device->is_external_raid_device =
2461 pqi_is_external_raid_addr(scsi3addr);
2462 }
2464 if (!pqi_is_supported_device(device))
2465 continue;
2467 /* Gather information about the device. */
2468 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2469 if (rc == -ENOMEM) {
2470 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2471 out_of_memory_msg);
2472 goto out;
2473 }
2474 if (rc) {
2475 if (device->is_physical_device)
2476 dev_warn(&ctrl_info->pci_dev->dev,
2477 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2478 get_unaligned_be64(&phys_lun->wwid[0]),
2479 get_unaligned_be64(&phys_lun->wwid[8]));
2480 else
2481 dev_warn(&ctrl_info->pci_dev->dev,
2482 "obtaining device info failed, skipping logical device %08x%08x\n",
2483 *((u32 *)&device->scsi3addr),
2484 *((u32 *)&device->scsi3addr[4]));
2485 rc = 0;
2486 continue;
2487 }
2489 /* Do not present disks that the OS cannot fully probe. */
2490 if (pqi_keep_device_offline(device))
2491 continue;
2493 pqi_assign_bus_target_lun(device);
2495 if (device->is_physical_device) {
2496 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2497 if ((phys_lun->device_flags &
2498 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2499 phys_lun->aio_handle) {
2500 device->aio_enabled = true;
2501 device->aio_handle =
2502 phys_lun->aio_handle;
2503 }
2504 } else {
2505 memcpy(device->volume_id, log_lun->volume_id,
2506 sizeof(device->volume_id));
2509 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2511 new_device_list[num_valid_devices++] = device;
2514 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2516 out:
2517 list_for_each_entry_safe(device, next, &new_device_list_head,
2518 new_device_list_entry) {
2519 if (device->keep_device)
2520 continue;
2521 list_del(&device->new_device_list_entry);
2522 pqi_free_device(device);
2525 kfree(new_device_list);
2526 kfree(physdev_list);
2527 kfree(logdev_list);
2528 kfree(id_phys);
2530 return rc;
2531 }
2533 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2534 {
2535 int rc;
2536 bool mutex_acquired;
2538 if (pqi_ctrl_offline(ctrl_info))
2539 return -ENXIO;
2541 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2543 if (!mutex_acquired) {
2544 if (pqi_ctrl_scan_blocked(ctrl_info))
2545 return -EBUSY;
2546 pqi_schedule_rescan_worker_delayed(ctrl_info);
2547 return -EINPROGRESS;
2550 rc = pqi_update_scsi_devices(ctrl_info);
2551 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2552 pqi_schedule_rescan_worker_delayed(ctrl_info);
2554 mutex_unlock(&ctrl_info->scan_mutex);
2556 return rc;
2557 }
2559 static void pqi_scan_start(struct Scsi_Host *shost)
2561 struct pqi_ctrl_info *ctrl_info;
2563 ctrl_info = shost_to_hba(shost);
2565 pqi_scan_scsi_devices(ctrl_info);
2568 /* Returns TRUE if scan is finished. */
2570 static int pqi_scan_finished(struct Scsi_Host *shost,
2571 unsigned long elapsed_time)
2573 struct pqi_ctrl_info *ctrl_info;
2575 ctrl_info = shost_priv(shost);
2577 return !mutex_is_locked(&ctrl_info->scan_mutex);
2580 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2581 struct raid_map *raid_map, u64 first_block)
2583 u32 volume_blk_size;
2586 * Set the encryption tweak values based on logical block address.
2587 * If the block size is 512, the tweak value is equal to the LBA.
2588 * For other block sizes, the tweak value is (LBA * block size) / 512.
2589 */
2590 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2591 if (volume_blk_size != 512)
2592 first_block = (first_block * volume_blk_size) / 512;
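/* Illustrative example: with a 4096-byte volume block size, LBA 100 maps to tweak (100 * 4096) / 512 = 800. */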
2594 encryption_info->data_encryption_key_index =
2595 get_unaligned_le16(&raid_map->data_encryption_key_index);
2596 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2597 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2601 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2604 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2605 struct pqi_scsi_dev_raid_map_data *rmd)
2607 bool is_supported = true;
2609 switch (rmd->raid_level) {
2610 case SA_RAID_0:
2611 break;
2612 case SA_RAID_1:
2613 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2614 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2615 is_supported = false;
2616 break;
2617 case SA_RAID_TRIPLE:
2618 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2619 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2620 is_supported = false;
2621 break;
2622 case SA_RAID_5:
2623 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2624 rmd->data_length > ctrl_info->max_write_raid_5_6))
2625 is_supported = false;
2626 break;
2627 case SA_RAID_6:
2628 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2629 rmd->data_length > ctrl_info->max_write_raid_5_6))
2630 is_supported = false;
2631 break;
2632 default:
2633 is_supported = false;
2634 break;
2635 }
2637 return is_supported;
2638 }
2640 #define PQI_RAID_BYPASS_INELIGIBLE 1
2642 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2643 struct pqi_scsi_dev_raid_map_data *rmd)
2645 /* Check for valid opcode, get LBA and block count. */
2646 switch (scmd->cmnd[0]) {
2647 case WRITE_6:
2648 rmd->is_write = true;
2649 fallthrough;
2650 case READ_6:
2651 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2652 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2653 rmd->block_cnt = (u32)scmd->cmnd[4];
2654 if (rmd->block_cnt == 0)
2655 rmd->block_cnt = 256;
2656 break;
2657 case WRITE_10:
2658 rmd->is_write = true;
2659 fallthrough;
2660 case READ_10:
2661 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2662 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2663 break;
2664 case WRITE_12:
2665 rmd->is_write = true;
2666 fallthrough;
2667 case READ_12:
2668 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2669 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2670 break;
2671 case WRITE_16:
2672 rmd->is_write = true;
2673 fallthrough;
2674 case READ_16:
2675 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2676 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2677 break;
2678 default:
2679 /* Process via normal I/O path. */
2680 return PQI_RAID_BYPASS_INELIGIBLE;
2681 }
2683 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2685 return 0;
2686 }
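/*
 * Illustrative example: a READ_10 CDB of 28 00 00 00 12 34 00 00 08 00
 * decodes to first_block = 0x1234 and block_cnt = 8. Note the READ_6/
 * WRITE_6 quirk above: a transfer length byte of 0 means 256 blocks.
 */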
2688 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2689 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2691 #if BITS_PER_LONG == 32
2692 u64 tmpdiv;
2693 #endif
2695 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2697 /* Check for invalid block or wraparound. */
2698 if (rmd->last_block >=
2699 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2700 rmd->last_block < rmd->first_block)
2701 return PQI_RAID_BYPASS_INELIGIBLE;
2703 rmd->data_disks_per_row =
2704 get_unaligned_le16(&raid_map->data_disks_per_row);
2705 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2706 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2708 /* Calculate stripe information for the request. */
2709 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2710 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2711 return PQI_RAID_BYPASS_INELIGIBLE;
2712 #if BITS_PER_LONG == 32
2713 tmpdiv = rmd->first_block;
2714 do_div(tmpdiv, rmd->blocks_per_row);
2715 rmd->first_row = tmpdiv;
2716 tmpdiv = rmd->last_block;
2717 do_div(tmpdiv, rmd->blocks_per_row);
2718 rmd->last_row = tmpdiv;
2719 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2720 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2721 tmpdiv = rmd->first_row_offset;
2722 do_div(tmpdiv, rmd->strip_size);
2723 rmd->first_column = tmpdiv;
2724 tmpdiv = rmd->last_row_offset;
2725 do_div(tmpdiv, rmd->strip_size);
2726 rmd->last_column = tmpdiv;
2727 #else
2728 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2729 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2730 rmd->first_row_offset = (u32)(rmd->first_block -
2731 (rmd->first_row * rmd->blocks_per_row));
2732 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2733 rmd->blocks_per_row));
2734 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2735 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2736 #endif
2738 /* If this isn't a single row/column then give to the controller. */
2739 if (rmd->first_row != rmd->last_row ||
2740 rmd->first_column != rmd->last_column)
2741 return PQI_RAID_BYPASS_INELIGIBLE;
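/*
 * Bypass is only attempted when the request lands in a single row and
 * column of the RAID map, i.e. on exactly one physical drive; requests
 * that span drives are handed to the controller firmware instead.
 */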
2743 /* Proceeding with driver mapping. */
2744 rmd->total_disks_per_row = rmd->data_disks_per_row +
2745 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2746 rmd->map_row = ((u32)(rmd->first_row >>
2747 raid_map->parity_rotation_shift)) %
2748 get_unaligned_le16(&raid_map->row_cnt);
2749 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2750 rmd->first_column;
2752 return 0;
2753 }
2755 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2756 struct raid_map *raid_map)
2758 #if BITS_PER_LONG == 32
2759 u64 tmpdiv;
2760 #endif
2762 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2763 return PQI_RAID_BYPASS_INELIGIBLE;
2766 /* Verify first and last block are in same RAID group. */
2767 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2768 #if BITS_PER_LONG == 32
2769 tmpdiv = rmd->first_block;
2770 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2771 tmpdiv = rmd->first_group;
2772 do_div(tmpdiv, rmd->blocks_per_row);
2773 rmd->first_group = tmpdiv;
2774 tmpdiv = rmd->last_block;
2775 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2776 tmpdiv = rmd->last_group;
2777 do_div(tmpdiv, rmd->blocks_per_row);
2778 rmd->last_group = tmpdiv;
2779 #else
2780 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2781 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2782 #endif
2783 if (rmd->first_group != rmd->last_group)
2784 return PQI_RAID_BYPASS_INELIGIBLE;
2786 /* Verify request is in a single row of RAID 5/6. */
2787 #if BITS_PER_LONG == 32
2788 tmpdiv = rmd->first_block;
2789 do_div(tmpdiv, rmd->stripesize);
2790 rmd->first_row = tmpdiv;
2791 rmd->r5or6_first_row = tmpdiv;
2792 tmpdiv = rmd->last_block;
2793 do_div(tmpdiv, rmd->stripesize);
2794 rmd->r5or6_last_row = tmpdiv;
2795 #else
2796 rmd->first_row = rmd->r5or6_first_row =
2797 rmd->first_block / rmd->stripesize;
2798 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2799 #endif
2800 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2801 return PQI_RAID_BYPASS_INELIGIBLE;
2803 /* Verify request is in a single column. */
2804 #if BITS_PER_LONG == 32
2805 tmpdiv = rmd->first_block;
2806 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2807 tmpdiv = rmd->first_row_offset;
2808 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2809 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2810 tmpdiv = rmd->last_block;
2811 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2812 tmpdiv = rmd->r5or6_last_row_offset;
2813 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2814 tmpdiv = rmd->r5or6_first_row_offset;
2815 do_div(tmpdiv, rmd->strip_size);
2816 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2817 tmpdiv = rmd->r5or6_last_row_offset;
2818 do_div(tmpdiv, rmd->strip_size);
2819 rmd->r5or6_last_column = tmpdiv;
2820 #else
2821 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2822 (u32)((rmd->first_block % rmd->stripesize) %
2823 rmd->blocks_per_row);
2825 rmd->r5or6_last_row_offset =
2826 (u32)((rmd->last_block % rmd->stripesize) %
2827 rmd->blocks_per_row);
2829 rmd->first_column =
2830 rmd->r5or6_first_row_offset / rmd->strip_size;
2831 rmd->r5or6_first_column = rmd->first_column;
2832 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2833 #endif
2834 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2835 return PQI_RAID_BYPASS_INELIGIBLE;
2837 /* Request is eligible. */
2838 rmd->map_row =
2839 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2840 get_unaligned_le16(&raid_map->row_cnt);
2842 rmd->map_index = (rmd->first_group *
2843 (get_unaligned_le16(&raid_map->row_cnt) *
2844 rmd->total_disks_per_row)) +
2845 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2847 if (rmd->is_write) {
2848 u32 index;
2850 /*
2851 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2852 * parity entries inside the device's raid_map.
2854 * A device's RAID map is bounded by: number of RAID disks squared.
2856 * The device's RAID map size is checked during device
2857 * initialization.
2858 */
2859 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2860 index *= rmd->total_disks_per_row;
2861 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2863 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2864 if (rmd->raid_level == SA_RAID_6) {
2865 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2866 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2868 #if BITS_PER_LONG == 32
2869 tmpdiv = rmd->first_block;
2870 do_div(tmpdiv, rmd->blocks_per_row);
2871 rmd->row = tmpdiv;
2872 #else
2873 rmd->row = rmd->first_block / rmd->blocks_per_row;
2874 #endif
2875 }
2877 return 0;
2878 }
2880 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2882 /* Build the new CDB for the physical disk I/O. */
2883 if (rmd->disk_block > 0xffffffff) {
2884 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2885 rmd->cdb[1] = 0;
2886 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2887 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2888 rmd->cdb[14] = 0;
2889 rmd->cdb[15] = 0;
2890 rmd->cdb_length = 16;
2891 } else {
2892 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2893 rmd->cdb[1] = 0;
2894 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2895 rmd->cdb[6] = 0;
2896 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2897 rmd->cdb[9] = 0;
2898 rmd->cdb_length = 10;
2899 }
2900 }
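/*
 * READ_10/WRITE_10 carry only a 32-bit LBA and a 16-bit transfer length,
 * so any disk_block above 0xffffffff forces the 16-byte CDB form (the
 * caller separately rejects disk_block_cnt values above 0xffff).
 */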
2902 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2903 struct pqi_scsi_dev_raid_map_data *rmd)
2905 u32 group;
2906 u32 index;
2908 group = rmd->map_index / rmd->data_disks_per_row;
2910 index = rmd->map_index - (group * rmd->data_disks_per_row);
2911 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2912 index += rmd->data_disks_per_row;
2913 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2914 if (rmd->layout_map_count > 2) {
2915 index += rmd->data_disks_per_row;
2916 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2917 }
2919 rmd->num_it_nexus_entries = rmd->layout_map_count;
2920 }
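/*
 * For RAID 1/Triple writes, one IT nexus (AIO handle) is collected per
 * mirror: the same column is offset by data_disks_per_row to reach the
 * corresponding disk in each layout group.
 */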
2922 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2923 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2924 struct pqi_queue_group *queue_group)
2925 {
2926 int rc;
2927 struct raid_map *raid_map;
2928 u32 group;
2929 u32 next_bypass_group;
2930 struct pqi_encryption_info *encryption_info_ptr;
2931 struct pqi_encryption_info encryption_info;
2932 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2934 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2935 if (rc)
2936 return PQI_RAID_BYPASS_INELIGIBLE;
2938 rmd.raid_level = device->raid_level;
2940 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2941 return PQI_RAID_BYPASS_INELIGIBLE;
2943 if (unlikely(rmd.block_cnt == 0))
2944 return PQI_RAID_BYPASS_INELIGIBLE;
2946 raid_map = device->raid_map;
2948 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2949 if (rc)
2950 return PQI_RAID_BYPASS_INELIGIBLE;
2952 if (device->raid_level == SA_RAID_1 ||
2953 device->raid_level == SA_RAID_TRIPLE) {
2954 if (rmd.is_write) {
2955 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2956 } else {
2957 group = device->next_bypass_group[rmd.map_index];
2958 next_bypass_group = group + 1;
2959 if (next_bypass_group >= rmd.layout_map_count)
2960 next_bypass_group = 0;
2961 device->next_bypass_group[rmd.map_index] = next_bypass_group;
2962 rmd.map_index += group * rmd.data_disks_per_row;
2963 }
2964 } else if ((device->raid_level == SA_RAID_5 ||
2965 device->raid_level == SA_RAID_6) &&
2966 (rmd.layout_map_count > 1 || rmd.is_write)) {
2967 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2968 if (rc)
2969 return PQI_RAID_BYPASS_INELIGIBLE;
2970 }
2972 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2973 return PQI_RAID_BYPASS_INELIGIBLE;
2975 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2976 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2977 rmd.first_row * rmd.strip_size +
2978 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2979 rmd.disk_block_cnt = rmd.block_cnt;
2981 /* Handle differing logical/physical block sizes. */
2982 if (raid_map->phys_blk_shift) {
2983 rmd.disk_block <<= raid_map->phys_blk_shift;
2984 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2987 if (unlikely(rmd.disk_block_cnt > 0xffff))
2988 return PQI_RAID_BYPASS_INELIGIBLE;
2990 pqi_set_aio_cdb(&rmd);
2992 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2993 if (rmd.data_length > device->max_transfer_encrypted)
2994 return PQI_RAID_BYPASS_INELIGIBLE;
2995 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2996 encryption_info_ptr = &encryption_info;
2997 } else {
2998 encryption_info_ptr = NULL;
2999 }
3001 if (rmd.is_write) {
3002 switch (device->raid_level) {
3003 case SA_RAID_1:
3004 case SA_RAID_TRIPLE:
3005 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3006 encryption_info_ptr, device, &rmd);
3007 case SA_RAID_5:
3008 case SA_RAID_6:
3009 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3010 encryption_info_ptr, device, &rmd);
3011 }
3012 }
3014 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3015 rmd.cdb, rmd.cdb_length, queue_group,
3016 encryption_info_ptr, true, false);
3017 }
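/*
 * A PQI_RAID_BYPASS_INELIGIBLE return is not an error; the caller is
 * expected to fall back and issue the request via the normal RAID path.
 */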
3019 #define PQI_STATUS_IDLE 0x0
3021 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3022 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3024 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3025 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3026 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3027 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3028 #define PQI_DEVICE_STATE_ERROR 0x4
3030 #define PQI_MODE_READY_TIMEOUT_SECS 30
3031 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
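/*
 * pqi_wait_for_pqi_mode_ready() below polls three milestones in order,
 * all against the same deadline: the PQI signature appears, the
 * function-and-status code reads idle, and the device status reaches
 * "all registers ready".
 */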
3033 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3035 struct pqi_device_registers __iomem *pqi_registers;
3036 unsigned long timeout;
3037 u64 signature;
3038 u8 status;
3040 pqi_registers = ctrl_info->pqi_registers;
3041 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3043 while (1) {
3044 signature = readq(&pqi_registers->signature);
3045 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3046 sizeof(signature)) == 0)
3047 break;
3048 if (time_after(jiffies, timeout)) {
3049 dev_err(&ctrl_info->pci_dev->dev,
3050 "timed out waiting for PQI signature\n");
3053 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3057 status = readb(&pqi_registers->function_and_status_code);
3058 if (status == PQI_STATUS_IDLE)
3059 break;
3060 if (time_after(jiffies, timeout)) {
3061 dev_err(&ctrl_info->pci_dev->dev,
3062 "timed out waiting for PQI IDLE\n");
3065 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3069 if (readl(&pqi_registers->device_status) ==
3070 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3071 break;
3072 if (time_after(jiffies, timeout)) {
3073 dev_err(&ctrl_info->pci_dev->dev,
3074 "timed out waiting for PQI all registers ready\n");
3077 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3083 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3085 struct pqi_scsi_dev *device;
3087 device = io_request->scmd->device->hostdata;
3088 device->raid_bypass_enabled = false;
3089 device->aio_enabled = false;
3092 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3094 struct pqi_ctrl_info *ctrl_info;
3095 struct pqi_scsi_dev *device;
3097 device = sdev->hostdata;
3098 if (device->device_offline)
3099 return;
3101 device->device_offline = true;
3102 ctrl_info = shost_to_hba(sdev->host);
3103 pqi_schedule_rescan_worker(ctrl_info);
3104 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3105 path, ctrl_info->scsi_host->host_no, device->bus,
3106 device->target, device->lun);
3109 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3110 {
3111 u8 scsi_status;
3112 u8 host_byte;
3113 struct scsi_cmnd *scmd;
3114 struct pqi_raid_error_info *error_info;
3115 size_t sense_data_length;
3116 int residual_count;
3117 int xfer_count;
3118 struct scsi_sense_hdr sshdr;
3120 scmd = io_request->scmd;
3121 if (!scmd)
3122 return;
3124 error_info = io_request->error_info;
3125 scsi_status = error_info->status;
3126 host_byte = DID_OK;
3128 switch (error_info->data_out_result) {
3129 case PQI_DATA_IN_OUT_GOOD:
3130 break;
3131 case PQI_DATA_IN_OUT_UNDERFLOW:
3132 xfer_count =
3133 get_unaligned_le32(&error_info->data_out_transferred);
3134 residual_count = scsi_bufflen(scmd) - xfer_count;
3135 scsi_set_resid(scmd, residual_count);
3136 if (xfer_count < scmd->underflow)
3137 host_byte = DID_SOFT_ERROR;
3138 break;
3139 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3140 case PQI_DATA_IN_OUT_ABORTED:
3141 host_byte = DID_ABORT;
3142 break;
3143 case PQI_DATA_IN_OUT_TIMEOUT:
3144 host_byte = DID_TIME_OUT;
3145 break;
3146 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3147 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3148 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3149 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3150 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3151 case PQI_DATA_IN_OUT_ERROR:
3152 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3153 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3154 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3155 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3156 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3157 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3158 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3159 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3160 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3161 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3162 default:
3163 host_byte = DID_ERROR;
3164 break;
3165 }
3167 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3168 if (sense_data_length == 0)
3169 sense_data_length =
3170 get_unaligned_le16(&error_info->response_data_length);
3171 if (sense_data_length) {
3172 if (sense_data_length > sizeof(error_info->data))
3173 sense_data_length = sizeof(error_info->data);
3175 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3176 scsi_normalize_sense(error_info->data,
3177 sense_data_length, &sshdr) &&
3178 sshdr.sense_key == HARDWARE_ERROR &&
3179 sshdr.asc == 0x3e) {
3180 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3181 struct pqi_scsi_dev *device = scmd->device->hostdata;
3183 switch (sshdr.ascq) {
3184 case 0x1: /* LOGICAL UNIT FAILURE */
3185 if (printk_ratelimit())
3186 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3187 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3188 pqi_take_device_offline(scmd->device, "RAID");
3189 host_byte = DID_NO_CONNECT;
3190 break;
3192 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3193 if (printk_ratelimit())
3194 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3195 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3196 break;
3197 }
3198 }
3200 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3201 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3202 memcpy(scmd->sense_buffer, error_info->data,
3203 sense_data_length);
3204 }
3206 scmd->result = scsi_status;
3207 set_host_byte(scmd, host_byte);
3210 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3211 {
3212 u8 scsi_status;
3213 u8 host_byte;
3214 struct scsi_cmnd *scmd;
3215 struct pqi_aio_error_info *error_info;
3216 size_t sense_data_length;
3217 int residual_count;
3218 int xfer_count;
3219 bool device_offline;
3220 struct pqi_scsi_dev *device;
3222 scmd = io_request->scmd;
3223 error_info = io_request->error_info;
3224 host_byte = DID_OK;
3225 sense_data_length = 0;
3226 device_offline = false;
3227 device = scmd->device->hostdata;
3229 switch (error_info->service_response) {
3230 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3231 scsi_status = error_info->status;
3232 break;
3233 case PQI_AIO_SERV_RESPONSE_FAILURE:
3234 switch (error_info->status) {
3235 case PQI_AIO_STATUS_IO_ABORTED:
3236 scsi_status = SAM_STAT_TASK_ABORTED;
3237 break;
3238 case PQI_AIO_STATUS_UNDERRUN:
3239 scsi_status = SAM_STAT_GOOD;
3240 residual_count = get_unaligned_le32(
3241 &error_info->residual_count);
3242 scsi_set_resid(scmd, residual_count);
3243 xfer_count = scsi_bufflen(scmd) - residual_count;
3244 if (xfer_count < scmd->underflow)
3245 host_byte = DID_SOFT_ERROR;
3246 break;
3247 case PQI_AIO_STATUS_OVERRUN:
3248 scsi_status = SAM_STAT_GOOD;
3249 break;
3250 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3251 pqi_aio_path_disabled(io_request);
3252 if (pqi_is_multipath_device(device)) {
3253 pqi_device_remove_start(device);
3254 host_byte = DID_NO_CONNECT;
3255 scsi_status = SAM_STAT_CHECK_CONDITION;
3256 } else {
3257 scsi_status = SAM_STAT_GOOD;
3258 io_request->status = -EAGAIN;
3259 }
3260 break;
3261 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3262 case PQI_AIO_STATUS_INVALID_DEVICE:
3263 if (!io_request->raid_bypass) {
3264 device_offline = true;
3265 pqi_take_device_offline(scmd->device, "AIO");
3266 host_byte = DID_NO_CONNECT;
3267 }
3268 scsi_status = SAM_STAT_CHECK_CONDITION;
3269 break;
3270 case PQI_AIO_STATUS_IO_ERROR:
3271 default:
3272 scsi_status = SAM_STAT_CHECK_CONDITION;
3273 break;
3274 }
3275 break;
3276 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3277 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3278 scsi_status = SAM_STAT_GOOD;
3279 break;
3280 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3281 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3282 default:
3283 scsi_status = SAM_STAT_CHECK_CONDITION;
3284 break;
3285 }
3287 if (error_info->data_present) {
3288 sense_data_length =
3289 get_unaligned_le16(&error_info->data_length);
3290 if (sense_data_length) {
3291 if (sense_data_length > sizeof(error_info->data))
3292 sense_data_length = sizeof(error_info->data);
3293 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3294 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3295 memcpy(scmd->sense_buffer, error_info->data,
3296 sense_data_length);
3297 }
3298 }
3300 if (device_offline && sense_data_length == 0)
3301 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3303 scmd->result = scsi_status;
3304 set_host_byte(scmd, host_byte);
3307 static void pqi_process_io_error(unsigned int iu_type,
3308 struct pqi_io_request *io_request)
3309 {
3310 switch (iu_type) {
3311 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3312 pqi_process_raid_io_error(io_request);
3313 break;
3314 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3315 pqi_process_aio_io_error(io_request);
3316 break;
3317 }
3318 }
3320 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3321 struct pqi_task_management_response *response)
3322 {
3323 int rc;
3325 switch (response->response_code) {
3326 case SOP_TMF_COMPLETE:
3327 case SOP_TMF_FUNCTION_SUCCEEDED:
3328 rc = 0;
3329 break;
3330 case SOP_TMF_REJECTED:
3331 rc = -EAGAIN;
3332 break;
3333 case SOP_RC_INCORRECT_LOGICAL_UNIT:
3334 rc = -ENODEV;
3335 break;
3336 default:
3337 rc = -EIO;
3338 break;
3339 }
3341 if (rc)
3342 dev_err(&ctrl_info->pci_dev->dev,
3343 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3348 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3349 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3351 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3354 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3355 {
3356 int num_responses;
3357 pqi_index_t oq_pi;
3358 pqi_index_t oq_ci;
3359 struct pqi_io_request *io_request;
3360 struct pqi_io_response *response;
3361 u16 request_id;
3363 num_responses = 0;
3364 oq_ci = queue_group->oq_ci_copy;
3366 while (1) {
3367 oq_pi = readl(queue_group->oq_pi);
3368 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3369 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3370 dev_err(&ctrl_info->pci_dev->dev,
3371 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3372 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3373 return -1;
3374 }
3375 if (oq_pi == oq_ci)
3376 break;
3378 num_responses++;
3379 response = queue_group->oq_element_array +
3380 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3382 request_id = get_unaligned_le16(&response->request_id);
3383 if (request_id >= ctrl_info->max_io_slots) {
3384 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3385 dev_err(&ctrl_info->pci_dev->dev,
3386 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3387 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3388 return -1;
3389 }
3391 io_request = &ctrl_info->io_request_pool[request_id];
3392 if (atomic_read(&io_request->refcount) == 0) {
3393 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3394 dev_err(&ctrl_info->pci_dev->dev,
3395 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3396 request_id, oq_pi, oq_ci);
3397 return -1;
3398 }
3400 switch (response->header.iu_type) {
3401 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3402 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3403 if (io_request->scmd)
3404 io_request->scmd->result = 0;
3405 break;
3406 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3407 break;
3408 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3409 io_request->status =
3410 get_unaligned_le16(
3411 &((struct pqi_vendor_general_response *)response)->status);
3412 break;
3413 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3414 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3415 (void *)response);
3416 break;
3417 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3418 pqi_aio_path_disabled(io_request);
3419 io_request->status = -EAGAIN;
3420 break;
3421 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3422 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3423 io_request->error_info = ctrl_info->error_buffer +
3424 (get_unaligned_le16(&response->error_index) *
3425 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3426 pqi_process_io_error(response->header.iu_type, io_request);
3427 break;
3428 default:
3429 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3430 dev_err(&ctrl_info->pci_dev->dev,
3431 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3432 response->header.iu_type, oq_pi, oq_ci);
3433 return -1;
3434 }
3436 io_request->io_complete_callback(io_request, io_request->context);
3439 * Note that the I/O request structure CANNOT BE TOUCHED after
3440 * returning from the I/O completion callback!
3441 */
3442 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3443 }
3445 if (num_responses) {
3446 queue_group->oq_ci_copy = oq_ci;
3447 writel(oq_ci, queue_group->oq_ci);
3450 return num_responses;
3453 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3454 unsigned int ci, unsigned int elements_in_queue)
3456 unsigned int num_elements_used;
3458 if (pi >= ci)
3459 num_elements_used = pi - ci;
3460 else
3461 num_elements_used = elements_in_queue - ci + pi;
3463 return elements_in_queue - num_elements_used - 1;
3464 }
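/*
 * Worked example: pi = 5, ci = 2, elements_in_queue = 8 gives 3 elements
 * used and 8 - 3 - 1 = 4 free; one slot is always kept unused so that a
 * full queue can be distinguished from an empty one.
 */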
3466 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3467 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3468 {
3469 pqi_index_t iq_pi;
3470 pqi_index_t iq_ci;
3471 unsigned long flags;
3472 void *next_element;
3473 struct pqi_queue_group *queue_group;
3475 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3476 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3478 while (1) {
3479 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3481 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3482 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3484 if (pqi_num_elements_free(iq_pi, iq_ci,
3485 ctrl_info->num_elements_per_iq))
3486 break;
3488 spin_unlock_irqrestore(
3489 &queue_group->submit_lock[RAID_PATH], flags);
3491 if (pqi_ctrl_offline(ctrl_info))
3492 return;
3493 }
3495 next_element = queue_group->iq_element_array[RAID_PATH] +
3496 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3498 memcpy(next_element, iu, iu_length);
3500 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3501 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3503 /*
3504 * This write notifies the controller that an IU is available to be
3505 * consumed.
3506 */
3507 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3509 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3512 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3513 struct pqi_event *event)
3515 struct pqi_event_acknowledge_request request;
3517 memset(&request, 0, sizeof(request));
3519 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3520 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3521 &request.header.iu_length);
3522 request.event_type = event->event_type;
3523 put_unaligned_le16(event->event_id, &request.event_id);
3524 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3526 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3529 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3530 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3532 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3533 struct pqi_ctrl_info *ctrl_info)
3534 {
3535 u8 status;
3536 unsigned long timeout;
3538 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3540 while (1) {
3541 status = pqi_read_soft_reset_status(ctrl_info);
3542 if (status & PQI_SOFT_RESET_INITIATE)
3543 return RESET_INITIATE_DRIVER;
3545 if (status & PQI_SOFT_RESET_ABORT)
3546 return RESET_ABORT;
3548 if (!sis_is_firmware_running(ctrl_info))
3549 return RESET_NORESPONSE;
3551 if (time_after(jiffies, timeout)) {
3552 dev_warn(&ctrl_info->pci_dev->dev,
3553 "timed out waiting for soft reset status\n");
3554 return RESET_TIMEDOUT;
3557 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3561 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3562 {
3563 int rc;
3564 unsigned int delay_secs;
3565 enum pqi_soft_reset_status reset_status;
3567 if (ctrl_info->soft_reset_handshake_supported)
3568 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3570 reset_status = RESET_INITIATE_FIRMWARE;
3572 delay_secs = PQI_POST_RESET_DELAY_SECS;
3574 switch (reset_status) {
3575 case RESET_TIMEDOUT:
3576 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3577 fallthrough;
3578 case RESET_INITIATE_DRIVER:
3579 dev_info(&ctrl_info->pci_dev->dev,
3580 "Online Firmware Activation: resetting controller\n");
3581 sis_soft_reset(ctrl_info);
3582 fallthrough;
3583 case RESET_INITIATE_FIRMWARE:
3584 ctrl_info->pqi_mode_enabled = false;
3585 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3586 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3587 pqi_ofa_free_host_buffer(ctrl_info);
3588 pqi_ctrl_ofa_done(ctrl_info);
3589 dev_info(&ctrl_info->pci_dev->dev,
3590 "Online Firmware Activation: %s\n",
3591 rc == 0 ? "SUCCESS" : "FAILED");
3594 dev_info(&ctrl_info->pci_dev->dev,
3595 "Online Firmware Activation ABORTED\n");
3596 if (ctrl_info->soft_reset_handshake_supported)
3597 pqi_clear_soft_reset_status(ctrl_info);
3598 pqi_ofa_free_host_buffer(ctrl_info);
3599 pqi_ctrl_ofa_done(ctrl_info);
3600 pqi_ofa_ctrl_unquiesce(ctrl_info);
3601 break;
3602 case RESET_NORESPONSE:
3603 fallthrough;
3604 default:
3605 dev_err(&ctrl_info->pci_dev->dev,
3606 "unexpected Online Firmware Activation reset status: 0x%x\n",
3608 pqi_ofa_free_host_buffer(ctrl_info);
3609 pqi_ctrl_ofa_done(ctrl_info);
3610 pqi_ofa_ctrl_unquiesce(ctrl_info);
3611 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3612 break;
3613 }
3614 }
3616 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3618 struct pqi_ctrl_info *ctrl_info;
3620 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3622 pqi_ctrl_ofa_start(ctrl_info);
3623 pqi_ofa_setup_host_buffer(ctrl_info);
3624 pqi_ofa_host_memory_update(ctrl_info);
3627 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3629 struct pqi_ctrl_info *ctrl_info;
3630 struct pqi_event *event;
3632 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3634 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3636 pqi_ofa_ctrl_quiesce(ctrl_info);
3637 pqi_acknowledge_event(ctrl_info, event);
3638 pqi_process_soft_reset(ctrl_info);
3641 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3642 struct pqi_event *event)
3643 {
3644 bool ack_event;
3646 ack_event = true;
3648 switch (event->event_id) {
3649 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3650 dev_info(&ctrl_info->pci_dev->dev,
3651 "received Online Firmware Activation memory allocation request\n");
3652 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3653 break;
3654 case PQI_EVENT_OFA_QUIESCE:
3655 dev_info(&ctrl_info->pci_dev->dev,
3656 "received Online Firmware Activation quiesce request\n");
3657 schedule_work(&ctrl_info->ofa_quiesce_work);
3658 ack_event = false;
3659 break;
3660 case PQI_EVENT_OFA_CANCELED:
3661 dev_info(&ctrl_info->pci_dev->dev,
3662 "received Online Firmware Activation cancel request: reason: %u\n",
3663 ctrl_info->ofa_cancel_reason);
3664 pqi_ofa_free_host_buffer(ctrl_info);
3665 pqi_ctrl_ofa_done(ctrl_info);
3666 break;
3667 default:
3668 dev_err(&ctrl_info->pci_dev->dev,
3669 "received unknown Online Firmware Activation request: event ID: %u\n",
3677 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3679 unsigned long flags;
3680 struct pqi_scsi_dev *device;
3682 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3684 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3685 if (device->raid_bypass_enabled)
3686 device->raid_bypass_enabled = false;
3688 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3691 static void pqi_event_worker(struct work_struct *work)
3692 {
3693 int i;
3694 bool rescan_needed;
3695 struct pqi_ctrl_info *ctrl_info;
3696 struct pqi_event *event;
3697 bool ack_event;
3699 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3701 pqi_ctrl_busy(ctrl_info);
3702 pqi_wait_if_ctrl_blocked(ctrl_info);
3703 if (pqi_ctrl_offline(ctrl_info))
3704 goto out;
3706 rescan_needed = false;
3707 event = ctrl_info->events;
3708 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3709 if (event->pending) {
3710 event->pending = false;
3711 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3712 ack_event = pqi_ofa_process_event(ctrl_info, event);
3713 } else {
3714 ack_event = true;
3715 rescan_needed = true;
3716 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3717 ctrl_info->logical_volume_rescan_needed = true;
3718 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3719 pqi_disable_raid_bypass(ctrl_info);
3720 }
3721 if (ack_event)
3722 pqi_acknowledge_event(ctrl_info, event);
3723 }
3724 event++;
3725 }
3727 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
3729 if (rescan_needed)
3730 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3731 PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3733 out:
3734 pqi_ctrl_unbusy(ctrl_info);
3735 }
3737 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
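/*
 * The handler below only declares the controller dead when both signs of
 * life stall across one interval: no new interrupts and a frozen
 * firmware heartbeat counter.
 */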
3739 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3740 {
3741 int num_interrupts;
3742 u32 heartbeat_count;
3743 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3745 pqi_check_ctrl_health(ctrl_info);
3746 if (pqi_ctrl_offline(ctrl_info))
3747 return;
3749 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3750 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3752 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3753 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3754 dev_err(&ctrl_info->pci_dev->dev,
3755 "no heartbeat detected - last heartbeat count: %u\n",
3757 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3761 ctrl_info->previous_num_interrupts = num_interrupts;
3764 ctrl_info->previous_heartbeat_count = heartbeat_count;
3765 mod_timer(&ctrl_info->heartbeat_timer,
3766 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3769 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3771 if (!ctrl_info->heartbeat_counter)
3772 return;
3774 ctrl_info->previous_num_interrupts =
3775 atomic_read(&ctrl_info->num_interrupts);
3776 ctrl_info->previous_heartbeat_count =
3777 pqi_read_heartbeat_counter(ctrl_info);
3779 ctrl_info->heartbeat_timer.expires =
3780 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3781 add_timer(&ctrl_info->heartbeat_timer);
3784 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3786 del_timer_sync(&ctrl_info->heartbeat_timer);
3789 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3790 struct pqi_event *event, struct pqi_event_response *response)
3792 switch (event->event_id) {
3793 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3794 ctrl_info->ofa_bytes_requested =
3795 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3796 break;
3797 case PQI_EVENT_OFA_CANCELED:
3798 ctrl_info->ofa_cancel_reason =
3799 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3800 break;
3801 }
3802 }
3804 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3805 {
3806 int num_events;
3807 pqi_index_t oq_pi;
3808 pqi_index_t oq_ci;
3809 struct pqi_event_queue *event_queue;
3810 struct pqi_event_response *response;
3811 struct pqi_event *event;
3812 int event_index;
3814 event_queue = &ctrl_info->event_queue;
3815 num_events = 0;
3816 oq_ci = event_queue->oq_ci_copy;
3818 while (1) {
3819 oq_pi = readl(event_queue->oq_pi);
3820 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3821 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3822 dev_err(&ctrl_info->pci_dev->dev,
3823 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3824 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3825 return -1;
3826 }
3828 if (oq_pi == oq_ci)
3829 break;
3831 num_events++;
3832 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3834 event_index = pqi_event_type_to_event_index(response->event_type);
3836 if (event_index >= 0 && response->request_acknowledge) {
3837 event = &ctrl_info->events[event_index];
3838 event->pending = true;
3839 event->event_type = response->event_type;
3840 event->event_id = get_unaligned_le16(&response->event_id);
3841 event->additional_event_id =
3842 get_unaligned_le32(&response->additional_event_id);
3843 if (event->event_type == PQI_EVENT_TYPE_OFA)
3844 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3845 }
3847 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3848 }
3850 if (num_events) {
3851 event_queue->oq_ci_copy = oq_ci;
3852 writel(oq_ci, event_queue->oq_ci);
3853 schedule_work(&ctrl_info->event_work);
3854 }
3856 return num_events;
3857 }
3859 #define PQI_LEGACY_INTX_MASK 0x1
3861 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3862 {
3863 u32 intx_mask;
3864 struct pqi_device_registers __iomem *pqi_registers;
3865 volatile void __iomem *register_addr;
3867 pqi_registers = ctrl_info->pqi_registers;
3869 if (enable_intx)
3870 register_addr = &pqi_registers->legacy_intx_mask_clear;
3871 else
3872 register_addr = &pqi_registers->legacy_intx_mask_set;
3874 intx_mask = readl(register_addr);
3875 intx_mask |= PQI_LEGACY_INTX_MASK;
3876 writel(intx_mask, register_addr);
3877 }
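/*
 * Enabling INTx writes the mask bit to legacy_intx_mask_clear (unmask);
 * disabling writes the same bit to legacy_intx_mask_set. The
 * read-modify-write above preserves any other bits in the register.
 */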
3879 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3880 enum pqi_irq_mode new_mode)
3882 switch (ctrl_info->irq_mode) {
3883 case IRQ_MODE_MSIX:
3884 switch (new_mode) {
3885 case IRQ_MODE_MSIX:
3886 break;
3887 case IRQ_MODE_INTX:
3888 pqi_configure_legacy_intx(ctrl_info, true);
3889 sis_enable_intx(ctrl_info);
3890 break;
3891 case IRQ_MODE_NONE:
3892 break;
3893 }
3894 break;
3895 case IRQ_MODE_INTX:
3896 switch (new_mode) {
3897 case IRQ_MODE_MSIX:
3898 pqi_configure_legacy_intx(ctrl_info, false);
3899 sis_enable_msix(ctrl_info);
3900 break;
3901 case IRQ_MODE_INTX:
3902 break;
3903 case IRQ_MODE_NONE:
3904 pqi_configure_legacy_intx(ctrl_info, false);
3905 break;
3906 }
3907 break;
3908 case IRQ_MODE_NONE:
3909 switch (new_mode) {
3910 case IRQ_MODE_MSIX:
3911 sis_enable_msix(ctrl_info);
3912 break;
3913 case IRQ_MODE_INTX:
3914 pqi_configure_legacy_intx(ctrl_info, true);
3915 sis_enable_intx(ctrl_info);
3916 break;
3917 case IRQ_MODE_NONE:
3918 break;
3919 }
3920 break;
3921 }
3923 ctrl_info->irq_mode = new_mode;
3924 }
3926 #define PQI_LEGACY_INTX_PENDING 0x1
3928 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3929 {
3930 bool valid_irq;
3931 u32 intx_status;
3933 switch (ctrl_info->irq_mode) {
3934 case IRQ_MODE_MSIX:
3935 valid_irq = true;
3936 break;
3937 case IRQ_MODE_INTX:
3938 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3939 if (intx_status & PQI_LEGACY_INTX_PENDING)
3940 valid_irq = true;
3941 else
3942 valid_irq = false;
3943 break;
3944 case IRQ_MODE_NONE:
3945 default:
3946 valid_irq = false;
3947 break;
3948 }
3950 return valid_irq;
3951 }
3953 static irqreturn_t pqi_irq_handler(int irq, void *data)
3955 struct pqi_ctrl_info *ctrl_info;
3956 struct pqi_queue_group *queue_group;
3957 int num_io_responses_handled;
3958 int num_events_handled;
3960 queue_group = data;
3961 ctrl_info = queue_group->ctrl_info;
3963 if (!pqi_is_valid_irq(ctrl_info))
3964 return IRQ_NONE;
3966 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3967 if (num_io_responses_handled < 0)
3968 goto out;
3970 if (irq == ctrl_info->event_irq) {
3971 num_events_handled = pqi_process_event_intr(ctrl_info);
3972 if (num_events_handled < 0)
3973 goto out;
3974 } else {
3975 num_events_handled = 0;
3976 }
3978 if (num_io_responses_handled + num_events_handled > 0)
3979 atomic_inc(&ctrl_info->num_interrupts);
3981 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3982 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3984 out:
3985 return IRQ_HANDLED;
3986 }
3988 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3989 {
3990 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3991 int i;
3992 int rc;
3994 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3996 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3997 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3998 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
4000 dev_err(&pci_dev->dev,
4001 "irq %u init failed with error %d\n",
4002 pci_irq_vector(pci_dev, i), rc);
4003 return rc;
4004 }
4005 ctrl_info->num_msix_vectors_initialized++;
4006 }
4008 return 0;
4009 }
4011 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4012 {
4013 int i;
4015 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4016 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4017 &ctrl_info->queue_groups[i]);
4019 ctrl_info->num_msix_vectors_initialized = 0;
4022 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4024 int num_vectors_enabled;
4025 unsigned int flags = PCI_IRQ_MSIX;
4027 if (!pqi_disable_managed_interrupts)
4028 flags |= PCI_IRQ_AFFINITY;
4030 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4031 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4032 flags);
4033 if (num_vectors_enabled < 0) {
4034 dev_err(&ctrl_info->pci_dev->dev,
4035 "MSI-X init failed with error %d\n",
4036 num_vectors_enabled);
4037 return num_vectors_enabled;
4040 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4041 ctrl_info->irq_mode = IRQ_MODE_MSIX;
4043 return 0;
4044 }
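/*
 * With PCI_IRQ_AFFINITY the MSI-X vectors are spread across CPUs as
 * managed interrupts; the pqi_disable_managed_interrupts module parameter
 * checked above opts out of that affinity management.
 */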
4045 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4047 if (ctrl_info->num_msix_vectors_enabled) {
4048 pci_free_irq_vectors(ctrl_info->pci_dev);
4049 ctrl_info->num_msix_vectors_enabled = 0;
4053 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4054 {
4055 int i;
4056 size_t alloc_length;
4057 size_t element_array_length_per_iq;
4058 size_t element_array_length_per_oq;
4059 void *element_array;
4060 void __iomem *next_queue_index;
4061 void *aligned_pointer;
4062 unsigned int num_inbound_queues;
4063 unsigned int num_outbound_queues;
4064 unsigned int num_queue_indexes;
4065 struct pqi_queue_group *queue_group;
4067 element_array_length_per_iq =
4068 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4069 ctrl_info->num_elements_per_iq;
4070 element_array_length_per_oq =
4071 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4072 ctrl_info->num_elements_per_oq;
4073 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4074 num_outbound_queues = ctrl_info->num_queue_groups;
4075 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4077 aligned_pointer = NULL;
4079 for (i = 0; i < num_inbound_queues; i++) {
4080 aligned_pointer = PTR_ALIGN(aligned_pointer,
4081 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4082 aligned_pointer += element_array_length_per_iq;
4085 for (i = 0; i < num_outbound_queues; i++) {
4086 aligned_pointer = PTR_ALIGN(aligned_pointer,
4087 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4088 aligned_pointer += element_array_length_per_oq;
4091 aligned_pointer = PTR_ALIGN(aligned_pointer,
4092 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4093 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4094 PQI_EVENT_OQ_ELEMENT_LENGTH;
4096 for (i = 0; i < num_queue_indexes; i++) {
4097 aligned_pointer = PTR_ALIGN(aligned_pointer,
4098 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4099 aligned_pointer += sizeof(pqi_index_t);
4102 alloc_length = (size_t)aligned_pointer +
4103 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4105 alloc_length += PQI_EXTRA_SGL_MEMORY;
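/*
 * End of the sizing pass: the worst-case length computed above assumes
 * maximal alignment padding before every element array and queue index,
 * so one coherent allocation can be carved up below with PTR_ALIGN
 * without overrunning the buffer.
 */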
4107 ctrl_info->queue_memory_base =
4108 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4109 &ctrl_info->queue_memory_base_dma_handle,
4110 GFP_KERNEL);
4112 if (!ctrl_info->queue_memory_base)
4113 return -ENOMEM;
4115 ctrl_info->queue_memory_length = alloc_length;
4117 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4118 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4120 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4121 queue_group = &ctrl_info->queue_groups[i];
4122 queue_group->iq_element_array[RAID_PATH] = element_array;
4123 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4124 ctrl_info->queue_memory_base_dma_handle +
4125 (element_array - ctrl_info->queue_memory_base);
4126 element_array += element_array_length_per_iq;
4127 element_array = PTR_ALIGN(element_array,
4128 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4129 queue_group->iq_element_array[AIO_PATH] = element_array;
4130 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4131 ctrl_info->queue_memory_base_dma_handle +
4132 (element_array - ctrl_info->queue_memory_base);
4133 element_array += element_array_length_per_iq;
4134 element_array = PTR_ALIGN(element_array,
4135 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4138 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4139 queue_group = &ctrl_info->queue_groups[i];
4140 queue_group->oq_element_array = element_array;
4141 queue_group->oq_element_array_bus_addr =
4142 ctrl_info->queue_memory_base_dma_handle +
4143 (element_array - ctrl_info->queue_memory_base);
4144 element_array += element_array_length_per_oq;
4145 element_array = PTR_ALIGN(element_array,
4146 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4149 ctrl_info->event_queue.oq_element_array = element_array;
4150 ctrl_info->event_queue.oq_element_array_bus_addr =
4151 ctrl_info->queue_memory_base_dma_handle +
4152 (element_array - ctrl_info->queue_memory_base);
4153 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4154 PQI_EVENT_OQ_ELEMENT_LENGTH;
4156 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4157 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4159 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4160 queue_group = &ctrl_info->queue_groups[i];
4161 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4162 queue_group->iq_ci_bus_addr[RAID_PATH] =
4163 ctrl_info->queue_memory_base_dma_handle +
4164 ((void __iomem *)queue_group->iq_ci[RAID_PATH] -
4165 (void __iomem *)ctrl_info->queue_memory_base);
4166 next_queue_index += sizeof(pqi_index_t);
4167 next_queue_index = PTR_ALIGN(next_queue_index,
4168 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4169 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4170 queue_group->iq_ci_bus_addr[AIO_PATH] =
4171 ctrl_info->queue_memory_base_dma_handle +
4172 ((void __iomem *)queue_group->iq_ci[AIO_PATH] -
4173 (void __iomem *)ctrl_info->queue_memory_base);
4174 next_queue_index += sizeof(pqi_index_t);
4175 next_queue_index = PTR_ALIGN(next_queue_index,
4176 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4177 queue_group->oq_pi = next_queue_index;
4178 queue_group->oq_pi_bus_addr =
4179 ctrl_info->queue_memory_base_dma_handle +
4180 ((void __iomem *)queue_group->oq_pi -
4181 (void __iomem *)ctrl_info->queue_memory_base);
4182 next_queue_index += sizeof(pqi_index_t);
4183 next_queue_index = PTR_ALIGN(next_queue_index,
4184 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4187 ctrl_info->event_queue.oq_pi = next_queue_index;
4188 ctrl_info->event_queue.oq_pi_bus_addr =
4189 ctrl_info->queue_memory_base_dma_handle +
4190 ((void __iomem *)ctrl_info->event_queue.oq_pi -
4191 (void __iomem *)ctrl_info->queue_memory_base);
4193 return 0;
4194 }
4196 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4197 {
4198 unsigned int i;
4199 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4200 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4203 * Initialize the backpointers to the controller structure in
4204 * each operational queue group structure.
4206 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4207 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4210 * Assign IDs to all operational queues. Note that the IDs
4211 * assigned to operational IQs are independent of the IDs
4212 * assigned to operational OQs.
4214 ctrl_info->event_queue.oq_id = next_oq_id++;
4215 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4216 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4217 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4218 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4222 * Assign MSI-X table entry indexes to all queues. Note that the
4223 * interrupt for the event queue is shared with the first queue group.
4225 ctrl_info->event_queue.int_msg_num = 0;
4226 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4227 ctrl_info->queue_groups[i].int_msg_num = i;
4229 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4230 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4231 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4232 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4233 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4234 }
4235 }
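/*
 * Worked example (illustrative; assumes PQI_MIN_OPERATIONAL_QUEUE_ID == 1):
 * with three queue groups, the event queue gets oq_id 1 and the groups get
 * oq_ids 2..4, while their RAID/AIO IQs get iq_ids 1/2, 3/4, and 5/6 --
 * the IQ and OQ ID spaces are independent, as the comment above notes.
 */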
4237 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4239 size_t alloc_length;
4240 struct pqi_admin_queues_aligned *admin_queues_aligned;
4241 struct pqi_admin_queues *admin_queues;
4243 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4244 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4246 ctrl_info->admin_queue_memory_base =
4247 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4248 &ctrl_info->admin_queue_memory_base_dma_handle,
4249 GFP_KERNEL);
4251 if (!ctrl_info->admin_queue_memory_base)
4252 return -ENOMEM;
4254 ctrl_info->admin_queue_memory_length = alloc_length;
4256 admin_queues = &ctrl_info->admin_queues;
4257 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4258 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4259 admin_queues->iq_element_array =
4260 &admin_queues_aligned->iq_element_array;
4261 admin_queues->oq_element_array =
4262 &admin_queues_aligned->oq_element_array;
4263 admin_queues->iq_ci =
4264 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4265 admin_queues->oq_pi =
4266 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4268 admin_queues->iq_element_array_bus_addr =
4269 ctrl_info->admin_queue_memory_base_dma_handle +
4270 (admin_queues->iq_element_array -
4271 ctrl_info->admin_queue_memory_base);
4272 admin_queues->oq_element_array_bus_addr =
4273 ctrl_info->admin_queue_memory_base_dma_handle +
4274 (admin_queues->oq_element_array -
4275 ctrl_info->admin_queue_memory_base);
4276 admin_queues->iq_ci_bus_addr =
4277 ctrl_info->admin_queue_memory_base_dma_handle +
4278 ((void __iomem *)admin_queues->iq_ci -
4279 (void __iomem *)ctrl_info->admin_queue_memory_base);
4280 admin_queues->oq_pi_bus_addr =
4281 ctrl_info->admin_queue_memory_base_dma_handle +
4282 ((void __iomem *)admin_queues->oq_pi -
4283 (void __iomem *)ctrl_info->admin_queue_memory_base);
4285 return 0;
4286 }
4288 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
4289 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4291 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4293 struct pqi_device_registers __iomem *pqi_registers;
4294 struct pqi_admin_queues *admin_queues;
4295 unsigned long timeout;
4296 u8 status;
4297 u32 reg;
4299 pqi_registers = ctrl_info->pqi_registers;
4300 admin_queues = &ctrl_info->admin_queues;
4302 writeq((u64)admin_queues->iq_element_array_bus_addr,
4303 &pqi_registers->admin_iq_element_array_addr);
4304 writeq((u64)admin_queues->oq_element_array_bus_addr,
4305 &pqi_registers->admin_oq_element_array_addr);
4306 writeq((u64)admin_queues->iq_ci_bus_addr,
4307 &pqi_registers->admin_iq_ci_addr);
4308 writeq((u64)admin_queues->oq_pi_bus_addr,
4309 &pqi_registers->admin_oq_pi_addr);
4311 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4312 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4313 (admin_queues->int_msg_num << 16);
4314 writel(reg, &pqi_registers->admin_iq_num_elements);
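/*
 * Field layout implied by the shifts above (a reading aid, not quoted from
 * the PQI spec): bits 0-7 carry the admin IQ element count, bits 8-15 the
 * admin OQ element count, and bits 16-23 the MSI-X message number.
 */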
4316 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4317 &pqi_registers->function_and_status_code);
4319 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4320 while (1) {
4321 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4322 status = readb(&pqi_registers->function_and_status_code);
4323 if (status == PQI_STATUS_IDLE)
4324 break;
4325 if (time_after(jiffies, timeout))
4326 return -ETIMEDOUT;
4327 }
4329 /*
4330 * The offset registers are not initialized to the correct
4331 * offsets until *after* the create admin queue pair command
4332 * completes successfully.
4333 */
4334 admin_queues->iq_pi = ctrl_info->iomem_base +
4335 PQI_DEVICE_REGISTERS_OFFSET +
4336 readq(&pqi_registers->admin_iq_pi_offset);
4337 admin_queues->oq_ci = ctrl_info->iomem_base +
4338 PQI_DEVICE_REGISTERS_OFFSET +
4339 readq(&pqi_registers->admin_oq_ci_offset);
4341 return 0;
4342 }
4344 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4345 struct pqi_general_admin_request *request)
4346 {
4347 struct pqi_admin_queues *admin_queues;
4348 void *next_element;
4349 pqi_index_t iq_pi;
4351 admin_queues = &ctrl_info->admin_queues;
4352 iq_pi = admin_queues->iq_pi_copy;
4354 next_element = admin_queues->iq_element_array +
4355 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4357 memcpy(next_element, request, sizeof(*request));
4359 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4360 admin_queues->iq_pi_copy = iq_pi;
4362 /*
4363 * This write notifies the controller that an IU is available to be
4364 * processed.
4365 */
4366 writel(iq_pi, admin_queues->iq_pi);
4367 }
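/*
 * Producer-index arithmetic, worked through: with
 * PQI_ADMIN_IQ_NUM_ELEMENTS == 8 (value taken from this driver's defines)
 * and iq_pi == 7, posting one IU yields (7 + 1) % 8 == 0, so the shadow
 * copy wraps to element 0 before the doorbell write above.
 */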
4369 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4371 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4372 struct pqi_general_admin_response *response)
4373 {
4374 struct pqi_admin_queues *admin_queues;
4375 pqi_index_t oq_pi;
4376 pqi_index_t oq_ci;
4377 unsigned long timeout;
4379 admin_queues = &ctrl_info->admin_queues;
4380 oq_ci = admin_queues->oq_ci_copy;
4382 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4384 while (1) {
4385 oq_pi = readl(admin_queues->oq_pi);
4386 if (oq_pi != oq_ci)
4387 break;
4388 if (time_after(jiffies, timeout)) {
4389 dev_err(&ctrl_info->pci_dev->dev,
4390 "timed out waiting for admin response\n");
4391 return -ETIMEDOUT;
4392 }
4393 if (!sis_is_firmware_running(ctrl_info))
4394 return -ENXIO;
4395 usleep_range(1000, 2000);
4396 }
4398 memcpy(response, admin_queues->oq_element_array +
4399 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4401 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4402 admin_queues->oq_ci_copy = oq_ci;
4403 writel(oq_ci, admin_queues->oq_ci);
4405 return 0;
4406 }
4408 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4409 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4410 struct pqi_io_request *io_request)
4411 {
4412 struct pqi_io_request *next;
4413 void *next_element;
4414 pqi_index_t iq_pi;
4415 pqi_index_t iq_ci;
4416 size_t iu_length;
4417 unsigned long flags;
4418 unsigned int num_elements_needed;
4419 unsigned int num_elements_to_end_of_queue;
4420 size_t copy_count;
4421 struct pqi_iu_header *request;
4423 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4425 if (io_request) {
4426 io_request->queue_group = queue_group;
4427 list_add_tail(&io_request->request_list_entry,
4428 &queue_group->request_list[path]);
4429 }
4431 iq_pi = queue_group->iq_pi_copy[path];
4433 list_for_each_entry_safe(io_request, next,
4434 &queue_group->request_list[path], request_list_entry) {
4436 request = io_request->iu;
4438 iu_length = get_unaligned_le16(&request->iu_length) +
4439 PQI_REQUEST_HEADER_LENGTH;
4440 num_elements_needed =
4441 DIV_ROUND_UP(iu_length,
4442 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4444 iq_ci = readl(queue_group->iq_ci[path]);
4446 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4447 ctrl_info->num_elements_per_iq))
4448 break;
4450 put_unaligned_le16(queue_group->oq_id,
4451 &request->response_queue_id);
4453 next_element = queue_group->iq_element_array[path] +
4454 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4456 num_elements_to_end_of_queue =
4457 ctrl_info->num_elements_per_iq - iq_pi;
4459 if (num_elements_needed <= num_elements_to_end_of_queue) {
4460 memcpy(next_element, request, iu_length);
4461 } else {
4462 copy_count = num_elements_to_end_of_queue *
4463 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4464 memcpy(next_element, request, copy_count);
4465 memcpy(queue_group->iq_element_array[path],
4466 (u8 *)request + copy_count,
4467 iu_length - copy_count);
4468 }
4470 iq_pi = (iq_pi + num_elements_needed) %
4471 ctrl_info->num_elements_per_iq;
4473 list_del(&io_request->request_list_entry);
4476 if (iq_pi != queue_group->iq_pi_copy[path]) {
4477 queue_group->iq_pi_copy[path] = iq_pi;
4478 /*
4479 * This write notifies the controller that one or more IUs are
4480 * available to be processed.
4481 */
4482 writel(iq_pi, queue_group->iq_pi[path]);
4483 }
4485 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4486 }
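/*
 * Free-space check, worked through (pqi_num_elements_free() is defined
 * earlier in this file; PI == CI means empty, so at most N - 1 elements
 * can ever be occupied): on a ring of N == 32 elements with iq_pi == 30
 * and iq_ci == 4, 26 elements are in use and 5 are free. A spanned IU
 * needing 4 elements is then copied as 2 elements at the end of the array
 * plus 2 at the start, and the new PI is (30 + 4) % 32 == 2.
 */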
4488 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4490 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4491 struct completion *wait)
4492 {
4493 int rc;
4495 while (1) {
4496 if (wait_for_completion_io_timeout(wait,
4497 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4498 rc = 0;
4499 break;
4500 }
4502 pqi_check_ctrl_health(ctrl_info);
4503 if (pqi_ctrl_offline(ctrl_info)) {
4504 rc = -ENXIO;
4505 break;
4506 }
4507 }
4509 return rc;
4510 }
4512 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4513 void *context)
4514 {
4515 struct completion *waiting = context;
4517 complete(waiting);
4518 }
4520 static int pqi_process_raid_io_error_synchronous(
4521 struct pqi_raid_error_info *error_info)
4522 {
4523 int rc = -EIO;
4525 switch (error_info->data_out_result) {
4526 case PQI_DATA_IN_OUT_GOOD:
4527 if (error_info->status == SAM_STAT_GOOD)
4528 rc = 0;
4529 break;
4530 case PQI_DATA_IN_OUT_UNDERFLOW:
4531 if (error_info->status == SAM_STAT_GOOD ||
4532 error_info->status == SAM_STAT_CHECK_CONDITION)
4533 rc = 0;
4534 break;
4535 case PQI_DATA_IN_OUT_ABORTED:
4536 rc = PQI_CMD_STATUS_ABORTED;
4537 break;
4538 }
4540 return rc;
4541 }
4543 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4544 {
4545 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4546 }
4548 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4549 struct pqi_iu_header *request, unsigned int flags,
4550 struct pqi_raid_error_info *error_info)
4551 {
4552 int rc = 0;
4553 struct pqi_io_request *io_request;
4554 size_t iu_length;
4555 DECLARE_COMPLETION_ONSTACK(wait);
4557 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4558 if (down_interruptible(&ctrl_info->sync_request_sem))
4559 return -ERESTARTSYS;
4560 } else {
4561 down(&ctrl_info->sync_request_sem);
4562 }
4564 pqi_ctrl_busy(ctrl_info);
4565 /*
4566 * Wait for other admin queue updates such as:
4567 * config table changes, OFA memory updates, ...
4568 */
4569 if (pqi_is_blockable_request(request))
4570 pqi_wait_if_ctrl_blocked(ctrl_info);
4572 if (pqi_ctrl_offline(ctrl_info)) {
4573 rc = -ENXIO;
4574 goto out;
4575 }
4577 io_request = pqi_alloc_io_request(ctrl_info, NULL);
4579 put_unaligned_le16(io_request->index,
4580 &(((struct pqi_raid_path_request *)request)->request_id));
4582 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4583 ((struct pqi_raid_path_request *)request)->error_index =
4584 ((struct pqi_raid_path_request *)request)->request_id;
4586 iu_length = get_unaligned_le16(&request->iu_length) +
4587 PQI_REQUEST_HEADER_LENGTH;
4588 memcpy(io_request->iu, request, iu_length);
4590 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4591 io_request->context = &wait;
4593 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4594 io_request);
4596 pqi_wait_for_completion_io(ctrl_info, &wait);
4598 if (error_info) {
4599 if (io_request->error_info)
4600 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4601 else
4602 memset(error_info, 0, sizeof(*error_info));
4603 } else if (rc == 0 && io_request->error_info) {
4604 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4605 }
4607 pqi_free_io_request(io_request);
4609 out:
4610 pqi_ctrl_unbusy(ctrl_info);
4611 up(&ctrl_info->sync_request_sem);
4613 return rc;
4614 }
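/*
 * Usage sketch (hypothetical caller, for illustration only): a RAID-path
 * IU is built on the stack and submitted on the default queue group; this
 * function then sleeps on the on-stack completion until
 * pqi_raid_synchronous_complete() fires:
 *
 *     struct pqi_raid_path_request request;
 *     struct pqi_raid_error_info error_info;
 *
 *     ... fill in request.header and the CDB ...
 *     rc = pqi_submit_raid_request_synchronous(ctrl_info,
 *             &request.header, 0, &error_info);
 */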
4616 static int pqi_validate_admin_response(
4617 struct pqi_general_admin_response *response, u8 expected_function_code)
4618 {
4619 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4620 return -EINVAL;
4622 if (get_unaligned_le16(&response->header.iu_length) !=
4623 PQI_GENERAL_ADMIN_IU_LENGTH)
4624 return -EINVAL;
4626 if (response->function_code != expected_function_code)
4627 return -EINVAL;
4629 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4630 return -EINVAL;
4632 return 0;
4633 }
4635 static int pqi_submit_admin_request_synchronous(
4636 struct pqi_ctrl_info *ctrl_info,
4637 struct pqi_general_admin_request *request,
4638 struct pqi_general_admin_response *response)
4639 {
4640 int rc;
4642 pqi_submit_admin_request(ctrl_info, request);
4644 rc = pqi_poll_for_admin_response(ctrl_info, response);
4646 if (rc == 0)
4647 rc = pqi_validate_admin_response(response, request->function_code);
4649 return rc;
4650 }
4652 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4655 struct pqi_general_admin_request request;
4656 struct pqi_general_admin_response response;
4657 struct pqi_device_capability *capability;
4658 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4660 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4661 if (!capability)
4662 return -ENOMEM;
4664 memset(&request, 0, sizeof(request));
4666 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4667 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4668 &request.header.iu_length);
4669 request.function_code =
4670 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4671 put_unaligned_le32(sizeof(*capability),
4672 &request.data.report_device_capability.buffer_length);
4674 rc = pqi_map_single(ctrl_info->pci_dev,
4675 &request.data.report_device_capability.sg_descriptor,
4676 capability, sizeof(*capability),
4677 DMA_FROM_DEVICE);
4678 if (rc)
4679 goto out;
4681 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4683 pqi_pci_unmap(ctrl_info->pci_dev,
4684 &request.data.report_device_capability.sg_descriptor, 1,
4685 DMA_FROM_DEVICE);
4687 if (rc)
4688 goto out;
4690 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4691 rc = -EIO;
4692 goto out;
4693 }
4695 ctrl_info->max_inbound_queues =
4696 get_unaligned_le16(&capability->max_inbound_queues);
4697 ctrl_info->max_elements_per_iq =
4698 get_unaligned_le16(&capability->max_elements_per_iq);
4699 ctrl_info->max_iq_element_length =
4700 get_unaligned_le16(&capability->max_iq_element_length)
4701 * 16;
4702 ctrl_info->max_outbound_queues =
4703 get_unaligned_le16(&capability->max_outbound_queues);
4704 ctrl_info->max_elements_per_oq =
4705 get_unaligned_le16(&capability->max_elements_per_oq);
4706 ctrl_info->max_oq_element_length =
4707 get_unaligned_le16(&capability->max_oq_element_length)
4708 * 16;
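/*
 * The "* 16" conversions above reflect that the reported element lengths
 * are in 16-byte units; the create-queue requests later divide the
 * driver's element sizes by 16 for the same reason.
 */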
4710 sop_iu_layer_descriptor =
4711 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4713 ctrl_info->max_inbound_iu_length_per_firmware =
4714 get_unaligned_le16(
4715 &sop_iu_layer_descriptor->max_inbound_iu_length);
4716 ctrl_info->inbound_spanning_supported =
4717 sop_iu_layer_descriptor->inbound_spanning_supported;
4718 ctrl_info->outbound_spanning_supported =
4719 sop_iu_layer_descriptor->outbound_spanning_supported;
4721 out:
4722 kfree(capability);
4724 return rc;
4725 }
4727 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4728 {
4729 if (ctrl_info->max_iq_element_length <
4730 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4731 dev_err(&ctrl_info->pci_dev->dev,
4732 "max. inbound queue element length of %d is less than the required length of %d\n",
4733 ctrl_info->max_iq_element_length,
4734 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4735 return -EINVAL;
4736 }
4738 if (ctrl_info->max_oq_element_length <
4739 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4740 dev_err(&ctrl_info->pci_dev->dev,
4741 "max. outbound queue element length of %d is less than the required length of %d\n",
4742 ctrl_info->max_oq_element_length,
4743 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4744 return -EINVAL;
4745 }
4747 if (ctrl_info->max_inbound_iu_length_per_firmware <
4748 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4749 dev_err(&ctrl_info->pci_dev->dev,
4750 "max. inbound IU length of %u is less than the min. required length of %d\n",
4751 ctrl_info->max_inbound_iu_length_per_firmware,
4752 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4753 return -EINVAL;
4754 }
4756 if (!ctrl_info->inbound_spanning_supported) {
4757 dev_err(&ctrl_info->pci_dev->dev,
4758 "the controller does not support inbound spanning\n");
4759 return -EINVAL;
4760 }
4762 if (ctrl_info->outbound_spanning_supported) {
4763 dev_err(&ctrl_info->pci_dev->dev,
4764 "the controller supports outbound spanning but this driver does not\n");
4765 return -EINVAL;
4766 }
4768 return 0;
4769 }
4771 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4772 {
4773 int rc;
4774 struct pqi_event_queue *event_queue;
4775 struct pqi_general_admin_request request;
4776 struct pqi_general_admin_response response;
4778 event_queue = &ctrl_info->event_queue;
4780 /*
4781 * Create OQ (Outbound Queue - device to host queue) to dedicate
4782 * to events.
4783 */
4784 memset(&request, 0, sizeof(request));
4785 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4786 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4787 &request.header.iu_length);
4788 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4789 put_unaligned_le16(event_queue->oq_id,
4790 &request.data.create_operational_oq.queue_id);
4791 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4792 &request.data.create_operational_oq.element_array_addr);
4793 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4794 &request.data.create_operational_oq.pi_addr);
4795 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4796 &request.data.create_operational_oq.num_elements);
4797 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4798 &request.data.create_operational_oq.element_length);
4799 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4800 put_unaligned_le16(event_queue->int_msg_num,
4801 &request.data.create_operational_oq.int_msg_num);
4803 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4804 &response);
4805 if (rc)
4806 return rc;
4808 event_queue->oq_ci = ctrl_info->iomem_base +
4809 PQI_DEVICE_REGISTERS_OFFSET +
4810 get_unaligned_le64(
4811 &response.data.create_operational_oq.oq_ci_offset);
4813 return 0;
4814 }
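/*
 * As with the admin queue pair, the controller returns the offset of the
 * new queue's consumer-index register in its response; the driver turns
 * it into a CPU address relative to iomem_base plus
 * PQI_DEVICE_REGISTERS_OFFSET.
 */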
4816 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4817 unsigned int group_number)
4818 {
4819 int rc;
4820 struct pqi_queue_group *queue_group;
4821 struct pqi_general_admin_request request;
4822 struct pqi_general_admin_response response;
4824 queue_group = &ctrl_info->queue_groups[group_number];
4826 /*
4827 * Create IQ (Inbound Queue - host to device queue) for
4828 * RAID path.
4829 */
4830 memset(&request, 0, sizeof(request));
4831 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4832 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4833 &request.header.iu_length);
4834 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4835 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4836 &request.data.create_operational_iq.queue_id);
4837 put_unaligned_le64(
4838 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4839 &request.data.create_operational_iq.element_array_addr);
4840 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4841 &request.data.create_operational_iq.ci_addr);
4842 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4843 &request.data.create_operational_iq.num_elements);
4844 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4845 &request.data.create_operational_iq.element_length);
4846 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4848 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4849 &response);
4850 if (rc) {
4851 dev_err(&ctrl_info->pci_dev->dev,
4852 "error creating inbound RAID queue\n");
4853 return rc;
4854 }
4856 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4857 PQI_DEVICE_REGISTERS_OFFSET +
4858 get_unaligned_le64(
4859 &response.data.create_operational_iq.iq_pi_offset);
4861 /*
4862 * Create IQ (Inbound Queue - host to device queue) for
4863 * Advanced I/O (AIO) path.
4864 */
4865 memset(&request, 0, sizeof(request));
4866 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4867 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4868 &request.header.iu_length);
4869 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4870 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4871 &request.data.create_operational_iq.queue_id);
4872 put_unaligned_le64((u64)queue_group->
4873 iq_element_array_bus_addr[AIO_PATH],
4874 &request.data.create_operational_iq.element_array_addr);
4875 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4876 &request.data.create_operational_iq.ci_addr);
4877 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4878 &request.data.create_operational_iq.num_elements);
4879 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4880 &request.data.create_operational_iq.element_length);
4881 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4883 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4884 &response);
4885 if (rc) {
4886 dev_err(&ctrl_info->pci_dev->dev,
4887 "error creating inbound AIO queue\n");
4888 return rc;
4889 }
4891 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4892 PQI_DEVICE_REGISTERS_OFFSET +
4893 get_unaligned_le64(
4894 &response.data.create_operational_iq.iq_pi_offset);
4896 /*
4897 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4898 * assumed to be for RAID path I/O unless we change the queue's
4899 * property.
4900 */
4901 memset(&request, 0, sizeof(request));
4902 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4903 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4904 &request.header.iu_length);
4905 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4906 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4907 &request.data.change_operational_iq_properties.queue_id);
4908 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4909 &request.data.change_operational_iq_properties.vendor_specific);
4911 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4912 &response);
4913 if (rc) {
4914 dev_err(&ctrl_info->pci_dev->dev,
4915 "error changing queue property\n");
4916 return rc;
4917 }
4919 /*
4920 * Create OQ (Outbound Queue - device to host queue).
4921 */
4922 memset(&request, 0, sizeof(request));
4923 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4924 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4925 &request.header.iu_length);
4926 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4927 put_unaligned_le16(queue_group->oq_id,
4928 &request.data.create_operational_oq.queue_id);
4929 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4930 &request.data.create_operational_oq.element_array_addr);
4931 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4932 &request.data.create_operational_oq.pi_addr);
4933 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4934 &request.data.create_operational_oq.num_elements);
4935 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4936 &request.data.create_operational_oq.element_length);
4937 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4938 put_unaligned_le16(queue_group->int_msg_num,
4939 &request.data.create_operational_oq.int_msg_num);
4941 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4942 &response);
4943 if (rc) {
4944 dev_err(&ctrl_info->pci_dev->dev,
4945 "error creating outbound queue\n");
4946 return rc;
4947 }
4949 queue_group->oq_ci = ctrl_info->iomem_base +
4950 PQI_DEVICE_REGISTERS_OFFSET +
4951 get_unaligned_le64(
4952 &response.data.create_operational_oq.oq_ci_offset);
4954 return 0;
4955 }
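/*
 * Worked example of the element-length encoding used in these requests
 * (driver's defines assumed: 128-byte IQ elements, 16-byte OQ elements):
 * the IQ create request carries 128 / 16 == 8 and the OQ create request
 * carries 16 / 16 == 1, since the PQI interface counts element length in
 * 16-byte units.
 */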
4957 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4958 {
4959 int rc;
4960 unsigned int i;
4962 rc = pqi_create_event_queue(ctrl_info);
4963 if (rc) {
4964 dev_err(&ctrl_info->pci_dev->dev,
4965 "error creating event queue\n");
4966 return rc;
4967 }
4969 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4970 rc = pqi_create_queue_group(ctrl_info, i);
4971 if (rc) {
4972 dev_err(&ctrl_info->pci_dev->dev,
4973 "error creating queue group number %u/%u\n",
4974 i, ctrl_info->num_queue_groups);
4975 return rc;
4976 }
4977 }
4979 return 0;
4980 }
4982 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4983 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
4985 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4986 bool enable_events)
4987 {
4988 int rc;
4989 unsigned int i;
4990 struct pqi_event_config *event_config;
4991 struct pqi_event_descriptor *event_descriptor;
4992 struct pqi_general_management_request request;
4994 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4995 GFP_KERNEL);
4996 if (!event_config)
4997 return -ENOMEM;
4999 memset(&request, 0, sizeof(request));
5001 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5002 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5003 data.report_event_configuration.sg_descriptors[1]) -
5004 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5005 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5006 &request.data.report_event_configuration.buffer_length);
5008 rc = pqi_map_single(ctrl_info->pci_dev,
5009 request.data.report_event_configuration.sg_descriptors,
5010 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5011 DMA_FROM_DEVICE);
5012 if (rc)
5013 goto out;
5015 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5017 pqi_pci_unmap(ctrl_info->pci_dev,
5018 request.data.report_event_configuration.sg_descriptors, 1,
5019 DMA_FROM_DEVICE);
5021 if (rc)
5022 goto out;
5024 for (i = 0; i < event_config->num_event_descriptors; i++) {
5025 event_descriptor = &event_config->descriptors[i];
5026 if (enable_events &&
5027 pqi_is_supported_event(event_descriptor->event_type))
5028 put_unaligned_le16(ctrl_info->event_queue.oq_id,
5029 &event_descriptor->oq_id);
5030 else
5031 put_unaligned_le16(0, &event_descriptor->oq_id);
5032 }
5034 memset(&request, 0, sizeof(request));
5036 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5037 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5038 data.report_event_configuration.sg_descriptors[1]) -
5039 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5040 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5041 &request.data.report_event_configuration.buffer_length);
5043 rc = pqi_map_single(ctrl_info->pci_dev,
5044 request.data.report_event_configuration.sg_descriptors,
5045 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5046 DMA_TO_DEVICE);
5047 if (rc)
5048 goto out;
5050 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5052 pqi_pci_unmap(ctrl_info->pci_dev,
5053 request.data.report_event_configuration.sg_descriptors, 1,
5054 DMA_TO_DEVICE);
5056 out:
5057 kfree(event_config);
5059 return rc;
5060 }
5062 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5063 {
5064 return pqi_configure_events(ctrl_info, true);
5065 }
5067 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5068 {
5069 unsigned int i;
5070 struct device *dev;
5071 size_t sg_chain_buffer_length;
5072 struct pqi_io_request *io_request;
5074 if (!ctrl_info->io_request_pool)
5075 return;
5077 dev = &ctrl_info->pci_dev->dev;
5078 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5079 io_request = ctrl_info->io_request_pool;
5081 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5082 kfree(io_request->iu);
5083 if (!io_request->sg_chain_buffer)
5084 break;
5085 dma_free_coherent(dev, sg_chain_buffer_length,
5086 io_request->sg_chain_buffer,
5087 io_request->sg_chain_buffer_dma_handle);
5088 io_request++;
5089 }
5091 kfree(ctrl_info->io_request_pool);
5092 ctrl_info->io_request_pool = NULL;
5095 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5097 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5098 ctrl_info->error_buffer_length,
5099 &ctrl_info->error_buffer_dma_handle,
5100 GFP_KERNEL);
5101 if (!ctrl_info->error_buffer)
5102 return -ENOMEM;
5104 return 0;
5105 }
5107 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5110 void *sg_chain_buffer;
5111 size_t sg_chain_buffer_length;
5112 dma_addr_t sg_chain_buffer_dma_handle;
5114 struct pqi_io_request *io_request;
5116 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5117 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5119 if (!ctrl_info->io_request_pool) {
5120 dev_err(&ctrl_info->pci_dev->dev,
5121 "failed to allocate I/O request pool\n");
5125 dev = &ctrl_info->pci_dev->dev;
5126 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5127 io_request = ctrl_info->io_request_pool;
5129 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5130 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5132 if (!io_request->iu) {
5133 dev_err(&ctrl_info->pci_dev->dev,
5134 "failed to allocate IU buffers\n");
5138 sg_chain_buffer = dma_alloc_coherent(dev,
5139 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5142 if (!sg_chain_buffer) {
5143 dev_err(&ctrl_info->pci_dev->dev,
5144 "failed to allocate PQI scatter-gather chain buffers\n");
5148 io_request->index = i;
5149 io_request->sg_chain_buffer = sg_chain_buffer;
5150 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5151 io_request++;
5152 }
5154 return 0;
5156 error:
5157 pqi_free_all_io_requests(ctrl_info);
5159 return -ENOMEM;
5160 }
5162 /*
5163 * Calculate required resources that are sized based on max. outstanding
5164 * requests and max. transfer size.
5165 */
5167 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5168 {
5169 u32 max_transfer_size;
5170 u32 max_sg_entries;
5172 ctrl_info->scsi_ml_can_queue =
5173 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5174 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5176 ctrl_info->error_buffer_length =
5177 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5179 if (reset_devices)
5180 max_transfer_size = min(ctrl_info->max_transfer_size,
5181 PQI_MAX_TRANSFER_SIZE_KDUMP);
5182 else
5183 max_transfer_size = min(ctrl_info->max_transfer_size,
5184 PQI_MAX_TRANSFER_SIZE);
5186 max_sg_entries = max_transfer_size / PAGE_SIZE;
5188 /* +1 to cover when the buffer is not page-aligned. */
5189 max_sg_entries++;
5191 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5193 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5195 ctrl_info->sg_chain_buffer_length =
5196 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5197 PQI_EXTRA_SGL_MEMORY;
5198 ctrl_info->sg_tablesize = max_sg_entries;
5199 ctrl_info->max_sectors = max_transfer_size / 512;
5200 }
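/*
 * Worked example (illustrative values): with a 1 MiB transfer limit and
 * 4 KiB pages, max_sg_entries starts at 256 and becomes 257 after the +1
 * for a non-page-aligned buffer. After clamping to the controller's SG
 * limit, max_transfer_size is recomputed as (max_sg_entries - 1) *
 * PAGE_SIZE so a misaligned buffer still fits, and max_sectors is that
 * size divided by 512.
 */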
5202 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5204 int num_queue_groups;
5205 u16 num_elements_per_iq;
5206 u16 num_elements_per_oq;
5208 if (reset_devices) {
5209 num_queue_groups = 1;
5210 } else {
5211 int num_cpus;
5212 int max_queue_groups;
5214 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5215 ctrl_info->max_outbound_queues - 1);
5216 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5218 num_cpus = num_online_cpus();
5219 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5220 num_queue_groups = min(num_queue_groups, max_queue_groups);
5221 }
5223 ctrl_info->num_queue_groups = num_queue_groups;
5226 * Make sure that the max. inbound IU length is an even multiple
5227 * of our inbound element length.
5229 ctrl_info->max_inbound_iu_length =
5230 (ctrl_info->max_inbound_iu_length_per_firmware /
5231 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5232 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5234 num_elements_per_iq =
5235 (ctrl_info->max_inbound_iu_length /
5236 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5238 /* Add one because one element in each queue is unusable. */
5239 num_elements_per_iq++;
5241 num_elements_per_iq = min(num_elements_per_iq,
5242 ctrl_info->max_elements_per_iq);
5244 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5245 num_elements_per_oq = min(num_elements_per_oq,
5246 ctrl_info->max_elements_per_oq);
5248 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5249 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5251 ctrl_info->max_sg_per_iu =
5252 ((ctrl_info->max_inbound_iu_length -
5253 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5254 sizeof(struct pqi_sg_descriptor)) +
5255 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5257 ctrl_info->max_sg_per_r56_iu =
5258 ((ctrl_info->max_inbound_iu_length -
5259 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5260 sizeof(struct pqi_sg_descriptor)) +
5261 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5262 }
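/*
 * Worked example (assuming the usual 128-byte IQ element and a firmware
 * max inbound IU length of 1068 bytes): max_inbound_iu_length rounds down
 * to 1024, i.e. 8 elements per fully spanned IU, and num_elements_per_iq
 * becomes 9 before being clamped -- the extra element compensates for the
 * one ring slot that must stay empty so PI == CI still means "empty".
 */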
5264 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5265 struct scatterlist *sg)
5267 u64 address = (u64)sg_dma_address(sg);
5268 unsigned int length = sg_dma_len(sg);
5270 put_unaligned_le64(address, &sg_descriptor->address);
5271 put_unaligned_le32(length, &sg_descriptor->length);
5272 put_unaligned_le32(0, &sg_descriptor->flags);
5275 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5276 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5277 int max_sg_per_iu, bool *chained)
5278 {
5279 int i;
5280 unsigned int num_sg_in_iu;
5282 *chained = false;
5283 i = 0;
5284 num_sg_in_iu = 0;
5285 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
5287 while (1) {
5288 pqi_set_sg_descriptor(sg_descriptor, sg);
5289 if (!*chained)
5290 num_sg_in_iu++;
5291 i++;
5292 if (i == sg_count)
5293 break;
5294 sg_descriptor++;
5295 if (i == max_sg_per_iu) {
5296 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5297 &sg_descriptor->address);
5298 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5299 &sg_descriptor->length);
5300 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5301 *chained = true;
5302 num_sg_in_iu++;
5303 sg_descriptor = io_request->sg_chain_buffer;
5304 }
5305 sg = sg_next(sg);
5306 }
5308 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5310 return num_sg_in_iu;
5311 }
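/*
 * Chaining sketch, worked through: with max_sg_per_iu == 4 (3 usable
 * in-IU slots after the decrement above) and sg_count == 5, descriptors
 * for the first three SG entries land in the request, the fourth in-IU
 * slot becomes a CISS_SG_CHAIN descriptor whose length is (5 - 3) *
 * sizeof(struct pqi_sg_descriptor) and whose address is the chain buffer,
 * the last two entries are written to the chain buffer, CISS_SG_LAST is
 * set on the final one, and 4 is returned as the in-IU descriptor count.
 */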
5313 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5314 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5315 struct pqi_io_request *io_request)
5316 {
5317 u16 iu_length;
5318 int sg_count;
5319 bool chained;
5320 unsigned int num_sg_in_iu;
5321 struct scatterlist *sg;
5322 struct pqi_sg_descriptor *sg_descriptor;
5324 sg_count = scsi_dma_map(scmd);
5325 if (sg_count < 0)
5326 return sg_count;
5328 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5329 PQI_REQUEST_HEADER_LENGTH;
5331 if (sg_count == 0)
5332 goto out;
5334 sg = scsi_sglist(scmd);
5335 sg_descriptor = request->sg_descriptors;
5337 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5338 ctrl_info->max_sg_per_iu, &chained);
5340 request->partial = chained;
5341 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5343 out:
5344 put_unaligned_le16(iu_length, &request->header.iu_length);
5346 return 0;
5347 }
5349 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5350 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5351 struct pqi_io_request *io_request)
5352 {
5353 u16 iu_length;
5354 int sg_count;
5355 bool chained;
5356 unsigned int num_sg_in_iu;
5357 struct scatterlist *sg;
5358 struct pqi_sg_descriptor *sg_descriptor;
5360 sg_count = scsi_dma_map(scmd);
5361 if (sg_count < 0)
5362 return sg_count;
5364 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5365 PQI_REQUEST_HEADER_LENGTH;
5366 num_sg_in_iu = 0;
5368 if (sg_count == 0)
5369 goto out;
5371 sg = scsi_sglist(scmd);
5372 sg_descriptor = request->sg_descriptors;
5374 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5375 ctrl_info->max_sg_per_iu, &chained);
5377 request->partial = chained;
5378 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5380 out:
5381 put_unaligned_le16(iu_length, &request->header.iu_length);
5382 request->num_sg_descriptors = num_sg_in_iu;
5384 return 0;
5385 }
5387 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5388 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5389 struct pqi_io_request *io_request)
5390 {
5391 u16 iu_length;
5392 int sg_count;
5393 bool chained;
5394 unsigned int num_sg_in_iu;
5395 struct scatterlist *sg;
5396 struct pqi_sg_descriptor *sg_descriptor;
5398 sg_count = scsi_dma_map(scmd);
5399 if (sg_count < 0)
5400 return sg_count;
5402 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5403 PQI_REQUEST_HEADER_LENGTH;
5404 num_sg_in_iu = 0;
5406 if (sg_count != 0) {
5407 sg = scsi_sglist(scmd);
5408 sg_descriptor = request->sg_descriptors;
5410 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5411 ctrl_info->max_sg_per_r56_iu, &chained);
5413 request->partial = chained;
5414 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5415 }
5417 put_unaligned_le16(iu_length, &request->header.iu_length);
5418 request->num_sg_descriptors = num_sg_in_iu;
5420 return 0;
5421 }
5423 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5424 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5425 struct pqi_io_request *io_request)
5426 {
5427 u16 iu_length;
5428 int sg_count;
5429 bool chained;
5430 unsigned int num_sg_in_iu;
5431 struct scatterlist *sg;
5432 struct pqi_sg_descriptor *sg_descriptor;
5434 sg_count = scsi_dma_map(scmd);
5435 if (sg_count < 0)
5436 return sg_count;
5438 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5439 PQI_REQUEST_HEADER_LENGTH;
5440 num_sg_in_iu = 0;
5442 if (sg_count == 0)
5443 goto out;
5445 sg = scsi_sglist(scmd);
5446 sg_descriptor = request->sg_descriptors;
5448 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5449 ctrl_info->max_sg_per_iu, &chained);
5451 request->partial = chained;
5452 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5454 out:
5455 put_unaligned_le16(iu_length, &request->header.iu_length);
5456 request->num_sg_descriptors = num_sg_in_iu;
5458 return 0;
5459 }
5461 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5462 void *context)
5463 {
5464 struct scsi_cmnd *scmd;
5466 scmd = io_request->scmd;
5467 pqi_free_io_request(io_request);
5468 scsi_dma_unmap(scmd);
5469 pqi_scsi_done(scmd);
5472 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5473 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5474 struct pqi_queue_group *queue_group, bool io_high_prio)
5475 {
5476 int rc;
5477 size_t cdb_length;
5478 struct pqi_io_request *io_request;
5479 struct pqi_raid_path_request *request;
5481 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5482 if (!io_request)
5483 return SCSI_MLQUEUE_HOST_BUSY;
5485 io_request->io_complete_callback = pqi_raid_io_complete;
5486 io_request->scmd = scmd;
5488 request = io_request->iu;
5489 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5491 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5492 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5493 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5494 request->command_priority = io_high_prio;
5495 put_unaligned_le16(io_request->index, &request->request_id);
5496 request->error_index = request->request_id;
5497 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5498 request->ml_device_lun_number = (u8)scmd->device->lun;
5500 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5501 memcpy(request->cdb, scmd->cmnd, cdb_length);
5503 switch (cdb_length) {
5504 case 6:
5505 case 10:
5506 case 12:
5507 case 16:
5508 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5509 break;
5510 case 20:
5511 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5512 break;
5513 case 24:
5514 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5515 break;
5516 case 28:
5517 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5518 break;
5519 case 32:
5520 default:
5521 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5522 break;
5523 }
5525 switch (scmd->sc_data_direction) {
5526 case DMA_FROM_DEVICE:
5527 request->data_direction = SOP_READ_FLAG;
5528 break;
5529 case DMA_TO_DEVICE:
5530 request->data_direction = SOP_WRITE_FLAG;
5531 break;
5532 case DMA_NONE:
5533 request->data_direction = SOP_NO_DIRECTION_FLAG;
5534 break;
5535 case DMA_BIDIRECTIONAL:
5536 request->data_direction = SOP_BIDIRECTIONAL;
5537 break;
5538 default:
5539 dev_err(&ctrl_info->pci_dev->dev,
5540 "unknown data direction: %d\n",
5541 scmd->sc_data_direction);
5542 break;
5543 }
5545 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5546 if (rc) {
5547 pqi_free_io_request(io_request);
5548 return SCSI_MLQUEUE_HOST_BUSY;
5549 }
5551 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5553 return 0;
5554 }
5556 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5557 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5558 struct pqi_queue_group *queue_group)
5559 {
5560 bool io_high_prio;
5562 io_high_prio = pqi_is_io_high_priority(device, scmd);
5564 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5567 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5569 struct scsi_cmnd *scmd;
5570 struct pqi_scsi_dev *device;
5571 struct pqi_ctrl_info *ctrl_info;
5573 if (!io_request->raid_bypass)
5574 return false;
5576 scmd = io_request->scmd;
5577 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5578 return false;
5579 if (host_byte(scmd->result) == DID_NO_CONNECT)
5580 return false;
5582 device = scmd->device->hostdata;
5583 if (pqi_device_offline(device) || pqi_device_in_remove(device))
5584 return false;
5586 ctrl_info = shost_to_hba(scmd->device->host);
5587 if (pqi_ctrl_offline(ctrl_info))
5588 return false;
5590 return true;
5591 }
5593 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5594 void *context)
5595 {
5596 struct scsi_cmnd *scmd;
5598 scmd = io_request->scmd;
5599 scsi_dma_unmap(scmd);
5600 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5601 set_host_byte(scmd, DID_IMM_RETRY);
5602 pqi_cmd_priv(scmd)->this_residual++;
5603 }
5605 pqi_free_io_request(io_request);
5606 pqi_scsi_done(scmd);
5607 }
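/*
 * Note: completing with DID_IMM_RETRY makes the SML resend the command;
 * bumping this_residual ensures pqi_is_bypass_eligible_request() below
 * returns false for the retry, so it travels the RAID path instead of
 * the bypass path that just failed.
 */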
5609 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5610 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5611 struct pqi_queue_group *queue_group)
5612 {
5613 bool io_high_prio;
5615 io_high_prio = pqi_is_io_high_priority(device, scmd);
5617 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5618 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5619 false, io_high_prio);
5622 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5623 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5624 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5625 struct pqi_encryption_info *encryption_info, bool raid_bypass,
5626 bool io_high_prio)
5627 {
5628 int rc;
5629 struct pqi_io_request *io_request;
5630 struct pqi_aio_path_request *request;
5631 struct pqi_scsi_dev *device;
5633 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5634 if (!io_request)
5635 return SCSI_MLQUEUE_HOST_BUSY;
5637 io_request->io_complete_callback = pqi_aio_io_complete;
5638 io_request->scmd = scmd;
5639 io_request->raid_bypass = raid_bypass;
5641 request = io_request->iu;
5642 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5644 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5645 put_unaligned_le32(aio_handle, &request->nexus_id);
5646 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5647 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5648 request->command_priority = io_high_prio;
5649 put_unaligned_le16(io_request->index, &request->request_id);
5650 request->error_index = request->request_id;
5651 device = scmd->device->hostdata;
5652 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
5653 put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
5654 if (cdb_length > sizeof(request->cdb))
5655 cdb_length = sizeof(request->cdb);
5656 request->cdb_length = cdb_length;
5657 memcpy(request->cdb, cdb, cdb_length);
5659 switch (scmd->sc_data_direction) {
5660 case DMA_TO_DEVICE:
5661 request->data_direction = SOP_READ_FLAG;
5662 break;
5663 case DMA_FROM_DEVICE:
5664 request->data_direction = SOP_WRITE_FLAG;
5665 break;
5666 case DMA_NONE:
5667 request->data_direction = SOP_NO_DIRECTION_FLAG;
5668 break;
5669 case DMA_BIDIRECTIONAL:
5670 request->data_direction = SOP_BIDIRECTIONAL;
5671 break;
5672 default:
5673 dev_err(&ctrl_info->pci_dev->dev,
5674 "unknown data direction: %d\n",
5675 scmd->sc_data_direction);
5676 break;
5677 }
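/*
 * Note the inversion relative to the RAID path above: for AIO IUs the
 * direction flag is expressed from the device's side, so a host write
 * (DMA_TO_DEVICE) is posted as SOP_READ_FLAG -- presumably because the
 * controller reads the host buffer. The RAID 1 and RAID 5/6 write
 * helpers below use SOP_READ_FLAG for the same reason ("the direction
 * is always write").
 */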
5679 if (encryption_info) {
5680 request->encryption_enable = true;
5681 put_unaligned_le16(encryption_info->data_encryption_key_index,
5682 &request->data_encryption_key_index);
5683 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5684 &request->encrypt_tweak_lower);
5685 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5686 &request->encrypt_tweak_upper);
5689 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5690 if (rc) {
5691 pqi_free_io_request(io_request);
5692 return SCSI_MLQUEUE_HOST_BUSY;
5693 }
5695 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5697 return 0;
5698 }
5700 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5701 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5702 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5703 struct pqi_scsi_dev_raid_map_data *rmd)
5704 {
5705 int rc;
5706 struct pqi_io_request *io_request;
5707 struct pqi_aio_r1_path_request *r1_request;
5709 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5710 if (!io_request)
5711 return SCSI_MLQUEUE_HOST_BUSY;
5713 io_request->io_complete_callback = pqi_aio_io_complete;
5714 io_request->scmd = scmd;
5715 io_request->raid_bypass = true;
5717 r1_request = io_request->iu;
5718 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5720 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5721 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5722 r1_request->num_drives = rmd->num_it_nexus_entries;
5723 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5724 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5725 if (rmd->num_it_nexus_entries == 3)
5726 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5728 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5729 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5730 put_unaligned_le16(io_request->index, &r1_request->request_id);
5731 r1_request->error_index = r1_request->request_id;
5732 if (rmd->cdb_length > sizeof(r1_request->cdb))
5733 rmd->cdb_length = sizeof(r1_request->cdb);
5734 r1_request->cdb_length = rmd->cdb_length;
5735 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5737 /* The direction is always write. */
5738 r1_request->data_direction = SOP_READ_FLAG;
5740 if (encryption_info) {
5741 r1_request->encryption_enable = true;
5742 put_unaligned_le16(encryption_info->data_encryption_key_index,
5743 &r1_request->data_encryption_key_index);
5744 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5745 &r1_request->encrypt_tweak_lower);
5746 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5747 &r1_request->encrypt_tweak_upper);
5750 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5751 if (rc) {
5752 pqi_free_io_request(io_request);
5753 return SCSI_MLQUEUE_HOST_BUSY;
5754 }
5756 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5758 return 0;
5759 }
5761 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5762 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5763 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5764 struct pqi_scsi_dev_raid_map_data *rmd)
5765 {
5766 int rc;
5767 struct pqi_io_request *io_request;
5768 struct pqi_aio_r56_path_request *r56_request;
5770 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5771 if (!io_request)
5772 return SCSI_MLQUEUE_HOST_BUSY;
5773 io_request->io_complete_callback = pqi_aio_io_complete;
5774 io_request->scmd = scmd;
5775 io_request->raid_bypass = true;
5777 r56_request = io_request->iu;
5778 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5780 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5781 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5782 else
5783 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5785 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5786 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5787 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5788 if (rmd->raid_level == SA_RAID_6) {
5789 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5790 r56_request->xor_multiplier = rmd->xor_mult;
5791 }
5792 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5793 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5794 put_unaligned_le64(rmd->row, &r56_request->row);
5796 put_unaligned_le16(io_request->index, &r56_request->request_id);
5797 r56_request->error_index = r56_request->request_id;
5799 if (rmd->cdb_length > sizeof(r56_request->cdb))
5800 rmd->cdb_length = sizeof(r56_request->cdb);
5801 r56_request->cdb_length = rmd->cdb_length;
5802 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5804 /* The direction is always write. */
5805 r56_request->data_direction = SOP_READ_FLAG;
5807 if (encryption_info) {
5808 r56_request->encryption_enable = true;
5809 put_unaligned_le16(encryption_info->data_encryption_key_index,
5810 &r56_request->data_encryption_key_index);
5811 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5812 &r56_request->encrypt_tweak_lower);
5813 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5814 &r56_request->encrypt_tweak_upper);
5817 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5818 if (rc) {
5819 pqi_free_io_request(io_request);
5820 return SCSI_MLQUEUE_HOST_BUSY;
5821 }
5823 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5825 return 0;
5826 }
5828 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5829 struct scsi_cmnd *scmd)
5830 {
5831 /*
5832 * We are setting host_tagset = 1 during init.
5833 */
5834 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5835 }
5837 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5838 {
5839 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5840 return false;
5842 return pqi_cmd_priv(scmd)->this_residual == 0;
5843 }
5845 /*
5846 * This function gets called just before we hand the completed SCSI request
5847 * back to the SML.
5848 */
5850 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5852 struct pqi_scsi_dev *device;
5854 if (!scmd->device) {
5855 set_host_byte(scmd, DID_NO_CONNECT);
5856 return;
5857 }
5859 device = scmd->device->hostdata;
5860 if (!device) {
5861 set_host_byte(scmd, DID_NO_CONNECT);
5862 return;
5863 }
5865 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5866 }
5868 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5869 struct scsi_cmnd *scmd)
5870 {
5871 u32 oldest_jiffies;
5872 u8 lru_index;
5873 int i;
5874 int rc;
5875 struct pqi_scsi_dev *device;
5876 struct pqi_stream_data *pqi_stream_data;
5877 struct pqi_scsi_dev_raid_map_data rmd;
5879 if (!ctrl_info->enable_stream_detection)
5880 return false;
5882 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5883 if (rc)
5884 return false;
5886 /* Check writes only. */
5887 if (!rmd.is_write)
5888 return false;
5890 device = scmd->device->hostdata;
5892 /* Check for RAID 5/6 streams. */
5893 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5894 return false;
5896 /*
5897 * If controller does not support AIO RAID{5,6} writes, need to send
5898 * requests down non-AIO path.
5899 */
5900 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5901 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5902 return true;
5904 lru_index = 0;
5905 oldest_jiffies = INT_MAX;
5906 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5907 pqi_stream_data = &device->stream_data[i];
5908 /*
5909 * Check for adjacent request or request is within
5910 * the previous request.
5911 */
5912 if ((pqi_stream_data->next_lba &&
5913 rmd.first_block >= pqi_stream_data->next_lba) &&
5914 rmd.first_block <= pqi_stream_data->next_lba +
5915 rmd.block_cnt) {
5916 pqi_stream_data->next_lba = rmd.first_block +
5917 rmd.block_cnt;
5918 pqi_stream_data->last_accessed = jiffies;
5919 return true;
5920 }
5922 /* unused entry */
5923 if (pqi_stream_data->last_accessed == 0) {
5924 lru_index = i;
5925 break;
5926 }
5928 /* Find entry with oldest last accessed time. */
5929 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5930 oldest_jiffies = pqi_stream_data->last_accessed;
5931 lru_index = i;
5932 }
5933 }
5935 /* Set LRU entry. */
5936 pqi_stream_data = &device->stream_data[lru_index];
5937 pqi_stream_data->last_accessed = jiffies;
5938 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5940 return false;
5941 }
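/*
 * Example of the heuristic above: two interleaved sequential write
 * streams on one RAID 5/6 volume each keep advancing their own next_lba
 * slot, so both keep returning true and stay on the RAID path where the
 * controller can coalesce full-stripe writes; a random write matches no
 * slot, recycles the LRU entry, and returns false, leaving it eligible
 * for AIO bypass.
 */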
5943 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5944 {
5945 int rc;
5946 struct pqi_ctrl_info *ctrl_info;
5947 struct pqi_scsi_dev *device;
5948 u16 hw_queue;
5949 struct pqi_queue_group *queue_group;
5950 bool raid_bypassed;
5952 device = scmd->device->hostdata;
5954 if (!device) {
5955 set_host_byte(scmd, DID_NO_CONNECT);
5956 pqi_scsi_done(scmd);
5957 return 0;
5958 }
5960 atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
5962 ctrl_info = shost_to_hba(shost);
5964 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5965 set_host_byte(scmd, DID_NO_CONNECT);
5966 pqi_scsi_done(scmd);
5967 return 0;
5968 }
5970 if (pqi_ctrl_blocked(ctrl_info)) {
5971 rc = SCSI_MLQUEUE_HOST_BUSY;
5972 goto out;
5973 }
5975 /*
5976 * This is necessary because the SML doesn't zero out this field during
5977 * error recovery.
5978 */
5979 scmd->result = 0;
5981 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5982 queue_group = &ctrl_info->queue_groups[hw_queue];
5984 if (pqi_is_logical_device(device)) {
5985 raid_bypassed = false;
5986 if (device->raid_bypass_enabled &&
5987 pqi_is_bypass_eligible_request(scmd) &&
5988 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5989 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5990 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5991 raid_bypassed = true;
5992 device->raid_bypass_cnt++;
5993 }
5994 }
5995 if (!raid_bypassed)
5996 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5997 } else {
5998 if (device->aio_enabled)
5999 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6000 else
6001 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6002 }
6004 out:
6005 if (rc)
6006 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
6008 return rc;
6009 }
6011 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6012 {
6013 unsigned int i;
6014 unsigned int path;
6015 unsigned long flags;
6016 unsigned int queued_io_count;
6017 struct pqi_queue_group *queue_group;
6018 struct pqi_io_request *io_request;
6020 queued_io_count = 0;
6022 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6023 queue_group = &ctrl_info->queue_groups[i];
6024 for (path = 0; path < 2; path++) {
6025 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6026 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6027 queued_io_count++;
6028 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6032 return queued_io_count;
6035 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6036 {
6037 unsigned int i;
6038 unsigned int path;
6039 unsigned int nonempty_inbound_queue_count;
6040 struct pqi_queue_group *queue_group;
6041 pqi_index_t iq_pi;
6042 pqi_index_t iq_ci;
6044 nonempty_inbound_queue_count = 0;
6046 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6047 queue_group = &ctrl_info->queue_groups[i];
6048 for (path = 0; path < 2; path++) {
6049 iq_pi = queue_group->iq_pi_copy[path];
6050 iq_ci = readl(queue_group->iq_ci[path]);
6051 if (iq_pi != iq_ci)
6052 nonempty_inbound_queue_count++;
6053 }
6054 }
6056 return nonempty_inbound_queue_count;
6059 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6061 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6063 unsigned long start_jiffies;
6064 unsigned long warning_timeout;
6065 unsigned int queued_io_count;
6066 unsigned int nonempty_inbound_queue_count;
6067 bool displayed_warning;
6069 displayed_warning = false;
6070 start_jiffies = jiffies;
6071 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6073 while (1) {
6074 queued_io_count = pqi_queued_io_count(ctrl_info);
6075 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6076 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6077 break;
6078 pqi_check_ctrl_health(ctrl_info);
6079 if (pqi_ctrl_offline(ctrl_info))
6080 return -ENXIO;
6081 if (time_after(jiffies, warning_timeout)) {
6082 dev_warn(&ctrl_info->pci_dev->dev,
6083 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6084 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6085 displayed_warning = true;
6086 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6088 usleep_range(1000, 2000);
6089 }
6091 if (displayed_warning)
6092 dev_warn(&ctrl_info->pci_dev->dev,
6093 "queued I/O drained after waiting for %u seconds\n",
6094 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6096 return 0;
6097 }
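/*
 * The drain above is two-level: pqi_queued_io_count() covers requests
 * still parked on the driver's per-group submit lists, while
 * pqi_nonempty_inbound_queue_count() compares each ring's PI shadow with
 * the controller's CI to catch IUs posted but not yet consumed.
 */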
6099 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6100 struct pqi_scsi_dev *device)
6101 {
6102 unsigned int i;
6103 unsigned int path;
6104 struct pqi_queue_group *queue_group;
6105 unsigned long flags;
6106 struct pqi_io_request *io_request;
6107 struct pqi_io_request *next;
6108 struct scsi_cmnd *scmd;
6109 struct pqi_scsi_dev *scsi_device;
6111 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6112 queue_group = &ctrl_info->queue_groups[i];
6114 for (path = 0; path < 2; path++) {
6115 spin_lock_irqsave(
6116 &queue_group->submit_lock[path], flags);
6118 list_for_each_entry_safe(io_request, next,
6119 &queue_group->request_list[path],
6120 request_list_entry) {
6122 scmd = io_request->scmd;
6123 if (!scmd)
6124 continue;
6126 scsi_device = scmd->device->hostdata;
6127 if (scsi_device != device)
6128 continue;
6130 list_del(&io_request->request_list_entry);
6131 set_host_byte(scmd, DID_RESET);
6132 pqi_free_io_request(io_request);
6133 scsi_dma_unmap(scmd);
6134 pqi_scsi_done(scmd);
6137 spin_unlock_irqrestore(
6138 &queue_group->submit_lock[path], flags);
6143 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
6145 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6146 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6148 int cmds_outstanding;
6149 unsigned long start_jiffies;
6150 unsigned long warning_timeout;
6151 unsigned long msecs_waiting;
6153 start_jiffies = jiffies;
6154 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6156 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6157 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6158 pqi_check_ctrl_health(ctrl_info);
6159 if (pqi_ctrl_offline(ctrl_info))
6160 return -ENXIO;
6161 }
6162 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6163 if (msecs_waiting >= timeout_msecs) {
6164 dev_err(&ctrl_info->pci_dev->dev,
6165 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6166 ctrl_info->scsi_host->host_no, device->bus, device->target,
6167 lun, msecs_waiting / 1000, cmds_outstanding);
6168 return -ETIMEDOUT;
6169 }
6170 if (time_after(jiffies, warning_timeout)) {
6171 dev_warn(&ctrl_info->pci_dev->dev,
6172 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6173 ctrl_info->scsi_host->host_no, device->bus, device->target,
6174 lun, msecs_waiting / 1000, cmds_outstanding);
6175 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6177 usleep_range(1000, 2000);
6178 }
6180 return 0;
6181 }
6183 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6184 void *context)
6185 {
6186 struct completion *waiting = context;
6188 complete(waiting);
6189 }
6191 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
6193 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6194 struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6195 {
6196 int rc;
6197 unsigned int wait_secs;
6198 int cmds_outstanding;
6200 wait_secs = 0;
6202 while (1) {
6203 if (wait_for_completion_io_timeout(wait,
6204 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6205 rc = 0;
6206 break;
6207 }
6209 pqi_check_ctrl_health(ctrl_info);
6210 if (pqi_ctrl_offline(ctrl_info)) {
6211 rc = -ENXIO;
6212 break;
6213 }
6215 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6216 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6217 dev_warn(&ctrl_info->pci_dev->dev,
6218 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6219 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6220 }
6222 return rc;
6223 }
6225 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6227 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6228 {
6229 int rc;
6230 struct pqi_io_request *io_request;
6231 DECLARE_COMPLETION_ONSTACK(wait);
6232 struct pqi_task_management_request *request;
6233 struct pqi_scsi_dev *device;
6235 device = scmd->device->hostdata;
6236 io_request = pqi_alloc_io_request(ctrl_info, NULL);
6237 io_request->io_complete_callback = pqi_lun_reset_complete;
6238 io_request->context = &wait;
6240 request = io_request->iu;
6241 memset(request, 0, sizeof(*request));
6243 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6244 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6245 &request->header.iu_length);
6246 put_unaligned_le16(io_request->index, &request->request_id);
6247 memcpy(request->lun_number, device->scsi3addr,
6248 sizeof(request->lun_number));
6249 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6250 request->ml_device_lun_number = (u8)scmd->device->lun;
6251 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6252 if (ctrl_info->tmf_iu_timeout_supported)
6253 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6255 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6256 io_request);
6258 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
6259 if (rc == 0)
6260 rc = io_request->status;
6262 pqi_free_io_request(io_request);
6264 return rc;
6265 }
6267 #define PQI_LUN_RESET_RETRIES 3
6268 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6269 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6270 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
6272 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6276 unsigned int retries;
6277 unsigned long timeout_msecs;
6278 struct pqi_scsi_dev *device;
6280 device = scmd->device->hostdata;
6281 for (retries = 0;;) {
6282 reset_rc = pqi_lun_reset(ctrl_info, scmd);
6283 if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
6284 break;
6285 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6286 }
6288 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6289 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6291 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
6292 if (wait_rc && reset_rc == 0)
6293 reset_rc = wait_rc;
6295 return reset_rc == 0 ? SUCCESS : FAILED;
6296 }
6298 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6299 {
6300 int rc;
6301 struct pqi_scsi_dev *device;
6303 device = scmd->device->hostdata;
6304 pqi_ctrl_block_requests(ctrl_info);
6305 pqi_ctrl_wait_until_quiesced(ctrl_info);
6306 pqi_fail_io_queued_for_device(ctrl_info, device);
6307 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6308 if (rc)
6309 rc = FAILED;
6310 else
6311 rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
6312 pqi_ctrl_unblock_requests(ctrl_info);
6314 return rc;
6315 }
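/*
 * SCSI midlayer error-handler entry point (eh_device_reset_handler).
 * lun_reset_mutex serializes resets so only one runs per controller.
 */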
6317 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6318 {
6319 int rc;
6320 struct Scsi_Host *shost;
6321 struct pqi_ctrl_info *ctrl_info;
6322 struct pqi_scsi_dev *device;
6324 shost = scmd->device->host;
6325 ctrl_info = shost_to_hba(shost);
6326 device = scmd->device->hostdata;
6328 mutex_lock(&ctrl_info->lun_reset_mutex);
6330 dev_err(&ctrl_info->pci_dev->dev,
6331 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6333 device->bus, device->target, (u32)scmd->device->lun,
6334 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6336 pqi_check_ctrl_health(ctrl_info);
6337 if (pqi_ctrl_offline(ctrl_info))
6338 rc = FAILED;
6339 else
6340 rc = pqi_device_reset(ctrl_info, scmd);
6342 dev_err(&ctrl_info->pci_dev->dev,
6343 "reset of scsi %d:%d:%d:%d: %s\n",
6344 shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
6345 rc == SUCCESS ? "SUCCESS" : "FAILED");
6347 mutex_unlock(&ctrl_info->lun_reset_mutex);
6349 return rc;
6350 }
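/*
 * slave_alloc: binds a new scsi_device to the driver's matching
 * pqi_scsi_dev (by SAS rphy for physical devices, by bus/target/lun
 * otherwise) and applies per-device queue depth and WRITE SAME
 * restrictions.
 */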
6352 static int pqi_slave_alloc(struct scsi_device *sdev)
6353 {
6354 struct pqi_scsi_dev *device;
6355 unsigned long flags;
6356 struct pqi_ctrl_info *ctrl_info;
6357 struct scsi_target *starget;
6358 struct sas_rphy *rphy;
6360 ctrl_info = shost_to_hba(sdev->host);
6362 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6364 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6365 starget = scsi_target(sdev);
6366 rphy = target_to_rphy(starget);
6367 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6368 if (device) {
6369 if (device->target_lun_valid) {
6370 device->ignore_device = true;
6371 } else {
6372 device->target = sdev_id(sdev);
6373 device->lun = sdev->lun;
6374 device->target_lun_valid = true;
6375 }
6376 }
6377 } else {
6378 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6379 sdev_id(sdev), sdev->lun);
6380 }
6382 if (device) {
6383 sdev->hostdata = device;
6384 device->sdev = sdev;
6385 if (device->queue_depth) {
6386 device->advertised_queue_depth = device->queue_depth;
6387 scsi_change_queue_depth(sdev,
6388 device->advertised_queue_depth);
6389 }
6390 if (pqi_is_logical_device(device)) {
6391 pqi_disable_write_same(sdev);
6392 } else {
6393 sdev->allow_restart = 1;
6394 if (device->device_type == SA_DEVICE_TYPE_NVME)
6395 pqi_disable_write_same(sdev);
6396 }
6397 }
6399 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6401 return 0;
6402 }
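/*
 * map_queues: spreads blk-mq hardware contexts across the controller's
 * PCI MSI-X vectors so submissions stay on locally-affined queue groups.
 */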
6404 static void pqi_map_queues(struct Scsi_Host *shost)
6405 {
6406 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6408 blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6409 ctrl_info->pci_dev, 0);
6410 }
6412 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6413 {
6414 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6415 }
6417 static int pqi_slave_configure(struct scsi_device *sdev)
6418 {
6419 int rc = 0;
6420 struct pqi_scsi_dev *device;
6422 device = sdev->hostdata;
6423 device->devtype = sdev->type;
6425 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6426 rc = -ENXIO;
6427 device->ignore_device = false;
6428 }
6430 return rc;
6431 }
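/*
 * slave_destroy: drops one LUN reference; the underlying pqi_scsi_dev is
 * only unlinked and freed once the last LUN of a multi-LUN device is gone.
 */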
6433 static void pqi_slave_destroy(struct scsi_device *sdev)
6434 {
6435 struct pqi_ctrl_info *ctrl_info;
6436 struct pqi_scsi_dev *device;
6437 int mutex_acquired;
6438 unsigned long flags;
6440 ctrl_info = shost_to_hba(sdev->host);
6442 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6443 if (!mutex_acquired)
6444 return;
6446 device = sdev->hostdata;
6447 if (!device) {
6448 mutex_unlock(&ctrl_info->scan_mutex);
6449 return;
6450 }
6452 device->lun_count--;
6453 if (device->lun_count > 0) {
6454 mutex_unlock(&ctrl_info->scan_mutex);
6455 return;
6456 }
6458 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6459 list_del(&device->scsi_device_list_entry);
6460 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6462 mutex_unlock(&ctrl_info->scan_mutex);
6464 pqi_dev_info(ctrl_info, "removed", device);
6465 pqi_free_device(device);
6466 }
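/*
 * Legacy cciss-compatible ioctls (CCISS_GETPCIINFO, CCISS_GETDRIVVER,
 * CCISS_PASSTHRU, ...) are serviced below so that existing Smart Array
 * management tools continue to work against PQI controllers.
 */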
6468 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6469 {
6470 struct pci_dev *pci_dev;
6471 u32 subsystem_vendor;
6472 u32 subsystem_device;
6473 cciss_pci_info_struct pciinfo;
6475 if (!arg)
6476 return -EINVAL;
6478 pci_dev = ctrl_info->pci_dev;
6480 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6481 pciinfo.bus = pci_dev->bus->number;
6482 pciinfo.dev_fn = pci_dev->devfn;
6483 subsystem_vendor = pci_dev->subsystem_vendor;
6484 subsystem_device = pci_dev->subsystem_device;
6485 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6487 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6488 return -EFAULT;
6490 return 0;
6491 }
6493 static int pqi_getdrivver_ioctl(void __user *arg)
6494 {
6495 u32 version;
6497 if (!arg)
6498 return -EINVAL;
6500 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6501 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6503 if (copy_to_user(arg, &version, sizeof(version)))
6504 return -EFAULT;
6506 return 0;
6507 }
6509 struct ciss_error_info {
6510 u8 scsi_status;
6511 int command_status;
6512 size_t sense_data_length;
6513 };
6515 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6516 struct ciss_error_info *ciss_error_info)
6518 int ciss_cmd_status;
6519 size_t sense_data_length;
6521 switch (pqi_error_info->data_out_result) {
6522 case PQI_DATA_IN_OUT_GOOD:
6523 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6524 break;
6525 case PQI_DATA_IN_OUT_UNDERFLOW:
6526 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6527 break;
6528 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6529 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6530 break;
6531 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6532 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6533 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6534 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6535 case PQI_DATA_IN_OUT_ERROR:
6536 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6537 break;
6538 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6539 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6540 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6541 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6542 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6543 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6544 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6545 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6546 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6547 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6548 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6549 break;
6550 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6551 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6552 break;
6553 case PQI_DATA_IN_OUT_ABORTED:
6554 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6555 break;
6556 case PQI_DATA_IN_OUT_TIMEOUT:
6557 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6558 break;
6559 default:
6560 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6561 break;
6562 }
6564 sense_data_length =
6565 get_unaligned_le16(&pqi_error_info->sense_data_length);
6566 if (sense_data_length == 0)
6567 sense_data_length =
6568 get_unaligned_le16(&pqi_error_info->response_data_length);
6569 if (sense_data_length)
6570 if (sense_data_length > sizeof(pqi_error_info->data))
6571 sense_data_length = sizeof(pqi_error_info->data);
6573 ciss_error_info->scsi_status = pqi_error_info->status;
6574 ciss_error_info->command_status = ciss_cmd_status;
6575 ciss_error_info->sense_data_length = sense_data_length;
6576 }
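/*
 * CCISS_PASSTHRU: copies the caller's CDB and buffer into kernel space,
 * issues the request synchronously on the RAID path, then copies status,
 * sense data, and any read payload back out to user space.
 */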
6578 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6579 {
6580 int rc;
6581 char *kernel_buffer = NULL;
6582 u16 iu_length;
6583 size_t sense_data_length;
6584 IOCTL_Command_struct iocommand;
6585 struct pqi_raid_path_request request;
6586 struct pqi_raid_error_info pqi_error_info;
6587 struct ciss_error_info ciss_error_info;
6589 if (pqi_ctrl_offline(ctrl_info))
6590 return -ENXIO;
6591 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6592 return -EBUSY;
6593 if (!arg)
6594 return -EINVAL;
6595 if (!capable(CAP_SYS_RAWIO))
6596 return -EPERM;
6597 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6598 return -EFAULT;
6599 if (iocommand.buf_size < 1 &&
6600 iocommand.Request.Type.Direction != XFER_NONE)
6601 return -EINVAL;
6602 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6603 return -EINVAL;
6604 if (iocommand.Request.Type.Type != TYPE_CMD)
6605 return -EINVAL;
6607 switch (iocommand.Request.Type.Direction) {
6608 case XFER_NONE:
6609 case XFER_WRITE:
6610 case XFER_READ:
6611 case XFER_READ | XFER_WRITE:
6612 break;
6613 default:
6614 return -EINVAL;
6615 }
6617 if (iocommand.buf_size > 0) {
6618 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6619 if (!kernel_buffer)
6620 return -ENOMEM;
6621 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6622 if (copy_from_user(kernel_buffer, iocommand.buf,
6623 iocommand.buf_size)) {
6624 rc = -EFAULT;
6625 goto out;
6626 }
6627 } else {
6628 memset(kernel_buffer, 0, iocommand.buf_size);
6629 }
6630 }
6632 memset(&request, 0, sizeof(request));
6634 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6635 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6636 PQI_REQUEST_HEADER_LENGTH;
6637 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6638 sizeof(request.lun_number));
6639 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6640 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6642 switch (iocommand.Request.Type.Direction) {
6643 case XFER_NONE:
6644 request.data_direction = SOP_NO_DIRECTION_FLAG;
6645 break;
6646 case XFER_WRITE:
6647 request.data_direction = SOP_WRITE_FLAG;
6648 break;
6649 case XFER_READ:
6650 request.data_direction = SOP_READ_FLAG;
6651 break;
6652 case XFER_READ | XFER_WRITE:
6653 request.data_direction = SOP_BIDIRECTIONAL;
6654 break;
6655 }
6657 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6659 if (iocommand.buf_size > 0) {
6660 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6662 rc = pqi_map_single(ctrl_info->pci_dev,
6663 &request.sg_descriptors[0], kernel_buffer,
6664 iocommand.buf_size, DMA_BIDIRECTIONAL);
6665 if (rc)
6666 goto out;
6668 iu_length += sizeof(request.sg_descriptors[0]);
6669 }
6671 put_unaligned_le16(iu_length, &request.header.iu_length);
6673 if (ctrl_info->raid_iu_timeout_supported)
6674 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6676 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6677 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6679 if (iocommand.buf_size > 0)
6680 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6681 DMA_BIDIRECTIONAL);
6683 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6685 if (rc == 0) {
6686 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6687 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6688 iocommand.error_info.CommandStatus =
6689 ciss_error_info.command_status;
6690 sense_data_length = ciss_error_info.sense_data_length;
6691 if (sense_data_length) {
6692 if (sense_data_length >
6693 sizeof(iocommand.error_info.SenseInfo))
6694 sense_data_length =
6695 sizeof(iocommand.error_info.SenseInfo);
6696 memcpy(iocommand.error_info.SenseInfo,
6697 pqi_error_info.data, sense_data_length);
6698 iocommand.error_info.SenseLen = sense_data_length;
6699 }
6700 }
6702 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6703 rc = -EFAULT;
6704 goto out;
6705 }
6707 if (rc == 0 && iocommand.buf_size > 0 &&
6708 (iocommand.Request.Type.Direction & XFER_READ)) {
6709 if (copy_to_user(iocommand.buf, kernel_buffer,
6710 iocommand.buf_size)) {
6711 rc = -EFAULT;
6712 }
6713 }
6715 out:
6716 kfree(kernel_buffer);
6718 return rc;
6719 }
6721 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6722 void __user *arg)
6723 {
6724 int rc;
6725 struct pqi_ctrl_info *ctrl_info;
6727 ctrl_info = shost_to_hba(sdev->host);
6729 switch (cmd) {
6730 case CCISS_DEREGDISK:
6731 case CCISS_REGNEWDISK:
6732 case CCISS_REGNEWD:
6733 rc = pqi_scan_scsi_devices(ctrl_info);
6734 break;
6735 case CCISS_GETPCIINFO:
6736 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6737 break;
6738 case CCISS_GETDRIVVER:
6739 rc = pqi_getdrivver_ioctl(arg);
6740 break;
6741 case CCISS_PASSTHRU:
6742 rc = pqi_passthru_ioctl(ctrl_info, arg);
6743 break;
6744 default:
6745 rc = -EINVAL;
6746 break;
6747 }
6749 return rc;
6750 }
6752 static ssize_t pqi_firmware_version_show(struct device *dev,
6753 struct device_attribute *attr, char *buffer)
6754 {
6755 struct Scsi_Host *shost;
6756 struct pqi_ctrl_info *ctrl_info;
6758 shost = class_to_shost(dev);
6759 ctrl_info = shost_to_hba(shost);
6761 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6762 }
6764 static ssize_t pqi_driver_version_show(struct device *dev,
6765 struct device_attribute *attr, char *buffer)
6766 {
6767 return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6768 }
6770 static ssize_t pqi_serial_number_show(struct device *dev,
6771 struct device_attribute *attr, char *buffer)
6772 {
6773 struct Scsi_Host *shost;
6774 struct pqi_ctrl_info *ctrl_info;
6776 shost = class_to_shost(dev);
6777 ctrl_info = shost_to_hba(shost);
6779 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6780 }
6782 static ssize_t pqi_model_show(struct device *dev,
6783 struct device_attribute *attr, char *buffer)
6784 {
6785 struct Scsi_Host *shost;
6786 struct pqi_ctrl_info *ctrl_info;
6788 shost = class_to_shost(dev);
6789 ctrl_info = shost_to_hba(shost);
6791 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6792 }
6794 static ssize_t pqi_vendor_show(struct device *dev,
6795 struct device_attribute *attr, char *buffer)
6796 {
6797 struct Scsi_Host *shost;
6798 struct pqi_ctrl_info *ctrl_info;
6800 shost = class_to_shost(dev);
6801 ctrl_info = shost_to_hba(shost);
6803 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6804 }
6806 static ssize_t pqi_host_rescan_store(struct device *dev,
6807 struct device_attribute *attr, const char *buffer, size_t count)
6808 {
6809 struct Scsi_Host *shost = class_to_shost(dev);
6811 pqi_scan_start(shost);
6813 return count;
6814 }
6816 static ssize_t pqi_lockup_action_show(struct device *dev,
6817 struct device_attribute *attr, char *buffer)
6818 {
6819 int count = 0;
6820 unsigned int i;
6822 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6823 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6824 count += scnprintf(buffer + count, PAGE_SIZE - count,
6825 "[%s] ", pqi_lockup_actions[i].name);
6826 else
6827 count += scnprintf(buffer + count, PAGE_SIZE - count,
6828 "%s ", pqi_lockup_actions[i].name);
6829 }
6831 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6833 return count;
6834 }
6836 static ssize_t pqi_lockup_action_store(struct device *dev,
6837 struct device_attribute *attr, const char *buffer, size_t count)
6838 {
6839 unsigned int i;
6840 char *action_name;
6841 char action_name_buffer[32];
6843 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6844 action_name = strstrip(action_name_buffer);
6846 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6847 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6848 pqi_lockup_action = pqi_lockup_actions[i].action;
6849 return count;
6850 }
6851 }
6853 return -EINVAL;
6854 }
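/*
 * Example usage (hostN is this controller's SCSI host; valid names are
 * the pqi_lockup_actions entries, e.g. none/reboot/panic):
 *   echo panic > /sys/class/scsi_host/hostN/lockup_action
 */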
6856 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6857 struct device_attribute *attr, char *buffer)
6858 {
6859 struct Scsi_Host *shost = class_to_shost(dev);
6860 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6862 return scnprintf(buffer, 10, "%x\n",
6863 ctrl_info->enable_stream_detection);
6864 }
6866 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6867 struct device_attribute *attr, const char *buffer, size_t count)
6868 {
6869 struct Scsi_Host *shost = class_to_shost(dev);
6870 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6871 u8 set_stream_detection = 0;
6873 if (kstrtou8(buffer, 0, &set_stream_detection))
6874 return -EINVAL;
6876 if (set_stream_detection > 0)
6877 set_stream_detection = 1;
6879 ctrl_info->enable_stream_detection = set_stream_detection;
6881 return count;
6882 }
6884 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6885 struct device_attribute *attr, char *buffer)
6886 {
6887 struct Scsi_Host *shost = class_to_shost(dev);
6888 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6890 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6891 }
6893 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6894 struct device_attribute *attr, const char *buffer, size_t count)
6895 {
6896 struct Scsi_Host *shost = class_to_shost(dev);
6897 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6898 u8 set_r5_writes = 0;
6900 if (kstrtou8(buffer, 0, &set_r5_writes))
6901 return -EINVAL;
6903 if (set_r5_writes > 0)
6904 set_r5_writes = 1;
6906 ctrl_info->enable_r5_writes = set_r5_writes;
6908 return count;
6909 }
6911 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6912 struct device_attribute *attr, char *buffer)
6913 {
6914 struct Scsi_Host *shost = class_to_shost(dev);
6915 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6917 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6918 }
6920 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6921 struct device_attribute *attr, const char *buffer, size_t count)
6922 {
6923 struct Scsi_Host *shost = class_to_shost(dev);
6924 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6925 u8 set_r6_writes = 0;
6927 if (kstrtou8(buffer, 0, &set_r6_writes))
6928 return -EINVAL;
6930 if (set_r6_writes > 0)
6931 set_r6_writes = 1;
6933 ctrl_info->enable_r6_writes = set_r6_writes;
6935 return count;
6936 }
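/*
 * The r5/r6 write toggles normalize any non-zero input to 1, e.g.:
 *   echo 1 > /sys/class/scsi_host/hostN/enable_r5_writes
 * Write bypass additionally requires the matching firmware feature (see
 * the RAID 5/6 Write Bypass entries in pqi_firmware_features below).
 */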
6938 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6939 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6940 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6941 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6942 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6943 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6944 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6945 pqi_lockup_action_store);
6946 static DEVICE_ATTR(enable_stream_detection, 0644,
6947 pqi_host_enable_stream_detection_show,
6948 pqi_host_enable_stream_detection_store);
6949 static DEVICE_ATTR(enable_r5_writes, 0644,
6950 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6951 static DEVICE_ATTR(enable_r6_writes, 0644,
6952 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6954 static struct attribute *pqi_shost_attrs[] = {
6955 &dev_attr_driver_version.attr,
6956 &dev_attr_firmware_version.attr,
6957 &dev_attr_model.attr,
6958 &dev_attr_serial_number.attr,
6959 &dev_attr_vendor.attr,
6960 &dev_attr_rescan.attr,
6961 &dev_attr_lockup_action.attr,
6962 &dev_attr_enable_stream_detection.attr,
6963 &dev_attr_enable_r5_writes.attr,
6964 &dev_attr_enable_r6_writes.attr,
6965 NULL
6966 };
6968 ATTRIBUTE_GROUPS(pqi_shost);
6970 static ssize_t pqi_unique_id_show(struct device *dev,
6971 struct device_attribute *attr, char *buffer)
6972 {
6973 struct pqi_ctrl_info *ctrl_info;
6974 struct scsi_device *sdev;
6975 struct pqi_scsi_dev *device;
6976 unsigned long flags;
6977 u8 unique_id[16];
6979 sdev = to_scsi_device(dev);
6980 ctrl_info = shost_to_hba(sdev->host);
6982 if (pqi_ctrl_offline(ctrl_info))
6983 return -ENODEV;
6985 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6987 device = sdev->hostdata;
6988 if (!device) {
6989 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6990 return -ENODEV;
6991 }
6993 if (device->is_physical_device)
6994 memcpy(unique_id, device->wwid, sizeof(device->wwid));
6995 else
6996 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6998 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7000 return scnprintf(buffer, PAGE_SIZE,
7001 "%02X%02X%02X%02X%02X%02X%02X%02X"
7002 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7003 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7004 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7005 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7006 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7007 }
7009 static ssize_t pqi_lunid_show(struct device *dev,
7010 struct device_attribute *attr, char *buffer)
7011 {
7012 struct pqi_ctrl_info *ctrl_info;
7013 struct scsi_device *sdev;
7014 struct pqi_scsi_dev *device;
7015 unsigned long flags;
7016 u8 lunid[8];
7018 sdev = to_scsi_device(dev);
7019 ctrl_info = shost_to_hba(sdev->host);
7021 if (pqi_ctrl_offline(ctrl_info))
7022 return -ENODEV;
7024 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7026 device = sdev->hostdata;
7027 if (!device) {
7028 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7029 return -ENODEV;
7030 }
7032 memcpy(lunid, device->scsi3addr, sizeof(lunid));
7034 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7036 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7037 }
7041 static ssize_t pqi_path_info_show(struct device *dev,
7042 struct device_attribute *attr, char *buf)
7043 {
7044 struct pqi_ctrl_info *ctrl_info;
7045 struct scsi_device *sdev;
7046 struct pqi_scsi_dev *device;
7047 unsigned long flags;
7048 int i;
7049 int output_len = 0;
7050 u8 box;
7051 u8 bay;
7052 u8 path_map_index;
7053 char *active;
7054 u8 phys_connector[2];
7056 sdev = to_scsi_device(dev);
7057 ctrl_info = shost_to_hba(sdev->host);
7059 if (pqi_ctrl_offline(ctrl_info))
7060 return -ENODEV;
7062 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7064 device = sdev->hostdata;
7065 if (!device) {
7066 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7067 return -ENODEV;
7068 }
7070 bay = device->bay;
7071 for (i = 0; i < MAX_PATHS; i++) {
7072 path_map_index = 1 << i;
7073 if (i == device->active_path_index)
7074 active = "Active";
7075 else if (device->path_map & path_map_index)
7076 active = "Inactive";
7077 else
7078 continue;
7080 output_len += scnprintf(buf + output_len,
7081 PAGE_SIZE - output_len,
7082 "[%d:%d:%d:%d] %20.20s ",
7083 ctrl_info->scsi_host->host_no,
7084 device->bus, device->target,
7085 device->lun,
7086 scsi_device_type(device->devtype));
7088 if (device->devtype == TYPE_RAID ||
7089 pqi_is_logical_device(device))
7090 goto end_buffer;
7092 memcpy(&phys_connector, &device->phys_connector[i],
7093 sizeof(phys_connector));
7094 if (phys_connector[0] < '0')
7095 phys_connector[0] = '0';
7096 if (phys_connector[1] < '0')
7097 phys_connector[1] = '0';
7099 output_len += scnprintf(buf + output_len,
7100 PAGE_SIZE - output_len,
7101 "PORT: %.2s ", phys_connector);
7103 box = device->box[i];
7104 if (box != 0 && box != 0xFF)
7105 output_len += scnprintf(buf + output_len,
7106 PAGE_SIZE - output_len,
7107 "BOX: %hhu ", box);
7109 if ((device->devtype == TYPE_DISK ||
7110 device->devtype == TYPE_ZBC) &&
7111 pqi_expose_device(device))
7112 output_len += scnprintf(buf + output_len,
7113 PAGE_SIZE - output_len,
7114 "BAY: %hhu ", bay);
7116 end_buffer:
7117 output_len += scnprintf(buf + output_len,
7118 PAGE_SIZE - output_len,
7119 "%s\n", active);
7120 }
7122 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7124 return output_len;
7125 }
7127 static ssize_t pqi_sas_address_show(struct device *dev,
7128 struct device_attribute *attr, char *buffer)
7129 {
7130 struct pqi_ctrl_info *ctrl_info;
7131 struct scsi_device *sdev;
7132 struct pqi_scsi_dev *device;
7133 unsigned long flags;
7134 u64 sas_address;
7136 sdev = to_scsi_device(dev);
7137 ctrl_info = shost_to_hba(sdev->host);
7139 if (pqi_ctrl_offline(ctrl_info))
7140 return -ENODEV;
7142 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7144 device = sdev->hostdata;
7145 if (!device || !pqi_is_device_with_sas_address(device)) {
7146 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7147 return -ENODEV;
7148 }
7150 sas_address = device->sas_address;
7152 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7154 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7155 }
7157 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7158 struct device_attribute *attr, char *buffer)
7159 {
7160 struct pqi_ctrl_info *ctrl_info;
7161 struct scsi_device *sdev;
7162 struct pqi_scsi_dev *device;
7163 unsigned long flags;
7165 sdev = to_scsi_device(dev);
7166 ctrl_info = shost_to_hba(sdev->host);
7168 if (pqi_ctrl_offline(ctrl_info))
7169 return -ENODEV;
7171 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7173 device = sdev->hostdata;
7174 if (!device) {
7175 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7176 return -ENODEV;
7177 }
7179 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7180 buffer[1] = '\n';
7181 buffer[2] = '\0';
7183 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7185 return 2;
7186 }
7188 static ssize_t pqi_raid_level_show(struct device *dev,
7189 struct device_attribute *attr, char *buffer)
7190 {
7191 struct pqi_ctrl_info *ctrl_info;
7192 struct scsi_device *sdev;
7193 struct pqi_scsi_dev *device;
7194 unsigned long flags;
7195 char *raid_level;
7197 sdev = to_scsi_device(dev);
7198 ctrl_info = shost_to_hba(sdev->host);
7200 if (pqi_ctrl_offline(ctrl_info))
7201 return -ENODEV;
7203 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7205 device = sdev->hostdata;
7206 if (!device) {
7207 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7208 return -ENODEV;
7209 }
7211 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7212 raid_level = pqi_raid_level_to_string(device->raid_level);
7213 else
7214 raid_level = "N/A";
7216 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7218 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7219 }
7221 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7222 struct device_attribute *attr, char *buffer)
7223 {
7224 struct pqi_ctrl_info *ctrl_info;
7225 struct scsi_device *sdev;
7226 struct pqi_scsi_dev *device;
7227 unsigned long flags;
7228 unsigned int raid_bypass_cnt;
7230 sdev = to_scsi_device(dev);
7231 ctrl_info = shost_to_hba(sdev->host);
7233 if (pqi_ctrl_offline(ctrl_info))
7234 return -ENODEV;
7236 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7238 device = sdev->hostdata;
7239 if (!device) {
7240 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7241 return -ENODEV;
7242 }
7244 raid_bypass_cnt = device->raid_bypass_cnt;
7246 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7248 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
7249 }
7251 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7252 struct device_attribute *attr, char *buf)
7253 {
7254 struct pqi_ctrl_info *ctrl_info;
7255 struct scsi_device *sdev;
7256 struct pqi_scsi_dev *device;
7257 unsigned long flags;
7258 int output_len = 0;
7260 sdev = to_scsi_device(dev);
7261 ctrl_info = shost_to_hba(sdev->host);
7263 if (pqi_ctrl_offline(ctrl_info))
7264 return -ENODEV;
7266 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7268 device = sdev->hostdata;
7269 if (!device) {
7270 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7271 return -ENODEV;
7272 }
7274 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7275 device->ncq_prio_enable);
7276 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7278 return output_len;
7279 }
7281 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7282 struct device_attribute *attr,
7283 const char *buf, size_t count)
7284 {
7285 struct pqi_ctrl_info *ctrl_info;
7286 struct scsi_device *sdev;
7287 struct pqi_scsi_dev *device;
7288 unsigned long flags;
7289 u8 ncq_prio_enable = 0;
7291 if (kstrtou8(buf, 0, &ncq_prio_enable))
7292 return -EINVAL;
7294 sdev = to_scsi_device(dev);
7295 ctrl_info = shost_to_hba(sdev->host);
7297 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7299 device = sdev->hostdata;
7301 if (!device) {
7302 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7303 return -ENODEV;
7304 }
7306 if (!device->ncq_prio_support) {
7307 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7308 return -EINVAL;
7309 }
7311 device->ncq_prio_enable = ncq_prio_enable;
7313 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7315 return strlen(buf);
7316 }
7318 static ssize_t pqi_numa_node_show(struct device *dev,
7319 struct device_attribute *attr, char *buffer)
7320 {
7321 struct scsi_device *sdev;
7322 struct pqi_ctrl_info *ctrl_info;
7324 sdev = to_scsi_device(dev);
7325 ctrl_info = shost_to_hba(sdev->host);
7327 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7328 }
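/*
 * The attribute handlers above are exported through the pqi_sdev_attrs
 * group below and typically surface under the scsi_device sysfs node,
 * e.g.:
 *   cat /sys/class/scsi_device/1:0:0:0/device/raid_level
 */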
7330 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7331 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7332 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7333 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7334 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7335 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7336 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7337 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7338 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7339 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7341 static struct attribute *pqi_sdev_attrs[] = {
7342 &dev_attr_lunid.attr,
7343 &dev_attr_unique_id.attr,
7344 &dev_attr_path_info.attr,
7345 &dev_attr_sas_address.attr,
7346 &dev_attr_ssd_smart_path_enabled.attr,
7347 &dev_attr_raid_level.attr,
7348 &dev_attr_raid_bypass_cnt.attr,
7349 &dev_attr_sas_ncq_prio_enable.attr,
7350 &dev_attr_numa_node.attr,
7351 NULL
7352 };
7354 ATTRIBUTE_GROUPS(pqi_sdev);
7356 static const struct scsi_host_template pqi_driver_template = {
7357 .module = THIS_MODULE,
7358 .name = DRIVER_NAME_SHORT,
7359 .proc_name = DRIVER_NAME_SHORT,
7360 .queuecommand = pqi_scsi_queue_command,
7361 .scan_start = pqi_scan_start,
7362 .scan_finished = pqi_scan_finished,
7363 .this_id = -1,
7364 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7365 .ioctl = pqi_ioctl,
7366 .slave_alloc = pqi_slave_alloc,
7367 .slave_configure = pqi_slave_configure,
7368 .slave_destroy = pqi_slave_destroy,
7369 .map_queues = pqi_map_queues,
7370 .sdev_groups = pqi_sdev_groups,
7371 .shost_groups = pqi_shost_groups,
7372 .cmd_size = sizeof(struct pqi_cmd_priv),
7373 };
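/*
 * host_tagset = 1 (set below) requests a single host-wide tag space
 * shared by all nr_hw_queues, matching the controller's one pool of PQI
 * request IDs.
 */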
7375 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7376 {
7377 int rc;
7378 struct Scsi_Host *shost;
7380 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7381 if (!shost) {
7382 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7383 return -ENOMEM;
7384 }
7386 shost->io_port = 0;
7387 shost->n_io_port = 0;
7388 shost->this_id = -1;
7389 shost->max_channel = PQI_MAX_BUS;
7390 shost->max_cmd_len = MAX_COMMAND_SIZE;
7391 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7392 shost->max_id = ~0;
7393 shost->max_sectors = ctrl_info->max_sectors;
7394 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7395 shost->cmd_per_lun = shost->can_queue;
7396 shost->sg_tablesize = ctrl_info->sg_tablesize;
7397 shost->transportt = pqi_sas_transport_template;
7398 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7399 shost->unique_id = shost->irq;
7400 shost->nr_hw_queues = ctrl_info->num_queue_groups;
7401 shost->host_tagset = 1;
7402 shost->hostdata[0] = (unsigned long)ctrl_info;
7404 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7405 if (rc) {
7406 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7407 goto free_host;
7408 }
7410 rc = pqi_add_sas_host(shost, ctrl_info);
7411 if (rc) {
7412 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7413 goto remove_host;
7414 }
7416 ctrl_info->scsi_host = shost;
7418 return 0;
7420 remove_host:
7421 scsi_remove_host(shost);
7422 free_host:
7423 scsi_host_put(shost);
7425 return rc;
7426 }
7428 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7429 {
7430 struct Scsi_Host *shost;
7432 pqi_delete_sas_host(ctrl_info);
7434 shost = ctrl_info->scsi_host;
7435 if (!shost)
7436 return;
7438 scsi_remove_host(shost);
7439 scsi_host_put(shost);
7440 }
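/*
 * PQI reset: optionally quiesce firmware via SIS first, write the reset
 * action/type to the device_reset register, then poll until firmware
 * reports PQI_RESET_ACTION_COMPLETED or the controller-advertised
 * max_reset_timeout (in units of 100 ms) expires.
 */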
7442 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7443 {
7444 int rc = 0;
7445 struct pqi_device_registers __iomem *pqi_registers;
7446 unsigned long timeout;
7447 unsigned int timeout_msecs;
7448 union pqi_reset_register reset_reg;
7450 pqi_registers = ctrl_info->pqi_registers;
7451 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7452 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7454 while (1) {
7455 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7456 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7457 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7458 break;
7459 if (!sis_is_firmware_running(ctrl_info)) {
7460 rc = -ENXIO;
7461 break;
7462 }
7463 if (time_after(jiffies, timeout)) {
7464 rc = -ETIMEDOUT;
7465 break;
7466 }
7467 }
7469 return rc;
7470 }
7472 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7473 {
7474 int rc;
7475 union pqi_reset_register reset_reg;
7477 if (ctrl_info->pqi_reset_quiesce_supported) {
7478 rc = sis_pqi_reset_quiesce(ctrl_info);
7479 if (rc) {
7480 dev_err(&ctrl_info->pci_dev->dev,
7481 "PQI reset failed during quiesce with error %d\n", rc);
7482 return rc;
7483 }
7484 }
7486 reset_reg.all_bits = 0;
7487 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7488 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7490 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7492 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7493 if (rc)
7494 dev_err(&ctrl_info->pci_dev->dev,
7495 "PQI reset failed with error %d\n", rc);
7497 return rc;
7498 }
7500 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7501 {
7502 int rc;
7503 struct bmic_sense_subsystem_info *sense_info;
7505 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7506 if (!sense_info)
7507 return -ENOMEM;
7509 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7510 if (rc)
7511 goto out;
7513 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7514 sizeof(sense_info->ctrl_serial_number));
7515 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7517 out:
7518 kfree(sense_info);
7520 return rc;
7521 }
7523 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7524 {
7525 int rc;
7526 struct bmic_identify_controller *identify;
7528 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7529 if (!identify)
7530 return -ENOMEM;
7532 rc = pqi_identify_controller(ctrl_info, identify);
7533 if (rc)
7534 goto out;
7536 if (get_unaligned_le32(&identify->extra_controller_flags) &
7537 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7538 memcpy(ctrl_info->firmware_version,
7539 identify->firmware_version_long,
7540 sizeof(identify->firmware_version_long));
7541 } else {
7542 memcpy(ctrl_info->firmware_version,
7543 identify->firmware_version_short,
7544 sizeof(identify->firmware_version_short));
7545 ctrl_info->firmware_version
7546 [sizeof(identify->firmware_version_short)] = '\0';
7547 snprintf(ctrl_info->firmware_version +
7548 strlen(ctrl_info->firmware_version),
7549 sizeof(ctrl_info->firmware_version) -
7550 sizeof(identify->firmware_version_short),
7551 "-%u",
7552 get_unaligned_le16(&identify->firmware_build_number));
7553 }
7555 memcpy(ctrl_info->model, identify->product_id,
7556 sizeof(identify->product_id));
7557 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7559 memcpy(ctrl_info->vendor, identify->vendor_id,
7560 sizeof(identify->vendor_id));
7561 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7563 dev_info(&ctrl_info->pci_dev->dev,
7564 "Firmware version: %s\n", ctrl_info->firmware_version);
7572 struct pqi_config_table_section_info {
7573 struct pqi_ctrl_info *ctrl_info;
7574 void *section;
7575 u32 section_offset;
7576 void __iomem *section_iomem_addr;
7577 };
7579 static inline bool pqi_is_firmware_feature_supported(
7580 struct pqi_config_table_firmware_features *firmware_features,
7581 unsigned int bit_position)
7582 {
7583 unsigned int byte_index;
7585 byte_index = bit_position / BITS_PER_BYTE;
7587 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7588 return false;
7590 return firmware_features->features_supported[byte_index] &
7591 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7592 }
7594 static inline bool pqi_is_firmware_feature_enabled(
7595 struct pqi_config_table_firmware_features *firmware_features,
7596 void __iomem *firmware_features_iomem_addr,
7597 unsigned int bit_position)
7598 {
7599 unsigned int byte_index;
7600 u8 __iomem *features_enabled_iomem_addr;
7602 byte_index = (bit_position / BITS_PER_BYTE) +
7603 (le16_to_cpu(firmware_features->num_elements) * 2);
7605 features_enabled_iomem_addr = firmware_features_iomem_addr +
7606 offsetof(struct pqi_config_table_firmware_features,
7607 features_supported) + byte_index;
7609 return *((__force u8 *)features_enabled_iomem_addr) &
7610 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7611 }
7613 static inline void pqi_request_firmware_feature(
7614 struct pqi_config_table_firmware_features *firmware_features,
7615 unsigned int bit_position)
7616 {
7617 unsigned int byte_index;
7619 byte_index = (bit_position / BITS_PER_BYTE) +
7620 le16_to_cpu(firmware_features->num_elements);
7622 firmware_features->features_supported[byte_index] |=
7623 (1 << (bit_position % BITS_PER_BYTE));
7624 }
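/*
 * The firmware-features section holds three num_elements-sized byte
 * arrays back to back: bits supported by firmware, bits requested by
 * the host, and bits actually enabled; hence the byte_index offsets of
 * num_elements and num_elements * 2 used by the helpers above.
 */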
7626 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7627 u16 first_section, u16 last_section)
7628 {
7629 struct pqi_vendor_general_request request;
7631 memset(&request, 0, sizeof(request));
7633 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7634 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7635 &request.header.iu_length);
7636 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7637 &request.function_code);
7638 put_unaligned_le16(first_section,
7639 &request.data.config_table_update.first_section);
7640 put_unaligned_le16(last_section,
7641 &request.data.config_table_update.last_section);
7643 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7644 }
7646 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7647 struct pqi_config_table_firmware_features *firmware_features,
7648 void __iomem *firmware_features_iomem_addr)
7649 {
7650 void *features_requested;
7651 void __iomem *features_requested_iomem_addr;
7652 void __iomem *host_max_known_feature_iomem_addr;
7654 features_requested = firmware_features->features_supported +
7655 le16_to_cpu(firmware_features->num_elements);
7657 features_requested_iomem_addr = firmware_features_iomem_addr +
7658 (features_requested - (void *)firmware_features);
7660 memcpy_toio(features_requested_iomem_addr, features_requested,
7661 le16_to_cpu(firmware_features->num_elements));
7663 if (pqi_is_firmware_feature_supported(firmware_features,
7664 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7665 host_max_known_feature_iomem_addr =
7666 features_requested_iomem_addr +
7667 (le16_to_cpu(firmware_features->num_elements) * 2) +
7668 sizeof(__le16);
7669 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7670 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7671 }
7673 return pqi_config_table_update(ctrl_info,
7674 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7675 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7676 }
7678 struct pqi_firmware_feature {
7679 char *feature_name;
7680 unsigned int feature_bit;
7681 bool supported;
7682 bool enabled;
7683 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7684 struct pqi_firmware_feature *firmware_feature);
7685 };
7687 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7688 struct pqi_firmware_feature *firmware_feature)
7689 {
7690 if (!firmware_feature->supported) {
7691 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7692 firmware_feature->feature_name);
7693 return;
7694 }
7696 if (firmware_feature->enabled) {
7697 dev_info(&ctrl_info->pci_dev->dev,
7698 "%s enabled\n", firmware_feature->feature_name);
7702 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7703 firmware_feature->feature_name);
7706 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7707 struct pqi_firmware_feature *firmware_feature)
7708 {
7709 switch (firmware_feature->feature_bit) {
7710 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7711 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7712 break;
7713 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7714 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7715 break;
7716 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7717 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7718 break;
7719 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7720 ctrl_info->soft_reset_handshake_supported =
7721 firmware_feature->enabled &&
7722 pqi_read_soft_reset_status(ctrl_info);
7723 break;
7724 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7725 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7726 break;
7727 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7728 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7729 break;
7730 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7731 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7732 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7733 break;
7734 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7735 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7736 break;
7737 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7738 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7739 break;
7740 }
7742 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7743 }
7745 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7746 struct pqi_firmware_feature *firmware_feature)
7747 {
7748 if (firmware_feature->feature_status)
7749 firmware_feature->feature_status(ctrl_info, firmware_feature);
7750 }
7752 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7754 static struct pqi_firmware_feature pqi_firmware_features[] = {
7755 {
7756 .feature_name = "Online Firmware Activation",
7757 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7758 .feature_status = pqi_firmware_feature_status,
7759 },
7760 {
7761 .feature_name = "Serial Management Protocol",
7762 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7763 .feature_status = pqi_firmware_feature_status,
7764 },
7765 {
7766 .feature_name = "Maximum Known Feature",
7767 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7768 .feature_status = pqi_firmware_feature_status,
7769 },
7770 {
7771 .feature_name = "RAID 0 Read Bypass",
7772 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7773 .feature_status = pqi_firmware_feature_status,
7774 },
7775 {
7776 .feature_name = "RAID 1 Read Bypass",
7777 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7778 .feature_status = pqi_firmware_feature_status,
7779 },
7780 {
7781 .feature_name = "RAID 5 Read Bypass",
7782 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7783 .feature_status = pqi_firmware_feature_status,
7784 },
7785 {
7786 .feature_name = "RAID 6 Read Bypass",
7787 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7788 .feature_status = pqi_firmware_feature_status,
7789 },
7790 {
7791 .feature_name = "RAID 0 Write Bypass",
7792 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7793 .feature_status = pqi_firmware_feature_status,
7794 },
7795 {
7796 .feature_name = "RAID 1 Write Bypass",
7797 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7798 .feature_status = pqi_ctrl_update_feature_flags,
7799 },
7800 {
7801 .feature_name = "RAID 5 Write Bypass",
7802 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7803 .feature_status = pqi_ctrl_update_feature_flags,
7804 },
7805 {
7806 .feature_name = "RAID 6 Write Bypass",
7807 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7808 .feature_status = pqi_ctrl_update_feature_flags,
7809 },
7810 {
7811 .feature_name = "New Soft Reset Handshake",
7812 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7813 .feature_status = pqi_ctrl_update_feature_flags,
7814 },
7815 {
7816 .feature_name = "RAID IU Timeout",
7817 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7818 .feature_status = pqi_ctrl_update_feature_flags,
7819 },
7820 {
7821 .feature_name = "TMF IU Timeout",
7822 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7823 .feature_status = pqi_ctrl_update_feature_flags,
7824 },
7825 {
7826 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7827 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7828 .feature_status = pqi_firmware_feature_status,
7829 },
7830 {
7831 .feature_name = "Firmware Triage",
7832 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7833 .feature_status = pqi_ctrl_update_feature_flags,
7834 },
7835 {
7836 .feature_name = "RPL Extended Formats 4 and 5",
7837 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7838 .feature_status = pqi_ctrl_update_feature_flags,
7839 },
7840 {
7841 .feature_name = "Multi-LUN Target",
7842 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
7843 .feature_status = pqi_ctrl_update_feature_flags,
7844 },
7845 };
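/*
 * Feature negotiation runs in three passes: mark which known features
 * the firmware supports, request all supported ones through the config
 * table, then read back which ones the firmware actually enabled.
 */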
7847 static void pqi_process_firmware_features(
7848 struct pqi_config_table_section_info *section_info)
7849 {
7850 int rc;
7851 struct pqi_ctrl_info *ctrl_info;
7852 struct pqi_config_table_firmware_features *firmware_features;
7853 void __iomem *firmware_features_iomem_addr;
7854 unsigned int i;
7855 unsigned int num_features_supported;
7857 ctrl_info = section_info->ctrl_info;
7858 firmware_features = section_info->section;
7859 firmware_features_iomem_addr = section_info->section_iomem_addr;
7861 for (i = 0, num_features_supported = 0;
7862 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7863 if (pqi_is_firmware_feature_supported(firmware_features,
7864 pqi_firmware_features[i].feature_bit)) {
7865 pqi_firmware_features[i].supported = true;
7866 num_features_supported++;
7867 } else {
7868 pqi_firmware_feature_update(ctrl_info,
7869 &pqi_firmware_features[i]);
7870 }
7871 }
7873 if (num_features_supported == 0)
7874 return;
7876 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7877 if (!pqi_firmware_features[i].supported)
7878 continue;
7879 pqi_request_firmware_feature(firmware_features,
7880 pqi_firmware_features[i].feature_bit);
7881 }
7883 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7884 firmware_features_iomem_addr);
7885 if (rc) {
7886 dev_err(&ctrl_info->pci_dev->dev,
7887 "failed to enable firmware features in PQI configuration table\n");
7888 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7889 if (!pqi_firmware_features[i].supported)
7890 continue;
7891 pqi_firmware_feature_update(ctrl_info,
7892 &pqi_firmware_features[i]);
7893 }
7894 return;
7895 }
7897 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7898 if (!pqi_firmware_features[i].supported)
7899 continue;
7900 if (pqi_is_firmware_feature_enabled(firmware_features,
7901 firmware_features_iomem_addr,
7902 pqi_firmware_features[i].feature_bit)) {
7903 pqi_firmware_features[i].enabled = true;
7904 }
7905 pqi_firmware_feature_update(ctrl_info,
7906 &pqi_firmware_features[i]);
7907 }
7908 }
7910 static void pqi_init_firmware_features(void)
7911 {
7912 unsigned int i;
7914 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7915 pqi_firmware_features[i].supported = false;
7916 pqi_firmware_features[i].enabled = false;
7917 }
7918 }
7920 static void pqi_process_firmware_features_section(
7921 struct pqi_config_table_section_info *section_info)
7922 {
7923 mutex_lock(&pqi_firmware_features_mutex);
7924 pqi_init_firmware_features();
7925 pqi_process_firmware_features(section_info);
7926 mutex_unlock(&pqi_firmware_features_mutex);
7927 }
7929 /*
7930 * Reset all controller settings that can be initialized during the processing
7931 * of the PQI Configuration Table.
7932 */
7934 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7935 {
7936 ctrl_info->heartbeat_counter = NULL;
7937 ctrl_info->soft_reset_status = NULL;
7938 ctrl_info->soft_reset_handshake_supported = false;
7939 ctrl_info->enable_r1_writes = false;
7940 ctrl_info->enable_r5_writes = false;
7941 ctrl_info->enable_r6_writes = false;
7942 ctrl_info->raid_iu_timeout_supported = false;
7943 ctrl_info->tmf_iu_timeout_supported = false;
7944 ctrl_info->firmware_triage_supported = false;
7945 ctrl_info->rpl_extended_format_4_5_supported = false;
7946 ctrl_info->multi_lun_device_supported = false;
7947 }
7949 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7950 {
7951 u32 table_length;
7952 u32 section_offset;
7953 bool firmware_feature_section_present;
7954 void __iomem *table_iomem_addr;
7955 struct pqi_config_table *config_table;
7956 struct pqi_config_table_section_header *section;
7957 struct pqi_config_table_section_info section_info;
7958 struct pqi_config_table_section_info feature_section_info = {0};
7960 table_length = ctrl_info->config_table_length;
7961 if (table_length == 0)
7962 return 0;
7964 config_table = kmalloc(table_length, GFP_KERNEL);
7965 if (!config_table) {
7966 dev_err(&ctrl_info->pci_dev->dev,
7967 "failed to allocate memory for PQI configuration table\n");
7968 return -ENOMEM;
7969 }
7971 /*
7972 * Copy the config table contents from I/O memory space into the
7973 * driver's memory.
7974 */
7975 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7976 memcpy_fromio(config_table, table_iomem_addr, table_length);
7978 firmware_feature_section_present = false;
7979 section_info.ctrl_info = ctrl_info;
7980 section_offset = get_unaligned_le32(&config_table->first_section_offset);
7982 while (section_offset) {
7983 section = (void *)config_table + section_offset;
7985 section_info.section = section;
7986 section_info.section_offset = section_offset;
7987 section_info.section_iomem_addr = table_iomem_addr + section_offset;
7989 switch (get_unaligned_le16(&section->section_id)) {
7990 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7991 firmware_feature_section_present = true;
7992 feature_section_info = section_info;
7993 break;
7994 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7995 if (pqi_disable_heartbeat)
7996 dev_warn(&ctrl_info->pci_dev->dev,
7997 "heartbeat disabled by module parameter\n");
7999 ctrl_info->heartbeat_counter =
8002 offsetof(struct pqi_config_table_heartbeat,
8005 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8006 ctrl_info->soft_reset_status =
8009 offsetof(struct pqi_config_table_soft_reset,
8014 section_offset = get_unaligned_le16(§ion->next_section_offset);
8017 /*
8018 * We process the firmware feature section after all other sections
8019 * have been processed so that the feature bit callbacks can take
8020 * into account the settings configured by other sections.
8021 */
8022 if (firmware_feature_section_present)
8023 pqi_process_firmware_features_section(&feature_section_info);
8025 kfree(config_table);
8027 return 0;
8028 }
8030 /* Switches the controller from PQI mode back into SIS mode. */
8032 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8033 {
8034 int rc;
8036 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8037 rc = pqi_reset(ctrl_info);
8038 if (rc)
8039 return rc;
8040 rc = sis_reenable_sis_mode(ctrl_info);
8041 if (rc) {
8042 dev_err(&ctrl_info->pci_dev->dev,
8043 "re-enabling SIS mode failed with error %d\n", rc);
8044 return rc;
8045 }
8046 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8048 return 0;
8049 }
8051 /*
8052 * If the controller isn't already in SIS mode, this function forces it into
8053 * SIS mode.
8054 */
8056 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8057 {
8058 if (!sis_is_firmware_running(ctrl_info))
8059 return -ENXIO;
8061 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8062 return 0;
8064 if (sis_is_kernel_up(ctrl_info)) {
8065 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8066 return 0;
8067 }
8069 return pqi_revert_to_sis_mode(ctrl_info);
8070 }
8072 static void pqi_perform_lockup_action(void)
8073 {
8074 switch (pqi_lockup_action) {
8075 case PANIC:
8076 panic("FATAL: Smart Family Controller lockup detected");
8077 break;
8078 case REBOOT:
8079 emergency_restart();
8080 break;
8081 case NONE:
8082 default:
8083 break;
8084 }
8085 }
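/*
 * One-time controller bring-up: force SIS mode (or recover from a kdump
 * crash), transition SIS -> PQI, create admin and operational queues,
 * process the config table (including feature negotiation), then
 * register with the SCSI core and start the first device scan.
 */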
8087 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8088 {
8089 int rc;
8090 u32 product_id;
8092 if (reset_devices) {
8093 if (pqi_is_fw_triage_supported(ctrl_info)) {
8094 rc = sis_wait_for_fw_triage_completion(ctrl_info);
8095 if (rc)
8096 return rc;
8097 }
8098 sis_soft_reset(ctrl_info);
8099 ssleep(PQI_POST_RESET_DELAY_SECS);
8100 } else {
8101 rc = pqi_force_sis_mode(ctrl_info);
8102 if (rc)
8103 return rc;
8104 }
8106 /*
8107 * Wait until the controller is ready to start accepting SIS
8108 * commands.
8109 */
8110 rc = sis_wait_for_ctrl_ready(ctrl_info);
8111 if (rc) {
8112 if (reset_devices) {
8113 dev_err(&ctrl_info->pci_dev->dev,
8114 "kdump init failed with error %d\n", rc);
8115 pqi_lockup_action = REBOOT;
8116 pqi_perform_lockup_action();
8117 }
8118 return rc;
8119 }
8121 /*
8122 * Get the controller properties. This allows us to determine
8123 * whether or not it supports PQI mode.
8124 */
8125 rc = sis_get_ctrl_properties(ctrl_info);
8126 if (rc) {
8127 dev_err(&ctrl_info->pci_dev->dev,
8128 "error obtaining controller properties\n");
8129 return rc;
8130 }
8132 rc = sis_get_pqi_capabilities(ctrl_info);
8133 if (rc) {
8134 dev_err(&ctrl_info->pci_dev->dev,
8135 "error obtaining controller capabilities\n");
8136 return rc;
8137 }
8139 product_id = sis_get_product_id(ctrl_info);
8140 ctrl_info->product_id = (u8)product_id;
8141 ctrl_info->product_revision = (u8)(product_id >> 8);
8143 if (reset_devices) {
8144 if (ctrl_info->max_outstanding_requests >
8145 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8146 ctrl_info->max_outstanding_requests =
8147 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8148 } else {
8149 if (ctrl_info->max_outstanding_requests >
8150 PQI_MAX_OUTSTANDING_REQUESTS)
8151 ctrl_info->max_outstanding_requests =
8152 PQI_MAX_OUTSTANDING_REQUESTS;
8153 }
8155 pqi_calculate_io_resources(ctrl_info);
8157 rc = pqi_alloc_error_buffer(ctrl_info);
8158 if (rc) {
8159 dev_err(&ctrl_info->pci_dev->dev,
8160 "failed to allocate PQI error buffer\n");
8161 return rc;
8162 }
8164 /*
8165 * If the function we are about to call succeeds, the
8166 * controller will transition from legacy SIS mode
8167 * into PQI mode.
8168 */
8169 rc = sis_init_base_struct_addr(ctrl_info);
8170 if (rc) {
8171 dev_err(&ctrl_info->pci_dev->dev,
8172 "error initializing PQI mode\n");
8173 return rc;
8174 }
8176 /* Wait for the controller to complete the SIS -> PQI transition. */
8177 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8178 if (rc) {
8179 dev_err(&ctrl_info->pci_dev->dev,
8180 "transition to PQI mode failed\n");
8181 return rc;
8182 }
8184 /* From here on, we are running in PQI mode. */
8185 ctrl_info->pqi_mode_enabled = true;
8186 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8188 rc = pqi_alloc_admin_queues(ctrl_info);
8189 if (rc) {
8190 dev_err(&ctrl_info->pci_dev->dev,
8191 "failed to allocate admin queues\n");
8192 return rc;
8193 }
8195 rc = pqi_create_admin_queues(ctrl_info);
8196 if (rc) {
8197 dev_err(&ctrl_info->pci_dev->dev,
8198 "error creating admin queues\n");
8199 return rc;
8200 }
8202 rc = pqi_report_device_capability(ctrl_info);
8203 if (rc) {
8204 dev_err(&ctrl_info->pci_dev->dev,
8205 "obtaining device capability failed\n");
8206 return rc;
8207 }
8209 rc = pqi_validate_device_capability(ctrl_info);
8210 if (rc)
8211 return rc;
8213 pqi_calculate_queue_resources(ctrl_info);
8215 rc = pqi_enable_msix_interrupts(ctrl_info);
8216 if (rc)
8217 return rc;
8219 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8220 ctrl_info->max_msix_vectors =
8221 ctrl_info->num_msix_vectors_enabled;
8222 pqi_calculate_queue_resources(ctrl_info);
8223 }
8225 rc = pqi_alloc_io_resources(ctrl_info);
8226 if (rc)
8227 return rc;
8229 rc = pqi_alloc_operational_queues(ctrl_info);
8230 if (rc) {
8231 dev_err(&ctrl_info->pci_dev->dev,
8232 "failed to allocate operational queues\n");
8233 return rc;
8234 }
8236 pqi_init_operational_queues(ctrl_info);
8238 rc = pqi_create_queues(ctrl_info);
8239 if (rc)
8240 return rc;
8242 rc = pqi_request_irqs(ctrl_info);
8243 if (rc)
8244 return rc;
8246 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8248 ctrl_info->controller_online = true;
8250 rc = pqi_process_config_table(ctrl_info);
8251 if (rc)
8252 return rc;
8254 pqi_start_heartbeat_timer(ctrl_info);
8256 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8257 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8258 if (rc) { /* Supported features not returned correctly. */
8259 dev_err(&ctrl_info->pci_dev->dev,
8260 "error obtaining advanced RAID bypass configuration\n");
8261 return rc;
8262 }
8263 ctrl_info->ciss_report_log_flags |=
8264 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8265 }
8267 rc = pqi_enable_events(ctrl_info);
8268 if (rc) {
8269 dev_err(&ctrl_info->pci_dev->dev,
8270 "error enabling events\n");
8274 /* Register with the SCSI subsystem. */
8275 rc = pqi_register_scsi(ctrl_info);
8279 rc = pqi_get_ctrl_product_details(ctrl_info);
8281 dev_err(&ctrl_info->pci_dev->dev,
8282 "error obtaining product details\n");
8286 rc = pqi_get_ctrl_serial_number(ctrl_info);
8288 dev_err(&ctrl_info->pci_dev->dev,
8289 "error obtaining ctrl serial number\n");
8293 rc = pqi_set_diag_rescan(ctrl_info);
8295 dev_err(&ctrl_info->pci_dev->dev,
8296 "error enabling multi-lun rescan\n");
8300 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8302 dev_err(&ctrl_info->pci_dev->dev,
8303 "error updating host wellness\n");
8307 pqi_schedule_update_time_worker(ctrl_info);
8309 pqi_scan_scsi_devices(ctrl_info);
8314 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8315 {
8316 unsigned int i;
8317 struct pqi_admin_queues *admin_queues;
8318 struct pqi_event_queue *event_queue;
8320 admin_queues = &ctrl_info->admin_queues;
8321 admin_queues->iq_pi_copy = 0;
8322 admin_queues->oq_ci_copy = 0;
8323 writel(0, admin_queues->oq_pi);
8325 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8326 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8327 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8328 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8330 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8331 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8332 writel(0, ctrl_info->queue_groups[i].oq_pi);
8333 }
8335 event_queue = &ctrl_info->event_queue;
8336 writel(0, event_queue->oq_pi);
8337 event_queue->oq_ci_copy = 0;
8338 }
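/*
 * Lighter-weight re-initialization used on resume paths (e.g. after an
 * online firmware activation): queue memory is preserved, so only the
 * producer/consumer indices are reset before the controller is walked
 * back through the SIS -> PQI transition.
 */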
8340 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8341 {
8342 int rc;
8344 rc = pqi_force_sis_mode(ctrl_info);
8345 if (rc)
8346 return rc;
8348 /*
8349 * Wait until the controller is ready to start accepting SIS
8350 * commands.
8351 */
8352 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8353 if (rc)
8354 return rc;
8356 /*
8357 * Get the controller properties. This allows us to determine
8358 * whether or not it supports PQI mode.
8359 */
8360 rc = sis_get_ctrl_properties(ctrl_info);
8361 if (rc) {
8362 dev_err(&ctrl_info->pci_dev->dev,
8363 "error obtaining controller properties\n");
8364 return rc;
8365 }
8367 rc = sis_get_pqi_capabilities(ctrl_info);
8368 if (rc) {
8369 dev_err(&ctrl_info->pci_dev->dev,
8370 "error obtaining controller capabilities\n");
8371 return rc;
8372 }
8374 /*
8375 * If the function we are about to call succeeds, the
8376 * controller will transition from legacy SIS mode
8377 * into PQI mode.
8378 */
8379 rc = sis_init_base_struct_addr(ctrl_info);
8380 if (rc) {
8381 dev_err(&ctrl_info->pci_dev->dev,
8382 "error initializing PQI mode\n");
8383 return rc;
8384 }
8386 /* Wait for the controller to complete the SIS -> PQI transition. */
8387 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8388 if (rc) {
8389 dev_err(&ctrl_info->pci_dev->dev,
8390 "transition to PQI mode failed\n");
8391 return rc;
8392 }
8394 /* From here on, we are running in PQI mode. */
8395 ctrl_info->pqi_mode_enabled = true;
8396 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8398 pqi_reinit_queues(ctrl_info);
8400 rc = pqi_create_admin_queues(ctrl_info);
8401 if (rc) {
8402 dev_err(&ctrl_info->pci_dev->dev,
8403 "error creating admin queues\n");
8404 return rc;
8405 }
8407 rc = pqi_create_queues(ctrl_info);
8408 if (rc)
8409 return rc;
8411 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8413 ctrl_info->controller_online = true;
8414 pqi_ctrl_unblock_requests(ctrl_info);
8416 pqi_ctrl_reset_config(ctrl_info);
8418 rc = pqi_process_config_table(ctrl_info);
8419 if (rc)
8420 return rc;
8422 pqi_start_heartbeat_timer(ctrl_info);
8424 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8425 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8426 if (rc) {
8427 dev_err(&ctrl_info->pci_dev->dev,
8428 "error obtaining advanced RAID bypass configuration\n");
8429 return rc;
8430 }
8431 ctrl_info->ciss_report_log_flags |=
8432 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8433 }
8435 rc = pqi_enable_events(ctrl_info);
8436 if (rc) {
8437 dev_err(&ctrl_info->pci_dev->dev,
8438 "error enabling events\n");
8439 return rc;
8440 }
8442 rc = pqi_get_ctrl_product_details(ctrl_info);
8443 if (rc) {
8444 dev_err(&ctrl_info->pci_dev->dev,
8445 "error obtaining product details\n");
8446 return rc;
8447 }
8449 rc = pqi_set_diag_rescan(ctrl_info);
8450 if (rc) {
8451 dev_err(&ctrl_info->pci_dev->dev,
8452 "error enabling multi-lun rescan\n");
8453 return rc;
8454 }
8456 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8457 if (rc) {
8458 dev_err(&ctrl_info->pci_dev->dev,
8459 "error updating host wellness\n");
8460 return rc;
8461 }
8463 if (pqi_ofa_in_progress(ctrl_info))
8464 pqi_ctrl_unblock_scan(ctrl_info);
8466 pqi_scan_scsi_devices(ctrl_info);
8468 return 0;
8469 }
8471 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8472 {
8473 int rc;
8475 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8476 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8478 return pcibios_err_to_errno(rc);
8479 }
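/*
 * PCI bring-up: enable the device, pick a 64-bit DMA mask when
 * dma_addr_t allows, map BAR 0 for register access, and widen the PCIe
 * completion timeout before enabling bus mastering.
 */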
8481 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8482 {
8483 int rc;
8484 u64 mask;
8486 rc = pci_enable_device(ctrl_info->pci_dev);
8487 if (rc) {
8488 dev_err(&ctrl_info->pci_dev->dev,
8489 "failed to enable PCI device\n");
8490 return rc;
8491 }
8493 if (sizeof(dma_addr_t) > 4)
8494 mask = DMA_BIT_MASK(64);
8495 else
8496 mask = DMA_BIT_MASK(32);
8498 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8499 if (rc) {
8500 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8501 goto disable_device;
8502 }
8504 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8505 if (rc) {
8506 dev_err(&ctrl_info->pci_dev->dev,
8507 "failed to obtain PCI resources\n");
8508 goto disable_device;
8509 }
8511 ctrl_info->iomem_base = ioremap(pci_resource_start(
8512 ctrl_info->pci_dev, 0),
8513 pci_resource_len(ctrl_info->pci_dev, 0));
8514 if (!ctrl_info->iomem_base) {
8515 dev_err(&ctrl_info->pci_dev->dev,
8516 "failed to map memory for controller registers\n");
8518 goto release_regions;
8521 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8523 /* Increase the PCIe completion timeout. */
8524 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8525 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8527 dev_err(&ctrl_info->pci_dev->dev,
8528 "failed to set PCIe completion timeout\n");
8529 goto release_regions;
8532 /* Enable bus mastering. */
8533 pci_set_master(ctrl_info->pci_dev);
8535 ctrl_info->registers = ctrl_info->iomem_base;
8536 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8538 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8543 pci_release_regions(ctrl_info->pci_dev);
8545 pci_disable_device(ctrl_info->pci_dev);
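/*
 * Undo pqi_pci_init() in reverse order. The pci_is_enabled() check makes
 * this safe on paths where the device was already disabled, such as after
 * pqi_take_ctrl_offline().
 */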
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	if (pci_is_enabled(ctrl_info->pci_dev))
		pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}

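/*
 * Allocate and initialize the per-controller state. The allocation is
 * NUMA-node local so that hot-path structures sit close to the adapter,
 * and every lock, timer, and work item is initialized here, before
 * anything can schedule or fire them.
 */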
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
		GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);
	mutex_init(&ctrl_info->lun_reset_mutex);
	mutex_init(&ctrl_info->ofa_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);

	INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
	INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	init_waitqueue_head(&ctrl_info->block_requests_wait);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->irq_mode = IRQ_MODE_NONE;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
	ctrl_info->max_transfer_encrypted_sas_sata =
		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
	ctrl_info->max_transfer_encrypted_nvme =
		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
	ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
	ctrl_info->max_write_raid_1_10_2drive = ~0;
	ctrl_info->max_write_raid_1_10_3drive = ~0;
	ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;

	return ctrl_info;
}

static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}

static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_irqs(ctrl_info);
	pqi_disable_msix_interrupts(ctrl_info);
}

static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}

static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->controller_online = false;
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
		pqi_fail_all_outstanding_requests(ctrl_info);
		ctrl_info->pqi_mode_enabled = false;
	}
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}

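/*
 * Quiesce the controller for Online Firmware Activation: block scans,
 * then new I/O, then device resets, wait for everything in flight to
 * drain, and only then stop the heartbeat timer.
 * pqi_ofa_ctrl_unquiesce() undoes these steps in the opposite order.
 */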
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
}

static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);
}

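/*
 * Allocate the OFA host buffer as sg_count equal-sized DMA chunks and
 * describe them in the sg_descriptor table of the pqi_ofa_memory struct
 * that is handed to the firmware. The final descriptor is tagged with
 * CISS_SG_LAST, and all fields are stored little-endian.
 */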
static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
{
	int i;
	u32 sg_count;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	dma_addr_t dma_handle;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	sg_count = DIV_ROUND_UP(total_size, chunk_size);
	if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
		goto out;

	ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
		goto out;

	dev = &ctrl_info->pci_dev->dev;

	for (i = 0; i < sg_count; i++) {
		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			goto out_free_chunks;
		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	return -ENOMEM;
}

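/*
 * Size the OFA host buffer. Start by attempting one chunk covering the
 * entire request and halve the chunk size on each failure, stopping once
 * the page-aligned chunks would no longer fit within
 * PQI_OFA_MAX_SG_DESCRIPTORS descriptors.
 */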
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 chunk_size;
	u32 min_chunk_size;

	if (ctrl_info->ofa_bytes_requested == 0)
		return 0;

	total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
	min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
	min_chunk_size = PAGE_ALIGN(min_chunk_size);

	for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
		if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
			return 0;
		chunk_size /= 2;
		chunk_size = PAGE_ALIGN(chunk_size);
	}

	return -ENOMEM;
}

static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	struct device *dev;
	struct pqi_ofa_memory *ofap;

	dev = &ctrl_info->pci_dev->dev;

	ofap = dma_alloc_coherent(dev, sizeof(*ofap),
		&ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
	if (!ofap)
		return;

	ctrl_info->pqi_ofa_mem_virt_addr = ofap;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev,
			"failed to allocate host buffer for Online Firmware Activation\n");
		dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
		ctrl_info->pqi_ofa_mem_virt_addr = NULL;
		return;
	}

	put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
	memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}

static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	struct pqi_ofa_memory *ofap;
	struct pqi_sg_descriptor *mem_descriptor;
	unsigned int num_memory_descriptors;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
	if (!ofap)
		return;

	dev = &ctrl_info->pci_dev->dev;

	if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
		goto out;

	mem_descriptor = ofap->sg_descriptor;
	num_memory_descriptors =
		get_unaligned_le16(&ofap->num_memory_descriptors);

	for (i = 0; i < num_memory_descriptors; i++) {
		dma_free_coherent(dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(dev, sizeof(*ofap), ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}

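/*
 * Tell the firmware where the OFA host buffer lives via a vendor-general
 * IU. If no buffer was allocated, the address and length stay zero,
 * which indicates that no host memory is available.
 */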
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	u32 buffer_length;
	struct pqi_vendor_general_request request;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (ofap) {
		buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(buffer_length,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}

static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{
	ssleep(delay_secs);

	return pqi_ctrl_init_resume(ctrl_info);
}

static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};

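/*
 * Complete every request still sitting in the I/O slot pool once the
 * controller is gone: SML commands are failed with DID_NO_CONNECT, while
 * driver-internal requests get -ENXIO plus the canned error info above.
 */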
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			sdev = scmd->device;
			if (!sdev || !scsi_device_online(sdev)) {
				pqi_free_io_request(io_request);
				continue;
			}
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}

static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}

static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microchip Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}

static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		node = cpu_to_node(0);
		if (node == NUMA_NO_NODE)
			node = 0;
		set_dev_node(&pci_dev->dev, node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}
	ctrl_info->numa_node = node;

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}

static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;
	u16 vendor_id;
	int rc;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
	if (vendor_id == 0xffff)
		ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
	else
		ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;

	if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
		rc = pqi_flush_cache(ctrl_info, RESTART);
		if (rc)
			dev_err(&pci_dev->dev,
				"unable to flush controller cache during remove\n");
	}

	pqi_remove_ctrl(ctrl_info);
}

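/*
 * Sanity check used on the shutdown/suspend paths after the controller
 * has been quiesced: no slot should still be busy. For any slot that is,
 * exactly one of the two complementary WARN_ON()s fires, identifying
 * whether the leaked request came from the SCSI midlayer or from the
 * driver itself.
 */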
static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
	}
}

static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (system_state == SYSTEM_RESTART)
		shutdown_event = RESTART;
	else
		shutdown_event = SHUTDOWN;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, shutdown_event);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}

static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}

#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS	30
#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS	(30 * 60)

static void pqi_process_ctrl_ready_timeout_param(void)
{
	if (pqi_ctrl_ready_timeout_secs == 0)
		return;

	if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
		pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
			DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
		pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
	} else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
		pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
			DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
		pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
	}

	sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
}

static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
	pqi_process_ctrl_ready_timeout_param();
}

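/*
 * Power management: suspend and poweroff flush the controller cache and
 * quiesce; freeze (hibernate image creation) quiesces without a flush.
 * Resume and restore re-run full controller initialization via
 * pqi_ctrl_init_resume(), while thaw simply re-enables the still-running
 * controller.
 */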
#if defined(CONFIG_PM)

static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
{
	if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
		return RESTART;

	return SUSPEND;
}

static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (suspend) {
		enum bmic_flush_cache_shutdown_event shutdown_event;

		shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
		pqi_flush_cache(ctrl_info, shutdown_event);
	}

	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_crash_if_pending_command(ctrl_info);
	pqi_free_irqs(ctrl_info);

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

static __maybe_unused int pqi_suspend(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, true);
}

static int pqi_resume_or_restore(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	ssleep(PQI_POST_RESET_DELAY_SECS);

	return pqi_ctrl_init_resume(ctrl_info);
}

static int pqi_freeze(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, false);
}

static int pqi_thaw(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	ctrl_info->controller_online = true;
	ctrl_info->pqi_mode_enabled = true;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	return 0;
}

static int pqi_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
	pqi_flush_cache(ctrl_info, shutdown_event);

	return 0;
}

static const struct dev_pm_ops pqi_pm_ops = {
	.suspend = pqi_suspend,
	.resume = pqi_resume_or_restore,
	.freeze = pqi_freeze,
	.thaw = pqi_thaw,
	.poweroff = pqi_poweroff,
	.restore = pqi_resume_or_restore,
};

#endif /* CONFIG_PM */

/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0659) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0808) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0809) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x080a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1304) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1400) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1402) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1410) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1411) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1412) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1420) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1430) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1440) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1441) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1450) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1452) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1462) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1463) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1470) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1471) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1472) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1473) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1474) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1475) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1480) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1490) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1491) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14a0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14a1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14a2) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14a4) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14a5) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14a6) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14b0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14b1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14c0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14c1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14c2) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14c3) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14c4) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14d0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14e0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x14f0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADVANTECH, 0x8312) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1002) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1101) },
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_GIGABYTE, 0x1000) },
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_LENOVO, 0x0220) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_LENOVO, 0x0221) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_LENOVO, 0x0520) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_LENOVO, 0x0522) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_LENOVO, 0x0620) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_LENOVO, 0x0621) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_LENOVO, 0x0622) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_LENOVO, 0x0623) },
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.driver = {
		.pm = &pqi_pm_ops
	},
#endif
};

static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_verify_structures();
	sis_verify_structures();

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);

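/*
 * Compile-time layout checks. The PQI and BMIC structures below are
 * shared with controller firmware, so any drift in a field offset or
 * structure size breaks the build rather than corrupting messages at
 * runtime.
 */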
static void pqi_verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_shutdown_reason_code) != 0xcc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		driver_flags) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}