// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
30 #include "smartpqi_sis.h"
32 #if !defined(BUILD_TIMESTAMP)
33 #define BUILD_TIMESTAMP

#define DRIVER_VERSION		"1.2.16-012"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		16
#define DRIVER_REVISION		12

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
54 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
55 static void pqi_ctrl_offline_worker(struct work_struct *work);
56 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
57 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
58 static void pqi_scan_start(struct Scsi_Host *shost);
59 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
60 struct pqi_queue_group *queue_group, enum pqi_io_path path,
61 struct pqi_io_request *io_request);
62 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
63 struct pqi_iu_header *request, unsigned int flags,
64 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
65 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
66 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
67 unsigned int cdb_length, struct pqi_queue_group *queue_group,
68 struct pqi_encryption_info *encryption_info, bool raid_bypass);
69 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
70 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
71 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
72 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
74 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
75 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
76 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
77 struct pqi_scsi_dev *device, unsigned long timeout_secs);
79 /* for flags argument to pqi_submit_raid_request_synchronous() */
80 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
82 static struct scsi_transport_template *pqi_sas_transport_template;
84 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};
112 static unsigned int pqi_supported_event_types[] = {
113 PQI_EVENT_TYPE_HOTPLUG,
114 PQI_EVENT_TYPE_HARDWARE,
115 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
116 PQI_EVENT_TYPE_LOGICAL_DEVICE,
118 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
119 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
122 static int pqi_disable_device_id_wildcards;
123 module_param_named(disable_device_id_wildcards,
124 pqi_disable_device_id_wildcards, int, 0644);
125 MODULE_PARM_DESC(disable_device_id_wildcards,
126 "Disable device ID wildcards.");
128 static int pqi_disable_heartbeat;
129 module_param_named(disable_heartbeat,
130 pqi_disable_heartbeat, int, 0644);
131 MODULE_PARM_DESC(disable_heartbeat,
132 "Disable heartbeat.");
134 static int pqi_disable_ctrl_shutdown;
135 module_param_named(disable_ctrl_shutdown,
136 pqi_disable_ctrl_shutdown, int, 0644);
137 MODULE_PARM_DESC(disable_ctrl_shutdown,
138 "Disable controller shutdown when controller locked up.");
140 static char *pqi_lockup_action_param;
141 module_param_named(lockup_action,
142 pqi_lockup_action_param, charp, 0644);
143 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
144 "\t\tSupported: none, reboot, panic\n"
145 "\t\tDefault: none");
147 static int pqi_expose_ld_first;
148 module_param_named(expose_ld_first,
149 pqi_expose_ld_first, int, 0644);
150 MODULE_PARM_DESC(expose_ld_first,
151 "Expose logical drives before physical drives.");
153 static int pqi_hide_vsep;
154 module_param_named(hide_vsep,
155 pqi_hide_vsep, int, 0644);
156 MODULE_PARM_DESC(hide_vsep,
157 "Hide the virtual SEP for direct attached drives.");
159 static char *raid_levels[] = {
169 static char *pqi_raid_level_to_string(u8 raid_level)
171 if (raid_level < ARRAY_SIZE(raid_levels))
172 return raid_levels[raid_level];
174 return "RAID UNKNOWN";
179 #define SA_RAID_1 2 /* also used for RAID 10 */
180 #define SA_RAID_5 3 /* also used for RAID 50 */
182 #define SA_RAID_6 5 /* also used for RAID 60 */
183 #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
184 #define SA_RAID_MAX SA_RAID_ADM
185 #define SA_RAID_UNKNOWN 0xff
187 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
189 pqi_prep_for_scsi_done(scmd);
190 scmd->scsi_done(scmd);
193 static inline void pqi_disable_write_same(struct scsi_device *sdev)
195 sdev->no_write_same = 1;
198 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
200 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
203 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
205 return !device->is_physical_device;
208 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
210 return scsi3addr[2] != 0;
213 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
215 return !ctrl_info->controller_online;
218 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
220 if (ctrl_info->controller_online)
221 if (!sis_is_firmware_running(ctrl_info))
222 pqi_take_ctrl_offline(ctrl_info);
225 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
227 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
230 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
231 struct pqi_ctrl_info *ctrl_info)
233 return sis_read_driver_scratch(ctrl_info);
236 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
237 enum pqi_ctrl_mode mode)
239 sis_write_driver_scratch(ctrl_info, mode);
242 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
244 ctrl_info->block_device_reset = true;
247 static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
249 return ctrl_info->block_device_reset;
252 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
254 return ctrl_info->block_requests;
257 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
259 ctrl_info->block_requests = true;
260 scsi_block_requests(ctrl_info->scsi_host);
263 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
265 ctrl_info->block_requests = false;
266 wake_up_all(&ctrl_info->block_requests_wait);
267 pqi_retry_raid_bypass_requests(ctrl_info);
268 scsi_unblock_requests(ctrl_info->scsi_host);
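
/*
 * Wait for the controller to stop blocking new requests.  Returns how many
 * milliseconds of timeout_msecs remain; with NO_TIMEOUT the wait is
 * unbounded and the timeout value is returned unchanged.  The blocked-thread
 * count is raised while waiting so quiesce logic can account for parked
 * submitters.
 */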
271 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
272 unsigned long timeout_msecs)
274 unsigned long remaining_msecs;
276 if (!pqi_ctrl_blocked(ctrl_info))
277 return timeout_msecs;
279 atomic_inc(&ctrl_info->num_blocked_threads);
281 if (timeout_msecs == NO_TIMEOUT) {
282 wait_event(ctrl_info->block_requests_wait,
283 !pqi_ctrl_blocked(ctrl_info));
284 remaining_msecs = timeout_msecs;
286 unsigned long remaining_jiffies;
		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
292 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
295 atomic_dec(&ctrl_info->num_blocked_threads);
297 return remaining_msecs;
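
/*
 * A controller is treated as quiesced once the number of "busy" submitter
 * threads (a counter maintained elsewhere in the driver) no longer exceeds
 * the number of threads parked in pqi_wait_if_ctrl_blocked() above.
 */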
300 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
302 while (atomic_read(&ctrl_info->num_busy_threads) >
303 atomic_read(&ctrl_info->num_blocked_threads))
304 usleep_range(1000, 2000);
307 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
309 return device->device_offline;
312 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
314 device->in_reset = true;
317 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
319 device->in_reset = false;
322 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
324 return device->in_reset;
327 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
329 ctrl_info->in_ofa = true;
332 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
334 ctrl_info->in_ofa = false;
337 static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
339 return ctrl_info->in_ofa;
342 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
344 device->in_remove = true;
347 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
349 return device->in_remove;
352 static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
354 ctrl_info->in_shutdown = true;
357 static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
359 return ctrl_info->in_shutdown;
362 static inline void pqi_schedule_rescan_worker_with_delay(
363 struct pqi_ctrl_info *ctrl_info, unsigned long delay)
365 if (pqi_ctrl_offline(ctrl_info))
367 if (pqi_ctrl_in_ofa(ctrl_info))
370 schedule_delayed_work(&ctrl_info->rescan_work, delay);
373 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
375 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
378 #define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
380 static inline void pqi_schedule_rescan_worker_delayed(
381 struct pqi_ctrl_info *ctrl_info)
383 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
386 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
388 cancel_delayed_work_sync(&ctrl_info->rescan_work);
391 static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
393 cancel_work_sync(&ctrl_info->event_work);
396 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
398 if (!ctrl_info->heartbeat_counter)
401 return readl(ctrl_info->heartbeat_counter);
404 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
406 if (!ctrl_info->soft_reset_status)
409 return readb(ctrl_info->soft_reset_status);
412 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
417 if (!ctrl_info->soft_reset_status)
420 status = pqi_read_soft_reset_status(ctrl_info);
422 writeb(status, ctrl_info->soft_reset_status);
425 static int pqi_map_single(struct pci_dev *pci_dev,
426 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
427 size_t buffer_length, enum dma_data_direction data_direction)
429 dma_addr_t bus_address;
431 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
434 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
436 if (dma_mapping_error(&pci_dev->dev, bus_address))
439 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
440 put_unaligned_le32(buffer_length, &sg_descriptor->length);
441 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
446 static void pqi_pci_unmap(struct pci_dev *pci_dev,
447 struct pqi_sg_descriptor *descriptors, int num_descriptors,
448 enum dma_data_direction data_direction)
452 if (data_direction == DMA_NONE)
455 for (i = 0; i < num_descriptors; i++)
456 dma_unmap_single(&pci_dev->dev,
457 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
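
/*
 * Build a RAID-path (CISS/BMIC passthrough) request: fill in the IU header,
 * LUN and CDB for the given command, then DMA-map the data buffer into the
 * first SG descriptor.  On success the caller owns the mapping and must
 * release it with pqi_pci_unmap().
 */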
462 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
463 struct pqi_raid_path_request *request, u8 cmd,
464 u8 *scsi3addr, void *buffer, size_t buffer_length,
465 u16 vpd_page, enum dma_data_direction *dir)
468 size_t cdb_length = buffer_length;
470 memset(request, 0, sizeof(*request));
472 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
473 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
474 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
475 &request->header.iu_length);
476 put_unaligned_le32(buffer_length, &request->buffer_length);
477 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
478 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
479 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
485 request->data_direction = SOP_READ_FLAG;
487 if (vpd_page & VPD_PAGE) {
489 cdb[2] = (u8)vpd_page;
491 cdb[4] = (u8)cdb_length;
493 case CISS_REPORT_LOG:
494 case CISS_REPORT_PHYS:
495 request->data_direction = SOP_READ_FLAG;
497 if (cmd == CISS_REPORT_PHYS)
498 cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
500 cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
501 put_unaligned_be32(cdb_length, &cdb[6]);
503 case CISS_GET_RAID_MAP:
504 request->data_direction = SOP_READ_FLAG;
506 cdb[1] = CISS_GET_RAID_MAP;
507 put_unaligned_be32(cdb_length, &cdb[6]);
510 request->data_direction = SOP_WRITE_FLAG;
512 cdb[6] = BMIC_FLUSH_CACHE;
513 put_unaligned_be16(cdb_length, &cdb[7]);
515 case BMIC_SENSE_DIAG_OPTIONS:
518 case BMIC_IDENTIFY_CONTROLLER:
519 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
520 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
521 request->data_direction = SOP_READ_FLAG;
524 put_unaligned_be16(cdb_length, &cdb[7]);
526 case BMIC_SET_DIAG_OPTIONS:
529 case BMIC_WRITE_HOST_WELLNESS:
530 request->data_direction = SOP_WRITE_FLAG;
533 put_unaligned_be16(cdb_length, &cdb[7]);
535 case BMIC_CSMI_PASSTHRU:
536 request->data_direction = SOP_BIDIRECTIONAL;
538 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
540 put_unaligned_be16(cdb_length, &cdb[7]);
543 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
547 switch (request->data_direction) {
549 *dir = DMA_FROM_DEVICE;
552 *dir = DMA_TO_DEVICE;
554 case SOP_NO_DIRECTION_FLAG:
558 *dir = DMA_BIDIRECTIONAL;
562 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
563 buffer, buffer_length, *dir);
566 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
568 io_request->scmd = NULL;
569 io_request->status = 0;
570 io_request->error_info = NULL;
571 io_request->raid_bypass = false;
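
/*
 * Allocate an I/O request structure from the pre-allocated pool by finding
 * an entry whose refcount can be raised from zero.  The starting index is
 * only a hint (hence "benignly racy"); the search wraps around
 * max_io_slots until a free slot is found.
 */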
574 static struct pqi_io_request *pqi_alloc_io_request(
575 struct pqi_ctrl_info *ctrl_info)
577 struct pqi_io_request *io_request;
578 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
581 io_request = &ctrl_info->io_request_pool[i];
582 if (atomic_inc_return(&io_request->refcount) == 1)
584 atomic_dec(&io_request->refcount);
585 i = (i + 1) % ctrl_info->max_io_slots;
589 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
591 pqi_reinit_io_request(io_request);
596 static void pqi_free_io_request(struct pqi_io_request *io_request)
598 atomic_dec(&io_request->refcount);
601 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
602 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
603 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
606 struct pqi_raid_path_request request;
607 enum dma_data_direction dir;
609 rc = pqi_build_raid_path_request(ctrl_info, &request,
610 cmd, scsi3addr, buffer,
611 buffer_length, vpd_page, &dir);
615 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
616 error_info, timeout_msecs);
618 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
623 /* helper functions for pqi_send_scsi_raid_request */
625 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
626 u8 cmd, void *buffer, size_t buffer_length)
628 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
629 buffer, buffer_length, 0, NULL, NO_TIMEOUT);
632 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
633 u8 cmd, void *buffer, size_t buffer_length,
634 struct pqi_raid_error_info *error_info)
636 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
637 buffer, buffer_length, 0, error_info, NO_TIMEOUT);
640 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
641 struct bmic_identify_controller *buffer)
643 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
644 buffer, sizeof(*buffer));
647 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
648 struct bmic_sense_subsystem_info *sense_info)
650 return pqi_send_ctrl_raid_request(ctrl_info,
651 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
652 sizeof(*sense_info));
655 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
656 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
658 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
659 buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
662 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
663 struct pqi_scsi_dev *device,
664 struct bmic_identify_physical_device *buffer, size_t buffer_length)
667 enum dma_data_direction dir;
668 u16 bmic_device_index;
669 struct pqi_raid_path_request request;
671 rc = pqi_build_raid_path_request(ctrl_info, &request,
672 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
673 buffer_length, 0, &dir);
677 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
678 request.cdb[2] = (u8)bmic_device_index;
679 request.cdb[9] = (u8)(bmic_device_index >> 8);
681 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
682 0, NULL, NO_TIMEOUT);
684 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
689 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
690 enum bmic_flush_cache_shutdown_event shutdown_event)
693 struct bmic_flush_cache *flush_cache;
	/*
	 * Don't bother trying to flush the cache if the controller is
	 * offline.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
702 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
706 flush_cache->shutdown_event = shutdown_event;
708 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
709 sizeof(*flush_cache));
716 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
717 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
718 struct pqi_raid_error_info *error_info)
720 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
721 buffer, buffer_length, error_info);
724 #define PQI_FETCH_PTRAID_DATA (1 << 31)
726 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
729 struct bmic_diag_options *diag;
731 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
735 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
736 diag, sizeof(*diag));
740 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
742 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
751 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
752 void *buffer, size_t buffer_length)
754 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
755 buffer, buffer_length);
760 struct bmic_host_wellness_driver_version {
762 u8 driver_version_tag[2];
763 __le16 driver_version_length;
764 char driver_version[32];
765 u8 dont_write_tag[2];
771 static int pqi_write_driver_version_to_host_wellness(
772 struct pqi_ctrl_info *ctrl_info)
775 struct bmic_host_wellness_driver_version *buffer;
776 size_t buffer_length;
778 buffer_length = sizeof(*buffer);
780 buffer = kmalloc(buffer_length, GFP_KERNEL);
784 buffer->start_tag[0] = '<';
785 buffer->start_tag[1] = 'H';
786 buffer->start_tag[2] = 'W';
787 buffer->start_tag[3] = '>';
788 buffer->driver_version_tag[0] = 'D';
789 buffer->driver_version_tag[1] = 'V';
790 put_unaligned_le16(sizeof(buffer->driver_version),
791 &buffer->driver_version_length);
792 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
793 sizeof(buffer->driver_version) - 1);
794 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
795 buffer->dont_write_tag[0] = 'D';
796 buffer->dont_write_tag[1] = 'W';
797 buffer->end_tag[0] = 'Z';
798 buffer->end_tag[1] = 'Z';
800 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
809 struct bmic_host_wellness_time {
814 u8 dont_write_tag[2];
820 static int pqi_write_current_time_to_host_wellness(
821 struct pqi_ctrl_info *ctrl_info)
824 struct bmic_host_wellness_time *buffer;
825 size_t buffer_length;
830 buffer_length = sizeof(*buffer);
832 buffer = kmalloc(buffer_length, GFP_KERNEL);
836 buffer->start_tag[0] = '<';
837 buffer->start_tag[1] = 'H';
838 buffer->start_tag[2] = 'W';
839 buffer->start_tag[3] = '>';
840 buffer->time_tag[0] = 'T';
841 buffer->time_tag[1] = 'D';
842 put_unaligned_le16(sizeof(buffer->time),
843 &buffer->time_length);
845 local_time = ktime_get_real_seconds();
846 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
847 year = tm.tm_year + 1900;
849 buffer->time[0] = bin2bcd(tm.tm_hour);
850 buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
854 buffer->time[5] = bin2bcd(tm.tm_mday);
855 buffer->time[6] = bin2bcd(year / 100);
856 buffer->time[7] = bin2bcd(year % 100);
858 buffer->dont_write_tag[0] = 'D';
859 buffer->dont_write_tag[1] = 'W';
860 buffer->end_tag[0] = 'Z';
861 buffer->end_tag[1] = 'Z';
863 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
870 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
872 static void pqi_update_time_worker(struct work_struct *work)
875 struct pqi_ctrl_info *ctrl_info;
877 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
880 if (pqi_ctrl_offline(ctrl_info))
883 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
885 dev_warn(&ctrl_info->pci_dev->dev,
886 "error updating time on controller\n");
888 schedule_delayed_work(&ctrl_info->update_time_work,
889 PQI_UPDATE_TIME_WORK_INTERVAL);
892 static inline void pqi_schedule_update_time_worker(
893 struct pqi_ctrl_info *ctrl_info)
895 schedule_delayed_work(&ctrl_info->update_time_work, 0);
898 static inline void pqi_cancel_update_time_worker(
899 struct pqi_ctrl_info *ctrl_info)
901 cancel_delayed_work_sync(&ctrl_info->update_time_work);
904 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
905 void *buffer, size_t buffer_length)
907 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
911 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
915 size_t lun_list_length;
916 size_t lun_data_length;
917 size_t new_lun_list_length;
918 void *lun_data = NULL;
919 struct report_lun_header *report_lun_header;
921 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
922 if (!report_lun_header) {
927 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
928 sizeof(*report_lun_header));
932 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
935 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
937 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
943 if (lun_list_length == 0) {
944 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
948 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
952 new_lun_list_length = get_unaligned_be32(
953 &((struct report_lun_header *)lun_data)->list_length);
955 if (new_lun_list_length > lun_list_length) {
956 lun_list_length = new_lun_list_length;
962 kfree(report_lun_header);
974 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
977 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
981 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
984 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
987 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
988 struct report_phys_lun_extended **physdev_list,
989 struct report_log_lun_extended **logdev_list)
992 size_t logdev_list_length;
993 size_t logdev_data_length;
994 struct report_log_lun_extended *internal_logdev_list;
995 struct report_log_lun_extended *logdev_data;
996 struct report_lun_header report_lun_header;
998 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1000 dev_err(&ctrl_info->pci_dev->dev,
1001 "report physical LUNs failed\n");
1003 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1005 dev_err(&ctrl_info->pci_dev->dev,
1006 "report logical LUNs failed\n");
1009 * Tack the controller itself onto the end of the logical device list.
1012 logdev_data = *logdev_list;
1015 logdev_list_length =
1016 get_unaligned_be32(&logdev_data->header.list_length);
1018 memset(&report_lun_header, 0, sizeof(report_lun_header));
1020 (struct report_log_lun_extended *)&report_lun_header;
1021 logdev_list_length = 0;
1024 logdev_data_length = sizeof(struct report_lun_header) +
1027 internal_logdev_list = kmalloc(logdev_data_length +
1028 sizeof(struct report_log_lun_extended), GFP_KERNEL);
1029 if (!internal_logdev_list) {
1030 kfree(*logdev_list);
1031 *logdev_list = NULL;
1035 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1036 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1037 sizeof(struct report_log_lun_extended_entry));
1038 put_unaligned_be32(logdev_list_length +
1039 sizeof(struct report_log_lun_extended_entry),
1040 &internal_logdev_list->header.list_length);
1042 kfree(*logdev_list);
1043 *logdev_list = internal_logdev_list;
1048 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1049 int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
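
/*
 * The SCSI nexus below is decoded from the low 32 bits of the CISS LUN
 * address: internal RAID volumes sit on PQI_RAID_VOLUME_BUS with the LUN
 * taken from the low bits, external RAID volumes additionally carry a
 * target id in bits 16-29, and the controller itself lives on PQI_HBA_BUS.
 */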
1056 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1064 scsi3addr = device->scsi3addr;
1065 lunid = get_unaligned_le32(scsi3addr);
1067 if (pqi_is_hba_lunid(scsi3addr)) {
1068 /* The specified device is the controller. */
1069 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1070 device->target_lun_valid = true;
1074 if (pqi_is_logical_device(device)) {
1075 if (device->is_external_raid_device) {
1076 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1077 target = (lunid >> 16) & 0x3fff;
1080 bus = PQI_RAID_VOLUME_BUS;
1082 lun = lunid & 0x3fff;
1084 pqi_set_bus_target_lun(device, bus, target, lun);
1085 device->target_lun_valid = true;
1090 * Defer target and LUN assignment for non-controller physical devices
1091 * because the SAS transport layer will make these assignments later.
1093 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1096 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1097 struct pqi_scsi_dev *device)
1103 raid_level = SA_RAID_UNKNOWN;
1105 buffer = kmalloc(64, GFP_KERNEL);
1107 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1108 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1110 raid_level = buffer[8];
1111 if (raid_level > SA_RAID_MAX)
1112 raid_level = SA_RAID_UNKNOWN;
1117 device->raid_level = raid_level;
1120 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1121 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1125 u32 r5or6_blocks_per_row;
1127 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1129 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1130 err_msg = "RAID map too small";
1134 if (device->raid_level == SA_RAID_1) {
1135 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1136 err_msg = "invalid RAID-1 map";
1139 } else if (device->raid_level == SA_RAID_ADM) {
1140 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1141 err_msg = "invalid RAID-1(ADM) map";
1144 } else if ((device->raid_level == SA_RAID_5 ||
1145 device->raid_level == SA_RAID_6) &&
1146 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1148 r5or6_blocks_per_row =
1149 get_unaligned_le16(&raid_map->strip_size) *
1150 get_unaligned_le16(&raid_map->data_disks_per_row);
1151 if (r5or6_blocks_per_row == 0) {
1152 err_msg = "invalid RAID-5 or RAID-6 map";
1160 dev_warn(&ctrl_info->pci_dev->dev,
1161 "logical device %08x%08x %s\n",
1162 *((u32 *)&device->scsi3addr),
1163 *((u32 *)&device->scsi3addr[4]), err_msg);
1168 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1169 struct pqi_scsi_dev *device)
1173 struct raid_map *raid_map;
1175 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1179 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1180 device->scsi3addr, raid_map, sizeof(*raid_map),
1181 0, NULL, NO_TIMEOUT);
1186 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1188 if (raid_map_size > sizeof(*raid_map)) {
1192 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1196 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1197 device->scsi3addr, raid_map, raid_map_size,
1198 0, NULL, NO_TIMEOUT);
		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
1212 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1216 device->raid_map = raid_map;
1226 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1227 struct pqi_scsi_dev *device)
1233 buffer = kmalloc(64, GFP_KERNEL);
1237 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1238 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1242 #define RAID_BYPASS_STATUS 4
1243 #define RAID_BYPASS_CONFIGURED 0x1
1244 #define RAID_BYPASS_ENABLED 0x2
1246 bypass_status = buffer[RAID_BYPASS_STATUS];
1247 device->raid_bypass_configured =
1248 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1249 if (device->raid_bypass_configured &&
1250 (bypass_status & RAID_BYPASS_ENABLED) &&
1251 pqi_get_raid_map(ctrl_info, device) == 0)
1252 device->raid_bypass_enabled = true;
1259 * Use vendor-specific VPD to determine online/offline status of a volume.
1262 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1263 struct pqi_scsi_dev *device)
1267 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1268 bool volume_offline = true;
1270 struct ciss_vpd_logical_volume_status *vpd;
1272 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1276 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1277 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1281 if (vpd->page_code != CISS_VPD_LV_STATUS)
1284 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1285 volume_status) + vpd->page_length;
1286 if (page_length < sizeof(*vpd))
1289 volume_status = vpd->volume_status;
1290 volume_flags = get_unaligned_be32(&vpd->flags);
1291 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1296 device->volume_status = volume_status;
1297 device->volume_offline = volume_offline;
1300 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1301 struct pqi_scsi_dev *device,
1302 struct bmic_identify_physical_device *id_phys)
1306 memset(id_phys, 0, sizeof(*id_phys));
1308 rc = pqi_identify_physical_device(ctrl_info, device,
1309 id_phys, sizeof(*id_phys));
1311 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1315 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1316 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1318 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1319 memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1321 device->box_index = id_phys->box_index;
1322 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1323 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1324 device->queue_depth =
1325 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1326 device->active_path_index = id_phys->active_path_number;
1327 device->path_map = id_phys->redundant_path_present_map;
1328 memcpy(&device->box,
1329 &id_phys->alternate_paths_phys_box_on_port,
1330 sizeof(device->box));
1331 memcpy(&device->phys_connector,
1332 &id_phys->alternate_paths_phys_connector,
1333 sizeof(device->phys_connector));
1334 device->bay = id_phys->phys_bay_in_box;
1339 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1340 struct pqi_scsi_dev *device)
1345 buffer = kmalloc(64, GFP_KERNEL);
1349 /* Send an inquiry to the device to see what it is. */
1350 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1354 scsi_sanitize_inquiry_string(&buffer[8], 8);
1355 scsi_sanitize_inquiry_string(&buffer[16], 16);
1357 device->devtype = buffer[0] & 0x1f;
1358 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1359 memcpy(device->model, &buffer[16], sizeof(device->model));
1361 if (device->devtype == TYPE_DISK) {
1362 if (device->is_external_raid_device) {
1363 device->raid_level = SA_RAID_UNKNOWN;
1364 device->volume_status = CISS_LV_OK;
1365 device->volume_offline = false;
1367 pqi_get_raid_level(ctrl_info, device);
1368 pqi_get_raid_bypass_status(ctrl_info, device);
1369 pqi_get_volume_status(ctrl_info, device);
1379 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1380 struct pqi_scsi_dev *device,
1381 struct bmic_identify_physical_device *id_phys)
1385 if (device->is_expander_smp_device)
1388 if (pqi_is_logical_device(device))
1389 rc = pqi_get_logical_device_info(ctrl_info, device);
1391 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1396 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1397 struct pqi_scsi_dev *device)
1400 static const char unknown_state_str[] =
1401 "Volume is in an unknown state (%u)";
1402 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1404 switch (device->volume_status) {
1406 status = "Volume online";
1408 case CISS_LV_FAILED:
1409 status = "Volume failed";
1411 case CISS_LV_NOT_CONFIGURED:
1412 status = "Volume not configured";
1414 case CISS_LV_DEGRADED:
1415 status = "Volume degraded";
1417 case CISS_LV_READY_FOR_RECOVERY:
1418 status = "Volume ready for recovery operation";
1420 case CISS_LV_UNDERGOING_RECOVERY:
1421 status = "Volume undergoing recovery";
1423 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1424 status = "Wrong physical drive was replaced";
1426 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1427 status = "A physical drive not properly connected";
1429 case CISS_LV_HARDWARE_OVERHEATING:
1430 status = "Hardware is overheating";
1432 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1433 status = "Hardware has overheated";
1435 case CISS_LV_UNDERGOING_EXPANSION:
1436 status = "Volume undergoing expansion";
1438 case CISS_LV_NOT_AVAILABLE:
1439 status = "Volume waiting for transforming volume";
1441 case CISS_LV_QUEUED_FOR_EXPANSION:
1442 status = "Volume queued for expansion";
1444 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1445 status = "Volume disabled due to SCSI ID conflict";
1447 case CISS_LV_EJECTED:
1448 status = "Volume has been ejected";
1450 case CISS_LV_UNDERGOING_ERASE:
1451 status = "Volume undergoing background erase";
1453 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1454 status = "Volume ready for predictive spare rebuild";
1456 case CISS_LV_UNDERGOING_RPI:
1457 status = "Volume undergoing rapid parity initialization";
1459 case CISS_LV_PENDING_RPI:
1460 status = "Volume queued for rapid parity initialization";
1462 case CISS_LV_ENCRYPTED_NO_KEY:
1463 status = "Encrypted volume inaccessible - key not present";
1465 case CISS_LV_UNDERGOING_ENCRYPTION:
1466 status = "Volume undergoing encryption process";
1468 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1469 status = "Volume undergoing encryption re-keying process";
1471 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1472 status = "Volume encrypted but encryption is disabled";
1474 case CISS_LV_PENDING_ENCRYPTION:
1475 status = "Volume pending migration to encrypted state";
1477 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1478 status = "Volume pending encryption rekeying";
1480 case CISS_LV_NOT_SUPPORTED:
1481 status = "Volume not supported on this controller";
1483 case CISS_LV_STATUS_UNAVAILABLE:
1484 status = "Volume status not available";
1487 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1488 unknown_state_str, device->volume_status);
1489 status = unknown_state_buffer;
1493 dev_info(&ctrl_info->pci_dev->dev,
1494 "scsi %d:%d:%d:%d %s\n",
1495 ctrl_info->scsi_host->host_no,
1496 device->bus, device->target, device->lun, status);
1499 static void pqi_rescan_worker(struct work_struct *work)
1501 struct pqi_ctrl_info *ctrl_info;
1503 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1506 pqi_scan_scsi_devices(ctrl_info);
1509 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1510 struct pqi_scsi_dev *device)
1514 if (pqi_is_logical_device(device))
1515 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1516 device->target, device->lun);
1518 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1523 #define PQI_PENDING_IO_TIMEOUT_SECS 20
1525 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1526 struct pqi_scsi_dev *device)
1530 pqi_device_remove_start(device);
	rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
1535 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1536 ctrl_info->scsi_host->host_no, device->bus,
1537 device->target, device->lun,
1538 atomic_read(&device->scsi_cmds_outstanding));
1540 if (pqi_is_logical_device(device))
1541 scsi_remove_device(device->sdev);
1543 pqi_remove_sas_device(device);
1546 /* Assumes the SCSI device list lock is held. */
1548 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1549 int bus, int target, int lun)
1551 struct pqi_scsi_dev *device;
1553 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1554 if (device->bus == bus && device->target == target && device->lun == lun)
1560 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1561 struct pqi_scsi_dev *dev2)
1563 if (dev1->is_physical_device != dev2->is_physical_device)
1566 if (dev1->is_physical_device)
1567 return dev1->wwid == dev2->wwid;
1569 return memcmp(dev1->volume_id, dev2->volume_id,
1570 sizeof(dev1->volume_id)) == 0;
1573 enum pqi_find_result {
1579 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1580 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1582 struct pqi_scsi_dev *device;
1584 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1585 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1586 *matching_device = device;
1587 if (pqi_device_equal(device_to_find, device)) {
1588 if (device_to_find->volume_offline)
1589 return DEVICE_CHANGED;
1592 return DEVICE_CHANGED;
1596 return DEVICE_NOT_FOUND;
1599 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1601 if (device->is_expander_smp_device)
1602 return "Enclosure SMP ";
1604 return scsi_device_type(device->devtype);
1607 #define PQI_DEV_INFO_BUFFER_LENGTH 128
1609 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1610 char *action, struct pqi_scsi_dev *device)
1613 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1615 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1616 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1618 if (device->target_lun_valid)
1619 count += scnprintf(buffer + count,
1620 PQI_DEV_INFO_BUFFER_LENGTH - count,
1625 count += scnprintf(buffer + count,
1626 PQI_DEV_INFO_BUFFER_LENGTH - count,
1629 if (pqi_is_logical_device(device))
1630 count += scnprintf(buffer + count,
1631 PQI_DEV_INFO_BUFFER_LENGTH - count,
1633 *((u32 *)&device->scsi3addr),
1634 *((u32 *)&device->scsi3addr[4]));
1636 count += scnprintf(buffer + count,
1637 PQI_DEV_INFO_BUFFER_LENGTH - count,
1638 " %016llx", device->sas_address);
1640 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1642 pqi_device_type(device),
1646 if (pqi_is_logical_device(device)) {
1647 if (device->devtype == TYPE_DISK)
1648 count += scnprintf(buffer + count,
1649 PQI_DEV_INFO_BUFFER_LENGTH - count,
1650 "SSDSmartPathCap%c En%c %-12s",
1651 device->raid_bypass_configured ? '+' : '-',
1652 device->raid_bypass_enabled ? '+' : '-',
1653 pqi_raid_level_to_string(device->raid_level));
1655 count += scnprintf(buffer + count,
1656 PQI_DEV_INFO_BUFFER_LENGTH - count,
1657 "AIO%c", device->aio_enabled ? '+' : '-');
1658 if (device->devtype == TYPE_DISK ||
1659 device->devtype == TYPE_ZBC)
1660 count += scnprintf(buffer + count,
1661 PQI_DEV_INFO_BUFFER_LENGTH - count,
1662 " qd=%-6d", device->queue_depth);
1665 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
1668 /* Assumes the SCSI device list lock is held. */
1670 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1671 struct pqi_scsi_dev *new_device)
1673 existing_device->devtype = new_device->devtype;
1674 existing_device->device_type = new_device->device_type;
1675 existing_device->bus = new_device->bus;
1676 if (new_device->target_lun_valid) {
1677 existing_device->target = new_device->target;
1678 existing_device->lun = new_device->lun;
1679 existing_device->target_lun_valid = true;
1682 if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
1683 existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
1684 new_device->volume_status == CISS_LV_OK)
1685 existing_device->rescan = true;
1687 /* By definition, the scsi3addr and wwid fields are already the same. */
1689 existing_device->is_physical_device = new_device->is_physical_device;
1690 existing_device->is_external_raid_device =
1691 new_device->is_external_raid_device;
1692 existing_device->is_expander_smp_device =
1693 new_device->is_expander_smp_device;
1694 existing_device->aio_enabled = new_device->aio_enabled;
1695 memcpy(existing_device->vendor, new_device->vendor,
1696 sizeof(existing_device->vendor));
1697 memcpy(existing_device->model, new_device->model,
1698 sizeof(existing_device->model));
1699 existing_device->sas_address = new_device->sas_address;
1700 existing_device->raid_level = new_device->raid_level;
1701 existing_device->queue_depth = new_device->queue_depth;
1702 existing_device->aio_handle = new_device->aio_handle;
1703 existing_device->volume_status = new_device->volume_status;
1704 existing_device->active_path_index = new_device->active_path_index;
1705 existing_device->path_map = new_device->path_map;
1706 existing_device->bay = new_device->bay;
1707 existing_device->box_index = new_device->box_index;
1708 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
1709 existing_device->phy_connected_dev_type =
1710 new_device->phy_connected_dev_type;
1711 memcpy(existing_device->box, new_device->box,
1712 sizeof(existing_device->box));
1713 memcpy(existing_device->phys_connector, new_device->phys_connector,
1714 sizeof(existing_device->phys_connector));
1715 existing_device->offload_to_mirror = 0;
1716 kfree(existing_device->raid_map);
1717 existing_device->raid_map = new_device->raid_map;
1718 existing_device->raid_bypass_configured =
1719 new_device->raid_bypass_configured;
1720 existing_device->raid_bypass_enabled =
1721 new_device->raid_bypass_enabled;
1722 existing_device->device_offline = false;
1724 /* To prevent this from being freed later. */
1725 new_device->raid_map = NULL;
1728 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1731 kfree(device->raid_map);
/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */
1741 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1742 struct pqi_scsi_dev *device)
1744 unsigned long flags;
1746 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1747 list_del(&device->scsi_device_list_entry);
1748 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1750 /* Allow the device structure to be freed later. */
1751 device->keep_device = false;
1754 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1756 if (device->is_expander_smp_device)
1757 return device->sas_port != NULL;
1759 return device->sdev != NULL;
1762 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1763 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1767 unsigned long flags;
1768 enum pqi_find_result find_result;
1769 struct pqi_scsi_dev *device;
1770 struct pqi_scsi_dev *next;
1771 struct pqi_scsi_dev *matching_device;
1772 LIST_HEAD(add_list);
1773 LIST_HEAD(delete_list);
	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */
1782 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1784 /* Assume that all devices in the existing list have gone away. */
1785 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1786 device->device_gone = true;
1788 for (i = 0; i < num_new_devices; i++) {
1789 device = new_device_list[i];
1791 find_result = pqi_scsi_find_entry(ctrl_info, device,
1794 switch (find_result) {
1797 * The newly found device is already in the existing
1800 device->new_device = false;
1801 matching_device->device_gone = false;
1802 pqi_scsi_update_device(matching_device, device);
1804 case DEVICE_NOT_FOUND:
1806 * The newly found device is NOT in the existing device
1809 device->new_device = true;
1811 case DEVICE_CHANGED:
1813 * The original device has gone away and we need to add
1816 device->new_device = true;
1821 /* Process all devices that have gone away. */
1822 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1823 scsi_device_list_entry) {
1824 if (device->device_gone) {
1825 list_del_init(&device->scsi_device_list_entry);
1826 list_add_tail(&device->delete_list_entry, &delete_list);
1830 /* Process all new devices. */
1831 for (i = 0; i < num_new_devices; i++) {
1832 device = new_device_list[i];
1833 if (!device->new_device)
1835 if (device->volume_offline)
1837 list_add_tail(&device->scsi_device_list_entry,
1838 &ctrl_info->scsi_device_list);
1839 list_add_tail(&device->add_list_entry, &add_list);
1840 /* To prevent this device structure from being freed later. */
1841 device->keep_device = true;
1844 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1846 if (pqi_ctrl_in_ofa(ctrl_info))
1847 pqi_ctrl_ofa_done(ctrl_info);
1849 /* Remove all devices that have gone away. */
1850 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
1851 if (device->volume_offline) {
1852 pqi_dev_info(ctrl_info, "offline", device);
1853 pqi_show_volume_status(ctrl_info, device);
1855 list_del(&device->delete_list_entry);
1856 if (pqi_is_device_added(device)) {
1857 pqi_remove_device(ctrl_info, device);
1859 if (!device->volume_offline)
1860 pqi_dev_info(ctrl_info, "removed", device);
1861 pqi_free_device(device);
1866 * Notify the SCSI ML if the queue depth of any existing device has
1869 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1870 scsi_device_list_entry) {
1872 if (device->queue_depth !=
1873 device->advertised_queue_depth) {
1874 device->advertised_queue_depth = device->queue_depth;
1875 scsi_change_queue_depth(device->sdev,
1876 device->advertised_queue_depth);
1878 if (device->rescan) {
1879 scsi_rescan_device(&device->sdev->sdev_gendev);
1880 device->rescan = false;
1885 /* Expose any new devices. */
1886 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1887 if (!pqi_is_device_added(device)) {
1888 rc = pqi_add_device(ctrl_info, device);
1890 pqi_dev_info(ctrl_info, "added", device);
1892 dev_warn(&ctrl_info->pci_dev->dev,
1893 "scsi %d:%d:%d:%d addition failed, device not added\n",
1894 ctrl_info->scsi_host->host_no,
1895 device->bus, device->target,
1897 pqi_fixup_botched_add(ctrl_info, device);
1903 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1906 * Only support the HBA controller itself as a RAID
1907 * controller. If it's a RAID controller other than
1908 * the HBA itself (an external RAID controller, for
1909 * example), we don't support it.
1911 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
1912 !pqi_is_hba_lunid(device->scsi3addr))
1918 static inline bool pqi_skip_device(u8 *scsi3addr)
1920 /* Ignore all masked devices. */
1921 if (MASKED_DEVICE(scsi3addr))
1927 static inline void pqi_mask_device(u8 *scsi3addr)
1929 scsi3addr[3] |= 0xc0;
1932 static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
1934 switch (device->device_type) {
1935 case SA_DEVICE_TYPE_SAS:
1936 case SA_DEVICE_TYPE_EXPANDER_SMP:
1937 case SA_DEVICE_TYPE_SES:
1944 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1946 return !device->is_physical_device ||
1947 !pqi_skip_device(device->scsi3addr);
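
/*
 * Discover devices: issue CISS_REPORT_PHYS and CISS_REPORT_LOG, build a
 * pqi_scsi_dev entry for every reported LUN, gather per-device details, and
 * then hand the resulting list to pqi_update_device_list() to reconcile it
 * with the devices already exposed to the SCSI midlayer.
 */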
1950 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1954 LIST_HEAD(new_device_list_head);
1955 struct report_phys_lun_extended *physdev_list = NULL;
1956 struct report_log_lun_extended *logdev_list = NULL;
1957 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1958 struct report_log_lun_extended_entry *log_lun_ext_entry;
1959 struct bmic_identify_physical_device *id_phys = NULL;
1962 struct pqi_scsi_dev **new_device_list = NULL;
1963 struct pqi_scsi_dev *device;
1964 struct pqi_scsi_dev *next;
1965 unsigned int num_new_devices;
1966 unsigned int num_valid_devices;
1967 bool is_physical_device;
1969 unsigned int physical_index;
1970 unsigned int logical_index;
1971 static char *out_of_memory_msg =
1972 "failed to allocate memory, device discovery stopped";
1974 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1980 get_unaligned_be32(&physdev_list->header.list_length)
1981 / sizeof(physdev_list->lun_entries[0]);
1987 get_unaligned_be32(&logdev_list->header.list_length)
1988 / sizeof(logdev_list->lun_entries[0]);
1992 if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_device_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_device_info() because it's a fairly large buffer.
		 */
1999 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2001 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2007 if (pqi_hide_vsep) {
2008 for (i = num_physicals - 1; i >= 0; i--) {
2009 phys_lun_ext_entry =
2010 &physdev_list->lun_entries[i];
2011 if (CISS_GET_DRIVE_NUMBER(
2012 phys_lun_ext_entry->lunid) ==
2013 PQI_VSEP_CISS_BTL) {
2015 phys_lun_ext_entry->lunid);
2022 num_new_devices = num_physicals + num_logicals;
2024 new_device_list = kmalloc_array(num_new_devices,
2025 sizeof(*new_device_list),
2027 if (!new_device_list) {
2028 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2033 for (i = 0; i < num_new_devices; i++) {
2034 device = kzalloc(sizeof(*device), GFP_KERNEL);
2036 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2041 list_add_tail(&device->new_device_list_entry,
2042 &new_device_list_head);
2046 num_valid_devices = 0;
2050 for (i = 0; i < num_new_devices; i++) {
2052 if ((!pqi_expose_ld_first && i < num_physicals) ||
2053 (pqi_expose_ld_first && i >= num_logicals)) {
2054 is_physical_device = true;
2055 phys_lun_ext_entry =
2056 &physdev_list->lun_entries[physical_index++];
2057 log_lun_ext_entry = NULL;
2058 scsi3addr = phys_lun_ext_entry->lunid;
2060 is_physical_device = false;
2061 phys_lun_ext_entry = NULL;
2063 &logdev_list->lun_entries[logical_index++];
2064 scsi3addr = log_lun_ext_entry->lunid;
2067 if (is_physical_device && pqi_skip_device(scsi3addr))
2071 device = list_next_entry(device, new_device_list_entry);
2073 device = list_first_entry(&new_device_list_head,
2074 struct pqi_scsi_dev, new_device_list_entry);
2076 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2077 device->is_physical_device = is_physical_device;
2078 if (is_physical_device) {
2079 device->device_type = phys_lun_ext_entry->device_type;
2080 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2081 device->is_expander_smp_device = true;
2083 device->is_external_raid_device =
2084 pqi_is_external_raid_addr(scsi3addr);
2087 if (!pqi_is_supported_device(device))
2090 /* Gather information about the device. */
2091 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2092 if (rc == -ENOMEM) {
2093 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2098 if (device->is_physical_device)
2099 dev_warn(&ctrl_info->pci_dev->dev,
2100 "obtaining device info failed, skipping physical device %016llx\n",
2102 &phys_lun_ext_entry->wwid));
2104 dev_warn(&ctrl_info->pci_dev->dev,
2105 "obtaining device info failed, skipping logical device %08x%08x\n",
2106 *((u32 *)&device->scsi3addr),
2107 *((u32 *)&device->scsi3addr[4]));
2112 pqi_assign_bus_target_lun(device);
2114 if (device->is_physical_device) {
2115 device->wwid = phys_lun_ext_entry->wwid;
2116 if ((phys_lun_ext_entry->device_flags &
2117 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2118 phys_lun_ext_entry->aio_handle) {
2119 device->aio_enabled = true;
2120 device->aio_handle =
2121 phys_lun_ext_entry->aio_handle;
2124 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2125 sizeof(device->volume_id));
2128 if (pqi_is_device_with_sas_address(device))
2129 device->sas_address = get_unaligned_be64(&device->wwid);
2131 new_device_list[num_valid_devices++] = device;
2134 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2137 list_for_each_entry_safe(device, next, &new_device_list_head,
2138 new_device_list_entry) {
2139 if (device->keep_device)
2141 list_del(&device->new_device_list_entry);
2142 pqi_free_device(device);
2145 kfree(new_device_list);
2146 kfree(physdev_list);
2153 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2157 if (pqi_ctrl_offline(ctrl_info))
2160 if (!mutex_trylock(&ctrl_info->scan_mutex)) {
2161 pqi_schedule_rescan_worker_delayed(ctrl_info);
2164 rc = pqi_update_scsi_devices(ctrl_info);
2166 pqi_schedule_rescan_worker_delayed(ctrl_info);
2167 mutex_unlock(&ctrl_info->scan_mutex);
2173 static void pqi_scan_start(struct Scsi_Host *shost)
2175 struct pqi_ctrl_info *ctrl_info;
2177 ctrl_info = shost_to_hba(shost);
2178 if (pqi_ctrl_in_ofa(ctrl_info))
2181 pqi_scan_scsi_devices(ctrl_info);
2184 /* Returns TRUE if scan is finished. */
2186 static int pqi_scan_finished(struct Scsi_Host *shost,
2187 unsigned long elapsed_time)
2189 struct pqi_ctrl_info *ctrl_info;
2191 ctrl_info = shost_priv(shost);
2193 return !mutex_is_locked(&ctrl_info->scan_mutex);
2196 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2198 mutex_lock(&ctrl_info->scan_mutex);
2199 mutex_unlock(&ctrl_info->scan_mutex);
2202 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2204 mutex_lock(&ctrl_info->lun_reset_mutex);
2205 mutex_unlock(&ctrl_info->lun_reset_mutex);
2208 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
2210 mutex_lock(&ctrl_info->ofa_mutex);
2211 mutex_unlock(&ctrl_info->ofa_mutex);
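
/*
 * The lock/unlock pairs above are used purely as barriers: taking the mutex
 * only returns once any scan, LUN reset or OFA operation currently holding
 * it has finished.
 */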
2214 static inline void pqi_set_encryption_info(
2215 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2218 u32 volume_blk_size;
2221 * Set the encryption tweak values based on logical block address.
2222 * If the block size is 512, the tweak value is equal to the LBA.
2223 * For other block sizes, tweak value is (LBA * block size) / 512.
2225 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2226 if (volume_blk_size != 512)
2227 first_block = (first_block * volume_blk_size) / 512;
2229 encryption_info->data_encryption_key_index =
2230 get_unaligned_le16(&raid_map->data_encryption_key_index);
2231 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2232 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
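
/*
 * Illustrative example of the tweak math above: with a 4096-byte volume
 * block size, LBA 100 becomes first_block = (100 * 4096) / 512 = 800, so
 * encrypt_tweak_lower is 800 and encrypt_tweak_upper is 0.
 */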
2236 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2239 #define PQI_RAID_BYPASS_INELIGIBLE 1
2241 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2242 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2243 struct pqi_queue_group *queue_group)
2245 struct raid_map *raid_map;
2246 bool is_write = false;
2254 u32 first_row_offset;
2255 u32 last_row_offset;
2260 u32 r5or6_blocks_per_row;
2261 u64 r5or6_first_row;
2263 u32 r5or6_first_row_offset;
2264 u32 r5or6_last_row_offset;
2265 u32 r5or6_first_column;
2266 u32 r5or6_last_column;
2267 u16 data_disks_per_row;
2268 u32 total_disks_per_row;
2269 u16 layout_map_count;
2281 int offload_to_mirror;
2282 struct pqi_encryption_info *encryption_info_ptr;
2283 struct pqi_encryption_info encryption_info;
2284 #if BITS_PER_LONG == 32
2288 /* Check for valid opcode, get LBA and block count. */
2289 switch (scmd->cmnd[0]) {
2294 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2295 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2296 block_cnt = (u32)scmd->cmnd[4];
2304 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2305 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2311 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2312 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2318 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2319 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2322 /* Process via normal I/O path. */
2323 return PQI_RAID_BYPASS_INELIGIBLE;
2326 /* Check for write to non-RAID-0. */
2327 if (is_write && device->raid_level != SA_RAID_0)
2328 return PQI_RAID_BYPASS_INELIGIBLE;
2330 if (unlikely(block_cnt == 0))
2331 return PQI_RAID_BYPASS_INELIGIBLE;
2333 last_block = first_block + block_cnt - 1;
2334 raid_map = device->raid_map;
2336 /* Check for invalid block or wraparound. */
2337 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2338 last_block < first_block)
2339 return PQI_RAID_BYPASS_INELIGIBLE;
2341 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2342 strip_size = get_unaligned_le16(&raid_map->strip_size);
2343 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2345 /* Calculate stripe information for the request. */
2346 blocks_per_row = data_disks_per_row * strip_size;
2347 #if BITS_PER_LONG == 32
2348 tmpdiv = first_block;
2349 do_div(tmpdiv, blocks_per_row);
2351 tmpdiv = last_block;
2352 do_div(tmpdiv, blocks_per_row);
2354 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2355 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2356 tmpdiv = first_row_offset;
2357 do_div(tmpdiv, strip_size);
2358 first_column = tmpdiv;
2359 tmpdiv = last_row_offset;
2360 do_div(tmpdiv, strip_size);
2361 last_column = tmpdiv;
2363 first_row = first_block / blocks_per_row;
2364 last_row = last_block / blocks_per_row;
2365 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2366 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2367 first_column = first_row_offset / strip_size;
2368 last_column = last_row_offset / strip_size;
2371 /* If this isn't a single row/column then give to the controller. */
2372 if (first_row != last_row || first_column != last_column)
2373 return PQI_RAID_BYPASS_INELIGIBLE;
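/*
 * Illustrative example for the check above (values assumed, not from the
 * original source): with strip_size = 128 and data_disks_per_row = 4,
 * blocks_per_row = 512.  An 8-block I/O at LBA 1000 maps to row 1
 * (1000 / 512), row offset 488, column 3 (488 / 128); the last block
 * (LBA 1007) lands in the same row and column, so the request remains
 * eligible for bypass.
 */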
2375 /* Proceeding with driver mapping. */
2376 total_disks_per_row = data_disks_per_row +
2377 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2378 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2379 get_unaligned_le16(&raid_map->row_cnt);
2380 map_index = (map_row * total_disks_per_row) + first_column;
2383 if (device->raid_level == SA_RAID_1) {
2384 if (device->offload_to_mirror)
2385 map_index += data_disks_per_row;
2386 device->offload_to_mirror = !device->offload_to_mirror;
2387 } else if (device->raid_level == SA_RAID_ADM) {
2390 * Handles N-way mirrors (R1-ADM) and R10 with # of drives divisible by 3.
2393 offload_to_mirror = device->offload_to_mirror;
2394 if (offload_to_mirror == 0) {
2395 /* use physical disk in the first mirrored group. */
2396 map_index %= data_disks_per_row;
2400 * Determine mirror group that map_index indicates.
2403 current_group = map_index / data_disks_per_row;
2405 if (offload_to_mirror != current_group) {
2407 layout_map_count - 1) {
2409 * Select raid index from next group.
2412 map_index += data_disks_per_row;
2416 * Select raid index from first group.
2419 map_index %= data_disks_per_row;
2423 } while (offload_to_mirror != current_group);
2426 /* Set mirror group to use next time. */
2428 offload_to_mirror = (offload_to_mirror >= layout_map_count - 1) ?
2429 0 : offload_to_mirror + 1;
2430 device->offload_to_mirror = offload_to_mirror;
2432 * Avoid direct use of device->offload_to_mirror within this
2433 * function since multiple threads might simultaneously
2434 * increment it beyond the range of device->layout_map_count - 1.
2436 } else if ((device->raid_level == SA_RAID_5 ||
2437 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2439 /* Verify first and last block are in same RAID group */
2440 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2441 stripesize = r5or6_blocks_per_row * layout_map_count;
2442 #if BITS_PER_LONG == 32
2443 tmpdiv = first_block;
2444 first_group = do_div(tmpdiv, stripesize);
2445 tmpdiv = first_group;
2446 do_div(tmpdiv, r5or6_blocks_per_row);
2447 first_group = tmpdiv;
2448 tmpdiv = last_block;
2449 last_group = do_div(tmpdiv, stripesize);
2450 tmpdiv = last_group;
2451 do_div(tmpdiv, r5or6_blocks_per_row);
2452 last_group = tmpdiv;
2454 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2455 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2457 if (first_group != last_group)
2458 return PQI_RAID_BYPASS_INELIGIBLE;
2460 /* Verify request is in a single row of RAID 5/6 */
2461 #if BITS_PER_LONG == 32
2462 tmpdiv = first_block;
2463 do_div(tmpdiv, stripesize);
2464 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2465 tmpdiv = last_block;
2466 do_div(tmpdiv, stripesize);
2467 r5or6_last_row = r0_last_row = tmpdiv;
2469 first_row = r5or6_first_row = r0_first_row =
2470 first_block / stripesize;
2471 r5or6_last_row = r0_last_row = last_block / stripesize;
2473 if (r5or6_first_row != r5or6_last_row)
2474 return PQI_RAID_BYPASS_INELIGIBLE;
2476 /* Verify request is in a single column */
2477 #if BITS_PER_LONG == 32
2478 tmpdiv = first_block;
2479 first_row_offset = do_div(tmpdiv, stripesize);
2480 tmpdiv = first_row_offset;
2481 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2482 r5or6_first_row_offset = first_row_offset;
2483 tmpdiv = last_block;
2484 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2485 tmpdiv = r5or6_last_row_offset;
2486 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2487 tmpdiv = r5or6_first_row_offset;
2488 do_div(tmpdiv, strip_size);
2489 first_column = r5or6_first_column = tmpdiv;
2490 tmpdiv = r5or6_last_row_offset;
2491 do_div(tmpdiv, strip_size);
2492 r5or6_last_column = tmpdiv;
2494 first_row_offset = r5or6_first_row_offset =
2495 (u32)((first_block % stripesize) %
2496 r5or6_blocks_per_row);
2498 r5or6_last_row_offset =
2499 (u32)((last_block % stripesize) %
2500 r5or6_blocks_per_row);
2502 first_column = r5or6_first_row_offset / strip_size;
2503 r5or6_first_column = first_column;
2504 r5or6_last_column = r5or6_last_row_offset / strip_size;
2506 if (r5or6_first_column != r5or6_last_column)
2507 return PQI_RAID_BYPASS_INELIGIBLE;
2509 /* Request is eligible */
2511 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2512 get_unaligned_le16(&raid_map->row_cnt);
2514 map_index = (first_group *
2515 (get_unaligned_le16(&raid_map->row_cnt) *
2516 total_disks_per_row)) +
2517 (map_row * total_disks_per_row) + first_column;
2520 aio_handle = raid_map->disk_data[map_index].aio_handle;
2521 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2522 first_row * strip_size +
2523 (first_row_offset - first_column * strip_size);
2524 disk_block_cnt = block_cnt;
2526 /* Handle differing logical/physical block sizes. */
2527 if (raid_map->phys_blk_shift) {
2528 disk_block <<= raid_map->phys_blk_shift;
2529 disk_block_cnt <<= raid_map->phys_blk_shift;
2532 if (unlikely(disk_block_cnt > 0xffff))
2533 return PQI_RAID_BYPASS_INELIGIBLE;
2535 /* Build the new CDB for the physical disk I/O. */
2536 if (disk_block > 0xffffffff) {
2537 cdb[0] = is_write ? WRITE_16 : READ_16;
2539 put_unaligned_be64(disk_block, &cdb[2]);
2540 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2545 cdb[0] = is_write ? WRITE_10 : READ_10;
2547 put_unaligned_be32((u32)disk_block, &cdb[2]);
2549 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2554 if (get_unaligned_le16(&raid_map->flags) &
2555 RAID_MAP_ENCRYPTION_ENABLED) {
2556 pqi_set_encryption_info(&encryption_info, raid_map, first_block);
2558 encryption_info_ptr = &encryption_info;
2560 encryption_info_ptr = NULL;
2563 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2564 cdb, cdb_length, queue_group, encryption_info_ptr, true);
2567 #define PQI_STATUS_IDLE 0x0
2569 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2570 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2572 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2573 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2574 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2575 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2576 #define PQI_DEVICE_STATE_ERROR 0x4
2578 #define PQI_MODE_READY_TIMEOUT_SECS 30
2579 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
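/*
 * Note (added for clarity): the wait below polls three conditions in order -
 * the PQI device signature in the register block, a function and status code
 * of PQI_STATUS_IDLE, and a device status of
 * PQI_DEVICE_STATE_ALL_REGISTERS_READY - sleeping
 * PQI_MODE_READY_POLL_INTERVAL_MSECS between polls and giving up after
 * PQI_MODE_READY_TIMEOUT_SECS.
 */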
2581 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2583 struct pqi_device_registers __iomem *pqi_registers;
2584 unsigned long timeout;
2588 pqi_registers = ctrl_info->pqi_registers;
2589 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2592 signature = readq(&pqi_registers->signature);
2593 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2594 sizeof(signature)) == 0)
2596 if (time_after(jiffies, timeout)) {
2597 dev_err(&ctrl_info->pci_dev->dev,
2598 "timed out waiting for PQI signature\n");
2601 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2605 status = readb(&pqi_registers->function_and_status_code);
2606 if (status == PQI_STATUS_IDLE)
2608 if (time_after(jiffies, timeout)) {
2609 dev_err(&ctrl_info->pci_dev->dev,
2610 "timed out waiting for PQI IDLE\n");
2613 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2617 if (readl(&pqi_registers->device_status) ==
2618 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2620 if (time_after(jiffies, timeout)) {
2621 dev_err(&ctrl_info->pci_dev->dev,
2622 "timed out waiting for PQI all registers ready\n");
2625 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2631 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2633 struct pqi_scsi_dev *device;
2635 device = io_request->scmd->device->hostdata;
2636 device->raid_bypass_enabled = false;
2637 device->aio_enabled = false;
2640 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2642 struct pqi_ctrl_info *ctrl_info;
2643 struct pqi_scsi_dev *device;
2645 device = sdev->hostdata;
2646 if (device->device_offline)
2647 return;
2649 device->device_offline = true;
2650 ctrl_info = shost_to_hba(sdev->host);
2651 pqi_schedule_rescan_worker(ctrl_info);
2652 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2653 path, ctrl_info->scsi_host->host_no, device->bus,
2654 device->target, device->lun);
2657 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2661 struct scsi_cmnd *scmd;
2662 struct pqi_raid_error_info *error_info;
2663 size_t sense_data_length;
2666 struct scsi_sense_hdr sshdr;
2668 scmd = io_request->scmd;
2672 error_info = io_request->error_info;
2673 scsi_status = error_info->status;
2676 switch (error_info->data_out_result) {
2677 case PQI_DATA_IN_OUT_GOOD:
2679 case PQI_DATA_IN_OUT_UNDERFLOW:
2681 xfer_count = get_unaligned_le32(&error_info->data_out_transferred);
2682 residual_count = scsi_bufflen(scmd) - xfer_count;
2683 scsi_set_resid(scmd, residual_count);
2684 if (xfer_count < scmd->underflow)
2685 host_byte = DID_SOFT_ERROR;
2687 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2688 case PQI_DATA_IN_OUT_ABORTED:
2689 host_byte = DID_ABORT;
2691 case PQI_DATA_IN_OUT_TIMEOUT:
2692 host_byte = DID_TIME_OUT;
2694 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2695 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2696 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2697 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2698 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2699 case PQI_DATA_IN_OUT_ERROR:
2700 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2701 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2702 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2703 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2704 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2705 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2706 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2707 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2708 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2709 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2711 host_byte = DID_ERROR;
2715 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2716 if (sense_data_length == 0)
2718 sense_data_length = get_unaligned_le16(&error_info->response_data_length);
2719 if (sense_data_length) {
2720 if (sense_data_length > sizeof(error_info->data))
2721 sense_data_length = sizeof(error_info->data);
2723 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2724 scsi_normalize_sense(error_info->data,
2725 sense_data_length, &sshdr) &&
2726 sshdr.sense_key == HARDWARE_ERROR &&
2727 sshdr.asc == 0x3e) {
2728 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2729 struct pqi_scsi_dev *device = scmd->device->hostdata;
2731 switch (sshdr.ascq) {
2732 case 0x1: /* LOGICAL UNIT FAILURE */
2733 if (printk_ratelimit())
2734 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2735 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2736 pqi_take_device_offline(scmd->device, "RAID");
2737 host_byte = DID_NO_CONNECT;
2740 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
2741 if (printk_ratelimit())
2742 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2743 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2748 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2749 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2750 memcpy(scmd->sense_buffer, error_info->data,
2754 scmd->result = scsi_status;
2755 set_host_byte(scmd, host_byte);
2758 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2762 struct scsi_cmnd *scmd;
2763 struct pqi_aio_error_info *error_info;
2764 size_t sense_data_length;
2767 bool device_offline;
2769 scmd = io_request->scmd;
2770 error_info = io_request->error_info;
2772 sense_data_length = 0;
2773 device_offline = false;
2775 switch (error_info->service_response) {
2776 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2777 scsi_status = error_info->status;
2779 case PQI_AIO_SERV_RESPONSE_FAILURE:
2780 switch (error_info->status) {
2781 case PQI_AIO_STATUS_IO_ABORTED:
2782 scsi_status = SAM_STAT_TASK_ABORTED;
2784 case PQI_AIO_STATUS_UNDERRUN:
2785 scsi_status = SAM_STAT_GOOD;
2786 residual_count = get_unaligned_le32(
2787 &error_info->residual_count);
2788 scsi_set_resid(scmd, residual_count);
2789 xfer_count = scsi_bufflen(scmd) - residual_count;
2790 if (xfer_count < scmd->underflow)
2791 host_byte = DID_SOFT_ERROR;
2793 case PQI_AIO_STATUS_OVERRUN:
2794 scsi_status = SAM_STAT_GOOD;
2796 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2797 pqi_aio_path_disabled(io_request);
2798 scsi_status = SAM_STAT_GOOD;
2799 io_request->status = -EAGAIN;
2801 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2802 case PQI_AIO_STATUS_INVALID_DEVICE:
2803 if (!io_request->raid_bypass) {
2804 device_offline = true;
2805 pqi_take_device_offline(scmd->device, "AIO");
2806 host_byte = DID_NO_CONNECT;
2808 scsi_status = SAM_STAT_CHECK_CONDITION;
2810 case PQI_AIO_STATUS_IO_ERROR:
2812 scsi_status = SAM_STAT_CHECK_CONDITION;
2816 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2817 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2818 scsi_status = SAM_STAT_GOOD;
2820 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2821 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2823 scsi_status = SAM_STAT_CHECK_CONDITION;
2827 if (error_info->data_present) {
2829 sense_data_length = get_unaligned_le16(&error_info->data_length);
2830 if (sense_data_length) {
2831 if (sense_data_length > sizeof(error_info->data))
2832 sense_data_length = sizeof(error_info->data);
2833 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2834 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2835 memcpy(scmd->sense_buffer, error_info->data,
2840 if (device_offline && sense_data_length == 0)
2841 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2844 scmd->result = scsi_status;
2845 set_host_byte(scmd, host_byte);
2848 static void pqi_process_io_error(unsigned int iu_type,
2849 struct pqi_io_request *io_request)
2852 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2853 pqi_process_raid_io_error(io_request);
2855 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2856 pqi_process_aio_io_error(io_request);
2861 static int pqi_interpret_task_management_response(
2862 struct pqi_task_management_response *response)
2866 switch (response->response_code) {
2867 case SOP_TMF_COMPLETE:
2868 case SOP_TMF_FUNCTION_SUCCEEDED:
2871 case SOP_TMF_REJECTED:
2882 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
2884 pqi_take_ctrl_offline(ctrl_info);
2887 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
2892 struct pqi_io_request *io_request;
2893 struct pqi_io_response *response;
2897 oq_ci = queue_group->oq_ci_copy;
2900 oq_pi = readl(queue_group->oq_pi);
2901 if (oq_pi >= ctrl_info->num_elements_per_oq) {
2902 pqi_invalid_response(ctrl_info);
2903 dev_err(&ctrl_info->pci_dev->dev,
2904 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
2905 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
2912 response = queue_group->oq_element_array +
2913 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2915 request_id = get_unaligned_le16(&response->request_id);
2916 if (request_id >= ctrl_info->max_io_slots) {
2917 pqi_invalid_response(ctrl_info);
2918 dev_err(&ctrl_info->pci_dev->dev,
2919 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
2920 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
2924 io_request = &ctrl_info->io_request_pool[request_id];
2925 if (atomic_read(&io_request->refcount) == 0) {
2926 pqi_invalid_response(ctrl_info);
2927 dev_err(&ctrl_info->pci_dev->dev,
2928 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
2929 request_id, oq_pi, oq_ci);
2933 switch (response->header.iu_type) {
2934 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2935 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2936 if (io_request->scmd)
2937 io_request->scmd->result = 0;
2939 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2941 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2942 io_request->status =
2944 &((struct pqi_vendor_general_response *)
2947 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2948 io_request->status =
2949 pqi_interpret_task_management_response(
2952 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2953 pqi_aio_path_disabled(io_request);
2954 io_request->status = -EAGAIN;
2956 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2957 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2958 io_request->error_info = ctrl_info->error_buffer +
2959 (get_unaligned_le16(&response->error_index) *
2960 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2961 pqi_process_io_error(response->header.iu_type, io_request);
2964 pqi_invalid_response(ctrl_info);
2965 dev_err(&ctrl_info->pci_dev->dev,
2966 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
2967 response->header.iu_type, oq_pi, oq_ci);
2971 io_request->io_complete_callback(io_request, io_request->context);
2974 * Note that the I/O request structure CANNOT BE TOUCHED after
2975 * returning from the I/O completion callback!
2977 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2980 if (num_responses) {
2981 queue_group->oq_ci_copy = oq_ci;
2982 writel(oq_ci, queue_group->oq_ci);
2985 return num_responses;
2988 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2989 unsigned int ci, unsigned int elements_in_queue)
2991 unsigned int num_elements_used;
2994 num_elements_used = pi - ci;
2996 num_elements_used = elements_in_queue - ci + pi;
2998 return elements_in_queue - num_elements_used - 1;
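/*
 * Illustrative example for the calculation above (values assumed): with
 * elements_in_queue = 8, pi = 2 and ci = 6 (producer has wrapped),
 * num_elements_used = 8 - 6 + 2 = 4, so 3 elements are reported free; one
 * slot is always left unused so a full queue can be told from an empty one.
 */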
3001 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3002 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3006 unsigned long flags;
3008 struct pqi_queue_group *queue_group;
3010 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3011 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3014 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3016 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3017 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3019 if (pqi_num_elements_free(iq_pi, iq_ci,
3020 ctrl_info->num_elements_per_iq))
3023 spin_unlock_irqrestore(
3024 &queue_group->submit_lock[RAID_PATH], flags);
3026 if (pqi_ctrl_offline(ctrl_info))
3030 next_element = queue_group->iq_element_array[RAID_PATH] +
3031 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3033 memcpy(next_element, iu, iu_length);
3035 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3036 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3039 * This write notifies the controller that an IU is available to be processed.
3042 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3044 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3047 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3048 struct pqi_event *event)
3050 struct pqi_event_acknowledge_request request;
3052 memset(&request, 0, sizeof(request));
3054 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3055 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3056 &request.header.iu_length);
3057 request.event_type = event->event_type;
3058 request.event_id = event->event_id;
3059 request.additional_event_id = event->additional_event_id;
3061 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3064 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3065 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3067 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3068 struct pqi_ctrl_info *ctrl_info)
3070 unsigned long timeout;
3073 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3076 status = pqi_read_soft_reset_status(ctrl_info);
3077 if (status & PQI_SOFT_RESET_INITIATE)
3078 return RESET_INITIATE_DRIVER;
3080 if (status & PQI_SOFT_RESET_ABORT)
3083 if (time_after(jiffies, timeout)) {
3084 dev_err(&ctrl_info->pci_dev->dev,
3085 "timed out waiting for soft reset status\n");
3086 return RESET_TIMEDOUT;
3089 if (!sis_is_firmware_running(ctrl_info))
3090 return RESET_NORESPONSE;
3092 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3096 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3097 enum pqi_soft_reset_status reset_status)
3101 switch (reset_status) {
3102 case RESET_INITIATE_DRIVER:
3103 case RESET_TIMEDOUT:
3104 dev_info(&ctrl_info->pci_dev->dev,
3105 "resetting controller %u\n", ctrl_info->ctrl_id);
3106 sis_soft_reset(ctrl_info);
3108 case RESET_INITIATE_FIRMWARE:
3109 rc = pqi_ofa_ctrl_restart(ctrl_info);
3110 pqi_ofa_free_host_buffer(ctrl_info);
3111 dev_info(&ctrl_info->pci_dev->dev,
3112 "Online Firmware Activation for controller %u: %s\n",
3113 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3116 pqi_ofa_ctrl_unquiesce(ctrl_info);
3117 dev_info(&ctrl_info->pci_dev->dev,
3118 "Online Firmware Activation for controller %u: %s\n",
3119 ctrl_info->ctrl_id, "ABORTED");
3121 case RESET_NORESPONSE:
3122 pqi_ofa_free_host_buffer(ctrl_info);
3123 pqi_take_ctrl_offline(ctrl_info);
3128 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3129 struct pqi_event *event)
3132 enum pqi_soft_reset_status status;
3134 event_id = get_unaligned_le16(&event->event_id);
3136 mutex_lock(&ctrl_info->ofa_mutex);
3138 if (event_id == PQI_EVENT_OFA_QUIESCE) {
3139 dev_info(&ctrl_info->pci_dev->dev,
3140 "Received Online Firmware Activation quiesce event for controller %u\n",
3141 ctrl_info->ctrl_id);
3142 pqi_ofa_ctrl_quiesce(ctrl_info);
3143 pqi_acknowledge_event(ctrl_info, event);
3144 if (ctrl_info->soft_reset_handshake_supported) {
3145 status = pqi_poll_for_soft_reset_status(ctrl_info);
3146 pqi_process_soft_reset(ctrl_info, status);
3148 pqi_process_soft_reset(ctrl_info,
3149 RESET_INITIATE_FIRMWARE);
3152 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3153 pqi_acknowledge_event(ctrl_info, event);
3154 pqi_ofa_setup_host_buffer(ctrl_info,
3155 le32_to_cpu(event->ofa_bytes_requested));
3156 pqi_ofa_host_memory_update(ctrl_info);
3157 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3158 pqi_ofa_free_host_buffer(ctrl_info);
3159 pqi_acknowledge_event(ctrl_info, event);
3160 dev_info(&ctrl_info->pci_dev->dev,
3161 "Online Firmware Activation(%u) cancel reason : %u\n",
3162 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3165 mutex_unlock(&ctrl_info->ofa_mutex);
3168 static void pqi_event_worker(struct work_struct *work)
3171 struct pqi_ctrl_info *ctrl_info;
3172 struct pqi_event *event;
3174 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3176 pqi_ctrl_busy(ctrl_info);
3177 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
3178 if (pqi_ctrl_offline(ctrl_info))
3181 pqi_schedule_rescan_worker_delayed(ctrl_info);
3183 event = ctrl_info->events;
3184 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3185 if (event->pending) {
3186 event->pending = false;
3187 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3188 pqi_ctrl_unbusy(ctrl_info);
3189 pqi_ofa_process_event(ctrl_info, event);
3192 pqi_acknowledge_event(ctrl_info, event);
3198 pqi_ctrl_unbusy(ctrl_info);
3201 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
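/*
 * Note (added for clarity): the handler below fires every
 * PQI_HEARTBEAT_TIMER_INTERVAL.  The controller is declared dead only when
 * both the driver's interrupt count and the firmware's heartbeat counter are
 * unchanged since the previous tick; otherwise the saved values are refreshed
 * and the timer is re-armed.
 */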
3203 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3206 u32 heartbeat_count;
3207 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3210 pqi_check_ctrl_health(ctrl_info);
3211 if (pqi_ctrl_offline(ctrl_info))
3214 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3215 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3217 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3218 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3219 dev_err(&ctrl_info->pci_dev->dev,
3220 "no heartbeat detected - last heartbeat count: %u\n",
3222 pqi_take_ctrl_offline(ctrl_info);
3226 ctrl_info->previous_num_interrupts = num_interrupts;
3229 ctrl_info->previous_heartbeat_count = heartbeat_count;
3230 mod_timer(&ctrl_info->heartbeat_timer,
3231 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3234 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3236 if (!ctrl_info->heartbeat_counter)
3239 ctrl_info->previous_num_interrupts =
3240 atomic_read(&ctrl_info->num_interrupts);
3241 ctrl_info->previous_heartbeat_count =
3242 pqi_read_heartbeat_counter(ctrl_info);
3244 ctrl_info->heartbeat_timer.expires =
3245 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3246 add_timer(&ctrl_info->heartbeat_timer);
3249 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3251 del_timer_sync(&ctrl_info->heartbeat_timer);
3254 static inline int pqi_event_type_to_event_index(unsigned int event_type)
3258 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3259 if (event_type == pqi_supported_event_types[index])
3265 static inline bool pqi_is_supported_event(unsigned int event_type)
3267 return pqi_event_type_to_event_index(event_type) != -1;
3270 static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3271 struct pqi_event_response *response)
3275 event_id = get_unaligned_le16(&event->event_id);
3277 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3278 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3279 event->ofa_bytes_requested =
3280 response->data.ofa_memory_allocation.bytes_requested;
3281 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3282 event->ofa_cancel_reason =
3283 response->data.ofa_cancelled.reason;
3288 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3293 struct pqi_event_queue *event_queue;
3294 struct pqi_event_response *response;
3295 struct pqi_event *event;
3298 event_queue = &ctrl_info->event_queue;
3300 oq_ci = event_queue->oq_ci_copy;
3303 oq_pi = readl(event_queue->oq_pi);
3304 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3305 pqi_invalid_response(ctrl_info);
3306 dev_err(&ctrl_info->pci_dev->dev,
3307 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3308 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3316 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3319 event_index = pqi_event_type_to_event_index(response->event_type);
3321 if (event_index >= 0 && response->request_acknowledge) {
3322 event = &ctrl_info->events[event_index];
3323 event->pending = true;
3324 event->event_type = response->event_type;
3325 event->event_id = response->event_id;
3326 event->additional_event_id = response->additional_event_id;
3327 if (event->event_type == PQI_EVENT_TYPE_OFA)
3328 pqi_ofa_capture_event_payload(event, response);
3331 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3335 event_queue->oq_ci_copy = oq_ci;
3336 writel(oq_ci, event_queue->oq_ci);
3337 schedule_work(&ctrl_info->event_work);
3343 #define PQI_LEGACY_INTX_MASK 0x1
3345 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3349 struct pqi_device_registers __iomem *pqi_registers;
3350 volatile void __iomem *register_addr;
3352 pqi_registers = ctrl_info->pqi_registers;
3355 register_addr = &pqi_registers->legacy_intx_mask_clear;
3357 register_addr = &pqi_registers->legacy_intx_mask_set;
3359 intx_mask = readl(register_addr);
3360 intx_mask |= PQI_LEGACY_INTX_MASK;
3361 writel(intx_mask, register_addr);
3364 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3365 enum pqi_irq_mode new_mode)
3367 switch (ctrl_info->irq_mode) {
3373 pqi_configure_legacy_intx(ctrl_info, true);
3374 sis_enable_intx(ctrl_info);
3383 pqi_configure_legacy_intx(ctrl_info, false);
3384 sis_enable_msix(ctrl_info);
3389 pqi_configure_legacy_intx(ctrl_info, false);
3396 sis_enable_msix(ctrl_info);
3399 pqi_configure_legacy_intx(ctrl_info, true);
3400 sis_enable_intx(ctrl_info);
3408 ctrl_info->irq_mode = new_mode;
3411 #define PQI_LEGACY_INTX_PENDING 0x1
3413 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3418 switch (ctrl_info->irq_mode) {
3424 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3425 if (intx_status & PQI_LEGACY_INTX_PENDING)
3439 static irqreturn_t pqi_irq_handler(int irq, void *data)
3441 struct pqi_ctrl_info *ctrl_info;
3442 struct pqi_queue_group *queue_group;
3443 int num_io_responses_handled;
3444 int num_events_handled;
3447 ctrl_info = queue_group->ctrl_info;
3449 if (!pqi_is_valid_irq(ctrl_info))
3452 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3453 if (num_io_responses_handled < 0)
3456 if (irq == ctrl_info->event_irq) {
3457 num_events_handled = pqi_process_event_intr(ctrl_info);
3458 if (num_events_handled < 0)
3461 num_events_handled = 0;
3464 if (num_io_responses_handled + num_events_handled > 0)
3465 atomic_inc(&ctrl_info->num_interrupts);
3467 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3468 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3474 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3476 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3480 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3482 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3483 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3484 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3486 dev_err(&pci_dev->dev,
3487 "irq %u init failed with error %d\n",
3488 pci_irq_vector(pci_dev, i), rc);
3491 ctrl_info->num_msix_vectors_initialized++;
3497 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3501 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3502 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3503 &ctrl_info->queue_groups[i]);
3505 ctrl_info->num_msix_vectors_initialized = 0;
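/*
 * Note (added for clarity): pci_alloc_irq_vectors() below asks for one MSI-X
 * vector per queue group (at least PQI_MIN_MSIX_VECTORS) with
 * PCI_IRQ_AFFINITY so the vectors are spread across CPUs; it may legitimately
 * return fewer vectors than requested, and the returned count becomes
 * num_msix_vectors_enabled.
 */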
3508 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3510 int num_vectors_enabled;
3512 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3513 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3514 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3515 if (num_vectors_enabled < 0) {
3516 dev_err(&ctrl_info->pci_dev->dev,
3517 "MSI-X init failed with error %d\n",
3518 num_vectors_enabled);
3519 return num_vectors_enabled;
3522 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3523 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3527 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3529 if (ctrl_info->num_msix_vectors_enabled) {
3530 pci_free_irq_vectors(ctrl_info->pci_dev);
3531 ctrl_info->num_msix_vectors_enabled = 0;
3535 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3538 size_t alloc_length;
3539 size_t element_array_length_per_iq;
3540 size_t element_array_length_per_oq;
3541 void *element_array;
3542 void __iomem *next_queue_index;
3543 void *aligned_pointer;
3544 unsigned int num_inbound_queues;
3545 unsigned int num_outbound_queues;
3546 unsigned int num_queue_indexes;
3547 struct pqi_queue_group *queue_group;
3549 element_array_length_per_iq =
3550 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3551 ctrl_info->num_elements_per_iq;
3552 element_array_length_per_oq =
3553 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3554 ctrl_info->num_elements_per_oq;
3555 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3556 num_outbound_queues = ctrl_info->num_queue_groups;
3557 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
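/*
 * Note (added for clarity): each queue group needs two inbound queues (RAID
 * and AIO submission) and one outbound queue, hence num_queue_groups * 2
 * inbound and num_queue_groups outbound element arrays, plus one extra
 * outbound array for the event queue.  The index count is
 * num_queue_groups * 3 + 1: per group an iq_ci for each inbound queue and an
 * oq_pi for the outbound queue, plus the event queue's oq_pi.
 */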
3559 aligned_pointer = NULL;
3561 for (i = 0; i < num_inbound_queues; i++) {
3562 aligned_pointer = PTR_ALIGN(aligned_pointer,
3563 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3564 aligned_pointer += element_array_length_per_iq;
3567 for (i = 0; i < num_outbound_queues; i++) {
3568 aligned_pointer = PTR_ALIGN(aligned_pointer,
3569 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3570 aligned_pointer += element_array_length_per_oq;
3573 aligned_pointer = PTR_ALIGN(aligned_pointer,
3574 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3575 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3576 PQI_EVENT_OQ_ELEMENT_LENGTH;
3578 for (i = 0; i < num_queue_indexes; i++) {
3579 aligned_pointer = PTR_ALIGN(aligned_pointer,
3580 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3581 aligned_pointer += sizeof(pqi_index_t);
3584 alloc_length = (size_t)aligned_pointer +
3585 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3587 alloc_length += PQI_EXTRA_SGL_MEMORY;
3589 ctrl_info->queue_memory_base =
3590 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3591 &ctrl_info->queue_memory_base_dma_handle,
3594 if (!ctrl_info->queue_memory_base)
3597 ctrl_info->queue_memory_length = alloc_length;
3599 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3600 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3602 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3603 queue_group = &ctrl_info->queue_groups[i];
3604 queue_group->iq_element_array[RAID_PATH] = element_array;
3605 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3606 ctrl_info->queue_memory_base_dma_handle +
3607 (element_array - ctrl_info->queue_memory_base);
3608 element_array += element_array_length_per_iq;
3609 element_array = PTR_ALIGN(element_array,
3610 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3611 queue_group->iq_element_array[AIO_PATH] = element_array;
3612 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3613 ctrl_info->queue_memory_base_dma_handle +
3614 (element_array - ctrl_info->queue_memory_base);
3615 element_array += element_array_length_per_iq;
3616 element_array = PTR_ALIGN(element_array,
3617 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3620 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3621 queue_group = &ctrl_info->queue_groups[i];
3622 queue_group->oq_element_array = element_array;
3623 queue_group->oq_element_array_bus_addr =
3624 ctrl_info->queue_memory_base_dma_handle +
3625 (element_array - ctrl_info->queue_memory_base);
3626 element_array += element_array_length_per_oq;
3627 element_array = PTR_ALIGN(element_array,
3628 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3631 ctrl_info->event_queue.oq_element_array = element_array;
3632 ctrl_info->event_queue.oq_element_array_bus_addr =
3633 ctrl_info->queue_memory_base_dma_handle +
3634 (element_array - ctrl_info->queue_memory_base);
3635 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3636 PQI_EVENT_OQ_ELEMENT_LENGTH;
3638 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3639 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3641 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3642 queue_group = &ctrl_info->queue_groups[i];
3643 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3644 queue_group->iq_ci_bus_addr[RAID_PATH] =
3645 ctrl_info->queue_memory_base_dma_handle +
3647 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3648 next_queue_index += sizeof(pqi_index_t);
3649 next_queue_index = PTR_ALIGN(next_queue_index,
3650 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3651 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3652 queue_group->iq_ci_bus_addr[AIO_PATH] =
3653 ctrl_info->queue_memory_base_dma_handle +
3655 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3656 next_queue_index += sizeof(pqi_index_t);
3657 next_queue_index = PTR_ALIGN(next_queue_index,
3658 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3659 queue_group->oq_pi = next_queue_index;
3660 queue_group->oq_pi_bus_addr =
3661 ctrl_info->queue_memory_base_dma_handle +
3663 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3664 next_queue_index += sizeof(pqi_index_t);
3665 next_queue_index = PTR_ALIGN(next_queue_index,
3666 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3669 ctrl_info->event_queue.oq_pi = next_queue_index;
3670 ctrl_info->event_queue.oq_pi_bus_addr =
3671 ctrl_info->queue_memory_base_dma_handle +
3673 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3678 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3681 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3682 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3685 * Initialize the backpointers to the controller structure in
3686 * each operational queue group structure.
3688 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3689 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3692 * Assign IDs to all operational queues. Note that the IDs
3693 * assigned to operational IQs are independent of the IDs
3694 * assigned to operational OQs.
3696 ctrl_info->event_queue.oq_id = next_oq_id++;
3697 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3698 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3699 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3700 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3704 * Assign MSI-X table entry indexes to all queues. Note that the
3705 * interrupt for the event queue is shared with the first queue group.
3707 ctrl_info->event_queue.int_msg_num = 0;
3708 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3709 ctrl_info->queue_groups[i].int_msg_num = i;
3711 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3712 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3713 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3714 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3715 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3719 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3721 size_t alloc_length;
3722 struct pqi_admin_queues_aligned *admin_queues_aligned;
3723 struct pqi_admin_queues *admin_queues;
3725 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3726 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3728 ctrl_info->admin_queue_memory_base =
3729 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3730 &ctrl_info->admin_queue_memory_base_dma_handle,
3733 if (!ctrl_info->admin_queue_memory_base)
3736 ctrl_info->admin_queue_memory_length = alloc_length;
3738 admin_queues = &ctrl_info->admin_queues;
3739 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3740 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3741 admin_queues->iq_element_array =
3742 &admin_queues_aligned->iq_element_array;
3743 admin_queues->oq_element_array =
3744 &admin_queues_aligned->oq_element_array;
3745 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3746 admin_queues->oq_pi =
3747 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
3749 admin_queues->iq_element_array_bus_addr =
3750 ctrl_info->admin_queue_memory_base_dma_handle +
3751 (admin_queues->iq_element_array -
3752 ctrl_info->admin_queue_memory_base);
3753 admin_queues->oq_element_array_bus_addr =
3754 ctrl_info->admin_queue_memory_base_dma_handle +
3755 (admin_queues->oq_element_array -
3756 ctrl_info->admin_queue_memory_base);
3757 admin_queues->iq_ci_bus_addr =
3758 ctrl_info->admin_queue_memory_base_dma_handle +
3759 ((void *)admin_queues->iq_ci -
3760 ctrl_info->admin_queue_memory_base);
3761 admin_queues->oq_pi_bus_addr =
3762 ctrl_info->admin_queue_memory_base_dma_handle +
3763 ((void __iomem *)admin_queues->oq_pi -
3764 (void __iomem *)ctrl_info->admin_queue_memory_base);
3769 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
3770 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
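/*
 * Note (added for clarity): creating the admin queue pair is a register-level
 * handshake - the driver programs the element array and index addresses plus
 * the element counts, writes PQI_CREATE_ADMIN_QUEUE_PAIR to the function and
 * status code register, then polls that register for PQI_STATUS_IDLE within
 * PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES.  Only after that do the iq_pi/oq_ci
 * offset registers become valid.
 */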
3772 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3774 struct pqi_device_registers __iomem *pqi_registers;
3775 struct pqi_admin_queues *admin_queues;
3776 unsigned long timeout;
3780 pqi_registers = ctrl_info->pqi_registers;
3781 admin_queues = &ctrl_info->admin_queues;
3783 writeq((u64)admin_queues->iq_element_array_bus_addr,
3784 &pqi_registers->admin_iq_element_array_addr);
3785 writeq((u64)admin_queues->oq_element_array_bus_addr,
3786 &pqi_registers->admin_oq_element_array_addr);
3787 writeq((u64)admin_queues->iq_ci_bus_addr,
3788 &pqi_registers->admin_iq_ci_addr);
3789 writeq((u64)admin_queues->oq_pi_bus_addr,
3790 &pqi_registers->admin_oq_pi_addr);
3792 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3793 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
3794 (admin_queues->int_msg_num << 16);
3795 writel(reg, &pqi_registers->admin_iq_num_elements);
3796 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3797 &pqi_registers->function_and_status_code);
3799 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3801 status = readb(&pqi_registers->function_and_status_code);
3802 if (status == PQI_STATUS_IDLE)
3804 if (time_after(jiffies, timeout))
3806 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3810 * The offset registers are not initialized to the correct
3811 * offsets until *after* the create admin queue pair command
3812 * completes successfully.
3814 admin_queues->iq_pi = ctrl_info->iomem_base +
3815 PQI_DEVICE_REGISTERS_OFFSET +
3816 readq(&pqi_registers->admin_iq_pi_offset);
3817 admin_queues->oq_ci = ctrl_info->iomem_base +
3818 PQI_DEVICE_REGISTERS_OFFSET +
3819 readq(&pqi_registers->admin_oq_ci_offset);
3824 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3825 struct pqi_general_admin_request *request)
3827 struct pqi_admin_queues *admin_queues;
3831 admin_queues = &ctrl_info->admin_queues;
3832 iq_pi = admin_queues->iq_pi_copy;
3834 next_element = admin_queues->iq_element_array +
3835 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3837 memcpy(next_element, request, sizeof(*request));
3839 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3840 admin_queues->iq_pi_copy = iq_pi;
3843 * This write notifies the controller that an IU is available to be processed.
3846 writel(iq_pi, admin_queues->iq_pi);
3849 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3851 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3852 struct pqi_general_admin_response *response)
3854 struct pqi_admin_queues *admin_queues;
3857 unsigned long timeout;
3859 admin_queues = &ctrl_info->admin_queues;
3860 oq_ci = admin_queues->oq_ci_copy;
3862 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
3865 oq_pi = readl(admin_queues->oq_pi);
3868 if (time_after(jiffies, timeout)) {
3869 dev_err(&ctrl_info->pci_dev->dev,
3870 "timed out waiting for admin response\n");
3873 if (!sis_is_firmware_running(ctrl_info))
3875 usleep_range(1000, 2000);
3878 memcpy(response, admin_queues->oq_element_array +
3879 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3881 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3882 admin_queues->oq_ci_copy = oq_ci;
3883 writel(oq_ci, admin_queues->oq_ci);
3888 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3889 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3890 struct pqi_io_request *io_request)
3892 struct pqi_io_request *next;
3897 unsigned long flags;
3898 unsigned int num_elements_needed;
3899 unsigned int num_elements_to_end_of_queue;
3901 struct pqi_iu_header *request;
3903 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3906 io_request->queue_group = queue_group;
3907 list_add_tail(&io_request->request_list_entry,
3908 &queue_group->request_list[path]);
3911 iq_pi = queue_group->iq_pi_copy[path];
3913 list_for_each_entry_safe(io_request, next,
3914 &queue_group->request_list[path], request_list_entry) {
3916 request = io_request->iu;
3918 iu_length = get_unaligned_le16(&request->iu_length) +
3919 PQI_REQUEST_HEADER_LENGTH;
3920 num_elements_needed =
3921 DIV_ROUND_UP(iu_length,
3922 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3924 iq_ci = readl(queue_group->iq_ci[path]);
3926 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3927 ctrl_info->num_elements_per_iq))
3930 put_unaligned_le16(queue_group->oq_id,
3931 &request->response_queue_id);
3933 next_element = queue_group->iq_element_array[path] +
3934 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3936 num_elements_to_end_of_queue =
3937 ctrl_info->num_elements_per_iq - iq_pi;
3939 if (num_elements_needed <= num_elements_to_end_of_queue) {
3940 memcpy(next_element, request, iu_length);
3942 copy_count = num_elements_to_end_of_queue *
3943 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3944 memcpy(next_element, request, copy_count);
3945 memcpy(queue_group->iq_element_array[path],
3946 (u8 *)request + copy_count,
3947 iu_length - copy_count);
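/*
 * Note (added for clarity): in this branch the IU wrapped past the end of
 * the element array, so its tail was copied to element 0 onward.  Spanning
 * an IU across the wrap point is safe here because inbound spanning support
 * is required by pqi_validate_device_capability().
 */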
3950 iq_pi = (iq_pi + num_elements_needed) %
3951 ctrl_info->num_elements_per_iq;
3953 list_del(&io_request->request_list_entry);
3956 if (iq_pi != queue_group->iq_pi_copy[path]) {
3957 queue_group->iq_pi_copy[path] = iq_pi;
3959 * This write notifies the controller that one or more IUs are
3960 * available to be processed.
3962 writel(iq_pi, queue_group->iq_pi[path]);
3965 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3968 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3970 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3971 struct completion *wait)
3976 if (wait_for_completion_io_timeout(wait,
3977 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
3982 pqi_check_ctrl_health(ctrl_info);
3983 if (pqi_ctrl_offline(ctrl_info)) {
3992 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3995 struct completion *waiting = context;
4000 static int pqi_process_raid_io_error_synchronous(
4001 struct pqi_raid_error_info *error_info)
4005 switch (error_info->data_out_result) {
4006 case PQI_DATA_IN_OUT_GOOD:
4007 if (error_info->status == SAM_STAT_GOOD)
4010 case PQI_DATA_IN_OUT_UNDERFLOW:
4011 if (error_info->status == SAM_STAT_GOOD ||
4012 error_info->status == SAM_STAT_CHECK_CONDITION)
4015 case PQI_DATA_IN_OUT_ABORTED:
4016 rc = PQI_CMD_STATUS_ABORTED;
4023 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4024 struct pqi_iu_header *request, unsigned int flags,
4025 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
4028 struct pqi_io_request *io_request;
4029 unsigned long start_jiffies;
4030 unsigned long msecs_blocked;
4032 DECLARE_COMPLETION_ONSTACK(wait);
4035 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
4036 * are mutually exclusive.
4039 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4040 if (down_interruptible(&ctrl_info->sync_request_sem))
4041 return -ERESTARTSYS;
4043 if (timeout_msecs == NO_TIMEOUT) {
4044 down(&ctrl_info->sync_request_sem);
4046 start_jiffies = jiffies;
4047 if (down_timeout(&ctrl_info->sync_request_sem,
4048 msecs_to_jiffies(timeout_msecs)))
4051 msecs_blocked = jiffies_to_msecs(jiffies - start_jiffies);
4052 if (msecs_blocked >= timeout_msecs) {
4056 timeout_msecs -= msecs_blocked;
4060 pqi_ctrl_busy(ctrl_info);
4061 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4062 if (timeout_msecs == 0) {
4063 pqi_ctrl_unbusy(ctrl_info);
4068 if (pqi_ctrl_offline(ctrl_info)) {
4069 pqi_ctrl_unbusy(ctrl_info);
4074 atomic_inc(&ctrl_info->sync_cmds_outstanding);
4076 io_request = pqi_alloc_io_request(ctrl_info);
4078 put_unaligned_le16(io_request->index,
4079 &(((struct pqi_raid_path_request *)request)->request_id));
4081 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4082 ((struct pqi_raid_path_request *)request)->error_index =
4083 ((struct pqi_raid_path_request *)request)->request_id;
4085 iu_length = get_unaligned_le16(&request->iu_length) +
4086 PQI_REQUEST_HEADER_LENGTH;
4087 memcpy(io_request->iu, request, iu_length);
4089 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4090 io_request->context = &wait;
4092 pqi_start_io(ctrl_info,
4093 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, io_request);
4096 pqi_ctrl_unbusy(ctrl_info);
4098 if (timeout_msecs == NO_TIMEOUT) {
4099 pqi_wait_for_completion_io(ctrl_info, &wait);
4101 if (!wait_for_completion_io_timeout(&wait,
4102 msecs_to_jiffies(timeout_msecs))) {
4103 dev_warn(&ctrl_info->pci_dev->dev,
4104 "command timed out\n");
4110 if (io_request->error_info)
4111 memcpy(error_info, io_request->error_info,
4112 sizeof(*error_info));
4114 memset(error_info, 0, sizeof(*error_info));
4115 } else if (rc == 0 && io_request->error_info) {
4116 rc = pqi_process_raid_io_error_synchronous(
4117 io_request->error_info);
4120 pqi_free_io_request(io_request);
4122 atomic_dec(&ctrl_info->sync_cmds_outstanding);
4124 up(&ctrl_info->sync_request_sem);
4129 static int pqi_validate_admin_response(
4130 struct pqi_general_admin_response *response, u8 expected_function_code)
4132 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4135 if (get_unaligned_le16(&response->header.iu_length) !=
4136 PQI_GENERAL_ADMIN_IU_LENGTH)
4139 if (response->function_code != expected_function_code)
4142 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4148 static int pqi_submit_admin_request_synchronous(
4149 struct pqi_ctrl_info *ctrl_info,
4150 struct pqi_general_admin_request *request,
4151 struct pqi_general_admin_response *response)
4155 pqi_submit_admin_request(ctrl_info, request);
4157 rc = pqi_poll_for_admin_response(ctrl_info, response);
4160 rc = pqi_validate_admin_response(response,
4161 request->function_code);
4166 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4169 struct pqi_general_admin_request request;
4170 struct pqi_general_admin_response response;
4171 struct pqi_device_capability *capability;
4172 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4174 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4178 memset(&request, 0, sizeof(request));
4180 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4181 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4182 &request.header.iu_length);
4183 request.function_code =
4184 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4185 put_unaligned_le32(sizeof(*capability),
4186 &request.data.report_device_capability.buffer_length);
4188 rc = pqi_map_single(ctrl_info->pci_dev,
4189 &request.data.report_device_capability.sg_descriptor,
4190 capability, sizeof(*capability),
4195 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4198 pqi_pci_unmap(ctrl_info->pci_dev,
4199 &request.data.report_device_capability.sg_descriptor, 1,
4205 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4210 ctrl_info->max_inbound_queues =
4211 get_unaligned_le16(&capability->max_inbound_queues);
4212 ctrl_info->max_elements_per_iq =
4213 get_unaligned_le16(&capability->max_elements_per_iq);
4214 ctrl_info->max_iq_element_length =
4215 get_unaligned_le16(&capability->max_iq_element_length) * 16;
4217 ctrl_info->max_outbound_queues =
4218 get_unaligned_le16(&capability->max_outbound_queues);
4219 ctrl_info->max_elements_per_oq =
4220 get_unaligned_le16(&capability->max_elements_per_oq);
4221 ctrl_info->max_oq_element_length =
4222 get_unaligned_le16(&capability->max_oq_element_length) * 16;
4225 sop_iu_layer_descriptor =
4226 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4228 ctrl_info->max_inbound_iu_length_per_firmware =
4230 get_unaligned_le16(&sop_iu_layer_descriptor->max_inbound_iu_length);
4231 ctrl_info->inbound_spanning_supported =
4232 sop_iu_layer_descriptor->inbound_spanning_supported;
4233 ctrl_info->outbound_spanning_supported =
4234 sop_iu_layer_descriptor->outbound_spanning_supported;
4242 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4244 if (ctrl_info->max_iq_element_length <
4245 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4246 dev_err(&ctrl_info->pci_dev->dev,
4247 "max. inbound queue element length of %d is less than the required length of %d\n",
4248 ctrl_info->max_iq_element_length,
4249 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4253 if (ctrl_info->max_oq_element_length <
4254 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4255 dev_err(&ctrl_info->pci_dev->dev,
4256 "max. outbound queue element length of %d is less than the required length of %d\n",
4257 ctrl_info->max_oq_element_length,
4258 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4262 if (ctrl_info->max_inbound_iu_length_per_firmware <
4263 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4264 dev_err(&ctrl_info->pci_dev->dev,
4265 "max. inbound IU length of %u is less than the min. required length of %d\n",
4266 ctrl_info->max_inbound_iu_length_per_firmware,
4267 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4271 if (!ctrl_info->inbound_spanning_supported) {
4272 dev_err(&ctrl_info->pci_dev->dev,
4273 "the controller does not support inbound spanning\n");
4277 if (ctrl_info->outbound_spanning_supported) {
4278 dev_err(&ctrl_info->pci_dev->dev,
4279 "the controller supports outbound spanning but this driver does not\n");
4286 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4289 struct pqi_event_queue *event_queue;
4290 struct pqi_general_admin_request request;
4291 struct pqi_general_admin_response response;
4293 event_queue = &ctrl_info->event_queue;
4296 * Create OQ (Outbound Queue - device to host queue) to dedicate to events.
4299 memset(&request, 0, sizeof(request));
4300 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4301 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4302 &request.header.iu_length);
4303 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4304 put_unaligned_le16(event_queue->oq_id,
4305 &request.data.create_operational_oq.queue_id);
4306 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4307 &request.data.create_operational_oq.element_array_addr);
4308 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4309 &request.data.create_operational_oq.pi_addr);
4310 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4311 &request.data.create_operational_oq.num_elements);
4312 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4313 &request.data.create_operational_oq.element_length);
4314 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4315 put_unaligned_le16(event_queue->int_msg_num,
4316 &request.data.create_operational_oq.int_msg_num);
4318 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4323 event_queue->oq_ci = ctrl_info->iomem_base +
4324 PQI_DEVICE_REGISTERS_OFFSET +
4326 get_unaligned_le64(&response.data.create_operational_oq.oq_ci_offset);
4331 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4332 unsigned int group_number)
4335 struct pqi_queue_group *queue_group;
4336 struct pqi_general_admin_request request;
4337 struct pqi_general_admin_response response;
4339 queue_group = &ctrl_info->queue_groups[group_number];
4342 * Create IQ (Inbound Queue - host to device queue) for RAID path.
4345 memset(&request, 0, sizeof(request));
4346 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4347 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4348 &request.header.iu_length);
4349 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4350 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4351 &request.data.create_operational_iq.queue_id);
4353 put_unaligned_le64((u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4354 &request.data.create_operational_iq.element_array_addr);
4355 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4356 &request.data.create_operational_iq.ci_addr);
4357 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4358 &request.data.create_operational_iq.num_elements);
4359 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4360 &request.data.create_operational_iq.element_length);
4361 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4363 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4366 dev_err(&ctrl_info->pci_dev->dev,
4367 "error creating inbound RAID queue\n");
4371 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4372 PQI_DEVICE_REGISTERS_OFFSET +
4374 get_unaligned_le64(&response.data.create_operational_iq.iq_pi_offset);
4377 * Create IQ (Inbound Queue - host to device queue) for
4378 * Advanced I/O (AIO) path.
4380 memset(&request, 0, sizeof(request));
4381 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4382 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4383 &request.header.iu_length);
4384 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4385 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4386 &request.data.create_operational_iq.queue_id);
4387 put_unaligned_le64((u64)queue_group->
4388 iq_element_array_bus_addr[AIO_PATH],
4389 &request.data.create_operational_iq.element_array_addr);
4390 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4391 &request.data.create_operational_iq.ci_addr);
4392 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4393 &request.data.create_operational_iq.num_elements);
4394 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4395 &request.data.create_operational_iq.element_length);
4396 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4398 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4401 dev_err(&ctrl_info->pci_dev->dev,
4402 "error creating inbound AIO queue\n");
4406 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4407 PQI_DEVICE_REGISTERS_OFFSET +
4409 &response.data.create_operational_iq.iq_pi_offset);
4412 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4413	 * assumed to be for RAID path I/O unless we change the queue's property.
4416 memset(&request, 0, sizeof(request));
4417 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4418 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4419 &request.header.iu_length);
4420 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4421 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4422 &request.data.change_operational_iq_properties.queue_id);
4423 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4424 &request.data.change_operational_iq_properties.vendor_specific);
4426 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4429 dev_err(&ctrl_info->pci_dev->dev,
4430 "error changing queue property\n");
4435 * Create OQ (Outbound Queue - device to host queue).
4437 memset(&request, 0, sizeof(request));
4438 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4439 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4440 &request.header.iu_length);
4441 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4442 put_unaligned_le16(queue_group->oq_id,
4443 &request.data.create_operational_oq.queue_id);
4444 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4445 &request.data.create_operational_oq.element_array_addr);
4446 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4447 &request.data.create_operational_oq.pi_addr);
4448 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4449 &request.data.create_operational_oq.num_elements);
4450 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4451 &request.data.create_operational_oq.element_length);
4452 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4453 put_unaligned_le16(queue_group->int_msg_num,
4454 &request.data.create_operational_oq.int_msg_num);
4456 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4459 dev_err(&ctrl_info->pci_dev->dev,
4460 "error creating outbound queue\n");
4464 queue_group->oq_ci = ctrl_info->iomem_base +
4465 PQI_DEVICE_REGISTERS_OFFSET +
4467 &response.data.create_operational_oq.oq_ci_offset);
4472 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4477 rc = pqi_create_event_queue(ctrl_info);
4479 dev_err(&ctrl_info->pci_dev->dev,
4480 "error creating event queue\n");
4484 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4485 rc = pqi_create_queue_group(ctrl_info, i);
4487 dev_err(&ctrl_info->pci_dev->dev,
4488 "error creating queue group number %u/%u\n",
4489 i, ctrl_info->num_queue_groups);
4497 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4498 (offsetof(struct pqi_event_config, descriptors) + \
4499 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
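/*
 * Read the controller's event configuration, then point each supported
 * event type at the driver's dedicated event OQ (or at OQ ID 0 when
 * events are being disabled) and write the modified configuration back.
 */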
4501 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4506 struct pqi_event_config *event_config;
4507 struct pqi_event_descriptor *event_descriptor;
4508 struct pqi_general_management_request request;
4510 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4515 memset(&request, 0, sizeof(request));
4517 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4518 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4519 data.report_event_configuration.sg_descriptors[1]) -
4520 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4521 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4522 &request.data.report_event_configuration.buffer_length);
4524 rc = pqi_map_single(ctrl_info->pci_dev,
4525 request.data.report_event_configuration.sg_descriptors,
4526 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4531 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4532 0, NULL, NO_TIMEOUT);
4534 pqi_pci_unmap(ctrl_info->pci_dev,
4535 request.data.report_event_configuration.sg_descriptors, 1,
4541 for (i = 0; i < event_config->num_event_descriptors; i++) {
4542 event_descriptor = &event_config->descriptors[i];
4543 if (enable_events &&
4544 pqi_is_supported_event(event_descriptor->event_type))
4545 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4546 &event_descriptor->oq_id);
4548 put_unaligned_le16(0, &event_descriptor->oq_id);
4551 memset(&request, 0, sizeof(request));
4553 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4554 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4555 data.report_event_configuration.sg_descriptors[1]) -
4556 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4557 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4558 &request.data.report_event_configuration.buffer_length);
4560 rc = pqi_map_single(ctrl_info->pci_dev,
4561 request.data.report_event_configuration.sg_descriptors,
4562 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4567 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4570 pqi_pci_unmap(ctrl_info->pci_dev,
4571 request.data.report_event_configuration.sg_descriptors, 1,
4575 kfree(event_config);
4580 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4582 return pqi_configure_events(ctrl_info, true);
4585 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4587 return pqi_configure_events(ctrl_info, false);
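/*
 * Tear down the per-slot I/O request resources: the kmalloc'ed IU buffer
 * and, when present, the DMA-coherent scatter-gather chain buffer, then
 * free the request pool itself.
 */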
4590 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4594 size_t sg_chain_buffer_length;
4595 struct pqi_io_request *io_request;
4597 if (!ctrl_info->io_request_pool)
4600 dev = &ctrl_info->pci_dev->dev;
4601 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4602 io_request = ctrl_info->io_request_pool;
4604 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4605 kfree(io_request->iu);
4606 if (!io_request->sg_chain_buffer)
4608 dma_free_coherent(dev, sg_chain_buffer_length,
4609 io_request->sg_chain_buffer,
4610 io_request->sg_chain_buffer_dma_handle);
4614 kfree(ctrl_info->io_request_pool);
4615 ctrl_info->io_request_pool = NULL;
4618 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4621 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4622 ctrl_info->error_buffer_length,
4623 &ctrl_info->error_buffer_dma_handle,
4625 if (!ctrl_info->error_buffer)
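/*
 * Allocate the I/O request pool: one slot per outstanding request, each
 * with a kmalloc'ed inbound IU buffer sized to max_inbound_iu_length and
 * a DMA-coherent scatter-gather chain buffer for chained SG lists.  On
 * any failure, everything allocated so far is released via
 * pqi_free_all_io_requests().
 */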
4631 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4634 void *sg_chain_buffer;
4635 size_t sg_chain_buffer_length;
4636 dma_addr_t sg_chain_buffer_dma_handle;
4638 struct pqi_io_request *io_request;
4640 ctrl_info->io_request_pool =
4641 kcalloc(ctrl_info->max_io_slots,
4642 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4644 if (!ctrl_info->io_request_pool) {
4645 dev_err(&ctrl_info->pci_dev->dev,
4646 "failed to allocate I/O request pool\n");
4650 dev = &ctrl_info->pci_dev->dev;
4651 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4652 io_request = ctrl_info->io_request_pool;
4654 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4656 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4658 if (!io_request->iu) {
4659 dev_err(&ctrl_info->pci_dev->dev,
4660 "failed to allocate IU buffers\n");
4664 sg_chain_buffer = dma_alloc_coherent(dev,
4665 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4668 if (!sg_chain_buffer) {
4669 dev_err(&ctrl_info->pci_dev->dev,
4670 "failed to allocate PQI scatter-gather chain buffers\n");
4674 io_request->index = i;
4675 io_request->sg_chain_buffer = sg_chain_buffer;
4676 io_request->sg_chain_buffer_dma_handle =
4677 sg_chain_buffer_dma_handle;
4684 pqi_free_all_io_requests(ctrl_info);
4690 * Calculate required resources that are sized based on max. outstanding
4691 * requests and max. transfer size.
4694 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4696 u32 max_transfer_size;
4699 ctrl_info->scsi_ml_can_queue =
4700 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4701 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4703 ctrl_info->error_buffer_length =
4704 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4707 max_transfer_size = min(ctrl_info->max_transfer_size,
4708 PQI_MAX_TRANSFER_SIZE_KDUMP);
4710 max_transfer_size = min(ctrl_info->max_transfer_size,
4711 PQI_MAX_TRANSFER_SIZE);
4713 max_sg_entries = max_transfer_size / PAGE_SIZE;
4715 /* +1 to cover when the buffer is not page-aligned. */
4718 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4720 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4722 ctrl_info->sg_chain_buffer_length =
4723 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4724 PQI_EXTRA_SGL_MEMORY;
4725 ctrl_info->sg_tablesize = max_sg_entries;
4726 ctrl_info->max_sectors = max_transfer_size / 512;
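/*
 * Size the queue groups: a single group in kdump/reset_devices mode,
 * otherwise the minimum of the online CPU count, the available MSI-X
 * vectors, and the controller's inbound/outbound queue limits.  Element
 * counts per IQ/OQ are then derived from the element-aligned maximum
 * inbound IU length.
 */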
4729 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4731 int num_queue_groups;
4732 u16 num_elements_per_iq;
4733 u16 num_elements_per_oq;
4735 if (reset_devices) {
4736 num_queue_groups = 1;
4739 int max_queue_groups;
4741 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4742 ctrl_info->max_outbound_queues - 1);
4743 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4745 num_cpus = num_online_cpus();
4746 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4747 num_queue_groups = min(num_queue_groups, max_queue_groups);
4750 ctrl_info->num_queue_groups = num_queue_groups;
4751 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4754 * Make sure that the max. inbound IU length is an even multiple
4755 * of our inbound element length.
4757 ctrl_info->max_inbound_iu_length =
4758 (ctrl_info->max_inbound_iu_length_per_firmware /
4759 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4760 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4762 num_elements_per_iq =
4763 (ctrl_info->max_inbound_iu_length /
4764 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4766 /* Add one because one element in each queue is unusable. */
4767 num_elements_per_iq++;
4769 num_elements_per_iq = min(num_elements_per_iq,
4770 ctrl_info->max_elements_per_iq);
4772 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4773 num_elements_per_oq = min(num_elements_per_oq,
4774 ctrl_info->max_elements_per_oq);
4776 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4777 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4779 ctrl_info->max_sg_per_iu =
4780 ((ctrl_info->max_inbound_iu_length -
4781 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4782 sizeof(struct pqi_sg_descriptor)) +
4783 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4786 static inline void pqi_set_sg_descriptor(
4787 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4789 u64 address = (u64)sg_dma_address(sg);
4790 unsigned int length = sg_dma_len(sg);
4792 put_unaligned_le64(address, &sg_descriptor->address);
4793 put_unaligned_le32(length, &sg_descriptor->length);
4794 put_unaligned_le32(0, &sg_descriptor->flags);
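/*
 * Build the SG list for a RAID path request.  Descriptors are written
 * into the IU itself; if the mapped SG count exceeds the space available
 * there, the last embedded descriptor is turned into a CISS_SG_CHAIN
 * entry pointing at the request's pre-allocated chain buffer and the
 * remaining descriptors are written there.  The final descriptor is
 * flagged CISS_SG_LAST and the IU length is grown accordingly.
 */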
4797 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4798 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4799 struct pqi_io_request *io_request)
4805 unsigned int num_sg_in_iu;
4806 unsigned int max_sg_per_iu;
4807 struct scatterlist *sg;
4808 struct pqi_sg_descriptor *sg_descriptor;
4810 sg_count = scsi_dma_map(scmd);
4814 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4815 PQI_REQUEST_HEADER_LENGTH;
4820 sg = scsi_sglist(scmd);
4821 sg_descriptor = request->sg_descriptors;
4822 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4828 pqi_set_sg_descriptor(sg_descriptor, sg);
4835 if (i == max_sg_per_iu) {
4837 (u64)io_request->sg_chain_buffer_dma_handle,
4838 &sg_descriptor->address);
4839 put_unaligned_le32((sg_count - num_sg_in_iu)
4840 * sizeof(*sg_descriptor),
4841 &sg_descriptor->length);
4842 put_unaligned_le32(CISS_SG_CHAIN,
4843 &sg_descriptor->flags);
4846 sg_descriptor = io_request->sg_chain_buffer;
4851 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4852 request->partial = chained;
4853 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4856 put_unaligned_le16(iu_length, &request->header.iu_length);
4861 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4862 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4863 struct pqi_io_request *io_request)
4869 unsigned int num_sg_in_iu;
4870 unsigned int max_sg_per_iu;
4871 struct scatterlist *sg;
4872 struct pqi_sg_descriptor *sg_descriptor;
4874 sg_count = scsi_dma_map(scmd);
4878 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4879 PQI_REQUEST_HEADER_LENGTH;
4885 sg = scsi_sglist(scmd);
4886 sg_descriptor = request->sg_descriptors;
4887 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4892 pqi_set_sg_descriptor(sg_descriptor, sg);
4899 if (i == max_sg_per_iu) {
4901 (u64)io_request->sg_chain_buffer_dma_handle,
4902 &sg_descriptor->address);
4903 put_unaligned_le32((sg_count - num_sg_in_iu)
4904 * sizeof(*sg_descriptor),
4905 &sg_descriptor->length);
4906 put_unaligned_le32(CISS_SG_CHAIN,
4907 &sg_descriptor->flags);
4910 sg_descriptor = io_request->sg_chain_buffer;
4915 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4916 request->partial = chained;
4917 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4920 put_unaligned_le16(iu_length, &request->header.iu_length);
4921 request->num_sg_descriptors = num_sg_in_iu;
4926 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4929 struct scsi_cmnd *scmd;
4931 scmd = io_request->scmd;
4932 pqi_free_io_request(io_request);
4933 scsi_dma_unmap(scmd);
4934 pqi_scsi_done(scmd);
4937 static int pqi_raid_submit_scsi_cmd_with_io_request(
4938 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
4939 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4940 struct pqi_queue_group *queue_group)
4944 struct pqi_raid_path_request *request;
4946 io_request->io_complete_callback = pqi_raid_io_complete;
4947 io_request->scmd = scmd;
4949 request = io_request->iu;
4951 offsetof(struct pqi_raid_path_request, sg_descriptors));
4953 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4954 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4955 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4956 put_unaligned_le16(io_request->index, &request->request_id);
4957 request->error_index = request->request_id;
4958 memcpy(request->lun_number, device->scsi3addr,
4959 sizeof(request->lun_number));
4961 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4962 memcpy(request->cdb, scmd->cmnd, cdb_length);
4964 switch (cdb_length) {
4969 /* No bytes in the Additional CDB bytes field */
4970 request->additional_cdb_bytes_usage =
4971 SOP_ADDITIONAL_CDB_BYTES_0;
4974	/* 4 bytes in the Additional CDB bytes field */
4975 request->additional_cdb_bytes_usage =
4976 SOP_ADDITIONAL_CDB_BYTES_4;
4979	/* 8 bytes in the Additional CDB bytes field */
4980 request->additional_cdb_bytes_usage =
4981 SOP_ADDITIONAL_CDB_BYTES_8;
4984	/* 12 bytes in the Additional CDB bytes field */
4985 request->additional_cdb_bytes_usage =
4986 SOP_ADDITIONAL_CDB_BYTES_12;
4990	/* 16 bytes in the Additional CDB bytes field */
4991 request->additional_cdb_bytes_usage =
4992 SOP_ADDITIONAL_CDB_BYTES_16;
4996 switch (scmd->sc_data_direction) {
4998 request->data_direction = SOP_READ_FLAG;
5000 case DMA_FROM_DEVICE:
5001 request->data_direction = SOP_WRITE_FLAG;
5004 request->data_direction = SOP_NO_DIRECTION_FLAG;
5006 case DMA_BIDIRECTIONAL:
5007 request->data_direction = SOP_BIDIRECTIONAL;
5010 dev_err(&ctrl_info->pci_dev->dev,
5011 "unknown data direction: %d\n",
5012 scmd->sc_data_direction);
5016 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5018 pqi_free_io_request(io_request);
5019 return SCSI_MLQUEUE_HOST_BUSY;
5022 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5027 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5028 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5029 struct pqi_queue_group *queue_group)
5031 struct pqi_io_request *io_request;
5033 io_request = pqi_alloc_io_request(ctrl_info);
5035 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5036 device, scmd, queue_group);
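/*
 * RAID bypass retry machinery: when an AIO (bypass) request fails in a
 * way that is worth retrying, it is placed on a controller-wide retry
 * list and resubmitted down the normal RAID path from a worker thread,
 * unless the controller is blocked or offline or the device is in reset.
 */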
5039 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
5041 if (!pqi_ctrl_blocked(ctrl_info))
5042 schedule_work(&ctrl_info->raid_bypass_retry_work);
5045 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5047 struct scsi_cmnd *scmd;
5048 struct pqi_scsi_dev *device;
5049 struct pqi_ctrl_info *ctrl_info;
5051 if (!io_request->raid_bypass)
5054 scmd = io_request->scmd;
5055 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5057 if (host_byte(scmd->result) == DID_NO_CONNECT)
5060 device = scmd->device->hostdata;
5061 if (pqi_device_offline(device))
5064 ctrl_info = shost_to_hba(scmd->device->host);
5065 if (pqi_ctrl_offline(ctrl_info))
5071 static inline void pqi_add_to_raid_bypass_retry_list(
5072 struct pqi_ctrl_info *ctrl_info,
5073 struct pqi_io_request *io_request, bool at_head)
5075 unsigned long flags;
5077 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5079 list_add(&io_request->request_list_entry,
5080 &ctrl_info->raid_bypass_retry_list);
5082 list_add_tail(&io_request->request_list_entry,
5083 &ctrl_info->raid_bypass_retry_list);
5084 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5087 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
5090 struct scsi_cmnd *scmd;
5092 scmd = io_request->scmd;
5093 pqi_free_io_request(io_request);
5094 pqi_scsi_done(scmd);
5097 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
5099 struct scsi_cmnd *scmd;
5100 struct pqi_ctrl_info *ctrl_info;
5102 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
5103 scmd = io_request->scmd;
5105 ctrl_info = shost_to_hba(scmd->device->host);
5107 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
5108 pqi_schedule_bypass_retry(ctrl_info);
5111 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
5113 struct scsi_cmnd *scmd;
5114 struct pqi_scsi_dev *device;
5115 struct pqi_ctrl_info *ctrl_info;
5116 struct pqi_queue_group *queue_group;
5118 scmd = io_request->scmd;
5119 device = scmd->device->hostdata;
5120 if (pqi_device_in_reset(device)) {
5121 pqi_free_io_request(io_request);
5122 set_host_byte(scmd, DID_RESET);
5123 pqi_scsi_done(scmd);
5127 ctrl_info = shost_to_hba(scmd->device->host);
5128 queue_group = io_request->queue_group;
5130 pqi_reinit_io_request(io_request);
5132 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5133 device, scmd, queue_group);
5136 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
5137 struct pqi_ctrl_info *ctrl_info)
5139 unsigned long flags;
5140 struct pqi_io_request *io_request;
5142 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5143 io_request = list_first_entry_or_null(
5144 &ctrl_info->raid_bypass_retry_list,
5145 struct pqi_io_request, request_list_entry);
5147 list_del(&io_request->request_list_entry);
5148 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5153 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
5156 struct pqi_io_request *io_request;
5158 pqi_ctrl_busy(ctrl_info);
5161 if (pqi_ctrl_blocked(ctrl_info))
5163 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
5166 rc = pqi_retry_raid_bypass(io_request);
5168 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
5170 pqi_schedule_bypass_retry(ctrl_info);
5175 pqi_ctrl_unbusy(ctrl_info);
5178 static void pqi_raid_bypass_retry_worker(struct work_struct *work)
5180 struct pqi_ctrl_info *ctrl_info;
5182 ctrl_info = container_of(work, struct pqi_ctrl_info,
5183 raid_bypass_retry_work);
5184 pqi_retry_raid_bypass_requests(ctrl_info);
5187 static void pqi_clear_all_queued_raid_bypass_retries(
5188 struct pqi_ctrl_info *ctrl_info)
5190 unsigned long flags;
5192 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5193 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
5194 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
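/*
 * AIO completion: a status of -EAGAIN asks the SML to retry the command
 * immediately (DID_IMM_RETRY); a failed RAID bypass request is instead
 * queued for retry down the RAID path rather than completed to the SML.
 */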
5197 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5200 struct scsi_cmnd *scmd;
5202 scmd = io_request->scmd;
5203 scsi_dma_unmap(scmd);
5204 if (io_request->status == -EAGAIN)
5205 set_host_byte(scmd, DID_IMM_RETRY);
5206 else if (pqi_raid_bypass_retry_needed(io_request)) {
5207 pqi_queue_raid_bypass_retry(io_request);
5210 pqi_free_io_request(io_request);
5211 pqi_scsi_done(scmd);
5214 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5215 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5216 struct pqi_queue_group *queue_group)
5218 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5219 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
5222 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5223 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5224 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5225 struct pqi_encryption_info *encryption_info, bool raid_bypass)
5228 struct pqi_io_request *io_request;
5229 struct pqi_aio_path_request *request;
5231 io_request = pqi_alloc_io_request(ctrl_info);
5232 io_request->io_complete_callback = pqi_aio_io_complete;
5233 io_request->scmd = scmd;
5234 io_request->raid_bypass = raid_bypass;
5236 request = io_request->iu;
5238 offsetof(struct pqi_raid_path_request, sg_descriptors));
5240 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5241 put_unaligned_le32(aio_handle, &request->nexus_id);
5242 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5243 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5244 put_unaligned_le16(io_request->index, &request->request_id);
5245 request->error_index = request->request_id;
5246 if (cdb_length > sizeof(request->cdb))
5247 cdb_length = sizeof(request->cdb);
5248 request->cdb_length = cdb_length;
5249 memcpy(request->cdb, cdb, cdb_length);
5251 switch (scmd->sc_data_direction) {
5253 request->data_direction = SOP_READ_FLAG;
5255 case DMA_FROM_DEVICE:
5256 request->data_direction = SOP_WRITE_FLAG;
5259 request->data_direction = SOP_NO_DIRECTION_FLAG;
5261 case DMA_BIDIRECTIONAL:
5262 request->data_direction = SOP_BIDIRECTIONAL;
5265 dev_err(&ctrl_info->pci_dev->dev,
5266 "unknown data direction: %d\n",
5267 scmd->sc_data_direction);
5271 if (encryption_info) {
5272 request->encryption_enable = true;
5273 put_unaligned_le16(encryption_info->data_encryption_key_index,
5274 &request->data_encryption_key_index);
5275 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5276 &request->encrypt_tweak_lower);
5277 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5278 &request->encrypt_tweak_upper);
5281 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5283 pqi_free_io_request(io_request);
5284 return SCSI_MLQUEUE_HOST_BUSY;
5287 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
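/*
 * Derive the hardware queue (queue group) index for a SCSI command from
 * its blk-mq unique tag, bounds-checking it against max_hw_queue_index.
 */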
5292 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5293 struct scsi_cmnd *scmd)
5297 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5298 if (hw_queue > ctrl_info->max_hw_queue_index)
5305	 * This function gets called just before we hand the completed SCSI request back to the SML.
5309 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5311 struct pqi_scsi_dev *device;
5313 if (!scmd->device) {
5314 set_host_byte(scmd, DID_NO_CONNECT);
5318 device = scmd->device->hostdata;
5320 set_host_byte(scmd, DID_NO_CONNECT);
5324 atomic_dec(&device->scsi_cmds_outstanding);
5327 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
5328 struct scsi_cmnd *scmd)
5331 struct pqi_ctrl_info *ctrl_info;
5332 struct pqi_scsi_dev *device;
5334 struct pqi_queue_group *queue_group;
5337 device = scmd->device->hostdata;
5338 ctrl_info = shost_to_hba(shost);
5341 set_host_byte(scmd, DID_NO_CONNECT);
5342 pqi_scsi_done(scmd);
5346 atomic_inc(&device->scsi_cmds_outstanding);
5348 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5349 set_host_byte(scmd, DID_NO_CONNECT);
5350 pqi_scsi_done(scmd);
5354 pqi_ctrl_busy(ctrl_info);
5355 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5356 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
5357 rc = SCSI_MLQUEUE_HOST_BUSY;
5362	 * This is necessary because the SML doesn't zero out this field during error recovery.
5367 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5368 queue_group = &ctrl_info->queue_groups[hw_queue];
5370 if (pqi_is_logical_device(device)) {
5371 raid_bypassed = false;
5372 if (device->raid_bypass_enabled &&
5373 !blk_rq_is_passthrough(scmd->request)) {
5374 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5376 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5377 raid_bypassed = true;
5378 atomic_inc(&device->raid_bypass_cnt);
5382 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5384 if (device->aio_enabled)
5385 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5387 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5391 pqi_ctrl_unbusy(ctrl_info);
5393 atomic_dec(&device->scsi_cmds_outstanding);
5398 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5399 struct pqi_queue_group *queue_group)
5402 unsigned long flags;
5405 for (path = 0; path < 2; path++) {
5408 &queue_group->submit_lock[path], flags);
5410 list_empty(&queue_group->request_list[path]);
5411 spin_unlock_irqrestore(
5412 &queue_group->submit_lock[path], flags);
5415 pqi_check_ctrl_health(ctrl_info);
5416 if (pqi_ctrl_offline(ctrl_info))
5418 usleep_range(1000, 2000);
5425 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5430 struct pqi_queue_group *queue_group;
5434 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5435 queue_group = &ctrl_info->queue_groups[i];
5437 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5441 for (path = 0; path < 2; path++) {
5442 iq_pi = queue_group->iq_pi_copy[path];
5445 iq_ci = readl(queue_group->iq_ci[path]);
5448 pqi_check_ctrl_health(ctrl_info);
5449 if (pqi_ctrl_offline(ctrl_info))
5451 usleep_range(1000, 2000);
5459 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5460 struct pqi_scsi_dev *device)
5464 struct pqi_queue_group *queue_group;
5465 unsigned long flags;
5466 struct pqi_io_request *io_request;
5467 struct pqi_io_request *next;
5468 struct scsi_cmnd *scmd;
5469 struct pqi_scsi_dev *scsi_device;
5471 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5472 queue_group = &ctrl_info->queue_groups[i];
5474 for (path = 0; path < 2; path++) {
5476 &queue_group->submit_lock[path], flags);
5478 list_for_each_entry_safe(io_request, next,
5479 &queue_group->request_list[path],
5480 request_list_entry) {
5481 scmd = io_request->scmd;
5485 scsi_device = scmd->device->hostdata;
5486 if (scsi_device != device)
5489 list_del(&io_request->request_list_entry);
5490 set_host_byte(scmd, DID_RESET);
5491 pqi_scsi_done(scmd);
5494 spin_unlock_irqrestore(
5495 &queue_group->submit_lock[path], flags);
5500 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5504 struct pqi_queue_group *queue_group;
5505 unsigned long flags;
5506 struct pqi_io_request *io_request;
5507 struct pqi_io_request *next;
5508 struct scsi_cmnd *scmd;
5510 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5511 queue_group = &ctrl_info->queue_groups[i];
5513 for (path = 0; path < 2; path++) {
5514 spin_lock_irqsave(&queue_group->submit_lock[path],
5517 list_for_each_entry_safe(io_request, next,
5518 &queue_group->request_list[path],
5519 request_list_entry) {
5521 scmd = io_request->scmd;
5525 list_del(&io_request->request_list_entry);
5526 set_host_byte(scmd, DID_RESET);
5527 pqi_scsi_done(scmd);
5530 spin_unlock_irqrestore(
5531 &queue_group->submit_lock[path], flags);
5536 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5537 struct pqi_scsi_dev *device, unsigned long timeout_secs)
5539 unsigned long timeout;
5541 timeout = (timeout_secs * PQI_HZ) + jiffies;
5543 while (atomic_read(&device->scsi_cmds_outstanding)) {
5544 pqi_check_ctrl_health(ctrl_info);
5545 if (pqi_ctrl_offline(ctrl_info))
5547 if (timeout_secs != NO_TIMEOUT) {
5548 if (time_after(jiffies, timeout)) {
5549 dev_err(&ctrl_info->pci_dev->dev,
5550 "timed out waiting for pending IO\n");
5554 usleep_range(1000, 2000);
5560 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5561 unsigned long timeout_secs)
5564 unsigned long flags;
5565 unsigned long timeout;
5566 struct pqi_scsi_dev *device;
5568 timeout = (timeout_secs * PQI_HZ) + jiffies;
5572 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5573 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5574 scsi_device_list_entry) {
5575 if (atomic_read(&device->scsi_cmds_outstanding)) {
5580 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5586 pqi_check_ctrl_health(ctrl_info);
5587 if (pqi_ctrl_offline(ctrl_info))
5590 if (timeout_secs != NO_TIMEOUT) {
5591 if (time_after(jiffies, timeout)) {
5592 dev_err(&ctrl_info->pci_dev->dev,
5593 "timed out waiting for pending IO\n");
5597 usleep_range(1000, 2000);
5603 static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
5605 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
5606 pqi_check_ctrl_health(ctrl_info);
5607 if (pqi_ctrl_offline(ctrl_info))
5609 usleep_range(1000, 2000);
5615 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5618 struct completion *waiting = context;
5623 #define PQI_LUN_RESET_TIMEOUT_SECS 30
5624 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
5626 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5627 struct pqi_scsi_dev *device, struct completion *wait)
5632 if (wait_for_completion_io_timeout(wait,
5633 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
5638 pqi_check_ctrl_health(ctrl_info);
5639 if (pqi_ctrl_offline(ctrl_info)) {
5648 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5649 struct pqi_scsi_dev *device)
5652 struct pqi_io_request *io_request;
5653 DECLARE_COMPLETION_ONSTACK(wait);
5654 struct pqi_task_management_request *request;
5656 io_request = pqi_alloc_io_request(ctrl_info);
5657 io_request->io_complete_callback = pqi_lun_reset_complete;
5658 io_request->context = &wait;
5660 request = io_request->iu;
5661 memset(request, 0, sizeof(*request));
5663 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5664 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5665 &request->header.iu_length);
5666 put_unaligned_le16(io_request->index, &request->request_id);
5667 memcpy(request->lun_number, device->scsi3addr,
5668 sizeof(request->lun_number));
5669 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5670 if (ctrl_info->tmf_iu_timeout_supported)
5671 put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
5674 pqi_start_io(ctrl_info,
5675 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5678 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5680 rc = io_request->status;
5682 pqi_free_io_request(io_request);
5687 /* Performs a reset at the LUN level. */
5689 #define PQI_LUN_RESET_RETRIES 3
5690 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
5691 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
5693 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5694 struct pqi_scsi_dev *device)
5697 unsigned int retries;
5698 unsigned long timeout_secs;
5700 for (retries = 0;;) {
5701 rc = pqi_lun_reset(ctrl_info, device);
5702 if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
5704 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5707 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
5709 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
5711 return rc == 0 ? SUCCESS : FAILED;
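/*
 * Full device reset sequence: block and quiesce new requests, fail any
 * I/O still queued internally for the device, wait for the inbound
 * queues to drain, then issue the LUN reset (with retries) and wait for
 * the device's outstanding commands to complete.
 */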
5714 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5715 struct pqi_scsi_dev *device)
5719 mutex_lock(&ctrl_info->lun_reset_mutex);
5721 pqi_ctrl_block_requests(ctrl_info);
5722 pqi_ctrl_wait_until_quiesced(ctrl_info);
5723 pqi_fail_io_queued_for_device(ctrl_info, device);
5724 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5725 pqi_device_reset_start(device);
5726 pqi_ctrl_unblock_requests(ctrl_info);
5731 rc = _pqi_device_reset(ctrl_info, device);
5733 pqi_device_reset_done(device);
5735 mutex_unlock(&ctrl_info->lun_reset_mutex);
5740 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5743 struct Scsi_Host *shost;
5744 struct pqi_ctrl_info *ctrl_info;
5745 struct pqi_scsi_dev *device;
5747 shost = scmd->device->host;
5748 ctrl_info = shost_to_hba(shost);
5749 device = scmd->device->hostdata;
5751 dev_err(&ctrl_info->pci_dev->dev,
5752 "resetting scsi %d:%d:%d:%d\n",
5753 shost->host_no, device->bus, device->target, device->lun);
5755 pqi_check_ctrl_health(ctrl_info);
5756 if (pqi_ctrl_offline(ctrl_info) ||
5757 pqi_device_reset_blocked(ctrl_info)) {
5762 pqi_wait_until_ofa_finished(ctrl_info);
5764 atomic_inc(&ctrl_info->sync_cmds_outstanding);
5765 rc = pqi_device_reset(ctrl_info, device);
5766 atomic_dec(&ctrl_info->sync_cmds_outstanding);
5769 dev_err(&ctrl_info->pci_dev->dev,
5770 "reset of scsi %d:%d:%d:%d: %s\n",
5771 shost->host_no, device->bus, device->target, device->lun,
5772 rc == SUCCESS ? "SUCCESS" : "FAILED");
5777 static int pqi_slave_alloc(struct scsi_device *sdev)
5779 struct pqi_scsi_dev *device;
5780 unsigned long flags;
5781 struct pqi_ctrl_info *ctrl_info;
5782 struct scsi_target *starget;
5783 struct sas_rphy *rphy;
5785 ctrl_info = shost_to_hba(sdev->host);
5787 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5789 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5790 starget = scsi_target(sdev);
5791 rphy = target_to_rphy(starget);
5792 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5794 device->target = sdev_id(sdev);
5795 device->lun = sdev->lun;
5796 device->target_lun_valid = true;
5799 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5800 sdev_id(sdev), sdev->lun);
5804 sdev->hostdata = device;
5805 device->sdev = sdev;
5806 if (device->queue_depth) {
5807 device->advertised_queue_depth = device->queue_depth;
5808 scsi_change_queue_depth(sdev,
5809 device->advertised_queue_depth);
5811 if (pqi_is_logical_device(device))
5812 pqi_disable_write_same(sdev);
5814 sdev->allow_restart = 1;
5817 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5822 static int pqi_map_queues(struct Scsi_Host *shost)
5824 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5826 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
5827 ctrl_info->pci_dev, 0);
5830 static int pqi_slave_configure(struct scsi_device *sdev)
5832 struct pqi_scsi_dev *device;
5834 device = sdev->hostdata;
5835 device->devtype = sdev->type;
5840 static void pqi_slave_destroy(struct scsi_device *sdev)
5842 unsigned long flags;
5843 struct pqi_scsi_dev *device;
5844 struct pqi_ctrl_info *ctrl_info;
5846 ctrl_info = shost_to_hba(sdev->host);
5848 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5850 device = sdev->hostdata;
5852 sdev->hostdata = NULL;
5853 if (!list_empty(&device->scsi_device_list_entry))
5854 list_del(&device->scsi_device_list_entry);
5857 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5860 pqi_dev_info(ctrl_info, "removed", device);
5861 pqi_free_device(device);
5865 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5867 struct pci_dev *pci_dev;
5868 u32 subsystem_vendor;
5869 u32 subsystem_device;
5870 cciss_pci_info_struct pciinfo;
5875 pci_dev = ctrl_info->pci_dev;
5877 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5878 pciinfo.bus = pci_dev->bus->number;
5879 pciinfo.dev_fn = pci_dev->devfn;
5880 subsystem_vendor = pci_dev->subsystem_vendor;
5881 subsystem_device = pci_dev->subsystem_device;
5882 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
5884 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5890 static int pqi_getdrivver_ioctl(void __user *arg)
5897 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5898 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5900 if (copy_to_user(arg, &version, sizeof(version)))
5906 struct ciss_error_info {
5909 size_t sense_data_length;
5912 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5913 struct ciss_error_info *ciss_error_info)
5915 int ciss_cmd_status;
5916 size_t sense_data_length;
5918 switch (pqi_error_info->data_out_result) {
5919 case PQI_DATA_IN_OUT_GOOD:
5920 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5922 case PQI_DATA_IN_OUT_UNDERFLOW:
5923 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5925 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5926 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5928 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5929 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5930 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5931 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5932 case PQI_DATA_IN_OUT_ERROR:
5933 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5935 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5936 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5937 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5938 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5939 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5940 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5941 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5942 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5943 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5944 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5945 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5947 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5948 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5950 case PQI_DATA_IN_OUT_ABORTED:
5951 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5953 case PQI_DATA_IN_OUT_TIMEOUT:
5954 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5957 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5962 get_unaligned_le16(&pqi_error_info->sense_data_length);
5963 if (sense_data_length == 0)
5965 get_unaligned_le16(&pqi_error_info->response_data_length);
5966 if (sense_data_length)
5967 if (sense_data_length > sizeof(pqi_error_info->data))
5968 sense_data_length = sizeof(pqi_error_info->data);
5970 ciss_error_info->scsi_status = pqi_error_info->status;
5971 ciss_error_info->command_status = ciss_cmd_status;
5972 ciss_error_info->sense_data_length = sense_data_length;
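/*
 * CCISS_PASSTHRU handling: validate the user's request, stage the data
 * in a kernel bounce buffer, issue it as a synchronous RAID path request,
 * and translate any PQI error information back into the CISS format the
 * ioctl interface expects.
 */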
5975 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5978 char *kernel_buffer = NULL;
5980 size_t sense_data_length;
5981 IOCTL_Command_struct iocommand;
5982 struct pqi_raid_path_request request;
5983 struct pqi_raid_error_info pqi_error_info;
5984 struct ciss_error_info ciss_error_info;
5986 if (pqi_ctrl_offline(ctrl_info))
5990 if (!capable(CAP_SYS_RAWIO))
5992 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5994 if (iocommand.buf_size < 1 &&
5995 iocommand.Request.Type.Direction != XFER_NONE)
5997 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5999 if (iocommand.Request.Type.Type != TYPE_CMD)
6002 switch (iocommand.Request.Type.Direction) {
6006 case XFER_READ | XFER_WRITE:
6012 if (iocommand.buf_size > 0) {
6013 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6016 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6017 if (copy_from_user(kernel_buffer, iocommand.buf,
6018 iocommand.buf_size)) {
6023 memset(kernel_buffer, 0, iocommand.buf_size);
6027 memset(&request, 0, sizeof(request));
6029 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6030 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6031 PQI_REQUEST_HEADER_LENGTH;
6032 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6033 sizeof(request.lun_number));
6034 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6035 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6037 switch (iocommand.Request.Type.Direction) {
6039 request.data_direction = SOP_NO_DIRECTION_FLAG;
6042 request.data_direction = SOP_WRITE_FLAG;
6045 request.data_direction = SOP_READ_FLAG;
6047 case XFER_READ | XFER_WRITE:
6048 request.data_direction = SOP_BIDIRECTIONAL;
6052 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6054 if (iocommand.buf_size > 0) {
6055 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6057 rc = pqi_map_single(ctrl_info->pci_dev,
6058 &request.sg_descriptors[0], kernel_buffer,
6059 iocommand.buf_size, DMA_BIDIRECTIONAL);
6063 iu_length += sizeof(request.sg_descriptors[0]);
6066 put_unaligned_le16(iu_length, &request.header.iu_length);
6068 if (ctrl_info->raid_iu_timeout_supported)
6069 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6071 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6072 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
6074 if (iocommand.buf_size > 0)
6075 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6078 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6081 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6082 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6083 iocommand.error_info.CommandStatus =
6084 ciss_error_info.command_status;
6085 sense_data_length = ciss_error_info.sense_data_length;
6086 if (sense_data_length) {
6087 if (sense_data_length >
6088 sizeof(iocommand.error_info.SenseInfo))
6090 sizeof(iocommand.error_info.SenseInfo);
6091 memcpy(iocommand.error_info.SenseInfo,
6092 pqi_error_info.data, sense_data_length);
6093 iocommand.error_info.SenseLen = sense_data_length;
6097 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6102 if (rc == 0 && iocommand.buf_size > 0 &&
6103 (iocommand.Request.Type.Direction & XFER_READ)) {
6104 if (copy_to_user(iocommand.buf, kernel_buffer,
6105 iocommand.buf_size)) {
6111 kfree(kernel_buffer);
6116 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6120 struct pqi_ctrl_info *ctrl_info;
6122 ctrl_info = shost_to_hba(sdev->host);
6124 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
6128 case CCISS_DEREGDISK:
6129 case CCISS_REGNEWDISK:
6131 rc = pqi_scan_scsi_devices(ctrl_info);
6133 case CCISS_GETPCIINFO:
6134 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6136 case CCISS_GETDRIVVER:
6137 rc = pqi_getdrivver_ioctl(arg);
6139 case CCISS_PASSTHRU:
6140 rc = pqi_passthru_ioctl(ctrl_info, arg);
6150 static ssize_t pqi_firmware_version_show(struct device *dev,
6151 struct device_attribute *attr, char *buffer)
6153 struct Scsi_Host *shost;
6154 struct pqi_ctrl_info *ctrl_info;
6156 shost = class_to_shost(dev);
6157 ctrl_info = shost_to_hba(shost);
6159 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6162 static ssize_t pqi_driver_version_show(struct device *dev,
6163 struct device_attribute *attr, char *buffer)
6165 return snprintf(buffer, PAGE_SIZE, "%s\n",
6166 DRIVER_VERSION BUILD_TIMESTAMP);
6169 static ssize_t pqi_serial_number_show(struct device *dev,
6170 struct device_attribute *attr, char *buffer)
6172 struct Scsi_Host *shost;
6173 struct pqi_ctrl_info *ctrl_info;
6175 shost = class_to_shost(dev);
6176 ctrl_info = shost_to_hba(shost);
6178 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6181 static ssize_t pqi_model_show(struct device *dev,
6182 struct device_attribute *attr, char *buffer)
6184 struct Scsi_Host *shost;
6185 struct pqi_ctrl_info *ctrl_info;
6187 shost = class_to_shost(dev);
6188 ctrl_info = shost_to_hba(shost);
6190 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6193 static ssize_t pqi_vendor_show(struct device *dev,
6194 struct device_attribute *attr, char *buffer)
6196 struct Scsi_Host *shost;
6197 struct pqi_ctrl_info *ctrl_info;
6199 shost = class_to_shost(dev);
6200 ctrl_info = shost_to_hba(shost);
6202 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6205 static ssize_t pqi_host_rescan_store(struct device *dev,
6206 struct device_attribute *attr, const char *buffer, size_t count)
6208 struct Scsi_Host *shost = class_to_shost(dev);
6210 pqi_scan_start(shost);
6215 static ssize_t pqi_lockup_action_show(struct device *dev,
6216 struct device_attribute *attr, char *buffer)
6221 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6222 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6223 count += scnprintf(buffer + count, PAGE_SIZE - count,
6224 "[%s] ", pqi_lockup_actions[i].name);
6226 count += scnprintf(buffer + count, PAGE_SIZE - count,
6227 "%s ", pqi_lockup_actions[i].name);
6230 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6235 static ssize_t pqi_lockup_action_store(struct device *dev,
6236 struct device_attribute *attr, const char *buffer, size_t count)
6240 char action_name_buffer[32];
6242 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6243 action_name = strstrip(action_name_buffer);
6245 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6246 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6247 pqi_lockup_action = pqi_lockup_actions[i].action;
6255 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6256 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6257 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6258 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6259 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6260 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6261 static DEVICE_ATTR(lockup_action, 0644,
6262 pqi_lockup_action_show, pqi_lockup_action_store);
6264 static struct device_attribute *pqi_shost_attrs[] = {
6265 &dev_attr_driver_version,
6266 &dev_attr_firmware_version,
6268 &dev_attr_serial_number,
6271 &dev_attr_lockup_action,
6275 static ssize_t pqi_unique_id_show(struct device *dev,
6276 struct device_attribute *attr, char *buffer)
6278 struct pqi_ctrl_info *ctrl_info;
6279 struct scsi_device *sdev;
6280 struct pqi_scsi_dev *device;
6281 unsigned long flags;
6284 sdev = to_scsi_device(dev);
6285 ctrl_info = shost_to_hba(sdev->host);
6287 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6289 device = sdev->hostdata;
6291 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6295 if (device->is_physical_device) {
6296 memset(unique_id, 0, 8);
6297 memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
6299 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6302 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6304 return snprintf(buffer, PAGE_SIZE,
6305 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
6306 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6307 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6308 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6309 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6312 static ssize_t pqi_lunid_show(struct device *dev,
6313 struct device_attribute *attr, char *buffer)
6315 struct pqi_ctrl_info *ctrl_info;
6316 struct scsi_device *sdev;
6317 struct pqi_scsi_dev *device;
6318 unsigned long flags;
6321 sdev = to_scsi_device(dev);
6322 ctrl_info = shost_to_hba(sdev->host);
6324 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6326 device = sdev->hostdata;
6328 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6332 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6334 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6336 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6341 static ssize_t pqi_path_info_show(struct device *dev,
6342 struct device_attribute *attr, char *buf)
6344 struct pqi_ctrl_info *ctrl_info;
6345 struct scsi_device *sdev;
6346 struct pqi_scsi_dev *device;
6347 unsigned long flags;
6354 u8 phys_connector[2];
6356 sdev = to_scsi_device(dev);
6357 ctrl_info = shost_to_hba(sdev->host);
6359 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6361 device = sdev->hostdata;
6363 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6368 for (i = 0; i < MAX_PATHS; i++) {
6369 path_map_index = 1 << i;
6370 if (i == device->active_path_index)
6372 else if (device->path_map & path_map_index)
6373 active = "Inactive";
6377 output_len += scnprintf(buf + output_len,
6378 PAGE_SIZE - output_len,
6379 "[%d:%d:%d:%d] %20.20s ",
6380 ctrl_info->scsi_host->host_no,
6381 device->bus, device->target,
6383 scsi_device_type(device->devtype));
6385 if (device->devtype == TYPE_RAID ||
6386 pqi_is_logical_device(device))
6389 memcpy(&phys_connector, &device->phys_connector[i],
6390 sizeof(phys_connector));
6391 if (phys_connector[0] < '0')
6392 phys_connector[0] = '0';
6393 if (phys_connector[1] < '0')
6394 phys_connector[1] = '0';
6396 output_len += scnprintf(buf + output_len,
6397 PAGE_SIZE - output_len,
6398 "PORT: %.2s ", phys_connector);
6400 box = device->box[i];
6401 if (box != 0 && box != 0xFF)
6402 output_len += scnprintf(buf + output_len,
6403 PAGE_SIZE - output_len,
6406 if ((device->devtype == TYPE_DISK ||
6407 device->devtype == TYPE_ZBC) &&
6408 pqi_expose_device(device))
6409 output_len += scnprintf(buf + output_len,
6410 PAGE_SIZE - output_len,
6414 output_len += scnprintf(buf + output_len,
6415 PAGE_SIZE - output_len,
6419 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6424 static ssize_t pqi_sas_address_show(struct device *dev,
6425 struct device_attribute *attr, char *buffer)
6427 struct pqi_ctrl_info *ctrl_info;
6428 struct scsi_device *sdev;
6429 struct pqi_scsi_dev *device;
6430 unsigned long flags;
6433 sdev = to_scsi_device(dev);
6434 ctrl_info = shost_to_hba(sdev->host);
6436 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6438 device = sdev->hostdata;
6439 if (!device || !pqi_is_device_with_sas_address(device)) {
6440 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6444 sas_address = device->sas_address;
6446 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6448 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6451 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6452 struct device_attribute *attr, char *buffer)
6454 struct pqi_ctrl_info *ctrl_info;
6455 struct scsi_device *sdev;
6456 struct pqi_scsi_dev *device;
6457 unsigned long flags;
6459 sdev = to_scsi_device(dev);
6460 ctrl_info = shost_to_hba(sdev->host);
6462 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6464 device = sdev->hostdata;
6466 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6470 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6474 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6479 static ssize_t pqi_raid_level_show(struct device *dev,
6480 struct device_attribute *attr, char *buffer)
6482 struct pqi_ctrl_info *ctrl_info;
6483 struct scsi_device *sdev;
6484 struct pqi_scsi_dev *device;
6485 unsigned long flags;
6488 sdev = to_scsi_device(dev);
6489 ctrl_info = shost_to_hba(sdev->host);
6491 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6493 device = sdev->hostdata;
6495 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6499 if (pqi_is_logical_device(device))
6500 raid_level = pqi_raid_level_to_string(device->raid_level);
6504 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6506 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6509 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
6510 struct device_attribute *attr, char *buffer)
6512 struct pqi_ctrl_info *ctrl_info;
6513 struct scsi_device *sdev;
6514 struct pqi_scsi_dev *device;
6515 unsigned long flags;
6516 int raid_bypass_cnt;
6518 sdev = to_scsi_device(dev);
6519 ctrl_info = shost_to_hba(sdev->host);
6521 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6523 device = sdev->hostdata;
6525 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6529 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
6531 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6533 return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
6536 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6537 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6538 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
6539 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6540 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
6541 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6542 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
6544 static struct device_attribute *pqi_sdev_attrs[] = {
6546 &dev_attr_unique_id,
6547 &dev_attr_path_info,
6548 &dev_attr_sas_address,
6549 &dev_attr_ssd_smart_path_enabled,
6550 &dev_attr_raid_level,
6551 &dev_attr_raid_bypass_cnt,
6555 static struct scsi_host_template pqi_driver_template = {
6556 .module = THIS_MODULE,
6557 .name = DRIVER_NAME_SHORT,
6558 .proc_name = DRIVER_NAME_SHORT,
6559 .queuecommand = pqi_scsi_queue_command,
6560 .scan_start = pqi_scan_start,
6561 .scan_finished = pqi_scan_finished,
6563 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6565 .slave_alloc = pqi_slave_alloc,
6566 .slave_configure = pqi_slave_configure,
6567 .slave_destroy = pqi_slave_destroy,
6568 .map_queues = pqi_map_queues,
6569 .sdev_attrs = pqi_sdev_attrs,
6570 .shost_attrs = pqi_shost_attrs,
6573 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6576 struct Scsi_Host *shost;
6578 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6580 dev_err(&ctrl_info->pci_dev->dev,
6581 "scsi_host_alloc failed for controller %u\n",
6582 ctrl_info->ctrl_id);
6587 shost->n_io_port = 0;
6588 shost->this_id = -1;
6589 shost->max_channel = PQI_MAX_BUS;
6590 shost->max_cmd_len = MAX_COMMAND_SIZE;
6591 shost->max_lun = ~0;
6593 shost->max_sectors = ctrl_info->max_sectors;
6594 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6595 shost->cmd_per_lun = shost->can_queue;
6596 shost->sg_tablesize = ctrl_info->sg_tablesize;
6597 shost->transportt = pqi_sas_transport_template;
6598 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6599 shost->unique_id = shost->irq;
6600 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6601 shost->hostdata[0] = (unsigned long)ctrl_info;
6603 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6605 dev_err(&ctrl_info->pci_dev->dev,
6606 "scsi_add_host failed for controller %u\n",
6607 ctrl_info->ctrl_id);
6611 rc = pqi_add_sas_host(shost, ctrl_info);
6613 dev_err(&ctrl_info->pci_dev->dev,
6614 "add SAS host failed for controller %u\n",
6615 ctrl_info->ctrl_id);
6619 ctrl_info->scsi_host = shost;
6624 scsi_remove_host(shost);
6626 scsi_host_put(shost);
6631 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6633 struct Scsi_Host *shost;
6635 pqi_delete_sas_host(ctrl_info);
6637 shost = ctrl_info->scsi_host;
6641 scsi_remove_host(shost);
6642 scsi_host_put(shost);
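/*
 * Poll the PQI device-reset register until the reset action reports
 * completion.  The timeout comes from the controller's max_reset_timeout
 * register, which is scaled by 100 here (i.e. it appears to be expressed
 * in 100-millisecond units).
 */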
6645 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6648 struct pqi_device_registers __iomem *pqi_registers;
6649 unsigned long timeout;
6650 unsigned int timeout_msecs;
6651 union pqi_reset_register reset_reg;
6653 pqi_registers = ctrl_info->pqi_registers;
6654 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
6655 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
6658 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
6659 reset_reg.all_bits = readl(&pqi_registers->device_reset);
6660 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
6662 pqi_check_ctrl_health(ctrl_info);
6663 if (pqi_ctrl_offline(ctrl_info)) {
6667 if (time_after(jiffies, timeout)) {
6676 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6679 union pqi_reset_register reset_reg;
6681 if (ctrl_info->pqi_reset_quiesce_supported) {
6682 rc = sis_pqi_reset_quiesce(ctrl_info);
6684 dev_err(&ctrl_info->pci_dev->dev,
6685 "PQI reset failed during quiesce with error %d\n",
6691 reset_reg.all_bits = 0;
6692 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6693 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6695 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6697 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6699 dev_err(&ctrl_info->pci_dev->dev,
6700 "PQI reset failed with error %d\n", rc);
6705 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6708 struct bmic_sense_subsystem_info *sense_info;
6710 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
6714 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6718 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6719 sizeof(sense_info->ctrl_serial_number));
6720 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6728 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6731 struct bmic_identify_controller *identify;
6733 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6737 rc = pqi_identify_controller(ctrl_info, identify);
6741 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6742 sizeof(identify->firmware_version));
6743 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6744 snprintf(ctrl_info->firmware_version +
6745 strlen(ctrl_info->firmware_version),
6746 sizeof(ctrl_info->firmware_version),
6747 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6749 memcpy(ctrl_info->model, identify->product_id,
6750 sizeof(identify->product_id));
6751 ctrl_info->model[sizeof(identify->product_id)] = '\0';
6753 memcpy(ctrl_info->vendor, identify->vendor_id,
6754 sizeof(identify->vendor_id));
6755 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6763 struct pqi_config_table_section_info {
6764 struct pqi_ctrl_info *ctrl_info;
6767 void __iomem *section_iomem_addr;
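/*
 * The firmware features section of the PQI configuration table holds three
 * consecutive byte arrays, each num_elements bytes long: the features the
 * firmware supports, the features the host has requested, and the features
 * currently enabled.  The helpers below index into those three arrays.
 */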
6770 static inline bool pqi_is_firmware_feature_supported(
6771 struct pqi_config_table_firmware_features *firmware_features,
6772 unsigned int bit_position)
6774 unsigned int byte_index;
6776 byte_index = bit_position / BITS_PER_BYTE;
6778 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6781 return firmware_features->features_supported[byte_index] &
6782 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6785 static inline bool pqi_is_firmware_feature_enabled(
6786 struct pqi_config_table_firmware_features *firmware_features,
6787 void __iomem *firmware_features_iomem_addr,
6788 unsigned int bit_position)
6790 unsigned int byte_index;
6791 u8 __iomem *features_enabled_iomem_addr;
6793 byte_index = (bit_position / BITS_PER_BYTE) +
6794 (le16_to_cpu(firmware_features->num_elements) * 2);
6796 features_enabled_iomem_addr = firmware_features_iomem_addr +
6797 offsetof(struct pqi_config_table_firmware_features,
6798 features_supported) + byte_index;
6800 return *((__force u8 *)features_enabled_iomem_addr) &
6801 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6804 static inline void pqi_request_firmware_feature(
6805 struct pqi_config_table_firmware_features *firmware_features,
6806 unsigned int bit_position)
6808 unsigned int byte_index;
6810 byte_index = (bit_position / BITS_PER_BYTE) +
6811 le16_to_cpu(firmware_features->num_elements);
6813 firmware_features->features_supported[byte_index] |=
6814 (1 << (bit_position % BITS_PER_BYTE));
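/*
 * Notify the firmware via a vendor general request that the host has updated
 * the given range of configuration table sections.
 */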
6817 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6818 u16 first_section, u16 last_section)
6820 struct pqi_vendor_general_request request;
6822 memset(&request, 0, sizeof(request));
6824 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6825 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6826 &request.header.iu_length);
6827 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6828 &request.function_code);
6829 put_unaligned_le16(first_section,
6830 &request.data.config_table_update.first_section);
6831 put_unaligned_le16(last_section,
6832 &request.data.config_table_update.last_section);
6834 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6835 0, NULL, NO_TIMEOUT);
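/*
 * Copy the host-requested feature bits into the controller's configuration
 * table via MMIO, then ask the firmware to process the updated firmware
 * features section.
 */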
6838 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6839 struct pqi_config_table_firmware_features *firmware_features,
6840 void __iomem *firmware_features_iomem_addr)
6842 void *features_requested;
6843 void __iomem *features_requested_iomem_addr;
6845 features_requested = firmware_features->features_supported +
6846 le16_to_cpu(firmware_features->num_elements);
6848 features_requested_iomem_addr = firmware_features_iomem_addr +
6849 (features_requested - (void *)firmware_features);
6851 memcpy_toio(features_requested_iomem_addr, features_requested,
6852 le16_to_cpu(firmware_features->num_elements));
6854 return pqi_config_table_update(ctrl_info,
6855 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6856 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6859 struct pqi_firmware_feature {
6861 unsigned int feature_bit;
6864 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6865 struct pqi_firmware_feature *firmware_feature);
6868 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6869 struct pqi_firmware_feature *firmware_feature)
6871 if (!firmware_feature->supported) {
6872 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6873 firmware_feature->feature_name);
6877 if (firmware_feature->enabled) {
6878 dev_info(&ctrl_info->pci_dev->dev,
6879 "%s enabled\n", firmware_feature->feature_name);
6883 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6884 firmware_feature->feature_name);
6887 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
6888 struct pqi_firmware_feature *firmware_feature)
6890 switch (firmware_feature->feature_bit) {
6891 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
6892 ctrl_info->soft_reset_handshake_supported =
6893 firmware_feature->enabled;
6895 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
6896 ctrl_info->raid_iu_timeout_supported =
6897 firmware_feature->enabled;
6899 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
6900 ctrl_info->tmf_iu_timeout_supported =
6901 firmware_feature->enabled;
6905 pqi_firmware_feature_status(ctrl_info, firmware_feature);
6908 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6909 struct pqi_firmware_feature *firmware_feature)
6911 if (firmware_feature->feature_status)
6912 firmware_feature->feature_status(ctrl_info, firmware_feature);
6915 static DEFINE_MUTEX(pqi_firmware_features_mutex);
6917 static struct pqi_firmware_feature pqi_firmware_features[] = {
6919 .feature_name = "Online Firmware Activation",
6920 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6921 .feature_status = pqi_firmware_feature_status,
6924 .feature_name = "Serial Management Protocol",
6925 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6926 .feature_status = pqi_firmware_feature_status,
6929 .feature_name = "New Soft Reset Handshake",
6930 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6931 .feature_status = pqi_ctrl_update_feature_flags,
6934 .feature_name = "RAID IU Timeout",
6935 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
6936 .feature_status = pqi_ctrl_update_feature_flags,
6939 .feature_name = "TMF IU Timeout",
6940 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
6941 .feature_status = pqi_ctrl_update_feature_flags,
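/*
 * Walk the driver's feature table: mark the features the firmware supports,
 * request all of them in a single configuration table update, then record and
 * report which features the firmware actually enabled.
 */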
6945 static void pqi_process_firmware_features(
6946 struct pqi_config_table_section_info *section_info)
6949 struct pqi_ctrl_info *ctrl_info;
6950 struct pqi_config_table_firmware_features *firmware_features;
6951 void __iomem *firmware_features_iomem_addr;
6953 unsigned int num_features_supported;
6955 ctrl_info = section_info->ctrl_info;
6956 firmware_features = section_info->section;
6957 firmware_features_iomem_addr = section_info->section_iomem_addr;
6959 for (i = 0, num_features_supported = 0;
6960 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6961 if (pqi_is_firmware_feature_supported(firmware_features,
6962 pqi_firmware_features[i].feature_bit)) {
6963 pqi_firmware_features[i].supported = true;
6964 num_features_supported++;
6966 pqi_firmware_feature_update(ctrl_info,
6967 &pqi_firmware_features[i]);
6971 if (num_features_supported == 0)
6974 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6975 if (!pqi_firmware_features[i].supported)
6977 pqi_request_firmware_feature(firmware_features,
6978 pqi_firmware_features[i].feature_bit);
6981 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6982 firmware_features_iomem_addr);
6984 dev_err(&ctrl_info->pci_dev->dev,
6985 "failed to enable firmware features in PQI configuration table\n");
6986 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6987 if (!pqi_firmware_features[i].supported)
6989 pqi_firmware_feature_update(ctrl_info,
6990 &pqi_firmware_features[i]);
6995 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6996 if (!pqi_firmware_features[i].supported)
6998 if (pqi_is_firmware_feature_enabled(firmware_features,
6999 firmware_features_iomem_addr,
7000 pqi_firmware_features[i].feature_bit)) {
7001 pqi_firmware_features[i].enabled = true;
7003 pqi_firmware_feature_update(ctrl_info,
7004 &pqi_firmware_features[i]);
7008 static void pqi_init_firmware_features(void)
7012 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7013 pqi_firmware_features[i].supported = false;
7014 pqi_firmware_features[i].enabled = false;
7018 static void pqi_process_firmware_features_section(
7019 struct pqi_config_table_section_info *section_info)
7021 mutex_lock(&pqi_firmware_features_mutex);
7022 pqi_init_firmware_features();
7023 pqi_process_firmware_features(section_info);
7024 mutex_unlock(&pqi_firmware_features_mutex);
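/*
 * Copy the PQI configuration table out of BAR 0 into a kernel buffer and walk
 * its section headers, handling the firmware features, heartbeat, and soft
 * reset sections.
 */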
7027 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7031 void __iomem *table_iomem_addr;
7032 struct pqi_config_table *config_table;
7033 struct pqi_config_table_section_header *section;
7034 struct pqi_config_table_section_info section_info;
7036 table_length = ctrl_info->config_table_length;
7037 if (table_length == 0)
7040 config_table = kmalloc(table_length, GFP_KERNEL);
7041 if (!config_table) {
7042 dev_err(&ctrl_info->pci_dev->dev,
7043 "failed to allocate memory for PQI configuration table\n");
7048 * Copy the config table contents from I/O memory space into the driver's cache to simplify access.
7051 table_iomem_addr = ctrl_info->iomem_base +
7052 ctrl_info->config_table_offset;
7053 memcpy_fromio(config_table, table_iomem_addr, table_length);
7055 section_info.ctrl_info = ctrl_info;
7057 get_unaligned_le32(&config_table->first_section_offset);
7059 while (section_offset) {
7060 section = (void *)config_table + section_offset;
7062 section_info.section = section;
7063 section_info.section_offset = section_offset;
7064 section_info.section_iomem_addr =
7065 table_iomem_addr + section_offset;
7067 switch (get_unaligned_le16(&section->section_id)) {
7068 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7069 pqi_process_firmware_features_section(&section_info);
7071 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7072 if (pqi_disable_heartbeat)
7073 dev_warn(&ctrl_info->pci_dev->dev,
7074 "heartbeat disabled by module parameter\n");
7076 ctrl_info->heartbeat_counter =
7080 struct pqi_config_table_heartbeat,
7083 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7084 ctrl_info->soft_reset_status =
7087 offsetof(struct pqi_config_table_soft_reset,
7093 get_unaligned_le16(&section->next_section_offset);
7096 kfree(config_table);
7101 /* Switches the controller from PQI mode back into SIS mode. */
7103 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7107 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7108 rc = pqi_reset(ctrl_info);
7111 rc = sis_reenable_sis_mode(ctrl_info);
7113 dev_err(&ctrl_info->pci_dev->dev,
7114 "re-enabling SIS mode failed with error %d\n", rc);
7117 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7123 * If the controller isn't already in SIS mode, this function forces it into SIS mode.
7127 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7129 if (!sis_is_firmware_running(ctrl_info))
7132 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7135 if (sis_is_kernel_up(ctrl_info)) {
7136 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7140 return pqi_revert_to_sis_mode(ctrl_info);
7143 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
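/*
 * Bring the controller from SIS mode into full PQI operation: create the
 * admin and operational queues, enable MSI-X interrupts and events, register
 * with the SCSI midlayer, and kick off the initial device scan.
 */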
7145 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7149 if (reset_devices) {
7150 sis_soft_reset(ctrl_info);
7151 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7153 rc = pqi_force_sis_mode(ctrl_info);
7159 * Wait until the controller is ready to start accepting SIS commands.
7162 rc = sis_wait_for_ctrl_ready(ctrl_info);
7167 * Get the controller properties. This allows us to determine
7168 * whether or not it supports PQI mode.
7170 rc = sis_get_ctrl_properties(ctrl_info);
7172 dev_err(&ctrl_info->pci_dev->dev,
7173 "error obtaining controller properties\n");
7177 rc = sis_get_pqi_capabilities(ctrl_info);
7179 dev_err(&ctrl_info->pci_dev->dev,
7180 "error obtaining controller capabilities\n");
7184 if (reset_devices) {
7185 if (ctrl_info->max_outstanding_requests >
7186 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
7187 ctrl_info->max_outstanding_requests =
7188 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
7190 if (ctrl_info->max_outstanding_requests >
7191 PQI_MAX_OUTSTANDING_REQUESTS)
7192 ctrl_info->max_outstanding_requests =
7193 PQI_MAX_OUTSTANDING_REQUESTS;
7196 pqi_calculate_io_resources(ctrl_info);
7198 rc = pqi_alloc_error_buffer(ctrl_info);
7200 dev_err(&ctrl_info->pci_dev->dev,
7201 "failed to allocate PQI error buffer\n");
7206 * If the function we are about to call succeeds, the
7207 * controller will transition from legacy SIS mode into PQI mode.
7210 rc = sis_init_base_struct_addr(ctrl_info);
7212 dev_err(&ctrl_info->pci_dev->dev,
7213 "error initializing PQI mode\n");
7217 /* Wait for the controller to complete the SIS -> PQI transition. */
7218 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7220 dev_err(&ctrl_info->pci_dev->dev,
7221 "transition to PQI mode failed\n");
7225 /* From here on, we are running in PQI mode. */
7226 ctrl_info->pqi_mode_enabled = true;
7227 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7229 rc = pqi_alloc_admin_queues(ctrl_info);
7231 dev_err(&ctrl_info->pci_dev->dev,
7232 "failed to allocate admin queues\n");
7236 rc = pqi_create_admin_queues(ctrl_info);
7238 dev_err(&ctrl_info->pci_dev->dev,
7239 "error creating admin queues\n");
7243 rc = pqi_report_device_capability(ctrl_info);
7245 dev_err(&ctrl_info->pci_dev->dev,
7246 "obtaining device capability failed\n");
7250 rc = pqi_validate_device_capability(ctrl_info);
7254 pqi_calculate_queue_resources(ctrl_info);
7256 rc = pqi_enable_msix_interrupts(ctrl_info);
7260 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7261 ctrl_info->max_msix_vectors =
7262 ctrl_info->num_msix_vectors_enabled;
7263 pqi_calculate_queue_resources(ctrl_info);
7266 rc = pqi_alloc_io_resources(ctrl_info);
7270 rc = pqi_alloc_operational_queues(ctrl_info);
7272 dev_err(&ctrl_info->pci_dev->dev,
7273 "failed to allocate operational queues\n");
7277 pqi_init_operational_queues(ctrl_info);
7279 rc = pqi_request_irqs(ctrl_info);
7283 rc = pqi_create_queues(ctrl_info);
7287 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7289 ctrl_info->controller_online = true;
7291 rc = pqi_process_config_table(ctrl_info);
7295 pqi_start_heartbeat_timer(ctrl_info);
7297 rc = pqi_enable_events(ctrl_info);
7299 dev_err(&ctrl_info->pci_dev->dev,
7300 "error enabling events\n");
7304 /* Register with the SCSI subsystem. */
7305 rc = pqi_register_scsi(ctrl_info);
7309 rc = pqi_get_ctrl_product_details(ctrl_info);
7311 dev_err(&ctrl_info->pci_dev->dev,
7312 "error obtaining product details\n");
7316 rc = pqi_get_ctrl_serial_number(ctrl_info);
7318 dev_err(&ctrl_info->pci_dev->dev,
7319 "error obtaining ctrl serial number\n");
7323 rc = pqi_set_diag_rescan(ctrl_info);
7325 dev_err(&ctrl_info->pci_dev->dev,
7326 "error enabling multi-lun rescan\n");
7330 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7332 dev_err(&ctrl_info->pci_dev->dev,
7333 "error updating host wellness\n");
7337 pqi_schedule_update_time_worker(ctrl_info);
7339 pqi_scan_scsi_devices(ctrl_info);
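/*
 * Reset the cached producer/consumer indices of the admin, operational, and
 * event queues so the queues can be re-created after a controller restart.
 */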
7344 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7347 struct pqi_admin_queues *admin_queues;
7348 struct pqi_event_queue *event_queue;
7350 admin_queues = &ctrl_info->admin_queues;
7351 admin_queues->iq_pi_copy = 0;
7352 admin_queues->oq_ci_copy = 0;
7353 writel(0, admin_queues->oq_pi);
7355 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7356 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7357 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7358 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7360 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7361 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7362 writel(0, ctrl_info->queue_groups[i].oq_pi);
7365 event_queue = &ctrl_info->event_queue;
7366 writel(0, event_queue->oq_pi);
7367 event_queue->oq_ci_copy = 0;
7370 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7374 rc = pqi_force_sis_mode(ctrl_info);
7379 * Wait until the controller is ready to start accepting SIS commands.
7382 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7387 * Get the controller properties. This allows us to determine
7388 * whether or not it supports PQI mode.
7390 rc = sis_get_ctrl_properties(ctrl_info);
7392 dev_err(&ctrl_info->pci_dev->dev,
7393 "error obtaining controller properties\n");
7397 rc = sis_get_pqi_capabilities(ctrl_info);
7399 dev_err(&ctrl_info->pci_dev->dev,
7400 "error obtaining controller capabilities\n");
7405 * If the function we are about to call succeeds, the
7406 * controller will transition from legacy SIS mode into PQI mode.
7409 rc = sis_init_base_struct_addr(ctrl_info);
7411 dev_err(&ctrl_info->pci_dev->dev,
7412 "error initializing PQI mode\n");
7416 /* Wait for the controller to complete the SIS -> PQI transition. */
7417 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7419 dev_err(&ctrl_info->pci_dev->dev,
7420 "transition to PQI mode failed\n");
7424 /* From here on, we are running in PQI mode. */
7425 ctrl_info->pqi_mode_enabled = true;
7426 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7428 pqi_reinit_queues(ctrl_info);
7430 rc = pqi_create_admin_queues(ctrl_info);
7432 dev_err(&ctrl_info->pci_dev->dev,
7433 "error creating admin queues\n");
7437 rc = pqi_create_queues(ctrl_info);
7441 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7443 ctrl_info->controller_online = true;
7444 pqi_ctrl_unblock_requests(ctrl_info);
7446 rc = pqi_process_config_table(ctrl_info);
7450 pqi_start_heartbeat_timer(ctrl_info);
7452 rc = pqi_enable_events(ctrl_info);
7454 dev_err(&ctrl_info->pci_dev->dev,
7455 "error enabling events\n");
7459 rc = pqi_get_ctrl_product_details(ctrl_info);
7461 dev_err(&ctrl_info->pci_dev->dev,
7462 "error obtaining product details\n");
7466 rc = pqi_set_diag_rescan(ctrl_info);
7468 dev_err(&ctrl_info->pci_dev->dev,
7469 "error enabling multi-lun rescan\n");
7473 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7475 dev_err(&ctrl_info->pci_dev->dev,
7476 "error updating host wellness\n");
7480 pqi_schedule_update_time_worker(ctrl_info);
7482 pqi_scan_scsi_devices(ctrl_info);
7487 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
7492 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7493 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
7495 return pcibios_err_to_errno(rc);
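/*
 * Enable the PCI device, set the DMA mask, map BAR 0 for the controller
 * registers, and raise the PCIe completion timeout before enabling bus
 * mastering.
 */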
7498 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7503 rc = pci_enable_device(ctrl_info->pci_dev);
7505 dev_err(&ctrl_info->pci_dev->dev,
7506 "failed to enable PCI device\n");
7510 if (sizeof(dma_addr_t) > 4)
7511 mask = DMA_BIT_MASK(64);
7513 mask = DMA_BIT_MASK(32);
7515 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
7517 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7518 goto disable_device;
7521 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
7523 dev_err(&ctrl_info->pci_dev->dev,
7524 "failed to obtain PCI resources\n");
7525 goto disable_device;
7528 ctrl_info->iomem_base = ioremap(pci_resource_start(
7529 ctrl_info->pci_dev, 0),
7530 sizeof(struct pqi_ctrl_registers));
7531 if (!ctrl_info->iomem_base) {
7532 dev_err(&ctrl_info->pci_dev->dev,
7533 "failed to map memory for controller registers\n");
7535 goto release_regions;
7538 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
7540 /* Increase the PCIe completion timeout. */
7541 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7542 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
7544 dev_err(&ctrl_info->pci_dev->dev,
7545 "failed to set PCIe completion timeout\n");
7546 goto release_regions;
7549 /* Enable bus mastering. */
7550 pci_set_master(ctrl_info->pci_dev);
7552 ctrl_info->registers = ctrl_info->iomem_base;
7553 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7555 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7560 pci_release_regions(ctrl_info->pci_dev);
7562 pci_disable_device(ctrl_info->pci_dev);
7567 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7569 iounmap(ctrl_info->iomem_base);
7570 pci_release_regions(ctrl_info->pci_dev);
7571 if (pci_is_enabled(ctrl_info->pci_dev))
7572 pci_disable_device(ctrl_info->pci_dev);
7573 pci_set_drvdata(ctrl_info->pci_dev, NULL);
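/*
 * Allocate and initialize the per-controller context on the controller's NUMA
 * node: mutexes, spinlocks, work items, timers, and the synchronous request
 * semaphore.
 */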
7576 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7578 struct pqi_ctrl_info *ctrl_info;
7580 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7581 GFP_KERNEL, numa_node);
7585 mutex_init(&ctrl_info->scan_mutex);
7586 mutex_init(&ctrl_info->lun_reset_mutex);
7587 mutex_init(&ctrl_info->ofa_mutex);
7589 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7590 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7592 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7593 atomic_set(&ctrl_info->num_interrupts, 0);
7594 atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
7596 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7597 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7599 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
7600 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
7602 sema_init(&ctrl_info->sync_request_sem,
7603 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7604 init_waitqueue_head(&ctrl_info->block_requests_wait);
7606 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7607 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7608 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7609 pqi_raid_bypass_retry_worker);
7611 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
7612 ctrl_info->irq_mode = IRQ_MODE_NONE;
7613 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7618 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7623 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7625 pqi_free_irqs(ctrl_info);
7626 pqi_disable_msix_interrupts(ctrl_info);
7629 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7631 pqi_stop_heartbeat_timer(ctrl_info);
7632 pqi_free_interrupts(ctrl_info);
7633 if (ctrl_info->queue_memory_base)
7634 dma_free_coherent(&ctrl_info->pci_dev->dev,
7635 ctrl_info->queue_memory_length,
7636 ctrl_info->queue_memory_base,
7637 ctrl_info->queue_memory_base_dma_handle);
7638 if (ctrl_info->admin_queue_memory_base)
7639 dma_free_coherent(&ctrl_info->pci_dev->dev,
7640 ctrl_info->admin_queue_memory_length,
7641 ctrl_info->admin_queue_memory_base,
7642 ctrl_info->admin_queue_memory_base_dma_handle);
7643 pqi_free_all_io_requests(ctrl_info);
7644 if (ctrl_info->error_buffer)
7645 dma_free_coherent(&ctrl_info->pci_dev->dev,
7646 ctrl_info->error_buffer_length,
7647 ctrl_info->error_buffer,
7648 ctrl_info->error_buffer_dma_handle);
7649 if (ctrl_info->iomem_base)
7650 pqi_cleanup_pci_init(ctrl_info);
7651 pqi_free_ctrl_info(ctrl_info);
7654 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7656 pqi_cancel_rescan_worker(ctrl_info);
7657 pqi_cancel_update_time_worker(ctrl_info);
7658 pqi_unregister_scsi(ctrl_info);
7659 if (ctrl_info->pqi_mode_enabled)
7660 pqi_revert_to_sis_mode(ctrl_info);
7661 pqi_free_ctrl_resources(ctrl_info);
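/*
 * Quiesce the controller ahead of an Online Firmware Activation: stop
 * background work, drain and fail pending I/O, stop the heartbeat timer, and
 * note that the controller will come back up in SIS mode.
 */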
7664 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7666 pqi_cancel_update_time_worker(ctrl_info);
7667 pqi_cancel_rescan_worker(ctrl_info);
7668 pqi_wait_until_lun_reset_finished(ctrl_info);
7669 pqi_wait_until_scan_finished(ctrl_info);
7670 pqi_ctrl_ofa_start(ctrl_info);
7671 pqi_ctrl_block_requests(ctrl_info);
7672 pqi_ctrl_wait_until_quiesced(ctrl_info);
7673 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7674 pqi_fail_io_queued_for_all_devices(ctrl_info);
7675 pqi_wait_until_inbound_queues_empty(ctrl_info);
7676 pqi_stop_heartbeat_timer(ctrl_info);
7677 ctrl_info->pqi_mode_enabled = false;
7678 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7681 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7683 pqi_ofa_free_host_buffer(ctrl_info);
7684 ctrl_info->pqi_mode_enabled = true;
7685 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7686 ctrl_info->controller_online = true;
7687 pqi_ctrl_unblock_requests(ctrl_info);
7688 pqi_start_heartbeat_timer(ctrl_info);
7689 pqi_schedule_update_time_worker(ctrl_info);
7690 pqi_clear_soft_reset_status(ctrl_info,
7691 PQI_SOFT_RESET_ABORT);
7692 pqi_scan_scsi_devices(ctrl_info);
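/*
 * Allocate the OFA host buffer as a series of DMA-coherent chunks and
 * describe each chunk with an SG descriptor in the OFA memory structure that
 * is handed to the firmware.
 */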
7695 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7696 u32 total_size, u32 chunk_size)
7701 struct pqi_sg_descriptor *mem_descriptor = NULL;
7703 struct pqi_ofa_memory *ofap;
7705 dev = &ctrl_info->pci_dev->dev;
7707 sg_count = (total_size + chunk_size - 1);
7708 sg_count /= chunk_size;
7710 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7712 if (sg_count*chunk_size < total_size)
7715 ctrl_info->pqi_ofa_chunk_virt_addr =
7716 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7717 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7720 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7721 dma_addr_t dma_handle;
7723 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7724 dma_alloc_coherent(dev, chunk_size, &dma_handle,
7727 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7730 mem_descriptor = &ofap->sg_descriptor[i];
7731 put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
7732 put_unaligned_le32 (chunk_size, &mem_descriptor->length);
7735 if (!size || size < total_size)
7736 goto out_free_chunks;
7738 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7739 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7740 put_unaligned_le32(size, &ofap->bytes_allocated);
7746 mem_descriptor = &ofap->sg_descriptor[i];
7747 dma_free_coherent(dev, chunk_size,
7748 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7749 get_unaligned_le64(&mem_descriptor->address));
7751 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7754 put_unaligned_le32 (0, &ofap->bytes_allocated);
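/*
 * Satisfy the firmware's OFA buffer request with the largest workable chunk
 * size, halving the chunk size after each failed attempt until it would no
 * longer fit within PQI_OFA_MAX_SG_DESCRIPTORS descriptors.
 */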
7758 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7764 total_size = le32_to_cpu(
7765 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7766 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
7768 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7769 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7775 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7776 u32 bytes_requested)
7778 struct pqi_ofa_memory *pqi_ofa_memory;
7781 dev = &ctrl_info->pci_dev->dev;
7782 pqi_ofa_memory = dma_alloc_coherent(dev,
7783 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7784 &ctrl_info->pqi_ofa_mem_dma_handle,
7787 if (!pqi_ofa_memory)
7790 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7791 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7792 sizeof(pqi_ofa_memory->signature));
7793 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7795 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7797 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7798 dev_err(dev, "failed to allocate host buffer of size = %u\n",
7805 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7808 struct pqi_sg_descriptor *mem_descriptor;
7809 struct pqi_ofa_memory *ofap;
7811 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7816 if (!ofap->bytes_allocated)
7819 mem_descriptor = ofap->sg_descriptor;
7821 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7823 dma_free_coherent(&ctrl_info->pci_dev->dev,
7824 get_unaligned_le32(&mem_descriptor[i].length),
7825 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7826 get_unaligned_le64(&mem_descriptor[i].address));
7828 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7831 dma_free_coherent(&ctrl_info->pci_dev->dev,
7832 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7833 ctrl_info->pqi_ofa_mem_dma_handle);
7834 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
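/*
 * Send a vendor general host memory update request that points the firmware
 * at the OFA memory descriptor and its SG list.
 */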
7837 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7839 struct pqi_vendor_general_request request;
7841 struct pqi_ofa_memory *ofap;
7843 memset(&request, 0, sizeof(request));
7845 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7847 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7848 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7849 &request.header.iu_length);
7850 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7851 &request.function_code);
7854 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7855 get_unaligned_le16(&ofap->num_memory_descriptors) *
7856 sizeof(struct pqi_sg_descriptor);
7858 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7859 &request.data.ofa_memory_allocation.buffer_address);
7860 put_unaligned_le32(size,
7861 &request.data.ofa_memory_allocation.buffer_length);
7865 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7866 0, NULL, NO_TIMEOUT);
7869 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7871 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7872 return pqi_ctrl_init_resume(ctrl_info);
7875 static void pqi_perform_lockup_action(void)
7877 switch (pqi_lockup_action) {
7879 panic("FATAL: Smart Family Controller lockup detected");
7882 emergency_restart();
7890 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7891 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7892 .status = SAM_STAT_CHECK_CONDITION,
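/*
 * Fail every outstanding request once the controller is offline: SCSI
 * commands complete with DID_NO_CONNECT, internal requests with -ENXIO and a
 * synthetic hardware-error status.
 */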
7895 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
7898 struct pqi_io_request *io_request;
7899 struct scsi_cmnd *scmd;
7901 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7902 io_request = &ctrl_info->io_request_pool[i];
7903 if (atomic_read(&io_request->refcount) == 0)
7906 scmd = io_request->scmd;
7908 set_host_byte(scmd, DID_NO_CONNECT);
7910 io_request->status = -ENXIO;
7911 io_request->error_info =
7912 &pqi_ctrl_offline_raid_error_info;
7915 io_request->io_complete_callback(io_request,
7916 io_request->context);
7920 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
7922 pqi_perform_lockup_action();
7923 pqi_stop_heartbeat_timer(ctrl_info);
7924 pqi_free_interrupts(ctrl_info);
7925 pqi_cancel_rescan_worker(ctrl_info);
7926 pqi_cancel_update_time_worker(ctrl_info);
7927 pqi_ctrl_wait_until_quiesced(ctrl_info);
7928 pqi_fail_all_outstanding_requests(ctrl_info);
7929 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7930 pqi_ctrl_unblock_requests(ctrl_info);
7933 static void pqi_ctrl_offline_worker(struct work_struct *work)
7935 struct pqi_ctrl_info *ctrl_info;
7937 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7938 pqi_take_ctrl_offline_deferred(ctrl_info);
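/*
 * Mark the controller offline, block further requests, shut the firmware
 * down, and defer the remaining teardown to the offline worker.
 */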
7941 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7943 if (!ctrl_info->controller_online)
7946 ctrl_info->controller_online = false;
7947 ctrl_info->pqi_mode_enabled = false;
7948 pqi_ctrl_block_requests(ctrl_info);
7949 if (!pqi_disable_ctrl_shutdown)
7950 sis_shutdown_ctrl(ctrl_info);
7951 pci_disable_device(ctrl_info->pci_dev);
7952 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
7953 schedule_work(&ctrl_info->ctrl_offline_work);
7956 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
7957 const struct pci_device_id *id)
7959 char *ctrl_description;
7961 if (id->driver_data)
7962 ctrl_description = (char *)id->driver_data;
7964 ctrl_description = "Microsemi Smart Family Controller";
7966 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
7969 static int pqi_pci_probe(struct pci_dev *pci_dev,
7970 const struct pci_device_id *id)
7974 struct pqi_ctrl_info *ctrl_info;
7976 pqi_print_ctrl_info(pci_dev, id);
7978 if (pqi_disable_device_id_wildcards &&
7979 id->subvendor == PCI_ANY_ID &&
7980 id->subdevice == PCI_ANY_ID) {
7981 dev_warn(&pci_dev->dev,
7982 "controller not probed because device ID wildcards are disabled\n");
7986 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
7987 dev_warn(&pci_dev->dev,
7988 "controller device ID matched using wildcards\n");
7990 node = dev_to_node(&pci_dev->dev);
7991 if (node == NUMA_NO_NODE) {
7992 cp_node = cpu_to_node(0);
7993 if (cp_node == NUMA_NO_NODE)
7995 set_dev_node(&pci_dev->dev, cp_node);
7998 ctrl_info = pqi_alloc_ctrl_info(node);
8000 dev_err(&pci_dev->dev,
8001 "failed to allocate controller info block\n");
8005 ctrl_info->pci_dev = pci_dev;
8007 rc = pqi_pci_init(ctrl_info);
8011 rc = pqi_ctrl_init(ctrl_info);
8018 pqi_remove_ctrl(ctrl_info);
8023 static void pqi_pci_remove(struct pci_dev *pci_dev)
8025 struct pqi_ctrl_info *ctrl_info;
8027 ctrl_info = pci_get_drvdata(pci_dev);
8031 pqi_remove_ctrl(ctrl_info);
8034 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8037 struct pqi_io_request *io_request;
8038 struct scsi_cmnd *scmd;
8040 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8041 io_request = &ctrl_info->io_request_pool[i];
8042 if (atomic_read(&io_request->refcount) == 0)
8044 scmd = io_request->scmd;
8045 WARN_ON(scmd != NULL); /* IO command from SML */
8046 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
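/*
 * Quiesce the controller on shutdown or reboot: drain outstanding I/O, flush
 * the controller's write cache, and reset the controller so it restarts
 * cleanly.
 */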
8050 static void pqi_shutdown(struct pci_dev *pci_dev)
8053 struct pqi_ctrl_info *ctrl_info;
8055 ctrl_info = pci_get_drvdata(pci_dev);
8057 dev_err(&pci_dev->dev,
8058 "cache could not be flushed\n");
8062 pqi_disable_events(ctrl_info);
8063 pqi_wait_until_ofa_finished(ctrl_info);
8064 pqi_cancel_update_time_worker(ctrl_info);
8065 pqi_cancel_rescan_worker(ctrl_info);
8066 pqi_cancel_event_worker(ctrl_info);
8068 pqi_ctrl_shutdown_start(ctrl_info);
8069 pqi_ctrl_wait_until_quiesced(ctrl_info);
8071 rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8073 dev_err(&pci_dev->dev,
8074 "wait for pending I/O failed\n");
8078 pqi_ctrl_block_device_reset(ctrl_info);
8079 pqi_wait_until_lun_reset_finished(ctrl_info);
8082 * Write all data in the controller's battery-backed cache to storage.
8085 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
8087 dev_err(&pci_dev->dev,
8088 "unable to flush controller cache\n");
8090 pqi_ctrl_block_requests(ctrl_info);
8092 rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
8094 dev_err(&pci_dev->dev,
8095 "wait for pending sync cmds failed\n");
8099 pqi_crash_if_pending_command(ctrl_info);
8100 pqi_reset(ctrl_info);
8103 static void pqi_process_lockup_action_param(void)
8107 if (!pqi_lockup_action_param)
8110 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
8111 if (strcmp(pqi_lockup_action_param,
8112 pqi_lockup_actions[i].name) == 0) {
8113 pqi_lockup_action = pqi_lockup_actions[i].action;
8118 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
8119 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8122 static void pqi_process_module_params(void)
8124 pqi_process_lockup_action_param();
8127 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
8129 struct pqi_ctrl_info *ctrl_info;
8131 ctrl_info = pci_get_drvdata(pci_dev);
8133 pqi_disable_events(ctrl_info);
8134 pqi_cancel_update_time_worker(ctrl_info);
8135 pqi_cancel_rescan_worker(ctrl_info);
8136 pqi_wait_until_scan_finished(ctrl_info);
8137 pqi_wait_until_lun_reset_finished(ctrl_info);
8138 pqi_wait_until_ofa_finished(ctrl_info);
8139 pqi_flush_cache(ctrl_info, SUSPEND);
8140 pqi_ctrl_block_requests(ctrl_info);
8141 pqi_ctrl_wait_until_quiesced(ctrl_info);
8142 pqi_wait_until_inbound_queues_empty(ctrl_info);
8143 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8144 pqi_stop_heartbeat_timer(ctrl_info);
8146 if (state.event == PM_EVENT_FREEZE)
8149 pci_save_state(pci_dev);
8150 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
8152 ctrl_info->controller_online = false;
8153 ctrl_info->pqi_mode_enabled = false;
8158 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
8161 struct pqi_ctrl_info *ctrl_info;
8163 ctrl_info = pci_get_drvdata(pci_dev);
8165 if (pci_dev->current_state != PCI_D0) {
8166 ctrl_info->max_hw_queue_index = 0;
8167 pqi_free_interrupts(ctrl_info);
8168 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
8169 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
8170 IRQF_SHARED, DRIVER_NAME_SHORT,
8171 &ctrl_info->queue_groups[0]);
8173 dev_err(&ctrl_info->pci_dev->dev,
8174 "irq %u init failed with error %d\n",
8178 pqi_start_heartbeat_timer(ctrl_info);
8179 pqi_ctrl_unblock_requests(ctrl_info);
8183 pci_set_power_state(pci_dev, PCI_D0);
8184 pci_restore_state(pci_dev);
8186 return pqi_ctrl_init_resume(ctrl_info);
8189 /* Define the PCI IDs for the controllers that we support. */
8190 static const struct pci_device_id pqi_pci_id_table[] = {
8192 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8196 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8200 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8204 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8208 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8212 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8216 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8220 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8224 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8228 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8232 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8236 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8240 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8244 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8248 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8252 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8256 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8260 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8264 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8268 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8272 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8276 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8280 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8284 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8288 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8292 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8296 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8300 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8304 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8308 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8312 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8316 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8317 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
8320 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8321 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
8324 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8325 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
8328 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8329 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
8332 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8333 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
8336 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8337 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
8340 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8341 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
8344 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8345 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
8348 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8349 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
8352 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8353 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8356 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8357 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
8360 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8361 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
8364 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8365 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
8368 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8369 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8372 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8373 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
8376 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8377 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
8380 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8381 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
8384 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8385 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
8388 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8389 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
8392 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8393 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
8396 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8397 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
8400 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8401 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
8404 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8405 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
8408 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8409 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
8412 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8413 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
8416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8417 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
8420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8421 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
8424 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8425 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
8428 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8429 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
8432 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8433 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
8436 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8437 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
8440 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8441 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
8444 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8445 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
8448 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8449 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
8452 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8453 PCI_VENDOR_ID_ADVANTECH, 0x8312)
8456 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8457 PCI_VENDOR_ID_DELL, 0x1fe0)
8460 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8461 PCI_VENDOR_ID_HP, 0x0600)
8464 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8465 PCI_VENDOR_ID_HP, 0x0601)
8468 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8469 PCI_VENDOR_ID_HP, 0x0602)
8472 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8473 PCI_VENDOR_ID_HP, 0x0603)
8476 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8477 PCI_VENDOR_ID_HP, 0x0609)
8480 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8481 PCI_VENDOR_ID_HP, 0x0650)
8484 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8485 PCI_VENDOR_ID_HP, 0x0651)
8488 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8489 PCI_VENDOR_ID_HP, 0x0652)
8492 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8493 PCI_VENDOR_ID_HP, 0x0653)
8496 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8497 PCI_VENDOR_ID_HP, 0x0654)
8500 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8501 PCI_VENDOR_ID_HP, 0x0655)
8504 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8505 PCI_VENDOR_ID_HP, 0x0700)
8508 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8509 PCI_VENDOR_ID_HP, 0x0701)
8512 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8513 PCI_VENDOR_ID_HP, 0x1001)
8516 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8517 PCI_VENDOR_ID_HP, 0x1100)
8520 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8521 PCI_VENDOR_ID_HP, 0x1101)
8524 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8528 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8532 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8536 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8540 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8541 PCI_VENDOR_ID_GIGABYTE, 0x1000)
8544 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8545 PCI_ANY_ID, PCI_ANY_ID)
8550 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
8552 static struct pci_driver pqi_pci_driver = {
8553 .name = DRIVER_NAME_SHORT,
8554 .id_table = pqi_pci_id_table,
8555 .probe = pqi_pci_probe,
8556 .remove = pqi_pci_remove,
8557 .shutdown = pqi_shutdown,
8558 #if defined(CONFIG_PM)
8559 .suspend = pqi_suspend,
8560 .resume = pqi_resume,
8564 static int __init pqi_init(void)
8568 pr_info(DRIVER_NAME "\n");
8570 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
8571 if (!pqi_sas_transport_template)
8574 pqi_process_module_params();
8576 rc = pci_register_driver(&pqi_pci_driver);
8578 sas_release_transport(pqi_sas_transport_template);
8583 static void __exit pqi_cleanup(void)
8585 pci_unregister_driver(&pqi_pci_driver);
8586 sas_release_transport(pqi_sas_transport_template);
8589 module_init(pqi_init);
8590 module_exit(pqi_cleanup);
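/*
 * Compile-time layout checks: verify that the driver's structure definitions
 * match the register offsets and IU layouts defined by the PQI specification.
 */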
8592 static void __attribute__((unused)) verify_structures(void)
8594 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8595 sis_host_to_ctrl_doorbell) != 0x20);
8596 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8597 sis_interrupt_mask) != 0x34);
8598 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8599 sis_ctrl_to_host_doorbell) != 0x9c);
8600 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8601 sis_ctrl_to_host_doorbell_clear) != 0xa0);
8602 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8603 sis_driver_scratch) != 0xb0);
8604 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8605 sis_firmware_status) != 0xbc);
8606 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8607 sis_mailbox) != 0x1000);
8608 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8609 pqi_registers) != 0x4000);
8611 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8613 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8615 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8616 response_queue_id) != 0x4);
8617 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8619 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
8621 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8623 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8624 service_response) != 0x1);
8625 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8626 data_present) != 0x2);
8627 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8629 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8630 residual_count) != 0x4);
8631 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8632 data_length) != 0x8);
8633 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8635 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8637 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
8639 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8640 data_in_result) != 0x0);
8641 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8642 data_out_result) != 0x1);
8643 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8645 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8647 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8648 status_qualifier) != 0x6);
8649 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8650 sense_data_length) != 0x8);
8651 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8652 response_data_length) != 0xa);
8653 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8654 data_in_transferred) != 0xc);
8655 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8656 data_out_transferred) != 0x10);
8657 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8659 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
8661 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8663 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8664 function_and_status_code) != 0x8);
8665 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8666 max_admin_iq_elements) != 0x10);
8667 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8668 max_admin_oq_elements) != 0x11);
8669 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8670 admin_iq_element_length) != 0x12);
8671 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8672 admin_oq_element_length) != 0x13);
8673 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8674 max_reset_timeout) != 0x14);
8675 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8676 legacy_intx_status) != 0x18);
8677 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8678 legacy_intx_mask_set) != 0x1c);
8679 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8680 legacy_intx_mask_clear) != 0x20);
8681 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8682 device_status) != 0x40);
8683 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8684 admin_iq_pi_offset) != 0x48);
8685 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8686 admin_oq_ci_offset) != 0x50);
8687 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8688 admin_iq_element_array_addr) != 0x58);
8689 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8690 admin_oq_element_array_addr) != 0x60);
8691 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8692 admin_iq_ci_addr) != 0x68);
8693 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8694 admin_oq_pi_addr) != 0x70);
8695 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8696 admin_iq_num_elements) != 0x78);
8697 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8698 admin_oq_num_elements) != 0x79);
8699 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8700 admin_queue_int_msg_num) != 0x7a);
8701 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8702 device_error) != 0x80);
8703 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8704 error_details) != 0x88);
8705 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8706 device_reset) != 0x90);
8707 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8708 power_action) != 0x94);
8709 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
8711 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8712 header.iu_type) != 0);
8713 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8714 header.iu_length) != 2);
8715 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8716 header.work_area) != 6);
8717 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8719 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8720 function_code) != 10);
8721 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8722 data.report_device_capability.buffer_length) != 44);
8723 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8724 data.report_device_capability.sg_descriptor) != 48);
8725 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8726 data.create_operational_iq.queue_id) != 12);
8727 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8728 data.create_operational_iq.element_array_addr) != 16);
8729 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8730 data.create_operational_iq.ci_addr) != 24);
8731 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8732 data.create_operational_iq.num_elements) != 32);
8733 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8734 data.create_operational_iq.element_length) != 34);
8735 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8736 data.create_operational_iq.queue_protocol) != 36);
8737 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8738 data.create_operational_oq.queue_id) != 12);
8739 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8740 data.create_operational_oq.element_array_addr) != 16);
8741 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8742 data.create_operational_oq.pi_addr) != 24);
8743 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8744 data.create_operational_oq.num_elements) != 32);
8745 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8746 data.create_operational_oq.element_length) != 34);
8747 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8748 data.create_operational_oq.queue_protocol) != 36);
8749 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8750 data.create_operational_oq.int_msg_num) != 40);
8751 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8752 data.create_operational_oq.coalescing_count) != 42);
8753 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8754 data.create_operational_oq.min_coalescing_time) != 44);
8755 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8756 data.create_operational_oq.max_coalescing_time) != 48);
8757 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8758 data.delete_operational_queue.queue_id) != 12);
8759 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
8760 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8761 data.create_operational_iq) != 64 - 11);
8762 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8763 data.create_operational_oq) != 64 - 11);
8764 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8765 data.delete_operational_queue) != 64 - 11);
8767 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8768 header.iu_type) != 0);
8769 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8770 header.iu_length) != 2);
8771 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8772 header.work_area) != 6);
8773 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8775 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8776 function_code) != 10);
8777 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8779 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8780 data.create_operational_iq.status_descriptor) != 12);
8781 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8782 data.create_operational_iq.iq_pi_offset) != 16);
8783 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8784 data.create_operational_oq.status_descriptor) != 12);
8785 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8786 data.create_operational_oq.oq_ci_offset) != 16);
8787 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
8789 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8790 header.iu_type) != 0);
8791 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8792 header.iu_length) != 2);
8793 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8794 header.response_queue_id) != 4);
8795 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8796 header.work_area) != 6);
8797 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8799 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8801 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8802 buffer_length) != 12);
8803 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8805 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8806 protocol_specific) != 24);
8807 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8808 error_index) != 27);
8809 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8811 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8813 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8814 sg_descriptors) != 64);
8815 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
8816 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8818 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8819 header.iu_type) != 0);
8820 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8821 header.iu_length) != 2);
8822 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8823 header.response_queue_id) != 4);
8824 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8825 header.work_area) != 6);
8826 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8828 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8830 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8831 buffer_length) != 16);
8832 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8833 data_encryption_key_index) != 22);
8834 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8835 encrypt_tweak_lower) != 24);
8836 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8837 encrypt_tweak_upper) != 28);
8838 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8840 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8841 error_index) != 48);
8842 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8843 num_sg_descriptors) != 50);
8844 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8846 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8848 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8849 sg_descriptors) != 64);
8850 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
8851 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8853 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8854 header.iu_type) != 0);
8855 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8856 header.iu_length) != 2);
8857 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8859 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8860 error_index) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);
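	/* IU layer descriptor and device capability data reported by the controller */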
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
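	/* event descriptor and event configuration buffer layouts */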
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));
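	/* event response and event acknowledgment IU layouts */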
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
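	/* task management (e.g. LUN reset) request/response IU layouts */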
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
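	/* BMIC identify controller / identify physical device buffer layouts */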
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
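	/*
	 * Sanity checks on queue geometry: admin queue element counts,
	 * element length alignment, and reserved I/O slots versus the
	 * maximum number of outstanding requests.
	 */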
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);