* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.20-0"
+#define HPSA_DRIVER_VERSION "3.4.20-125"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
}
offload_enabled = hdev->offload_enabled;
spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "%d\n", offload_enabled);
+
+ if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
+ return snprintf(buf, 20, "%d\n", offload_enabled);
+ else
+ return snprintf(buf, 40, "%s\n",
+ "Not applicable for a controller");
}
#define MAX_PATHS 8
dev->model,
label,
dev->offload_config ? '+' : '-',
- dev->offload_enabled ? '+' : '-',
+ dev->offload_to_be_enabled ? '+' : '-',
dev->expose_device);
}
(*nadded)++;
hpsa_show_dev_msg(KERN_INFO, h, device,
device->expose_device ? "added" : "masked");
- device->offload_to_be_enabled = device->offload_enabled;
- device->offload_enabled = 0;
return 0;
}
-/* Update an entry in h->dev[] array. */
+/*
+ * Called during a scan operation.
+ *
+ * Update an entry in h->dev[] array.
+ */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
int entry, struct hpsa_scsi_dev_t *new_entry)
{
- int offload_enabled;
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
+ /*
+	 * ioaccel_handle may have changed for a dual domain disk
+ */
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+
/* Raid offload parameters changed. Careful about the ordering. */
- if (new_entry->offload_config && new_entry->offload_enabled) {
+ if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
/*
* if drive is newly offload_enabled, we want to copy the
* raid map data first. If previously offload_enabled and
* offload_config were set, raid map data had better be
- * the same as it was before. if raid map data is changed
+ * the same as it was before. If raid map data has changed
* then it had better be the case that
* h->dev[entry]->offload_enabled is currently 0.
*/
h->dev[entry]->raid_map = new_entry->raid_map;
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
}
- if (new_entry->hba_ioaccel_enabled) {
+ if (new_entry->offload_to_be_enabled) {
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
}
/*
* We can turn off ioaccel offload now, but need to delay turning
- * it on until we can update h->dev[entry]->phys_disk[], but we
+ * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
* can't do that until all the devices are updated.
*/
- h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
- if (!new_entry->offload_enabled)
+ h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
+
+ /*
+ * turn ioaccel off immediately if told to do so.
+ */
+ if (!new_entry->offload_to_be_enabled)
h->dev[entry]->offload_enabled = 0;
- offload_enabled = h->dev[entry]->offload_enabled;
- h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
- h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
+
hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
- new_entry->offload_to_be_enabled = new_entry->offload_enabled;
- new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
return 1;
if (dev1->offload_config != dev2->offload_config)
return 1;
- if (dev1->offload_enabled != dev2->offload_enabled)
+ if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
return 1;
if (!is_logical_dev_addr_mode(dev1->scsi3addr))
if (dev1->queue_depth != dev2->queue_depth)
return 1;
+ /*
+	 * This can happen for dual domain devices. An active
+	 * path change causes the ioaccel handle to change.
+	 *
+	 * For example, note the handle differences between p0 and p1:
+ * Device WWN ,WWN hash,Handle
+ * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
+ * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
+ */
+ if (dev1->ioaccel_handle != dev2->ioaccel_handle)
+ return 1;
return 0;
}
* be 0, but we'll turn it off here just in case
*/
if (!logical_drive->phys_disk[i]) {
+ dev_warn(&h->pdev->dev,
+ "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
+ __func__,
+ h->scsi_host->host_no, logical_drive->bus,
+ logical_drive->target, logical_drive->lun);
logical_drive->offload_enabled = 0;
logical_drive->offload_to_be_enabled = 0;
logical_drive->queue_depth = 8;
* way too high for partial stripe writes
*/
logical_drive->queue_depth = qdepth;
- else
- logical_drive->queue_depth = h->nr_cmds;
+ else {
+ if (logical_drive->external)
+ logical_drive->queue_depth = EXTERNAL_QD;
+ else
+ logical_drive->queue_depth = h->nr_cmds;
+ }
}
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
/*
* If offload is currently enabled, the RAID map and
* phys_disk[] assignment *better* not be changing
- * and since it isn't changing, we do not need to
- * update it.
+	 * because we would be changing ioaccel phys_disk[] pointers
+	 * on an ioaccel volume that is processing I/O requests.
+	 *
+	 * If an ioaccel volume's status changed, either because it was
+	 * re-configured and thus underwent a transformation, or because
+	 * a drive failed, we would have received a state change
+ * request and ioaccel should have been turned off. When the
+ * transformation completes, we get another state change
+ * request to turn ioaccel back on. In this case, we need
+ * to update the ioaccel information.
+ *
+ * Thus: If it is not currently enabled, but will be after
+ * the scan completes, make sure the ioaccel pointers
+ * are up to date.
*/
- if (dev[i]->offload_enabled)
- continue;
- hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
+ if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
+ hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
}
}
break;
if (++waits > 20)
break;
+ msleep(1000);
+ }
+
+ if (waits > 20)
dev_warn(&h->pdev->dev,
"%s: removing device with %d outstanding commands!\n",
__func__, cmds);
- msleep(1000);
- }
}
static void hpsa_remove_device(struct ctlr_info *h,
if (!h->scsi_host)
return;
+ /*
+ * Allow for commands to drain
+ */
+ device->removed = 1;
+ hpsa_wait_for_outstanding_commands_for_dev(h, device);
+
if (is_logical_device(device)) { /* RAID */
sdev = scsi_device_lookup(h->scsi_host, device->bus,
device->target, device->lun);
}
} else { /* HBA */
- device->removed = 1;
- hpsa_wait_for_outstanding_commands_for_dev(h, device);
-
hpsa_remove_sas_device(device);
}
}
}
hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
- /* Now that h->dev[]->phys_disk[] is coherent, we can enable
+ /*
+ * Now that h->dev[]->phys_disk[] is coherent, we can enable
* any logical drives that need it enabled.
+ *
+ * The raid map should be current by now.
+ *
+ * We are updating the device list used for I/O requests.
*/
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i] == NULL)
/*
* Any RAID offload error results in retry which will use
- * the normal I/O path so the controller can handle whatever's
+ * the normal I/O path so the controller can handle whatever is
* wrong.
*/
if (is_logical_device(dev) &&
}
}
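+/*
+ * Issue a SCSI RECEIVE DIAGNOSTIC RESULTS command to the device at
+ * scsi3addr and read the returned diagnostic page into buf.
+ */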
+static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
+ u8 page, u8 *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_free(h, c);
+ return rc;
+}
+
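+/*
+ * Obtain an enclosure's logical identifier by reading its SES
+ * Configuration diagnostic page; the 8-byte identifier starts at
+ * byte offset 12 of the page.
+ */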
+static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
+ u8 *scsi3addr)
+{
+ u8 *buf;
+ u64 sa = 0;
+ int rc = 0;
+
+ buf = kzalloc(1024, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
+ buf, 1024);
+
+ if (rc)
+ goto out;
+
+ sa = get_unaligned_be64(buf+12);
+
+out:
+ kfree(buf);
+ return sa;
+}
+
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
u16 page, unsigned char *buf,
unsigned char bufsize)
goto out;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
return -1;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
goto out;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
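+	/* Use the SES logical identifier as this enclosure's SAS address. */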
+ encl_dev->sas_address =
+ hpsa_get_enclosure_logical_identifier(h, scsi3addr);
+
if (encl_dev->target == -1 || encl_dev->lun == -1) {
rc = IO_OK;
goto out;
c->Request.CDB[5] = 0;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (rc)
goto out;
dev->sas_address = sa;
}
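+/*
+ * Scan the extended physical LUN list for an external controller.
+ * If one is found, disable report-LUN-data caching and switch to
+ * discovery polling, since event-based change notification is
+ * unreliable through external controllers.
+ */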
+static void hpsa_ext_ctrl_present(struct ctlr_info *h,
+ struct ReportExtendedLUNdata *physdev)
+{
+ u32 nphysicals;
+ int i;
+
+ if (h->discovery_polling)
+ return;
+
+ nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
+
+ for (i = 0; i < nphysicals; i++) {
+ if (physdev->LUN[i].device_type ==
+ BMIC_DEVICE_TYPE_CONTROLLER
+ && !is_hba_lunid(physdev->LUN[i].lunid)) {
+ dev_info(&h->pdev->dev,
+ "External controller present, activate discovery polling and disable rld caching\n");
+ hpsa_disable_rld_caching(h);
+ h->discovery_polling = 1;
+ break;
+ }
+ }
+}
+
/* Get a device id from inquiry page 0x83 */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
unsigned char scsi3addr[], u8 page)
return true;
}
+/*
+ * Called during a scan operation.
+ * Sets ioaccel status on the new device list, not the existing device list
+ *
+ * The device list used during I/O will be updated later in
+ * adjust_hpsa_scsi_table.
+ */
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
this_device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
if (this_device->offload_config) {
- this_device->offload_enabled =
+ this_device->offload_to_be_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
if (hpsa_get_raid_map(h, scsi3addr, this_device))
- this_device->offload_enabled = 0;
+ this_device->offload_to_be_enabled = 0;
}
- this_device->offload_to_be_enabled = this_device->offload_enabled;
+
out:
kfree(buf);
return;
if (extended_response)
c->Request.CDB[1] = extended_response;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (rc) {
cmd_free(h, c);
return HPSA_VPD_LV_STATUS_UNSUPPORTED;
*/
ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
+ hpsa_ext_ctrl_present(h, physdev_list);
+
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
if (i >= HPSA_MAX_DEVICES) {
int phys_dev_index = i - (raid_ctlr_position == 0);
bool skip_device = false;
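+		/* tmpdevice is reused on each pass; clear out stale state. */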
+ memset(tmpdevice, 0, sizeof(*tmpdevice));
+
physical_device = i < nphysicals + (raid_ctlr_position == 0);
/* Figure out where the LUN ID info is coming from */
continue;
}
- /* Get device type, vendor, model, device id */
+ /* Get device type, vendor, model, device id, raid_map */
rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR);
if (rc == -ENOMEM) {
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
this_device = currentsd[ncurrent];
- /* Turn on discovery_polling if there are ext target devices.
- * Event-based change notification is unreliable for those.
- */
- if (!h->discovery_polling) {
- if (tmpdevice->external) {
- h->discovery_polling = 1;
- dev_info(&h->pdev->dev,
- "External target, activate discovery polling.\n");
- }
- }
-
-
*this_device = *tmpdevice;
this_device->physical_device = physical_device;
c->Request.CDB[0] = HPSA_INQUIRY;
c->Request.CDB[4] = size & 0xFF;
break;
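+	/*
+	 * SCSI RECEIVE DIAGNOSTIC RESULTS, used here to read the SES
+	 * Configuration page from enclosure devices.
+	 */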
+ case RECEIVE_DIAGNOSTIC:
+ c->Request.CDBLen = 6;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+		c->Request.CDB[1] = 1;	/* PCV bit: page code in CDB[2] is valid */
+		c->Request.CDB[2] = 1;	/* page code 0x01: SES Configuration page */
+ c->Request.CDB[3] = (size >> 8) & 0xFF;
+ c->Request.CDB[4] = size & 0xFF;
+ break;
case HPSA_REPORT_LOG:
case HPSA_REPORT_PHYS:
/* Talking to controller so It's a physical command
spin_unlock_irqrestore(&h->lock, flags);
dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
lockup_detected, h->heartbeat_sample_interval / HZ);
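+	/*
+	 * For this lockup value, ask the firmware to generate a
+	 * checkpoint (CHKPT) dump via the doorbell.
+	 */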
+ if (lockup_detected == 0xffff0000) {
+ dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
+ writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
+ }
pci_disable_device(h->pdev);
fail_all_outstanding_cmds(h);
}
return false;
}
-static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+/*
+ * Set ioaccel status for all ioaccel volumes.
+ *
+ * Called from monitor controller worker (hpsa_event_monitor_worker)
+ *
+ * A Volume (or the Volumes that comprise an Array set) may be undergoing
+ * a transformation, so we will be turning off ioaccel for all volumes
+ * that make up the Array.
+ */
+static void hpsa_set_ioaccel_status(struct ctlr_info *h)
{
+ int rc;
int i;
+ u8 ioaccel_status;
+ unsigned char *buf;
+ struct hpsa_scsi_dev_t *device;
+
+ if (!h)
+ return;
+
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ /*
+ * Run through current device list used during I/O requests.
+ */
+ for (i = 0; i < h->ndevices; i++) {
+ device = h->dev[i];
+
+ if (!device)
+ continue;
+ if (!hpsa_vpd_page_supported(h, device->scsi3addr,
+ HPSA_VPD_LV_IOACCEL_STATUS))
+ continue;
+
+ memset(buf, 0, 64);
+
+ rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
+ VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
+ buf, 64);
+ if (rc != 0)
+ continue;
+
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+ device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (device->offload_config)
+ device->offload_to_be_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+
+ /*
+ * Immediately turn off ioaccel for any volume the
+ * controller tells us to. Some of the reasons could be:
+ * transformation - change to the LVs of an Array.
+ * degraded volume - component failure
+ *
+ * If ioaccel is to be re-enabled, re-enable later during the
+ * scan operation so the driver can get a fresh raidmap
+ * before turning ioaccel back on.
+		 */
+ if (!device->offload_to_be_enabled)
+ device->offload_enabled = 0;
+ }
+
+ kfree(buf);
+}
+
+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
char *event_type;
if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
event_type = "configuration change";
/* Stop sending new RAID offload reqs via the IO accelerator */
scsi_block_requests(h->scsi_host);
- for (i = 0; i < h->ndevices; i++) {
- h->dev[i]->offload_enabled = 0;
- h->dev[i]->offload_to_be_enabled = 0;
- }
+ hpsa_set_ioaccel_status(h);
hpsa_drain_accel_commands(h);
/* Set 'accelerator path config change' bit */
dev_warn(&h->pdev->dev,
writel(h->events, &(h->cfgtable->clear_event_notify));
writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_clear_event_notify_ack(h);
-#if 0
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- hpsa_wait_for_mode_change_ack(h);
-#endif
}
return;
}
if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
hpsa_perform_rescan(h);
} else if (h->discovery_polling) {
- hpsa_disable_rld_caching(h);
if (hpsa_luns_changed(h)) {
dev_info(&h->pdev->dev,
"driver discovery polling rescan.\n");
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_TODEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
+ hpsa_delete_sas_host(h);
+
/*
* Call before disabling interrupts.
* scsi_remove_host can trigger I/O operations especially
h->lockup_detected = NULL; /* init_one 2 */
/* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
- hpsa_delete_sas_host(h);
-
kfree(h); /* init_one 1 */
}
struct sas_phy *phy = hpsa_sas_phy->phy;
sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (hpsa_sas_phy->added_to_port)
list_del(&hpsa_sas_phy->phy_list_entry);
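+	/*
+	 * sas_phy_free() must only be used on a phy that was never added;
+	 * sas_phy_delete() both removes and frees an added phy.
+	 */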
+ sas_phy_delete(phy);
kfree(hpsa_sas_phy);
}
struct hpsa_sas_port *hpsa_sas_port;
struct hpsa_sas_phy *hpsa_sas_phy;
- parent_dev = &h->scsi_host->shost_gendev;
+ parent_dev = &h->scsi_host->shost_dev;
hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
if (!hpsa_sas_node)
static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
- *identifier = 0;
+ *identifier = rphy->identify.sas_address;
return 0;
}