#define BFAD_FW_FILE_CT "ctfw.bin"
#define BFAD_FW_FILE_CB "cbfw.bin"
+MODULE_FIRMWARE(BFAD_FW_FILE_CT);
+MODULE_FIRMWARE(BFAD_FW_FILE_CB);
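MODULE_FIRMWARE() only records the firmware file names in the module's metadata so that tools such as initramfs generators know to bundle them; the actual load still happens through request_firmware() at runtime. A minimal sketch of that pairing, assuming bfad_read_firmware() requests the same files named above (the actual bfad call site is not part of this hunk):

/* sketch: request a firmware blob declared via MODULE_FIRMWARE() */
#include <linux/firmware.h>

const struct firmware *fw;

if (request_firmware(&fw, BFAD_FW_FILE_CT, &pdev->dev))
	return NULL;	/* file missing or userspace helper failed */
/* ... copy fw->data (fw->size bytes) into a driver buffer ... */
release_firmware(fw);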
u32 *
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
/* For FCP type 0x08 */
fc_host_supported_fc4s(host)[2] = 1;
- if (bfad_supported_fc4s | BFA_PORT_ROLE_FCP_IPFC)
+ if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
/* For LLC/SNAP type 0x05 */
fc_host_supported_fc4s(host)[3] = 0x20;
/* For fibre channel services type 0x20 */
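The one-character fix above matters because bfad_supported_fc4s is already nonzero whenever any role is configured, so OR-ing in BFA_PORT_ROLE_FCP_IPFC produced a value that was always true and the IPFC branch ran unconditionally; AND actually tests the bit. A worked illustration with made-up bit values:

/* made-up values: IM = 0x1, IPFC = 0x4; host configured for IM only */
unsigned int roles = 0x1;
/* roles | 0x4 == 0x5 -> nonzero, branch always taken (the old bug) */
/* roles & 0x4 == 0x0 -> zero, branch correctly skipped (the fix)   */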
eindex = handle;
estr->event_source = 0;
- if (eindex >= MAX_EVENTS) {
+ if (eindex < 0 || eindex >= MAX_EVENTS) {
spin_unlock_irqrestore(&ha->smp_lock, flags);
return eindex;
}
return rc;
}
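The added lower-bound test closes the other half of the range check: the handle is caller-supplied and signed, so a negative value would have indexed memory before the event array even though the old upper-bound test passed. The general pattern, as a self-contained sketch (names hypothetical, not the driver's own):

#define MAX_EVENTS 100

struct evt { int source; };
static struct evt events[MAX_EVENTS];

/* validate a signed, caller-supplied index against both ends */
static struct evt *lookup_event(int handle)
{
	if (handle < 0 || handle >= MAX_EVENTS)
		return NULL;	/* reject out-of-range handles */
	return &events[handle];
}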
+/**
+ * ipr_isr_eh - Interrupt service routine error handler
+ * @ioa_cfg: ioa config struct
+ * @msg: message to log
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+{
+ ioa_cfg->errors_logged++;
+ dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+
+ if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+ ioa_cfg->sdt_state = GET_DUMP;
+
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+}
+
/**
* ipr_isr - Interrupt service routine
* @irq: irq number
volatile u32 int_reg, int_mask_reg;
u32 ioasc;
u16 cmd_index;
+ int num_hrrq = 0;
struct ipr_cmnd *ipr_cmd;
irqreturn_t rc = IRQ_NONE;
IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
- ioa_cfg->errors_logged++;
- dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
-
- if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
- ioa_cfg->sdt_state = GET_DUMP;
-
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return IRQ_HANDLED;
}
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ do {
+ writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
+ num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+
+ if (int_reg & IPR_PCII_HRRQ_UPDATED) {
+ ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return IRQ_HANDLED;
+ }
+
} else
break;
}
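The retry loop works around hardware where the HRRQ-updated bit does not always latch clear on the first write: each readl() of the sense register flushes the posted writel() so the check observes the device's real state, and the retry count (IPR_MAX_HRRQ_RETRIES, defined below) bounds the loop so a stuck bit escalates to ipr_isr_eh() instead of spinning forever in interrupt context. The generic shape of this clear-and-verify pattern, with placeholder register and function names:

/* sketch: bounded clear-and-verify of a write-1-to-clear status bit */
int tries = 0;
u32 status;

do {
	writel(STATUS_BIT, regs->clear);	/* W1C: write 1 to clear */
	status = readl(regs->status);		/* read back flushes the posted write */
} while ((status & STATUS_BIT) && tries++ < MAX_TRIES);

if (status & STATUS_BIT)
	handle_stuck_bit();	/* never cleared: fail, don't spin */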
#define IPR_IOA_MAX_SECTORS 32767
#define IPR_VSET_MAX_SECTORS 512
#define IPR_MAX_CDB_LEN 16
+#define IPR_MAX_HRRQ_RETRIES 3
#define IPR_DEFAULT_BUS_WIDTH 16
#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
}
}
- res = 0;
}
return res;
ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
- ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST);
+ ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
ioadl[0].data_len = cpu_to_le32(rcb_size);
ioadl[0].address = cpu_to_le32(dma);
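Dropping cpu_to_le32() around the IOADL flag values is only safe because the descriptor's flags field is a single byte: a one-byte field has no byte order, while the 64-bit address and 32-bit length in the same descriptor still need conversion, as the unchanged cpu_to_le64()/cpu_to_le32() calls show. The layout this hunk implies (field names and attributes assumed from the surrounding code, not quoted from the header):

/* assumed descriptor layout: byte-sized flags, swapped multi-byte fields */
struct ioadl_desc {
	__le64 address;
	__le32 data_len;
	__u8   reserved[3];
	__u8   flags;	/* IOADL_FLAGS_* stored directly, no swap */
} __attribute__((packed, aligned(16)));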
ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
- ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+ ioadl->flags = IOADL_FLAGS_LAST_DESC;
/* request sense might be called as part of error response processing
* which runs in tasklets context. It is possible that mid-layer might
ioadl[i].flags = 0;
}
/* setup last descriptor */
- ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+ ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
return 0;
}
}
/* setup the last descriptor */
- ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+ ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
return 0;
}
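Both scatter-gather hunks follow the same shape: zero the flags while filling the list, then stamp only the final element with IOADL_FLAGS_LAST_DESC so the adapter knows where the chain ends. A condensed sketch of that fill-then-mark-tail pattern (illustrative, using the standard SCSI SG iterator):

/* sketch: build an IOADL from the command's SG list, mark the tail */
struct scatterlist *sg;
int i;

scsi_for_each_sg(cmd, sg, nseg, i) {
	ioadl[i].address  = cpu_to_le64(sg_dma_address(sg));
	ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
	ioadl[i].flags    = 0;
}
ioadl[nseg - 1].flags = IOADL_FLAGS_LAST_DESC;	/* last descriptor */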
cpu_to_le32(sizeof(struct pmcraid_config_table));
ioadl = &(ioarcb->add_data.u.ioadl[0]);
- ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+ ioadl->flags = IOADL_FLAGS_LAST_DESC;
ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
fail_host_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->reply_payload_rcv_len = 0;
job->reply->result = ret;
job->reply_len = sizeof(uint32_t);
fc_bsg_jobdone(job);
fail_rport_msg:
/* return the errno failure code as the only status */
BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->reply_payload_rcv_len = 0;
job->reply->result = ret;
job->reply_len = sizeof(uint32_t);
fc_bsg_jobdone(job);
/* check if we have the msgcode value at least */
if (job->request_len < sizeof(uint32_t)) {
BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->reply_payload_rcv_len = 0;
job->reply->result = -ENOMSG;
job->reply_len = sizeof(uint32_t);
fc_bsg_jobdone(job);
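All three hunks plug the same leak: these error paths complete the job without transferring any payload, and reply_payload_rcv_len is what gets reported back to userspace as the received-byte count, so leaving it uninitialized exposed stale memory as a bogus residual. The shared shape of the fix, folded into one hypothetical helper:

/* sketch: minimal bsg error completion; helper name is illustrative */
static void bsg_reply_error(struct fc_bsg_job *job, int result)
{
	BUG_ON(job->reply_len < sizeof(uint32_t));
	job->reply->reply_payload_rcv_len = 0;	/* nothing was transferred */
	job->reply->result = result;
	job->reply_len = sizeof(uint32_t);
	fc_bsg_jobdone(job);
}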
static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
- switch (target_type) {
- case 1:
- if (shost->prot_capabilities & SHOST_DIF_TYPE1_PROTECTION)
- return target_type;
- case 2:
- if (shost->prot_capabilities & SHOST_DIF_TYPE2_PROTECTION)
- return target_type;
- case 3:
- if (shost->prot_capabilities & SHOST_DIF_TYPE3_PROTECTION)
- return target_type;
- }
+ static unsigned char cap[] = { 0,
+ SHOST_DIF_TYPE1_PROTECTION,
+ SHOST_DIF_TYPE2_PROTECTION,
+ SHOST_DIF_TYPE3_PROTECTION };
+
- return 0;
+ if (target_type >= ARRAY_SIZE(cap))
+ return 0;
+
+ return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}
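Beyond being shorter, the table version fixes a latent fall-through in the old switch: with no break statements, a host lacking Type 1 protection but advertising Type 2 fell from case 1 into case 2 and returned target_type, i.e. claimed Type 1 support it did not have. (The guard on target_type also keeps an out-of-range type from indexing past the four-entry table.) A quick check of the corrected behavior, as a usage sketch:

/* host advertises Type 2 DIF only */
shost->prot_capabilities = SHOST_DIF_TYPE2_PROTECTION;
/* old switch fell through and returned 1 here; table version returns 0 */
BUG_ON(scsi_host_dif_capable(shost, 1) != 0);
BUG_ON(scsi_host_dif_capable(shost, 2) != 2);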
static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- switch (target_type) {
- case 0: return shost->prot_capabilities & SHOST_DIX_TYPE0_PROTECTION;
- case 1: return shost->prot_capabilities & SHOST_DIX_TYPE1_PROTECTION;
- case 2: return shost->prot_capabilities & SHOST_DIX_TYPE2_PROTECTION;
- case 3: return shost->prot_capabilities & SHOST_DIX_TYPE3_PROTECTION;
- }
+ static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
+ SHOST_DIX_TYPE1_PROTECTION,
+ SHOST_DIX_TYPE2_PROTECTION,
+ SHOST_DIX_TYPE3_PROTECTION };
+
+ if (target_type >= ARRAY_SIZE(cap))
+ return 0;
+
+ return shost->prot_capabilities & cap[target_type];
#endif
return 0;
}
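scsi_host_dix_capable() gets the identical treatment, with a four-entry table that starts at Type 0 because DIX (integrity metadata exchange between the block layer and the HBA) is meaningful even when the target itself is unprotected; outside CONFIG_BLK_DEV_INTEGRITY the whole body compiles away to the trailing return 0. A usage sketch of a host advertising and querying both capabilities:

/* sketch: LLD advertises DIF+DIX Type 1, core code queries it */
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
			  SHOST_DIX_TYPE1_PROTECTION);

if (scsi_host_dif_capable(shost, 1) && scsi_host_dix_capable(shost, 1))
	pr_info("full end-to-end protection for Type 1 targets\n");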