[SCSI] ipr: Reduce queuecommand lock time
author Brian King <brking@linux.vnet.ibm.com>
Tue, 17 Jul 2012 13:13:52 +0000 (08:13 -0500)
committer James Bottomley <JBottomley@Parallels.com>
Fri, 24 Aug 2012 09:10:27 +0000 (13:10 +0400)
Reduce the amount of time the host lock is held in queuecommand
for improved performance.

[jejb: fix up checkpatch noise]
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
drivers/scsi/ipr.c

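The change below narrows the host_lock critical sections in ipr_queuecommand(): the lock is taken just long enough to check adapter state and pull a command block off the free list, dropped while the command is built and its buffers are DMA-mapped, then re-taken to re-check state before queuing and sending the command. A minimal sketch of that pattern, using hypothetical adapter/helper names rather than the driver's real structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical per-adapter and per-command types, for illustration only. */
struct adapter {
        int allow_cmds;
        struct list_head free_q;
        struct list_head pending_q;
};

struct cmd_block {
        struct list_head queue;
};

int build_command(struct cmd_block *cb, struct scsi_cmnd *cmd);  /* hypothetical: CDB copy, DMA mapping */
void send_command(struct cmd_block *cb);                         /* hypothetical: hand off to the adapter */

static int queuecommand_sketch(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        struct adapter *ad = shost_priv(shost);
        struct cmd_block *cb;
        unsigned long flags;
        int rc;

        spin_lock_irqsave(shost->host_lock, flags);
        if (!ad->allow_cmds) {                  /* cheap state checks only, under the lock */
                spin_unlock_irqrestore(shost->host_lock, flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
        cb = list_first_entry(&ad->free_q, struct cmd_block, queue);
        list_del(&cb->queue);                   /* grab a free block, then drop the lock */
        spin_unlock_irqrestore(shost->host_lock, flags);

        rc = build_command(cb, cmd);            /* slow per-command setup, done unlocked */

        spin_lock_irqsave(shost->host_lock, flags);
        if (rc || !ad->allow_cmds) {            /* state may have changed while unlocked */
                list_add_tail(&cb->queue, &ad->free_q);
                spin_unlock_irqrestore(shost->host_lock, flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
        list_add_tail(&cb->queue, &ad->pending_q);
        send_command(cb);
        spin_unlock_irqrestore(shost->host_lock, flags);
        return 0;
}

If the adapter state changed while the lock was dropped (for example, a reset started), the re-check after re-locking returns the block to the free list and asks the midlayer to retry, which is what the new error paths in the patch below do as well.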
index dacc784ad2d614859a4d4d039476487bdbefae6c..b2994e2cf01762181bcb5e4b1d209b95ce7a4b14 100644
@@ -620,24 +620,38 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
  * @ioa_cfg:   ioa config struct
  *
  * Return value:
  *     pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
        struct ipr_cmnd *ipr_cmd;
 
        ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
        list_del(&ipr_cmd->queue);
-       ipr_init_ipr_cmnd(ipr_cmd);
 
        return ipr_cmd;
 }
 
+/**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg:   ioa config struct
+ *
+ * Return value:
+ *     pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+       struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+       ipr_init_ipr_cmnd(ipr_cmd);
+       return ipr_cmd;
+}
+
 /**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:   ioa config struct
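The hunk above follows the usual kernel naming convention for lock-split helpers: the double-underscore variant, __ipr_get_free_ipr_cmnd(), only dequeues a block and expects its caller to hold the lock protecting the free list (the host lock here), while ipr_get_free_ipr_cmnd() keeps the old behaviour of dequeue plus initialization for existing callers. The fast path in ipr_queuecommand() below uses the bare helper so it can drop the host lock before running ipr_init_ipr_cmnd() and the rest of the command setup. A minimal sketch of that split, reusing the hypothetical types from the sketch above:

void init_block(struct cmd_block *cb);          /* hypothetical per-command re-initialization */

/* Caller must hold the lock protecting the free list: just dequeue a block. */
static struct cmd_block *__get_free_block(struct adapter *ad)
{
        struct cmd_block *cb = list_first_entry(&ad->free_q, struct cmd_block, queue);

        list_del(&cb->queue);
        return cb;
}

/* Convenience wrapper for callers that also want the block (re)initialized. */
static struct cmd_block *get_free_block(struct adapter *ad)
{
        struct cmd_block *cb = __get_free_block(ad);

        init_block(cb);
        return cb;
}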
@@ -5783,8 +5797,8 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 
 /**
  * ipr_queuecommand - Queue a mid-layer request
+ * @shost:             scsi host struct
  * @scsi_cmd:  scsi command struct
- * @done:              done function
  *
  * This function queues a request generated by the mid-layer.
  *
@@ -5793,61 +5807,58 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
  *     SCSI_MLQUEUE_DEVICE_BUSY if device is busy
  *     SCSI_MLQUEUE_HOST_BUSY if host is busy
  **/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
-                           void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+                           struct scsi_cmnd *scsi_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg;
        struct ipr_resource_entry *res;
        struct ipr_ioarcb *ioarcb;
        struct ipr_cmnd *ipr_cmd;
+       unsigned long lock_flags;
        int rc = 0;
 
-       scsi_cmd->scsi_done = done;
-       ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-       res = scsi_cmd->device->hostdata;
+       ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+       spin_lock_irqsave(shost->host_lock, lock_flags);
        scsi_cmd->result = (DID_OK << 16);
+       res = scsi_cmd->device->hostdata;
 
        /*
         * We are currently blocking all devices due to a host reset
         * We have told the host to stop giving us new requests, but
         * ERP ops don't count. FIXME
         */
-       if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+       if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
                return SCSI_MLQUEUE_HOST_BUSY;
+       }
 
        /*
         * FIXME - Create scsi_set_host_offline interface
         *  and the ioa_is_dead check can be removed
         */
        if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-               memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-               scsi_cmd->result = (DID_NO_CONNECT << 16);
-               scsi_cmd->scsi_done(scsi_cmd);
-               return 0;
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               goto err_nodev;
        }
 
        if (ipr_is_gata(res) && res->sata_port)
                return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
 
-       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+       ipr_init_ipr_cmnd(ipr_cmd);
        ioarcb = &ipr_cmd->ioarcb;
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 
        memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
        ipr_cmd->scsi_cmd = scsi_cmd;
-       ioarcb->res_handle = res->res_handle;
        ipr_cmd->done = ipr_scsi_done;
-       ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 
        if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
                if (scsi_cmd->underflow == 0)
                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
-               if (res->needs_sync_complete) {
-                       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
-                       res->needs_sync_complete = 0;
-               }
-
                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
                if (ipr_is_gscsi(res))
                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5866,16 +5877,41 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
                        rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
        }
 
-       if (unlikely(rc != 0)) {
-               list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               if (!rc)
+                       scsi_dma_unmap(scsi_cmd);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
+       if (unlikely(ioa_cfg->ioa_is_dead)) {
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               scsi_dma_unmap(scsi_cmd);
+               goto err_nodev;
+       }
+
+       ioarcb->res_handle = res->res_handle;
+       if (res->needs_sync_complete) {
+               ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+               res->needs_sync_complete = 0;
+       }
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+       ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
        ipr_send_command(ipr_cmd);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
        return 0;
-}
 
-static DEF_SCSI_QCMD(ipr_queuecommand)
+err_nodev:
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+       scsi_cmd->result = (DID_NO_CONNECT << 16);
+       scsi_cmd->scsi_done(scsi_cmd);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       return 0;
+}
 
 /**
  * ipr_ioctl - IOCTL handler