Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c6ef83d0d99b893ccd5976da44dfc5e93218dd1b..677f82fdf56fd174c2c033b852431a91ab5b0fe6 100644
@@ -285,6 +285,27 @@ MODULE_PARM_DESC(qla2xuseresexchforels,
                 "Reserve 1/2 of emergency exchanges for ELS.\n"
                 " 0 (default): disabled");
 
+int ql2xprotmask;
+module_param(ql2xprotmask, int, 0644);
+MODULE_PARM_DESC(ql2xprotmask,
+                "Override DIF/DIX protection capabilities mask\n"
+                "Default is 0, which sets the protection mask based on "
+                "capabilities reported by HBA firmware.\n");
+
+int ql2xprotguard;
+module_param(ql2xprotguard, int, 0644);
+MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
+                "  0 -- Let HBA firmware decide\n"
+                "  1 -- Force T10 CRC\n"
+                "  2 -- Force IP checksum\n");
+
+int ql2xdifbundlinginternalbuffers;
+module_param(ql2xdifbundlinginternalbuffers, int, 0644);
+MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
+    "Force using internal buffers for DIF information\n"
+    "0 (Default): decide based on driver checks\n"
+    "1: force using internal buffers\n");
+
 /*
  * SCSI host template entry points
  */
@@ -804,7 +825,44 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
                ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
                ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
                mempool_free(ctx1, ha->ctx_mempool);
+               sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
+       }
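+       /*
+        * DSDs on the dma handle list hold buffers from the DIF bundling
+        * pool, while DSDs on the dsd list came from dl_dma_pool; free
+        * both and expect the per-context counters to drop to zero.
+        */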
+       if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
+               struct crc_context *difctx = sp->u.scmd.ctx;
+               struct dsd_dma *dif_dsd, *nxt_dsd;
+
+               list_for_each_entry_safe(dif_dsd, nxt_dsd,
+                   &difctx->ldif_dma_hndl_list, list) {
+                       list_del(&dif_dsd->list);
+                       dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
+                           dif_dsd->dsd_list_dma);
+                       kfree(dif_dsd);
+                       difctx->no_dif_bundl--;
+               }
+
+               list_for_each_entry_safe(dif_dsd, nxt_dsd,
+                   &difctx->ldif_dsd_list, list) {
+                       list_del(&dif_dsd->list);
+                       dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
+                           dif_dsd->dsd_list_dma);
+                       kfree(dif_dsd);
+                       difctx->no_ldif_dsd--;
+               }
+
+               if (difctx->no_ldif_dsd) {
+                       ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
+                           "%s: difctx->no_ldif_dsd=%x\n",
+                           __func__, difctx->no_ldif_dsd);
+               }
+
+               if (difctx->no_dif_bundl) {
+                       ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
+                           "%s: difctx->no_dif_bundl=%x\n",
+                           __func__, difctx->no_dif_bundl);
+               }
+               sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
        }
+
 end:
        CMD_SP(cmd) = NULL;
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
@@ -3342,13 +3400,16 @@ skip_dpc:
                            "Registering for DIF/DIX type 1 and 3 protection.\n");
                        if (ql2xenabledif == 1)
                                prot = SHOST_DIX_TYPE0_PROTECTION;
-                       scsi_host_set_prot(host,
-                           prot | SHOST_DIF_TYPE1_PROTECTION
-                           | SHOST_DIF_TYPE2_PROTECTION
-                           | SHOST_DIF_TYPE3_PROTECTION
-                           | SHOST_DIX_TYPE1_PROTECTION
-                           | SHOST_DIX_TYPE2_PROTECTION
-                           | SHOST_DIX_TYPE3_PROTECTION);
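+                       /*
+                        * A non-zero ql2xprotmask overrides the protection
+                        * mask derived from firmware capabilities.
+                        */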
+                       if (ql2xprotmask)
+                               scsi_host_set_prot(host, ql2xprotmask);
+                       else
+                               scsi_host_set_prot(host,
+                                   prot | SHOST_DIF_TYPE1_PROTECTION
+                                   | SHOST_DIF_TYPE2_PROTECTION
+                                   | SHOST_DIF_TYPE3_PROTECTION
+                                   | SHOST_DIX_TYPE1_PROTECTION
+                                   | SHOST_DIX_TYPE2_PROTECTION
+                                   | SHOST_DIX_TYPE3_PROTECTION);
 
                        guard = SHOST_DIX_GUARD_CRC;
 
@@ -3356,7 +3417,10 @@ skip_dpc:
                            (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
                                guard |= SHOST_DIX_GUARD_IP;
 
-                       scsi_host_set_guard(host, guard);
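+                       /*
+                        * Likewise, a non-zero ql2xprotguard forces the DIX
+                        * guard type instead of the CRC/IP choice above.
+                        */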
+                       if (ql2xprotguard)
+                               scsi_host_set_guard(host, ql2xprotguard);
+                       else
+                               scsi_host_set_guard(host, guard);
                } else
                        base_vha->flags.difdix_supported = 0;
        }
@@ -3997,9 +4061,86 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                            "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
                        goto fail_dl_dma_pool;
                }
+
+               if (ql2xenabledif) {
+                       u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
+                       struct dsd_dma *dsd, *nxt;
+                       uint i;
+                       /* Create a DMA pool of buffers for DIF bundling */
+                       ha->dif_bundl_pool = dma_pool_create(name,
+                           &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
+                       if (!ha->dif_bundl_pool) {
+                               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
+                                   "%s: failed create dif_bundl_pool\n",
+                                   __func__);
+                               goto fail_dif_bundl_dma_pool;
+                       }
+
+                       INIT_LIST_HEAD(&ha->pool.good.head);
+                       INIT_LIST_HEAD(&ha->pool.unusable.head);
+                       ha->pool.good.count = 0;
+                       ha->pool.unusable.count = 0;
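+                       /*
+                        * Pre-allocate a batch of buffers so that any buffer
+                        * whose DMA range crosses a 4GB boundary can be
+                        * parked on the unusable list and never handed out
+                        * by the pool again.
+                        */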
+                       for (i = 0; i < 128; i++) {
+                               dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
+                               if (!dsd) {
+                                       ql_dbg_pci(ql_dbg_init, ha->pdev,
+                                           0xe0ee, "%s: failed alloc dsd\n",
+                                           __func__);
+                                       return 1;
+                               }
+                               ha->dif_bundle_kallocs++;
+
+                               dsd->dsd_addr = dma_pool_alloc(
+                                   ha->dif_bundl_pool, GFP_ATOMIC,
+                                   &dsd->dsd_list_dma);
+                               if (!dsd->dsd_addr) {
+                                       ql_dbg_pci(ql_dbg_init, ha->pdev,
+                                           0xe0ee,
+                                           "%s: failed alloc ->dsd_addr\n",
+                                           __func__);
+                                       kfree(dsd);
+                                       ha->dif_bundle_kallocs--;
+                                       continue;
+                               }
+                               ha->dif_bundle_dma_allocs++;
+
+                               /*
+                                * If the DMA buffer crosses a 4GB boundary,
+                                * put it on the unusable list.
+                                */
+                               if (MSD(dsd->dsd_list_dma) ^
+                                   MSD(dsd->dsd_list_dma + bufsize)) {
+                                       list_add_tail(&dsd->list,
+                                           &ha->pool.unusable.head);
+                                       ha->pool.unusable.count++;
+                               } else {
+                                       list_add_tail(&dsd->list,
+                                           &ha->pool.good.head);
+                                       ha->pool.good.count++;
+                               }
+                       }
+
+                       /* return the good buffers to the pool */
+                       list_for_each_entry_safe(dsd, nxt,
+                           &ha->pool.good.head, list) {
+                               list_del(&dsd->list);
+                               dma_pool_free(ha->dif_bundl_pool,
+                                   dsd->dsd_addr, dsd->dsd_list_dma);
+                               ha->dif_bundle_dma_allocs--;
+                               kfree(dsd);
+                               ha->dif_bundle_kallocs--;
+                       }
+
+                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
+                           "%s: dif dma pool (good=%u unusable=%u)\n",
+                           __func__, ha->pool.good.count,
+                           ha->pool.unusable.count);
+               }
+
                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
-                   "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
-                   ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
+                   "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
+                   ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
+                   ha->dif_bundl_pool);
        }
 
        /* Allocate memory for SNS commands */
@@ -4164,6 +4305,24 @@ fail_free_ms_iocb:
                dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
                    ha->sns_cmd, ha->sns_cmd_dma);
 fail_dma_pool:
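+       /* Unwind the DIF bundling allocations made earlier in this function */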
+       if (ql2xenabledif) {
+               struct dsd_dma *dsd, *nxt;
+
+               list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
+                   list) {
+                       list_del(&dsd->list);
+                       dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+                           dsd->dsd_list_dma);
+                       ha->dif_bundle_dma_allocs--;
+                       kfree(dsd);
+                       ha->dif_bundle_kallocs--;
+                       ha->pool.unusable.count--;
+               }
+               dma_pool_destroy(ha->dif_bundl_pool);
+               ha->dif_bundl_pool = NULL;
+       }
+
+fail_dif_bundl_dma_pool:
        if (IS_QLA82XX(ha) || ql2xenabledif) {
                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
                ha->fcp_cmnd_dma_pool = NULL;
@@ -4544,6 +4703,32 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 
        mempool_destroy(ha->ctx_mempool);
 
+       if (ql2xenabledif) {
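+               /*
+                * Drain DSDs still parked on the unusable (and, if any,
+                * good) lists and return their buffers to the bundling
+                * pool before it is destroyed below.
+                */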
+               struct dsd_dma *dsd, *nxt;
+
+               list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
+                                        list) {
+                       list_del(&dsd->list);
+                       dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+                                     dsd->dsd_list_dma);
+                       ha->dif_bundle_dma_allocs--;
+                       kfree(dsd);
+                       ha->dif_bundle_kallocs--;
+                       ha->pool.unusable.count--;
+               }
+               list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
+                       list_del(&dsd->list);
+                       dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+                                     dsd->dsd_list_dma);
+                       ha->dif_bundle_dma_allocs--;
+                       kfree(dsd);
+                       ha->dif_bundle_kallocs--;
+               }
+       }
+
+       if (ha->dif_bundl_pool)
+               dma_pool_destroy(ha->dif_bundl_pool);
+
        qlt_mem_free(ha);
 
        if (ha->init_cb)
@@ -5019,14 +5204,14 @@ qla2x00_do_work(struct scsi_qla_host *vha)
        struct qla_work_evt *e, *tmp;
        unsigned long flags;
        LIST_HEAD(work);
+       int rc;
 
        spin_lock_irqsave(&vha->work_lock, flags);
        list_splice_init(&vha->work_list, &work);
        spin_unlock_irqrestore(&vha->work_lock, flags);
 
        list_for_each_entry_safe(e, tmp, &work, list) {
-               list_del_init(&e->list);
-
+               rc = QLA_SUCCESS;
                switch (e->type) {
                case QLA_EVT_AEN:
                        fc_host_post_event(vha->host, fc_get_event_number(),
@@ -5040,7 +5225,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                            e->u.logio.data);
                        break;
                case QLA_EVT_ASYNC_LOGOUT:
-                       qla2x00_async_logout(vha, e->u.logio.fcport);
+                       rc = qla2x00_async_logout(vha, e->u.logio.fcport);
                        break;
                case QLA_EVT_ASYNC_LOGOUT_DONE:
                        qla2x00_async_logout_done(vha, e->u.logio.fcport,
@@ -5085,7 +5270,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                        qla24xx_do_nack_work(vha, e);
                        break;
                case QLA_EVT_ASYNC_PRLO:
-                       qla2x00_async_prlo(vha, e->u.logio.fcport);
+                       rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
                        break;
                case QLA_EVT_ASYNC_PRLO_DONE:
                        qla2x00_async_prlo_done(vha, e->u.logio.fcport,
@@ -5118,6 +5303,15 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                            e->u.fcport.fcport, false);
                        break;
                }
+
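+               /*
+                * If the handler could not process the event now (EAGAIN),
+                * push the remaining entries, this one included since it has
+                * not been deleted from the local list yet, back onto the
+                * head of vha->work_list to be retried later.
+                */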
+               if (rc == EAGAIN) {
+                       /* put 'work' at head of 'vha->work_list' */
+                       spin_lock_irqsave(&vha->work_lock, flags);
+                       list_splice(&work, &vha->work_list);
+                       spin_unlock_irqrestore(&vha->work_lock, flags);
+                       break;
+               }
+               list_del_init(&e->list);
                if (e->flags & QLA_EVT_FLAG_FREE)
                        kfree(e);
 
@@ -6930,13 +7124,64 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
        ha->flags.eeh_busy = 0;
 }
 
+static void
+qla_pci_reset_prepare(struct pci_dev *pdev)
+{
+       scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+       struct qla_hw_data *ha = base_vha->hw;
+       struct qla_qpair *qpair;
+
+       ql_log(ql_log_warn, base_vha, 0xffff,
+           "%s.\n", __func__);
+
+       /*
+        * A PCI FLR/function reset is about to reset the slot.
+        * Stop the chip so that it performs no further DMA.
+        * It is assumed that pci_reset_done will be called after
+        * the FLR to resume chip operation.
+        */
+       ha->flags.eeh_busy = 1;
+       mutex_lock(&ha->mq_lock);
+       list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+               qpair->online = 0;
+       mutex_unlock(&ha->mq_lock);
+
+       set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+       qla2x00_abort_isp_cleanup(base_vha);
+       qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
+}
+
+static void
+qla_pci_reset_done(struct pci_dev *pdev)
+{
+       scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+       struct qla_hw_data *ha = base_vha->hw;
+       struct qla_qpair *qpair;
+
+       ql_log(ql_log_warn, base_vha, 0xffff,
+           "%s.\n", __func__);
+
+       /*
+        * The FLR has just been completed by the PCI layer. Resume the adapter.
+        */
+       ha->flags.eeh_busy = 0;
+       mutex_lock(&ha->mq_lock);
+       list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+               qpair->online = 1;
+       mutex_unlock(&ha->mq_lock);
+
+       base_vha->flags.online = 1;
+       ha->isp_ops->abort_isp(base_vha);
+       clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+}
+
 static int qla2xxx_map_queues(struct Scsi_Host *shost)
 {
        int rc;
        scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
        struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
 
-       if (USER_CTRL_IRQ(vha->hw))
+       if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
                rc = blk_mq_map_queues(qmap);
        else
                rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
@@ -6948,6 +7193,8 @@ static const struct pci_error_handlers qla2xxx_err_handler = {
        .mmio_enabled = qla2xxx_pci_mmio_enabled,
        .slot_reset = qla2xxx_pci_slot_reset,
        .resume = qla2xxx_pci_resume,
+       .reset_prepare = qla_pci_reset_prepare,
+       .reset_done = qla_pci_reset_done,
 };
 
 static struct pci_device_id qla2xxx_pci_tbl[] = {