Merge tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma...
[sfrench/cifs-2.6.git] / drivers / nvme / host / pci.c
index c6bf83d74560b9d990dce4792f4dc47d9dc3259b..de1b4463142db7909a0daff782b722b3983c6d43 100644
@@ -659,7 +659,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 
        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
        if (!prp_list) {
-               iod->first_dma = dma_addr;
                iod->npages = -1;
                return BLK_STS_RESOURCE;
        }
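
The failure path above leans on a small convention: iod->npages set to -1
tells the unmap path that no PRP-list pages were ever allocated, so there is
nothing to free. A minimal userspace sketch of that convention, with
hypothetical names standing in for the kernel types:

#include <stdio.h>
#include <stdlib.h>

struct fake_iod {
	int npages;		/* -1 means "no PRP-list pages allocated" */
	void *prp_list;
};

static int setup_prps(struct fake_iod *iod, int fail)
{
	iod->prp_list = fail ? NULL : malloc(64);
	if (!iod->prp_list) {
		iod->npages = -1;	/* signal: nothing to unwind */
		return -1;		/* stands in for BLK_STS_RESOURCE */
	}
	iod->npages = 1;
	return 0;
}

static void unmap_data(struct fake_iod *iod)
{
	if (iod->npages < 0)	/* allocation never happened */
		return;
	free(iod->prp_list);
}

int main(void)
{
	struct fake_iod iod;

	if (setup_prps(&iod, 1))
		printf("alloc failed, npages=%d\n", iod.npages);
	unmap_data(&iod);	/* safe: the free is skipped */
	return 0;
}
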
@@ -1333,7 +1332,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
                 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
 }
 
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+static enum blk_eh_timer_return nvme_timeout(struct request *req)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = iod->nvmeq;
@@ -1424,8 +1423,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
 
        dev_warn(nvmeq->dev->ctrl.device,
-               "I/O %d QID %d timeout, aborting\n",
-                req->tag, nvmeq->qid);
+               "I/O %d (%s) QID %d timeout, aborting\n",
+                req->tag,
+                nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
+                nvmeq->qid);
 
        abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
                                         BLK_MQ_REQ_NOWAIT);
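
This hunk tracks the block-layer removal of the timeout handler's unused
'reserved' argument, and the timeout warning now names the opcode as well.
A stub sketch of the new handler shape, using placeholder types rather than
the real blk-mq ones:

#include <stdio.h>

struct request { int tag; int opcode; };

enum blk_eh_timer_return { BLK_EH_DONE, BLK_EH_RESET_TIMER };

struct mq_ops {
	/* the handler now takes only the request, no 'reserved' flag */
	enum blk_eh_timer_return (*timeout)(struct request *req);
};

/* tiny stand-in for nvme_get_opcode_str(); real NVMe I/O opcodes shown */
static const char *opcode_str(int opcode)
{
	return opcode == 0x01 ? "Write" : opcode == 0x02 ? "Read" : "Unknown";
}

static enum blk_eh_timer_return my_timeout(struct request *req)
{
	printf("I/O %d (%s) timeout, aborting\n", req->tag,
	       opcode_str(req->opcode));
	return BLK_EH_RESET_TIMER;
}

static const struct mq_ops ops = { .timeout = my_timeout };

int main(void)
{
	struct request req = { .tag = 42, .opcode = 0x02 };

	ops.timeout(&req);	/* prints: I/O 42 (Read) timeout, aborting */
	return 0;
}
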
@@ -1749,42 +1750,40 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
                 * queue to flush these to completion.
                 */
                nvme_start_admin_queue(&dev->ctrl);
-               blk_cleanup_queue(dev->ctrl.admin_q);
+               blk_mq_destroy_queue(dev->ctrl.admin_q);
                blk_mq_free_tag_set(&dev->admin_tagset);
        }
 }
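
blk_cleanup_queue() gives way to blk_mq_destroy_queue() here, but the
ordering constraint is unchanged: the queue has to be torn down before the
tag set it was created from is freed. A trivial stub sketch of that
reverse-order teardown, with malloc/free standing in for the block-layer
calls:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *tagset = malloc(1);	/* ~ blk_mq_alloc_tag_set() */
	void *queue = malloc(1);	/* ~ blk_mq_init_queue(), uses tagset */

	/* the queue goes away first, while its tag set is still valid */
	free(queue);			/* ~ blk_mq_destroy_queue() */
	free(tagset);			/* ~ blk_mq_free_tag_set() */
	printf("teardown done\n");
	return 0;
}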
 
-static int nvme_alloc_admin_tags(struct nvme_dev *dev)
+static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev)
 {
-       if (!dev->ctrl.admin_q) {
-               dev->admin_tagset.ops = &nvme_mq_admin_ops;
-               dev->admin_tagset.nr_hw_queues = 1;
+       struct blk_mq_tag_set *set = &dev->admin_tagset;
 
-               dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-               dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
-               dev->admin_tagset.numa_node = dev->ctrl.numa_node;
-               dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
-               dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
-               dev->admin_tagset.driver_data = dev;
+       set->ops = &nvme_mq_admin_ops;
+       set->nr_hw_queues = 1;
 
-               if (blk_mq_alloc_tag_set(&dev->admin_tagset))
-                       return -ENOMEM;
-               dev->ctrl.admin_tagset = &dev->admin_tagset;
+       set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+       set->timeout = NVME_ADMIN_TIMEOUT;
+       set->numa_node = dev->ctrl.numa_node;
+       set->cmd_size = sizeof(struct nvme_iod);
+       set->flags = BLK_MQ_F_NO_SCHED;
+       set->driver_data = dev;
 
-               dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
-               if (IS_ERR(dev->ctrl.admin_q)) {
-                       blk_mq_free_tag_set(&dev->admin_tagset);
-                       dev->ctrl.admin_q = NULL;
-                       return -ENOMEM;
-               }
-               if (!blk_get_queue(dev->ctrl.admin_q)) {
-                       nvme_dev_remove_admin(dev);
-                       dev->ctrl.admin_q = NULL;
-                       return -ENODEV;
-               }
-       } else
-               nvme_start_admin_queue(&dev->ctrl);
+       if (blk_mq_alloc_tag_set(set))
+               return -ENOMEM;
+       dev->ctrl.admin_tagset = set;
 
+       dev->ctrl.admin_q = blk_mq_init_queue(set);
+       if (IS_ERR(dev->ctrl.admin_q)) {
+               blk_mq_free_tag_set(set);
+               dev->ctrl.admin_q = NULL;
+               return -ENOMEM;
+       }
+       if (!blk_get_queue(dev->ctrl.admin_q)) {
+               nvme_dev_remove_admin(dev);
+               dev->ctrl.admin_q = NULL;
+               return -ENODEV;
+       }
        return 0;
 }
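
The new helper keeps the usual acquire-then-unwind ladder: allocate the tag
set, build the queue from it, take a queue reference, and undo the completed
steps in reverse when a later one fails. A compilable stub sketch of the
pattern (all names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct admin_ctx {
	void *tagset;	/* stands in for the tag set */
	void *queue;	/* stands in for the admin request queue */
};

static int alloc_admin(struct admin_ctx *c, int fail_queue)
{
	c->tagset = malloc(1);			/* ~ blk_mq_alloc_tag_set() */
	if (!c->tagset)
		return -ENOMEM;

	c->queue = fail_queue ? NULL : malloc(1);	/* ~ blk_mq_init_queue() */
	if (!c->queue) {
		free(c->tagset);		/* unwind in reverse order */
		c->tagset = NULL;
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct admin_ctx c = { 0 };

	printf("failure path returns %d\n", alloc_admin(&c, 1));
	return 0;
}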
 
@@ -2523,47 +2522,45 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
        return true;
 }
 
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
 {
+       struct blk_mq_tag_set *set = &dev->tagset;
        int ret;
 
-       if (!dev->ctrl.tagset) {
-               dev->tagset.ops = &nvme_mq_ops;
-               dev->tagset.nr_hw_queues = dev->online_queues - 1;
-               dev->tagset.nr_maps = 2; /* default + read */
-               if (dev->io_queues[HCTX_TYPE_POLL])
-                       dev->tagset.nr_maps++;
-               dev->tagset.timeout = NVME_IO_TIMEOUT;
-               dev->tagset.numa_node = dev->ctrl.numa_node;
-               dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
-                                               BLK_MQ_MAX_DEPTH) - 1;
-               dev->tagset.cmd_size = sizeof(struct nvme_iod);
-               dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
-               dev->tagset.driver_data = dev;
-
-               /*
-                * Some Apple controllers requires tags to be unique
-                * across admin and IO queue, so reserve the first 32
-                * tags of the IO queue.
-                */
-               if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
-                       dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+       set->ops = &nvme_mq_ops;
+       set->nr_hw_queues = dev->online_queues - 1;
+       set->nr_maps = 2; /* default + read */
+       if (dev->io_queues[HCTX_TYPE_POLL])
+               set->nr_maps++;
+       set->timeout = NVME_IO_TIMEOUT;
+       set->numa_node = dev->ctrl.numa_node;
+       set->queue_depth = min_t(unsigned int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+       set->cmd_size = sizeof(struct nvme_iod);
+       set->flags = BLK_MQ_F_SHOULD_MERGE;
+       set->driver_data = dev;
 
-               ret = blk_mq_alloc_tag_set(&dev->tagset);
-               if (ret) {
-                       dev_warn(dev->ctrl.device,
-                               "IO queues tagset allocation failed %d\n", ret);
-                       return;
-               }
-               dev->ctrl.tagset = &dev->tagset;
-       } else {
-               blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+       /*
+        * Some Apple controllers require tags to be unique across admin and
+        * IO queues, so reserve the first 32 tags of the IO queue.
+        */
+       if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+               set->reserved_tags = NVME_AQ_DEPTH;
 
-               /* Free previously allocated queues that are no longer usable */
-               nvme_free_queues(dev, dev->online_queues);
+       ret = blk_mq_alloc_tag_set(set);
+       if (ret) {
+               dev_warn(dev->ctrl.device,
+                       "IO queues tagset allocation failed %d\n", ret);
+               return;
        }
+       dev->ctrl.tagset = set;
+}
 
-       nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+       blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+       /* free previously allocated queues that are no longer usable */
+       nvme_free_queues(dev, dev->online_queues);
 }
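
One detail in nvme_pci_alloc_tag_set() worth spelling out: the depth is
clamped to BLK_MQ_MAX_DEPTH and then reduced by one, since an NVMe ring
leaves one slot unused so a full queue can be told apart from an empty one.
A quick arithmetic check with a hypothetical controller depth (the
BLK_MQ_MAX_DEPTH value here is assumed to mirror the kernel's):

#include <stdio.h>

#define BLK_MQ_MAX_DEPTH 10240	/* assumed to mirror the kernel constant */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;	/* stands in for min_t(unsigned int, ...) */
}

int main(void)
{
	unsigned int q_depth = 1024;	/* hypothetical controller queue size */

	/* one ring slot stays unused so "full" differs from "empty" */
	printf("queue_depth = %u\n", min_uint(q_depth, BLK_MQ_MAX_DEPTH) - 1);
	return 0;	/* prints: queue_depth = 1023 */
}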
 
 static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2679,8 +2676,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
        mutex_lock(&dev->shutdown_lock);
-       if (pci_device_is_present(pdev) && pci_is_enabled(pdev)) {
-               u32 csts = readl(dev->bar + NVME_REG_CSTS);
+       if (pci_is_enabled(pdev)) {
+               u32 csts;
+
+               if (pci_device_is_present(pdev))
+                       csts = readl(dev->bar + NVME_REG_CSTS);
+               else
+                       csts = ~0;
 
                if (dev->ctrl.state == NVME_CTRL_LIVE ||
                    dev->ctrl.state == NVME_CTRL_RESETTING) {
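
The reshuffled check only reads CSTS when the device is still present, and
otherwise proceeds as if the read had returned all-ones, which is what MMIO
reads from a surprise-removed PCI device return anyway. A small sketch of
that convention:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_csts(int device_present, uint32_t reg_value)
{
	/* reads from a surprise-removed PCI device come back all-ones */
	return device_present ? reg_value : ~0u;
}

int main(void)
{
	printf("csts = %#x\n", read_csts(0, 0x1));	/* csts = 0xffffffff */
	return 0;
}
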
@@ -2709,10 +2711,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        nvme_pci_disable(dev);
        nvme_reap_pending_cqes(dev);
 
-       blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
-       blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
-       blk_mq_tagset_wait_completed_request(&dev->tagset);
-       blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
+       nvme_cancel_tagset(&dev->ctrl);
+       nvme_cancel_admin_tagset(&dev->ctrl);
 
        /*
         * The driver will not be starting up queues again if shutting down so
@@ -2826,9 +2826,13 @@ static void nvme_reset_work(struct work_struct *work)
        if (result)
                goto out_unlock;
 
-       result = nvme_alloc_admin_tags(dev);
-       if (result)
-               goto out_unlock;
+       if (!dev->ctrl.admin_q) {
+               result = nvme_pci_alloc_admin_tag_set(dev);
+               if (result)
+                       goto out_unlock;
+       } else {
+               nvme_start_admin_queue(&dev->ctrl);
+       }
 
        /*
         * Limit the max command size to prevent iod->sg allocations going
@@ -2907,7 +2911,11 @@ static void nvme_reset_work(struct work_struct *work)
        } else {
                nvme_start_queues(&dev->ctrl);
                nvme_wait_freeze(&dev->ctrl);
-               nvme_dev_add(dev);
+               if (!dev->ctrl.tagset)
+                       nvme_pci_alloc_tag_set(dev);
+               else
+                       nvme_pci_update_nr_queues(dev);
+               nvme_dbbuf_set(dev);
                nvme_unfreeze(&dev->ctrl);
        }
 
@@ -3460,15 +3468,21 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(0x1987, 0x5012),   /* Phison E12 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1987, 0x5016),   /* Phison E16 */
-               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1b4b, 0x1092),   /* Lexar 256 GB SSD */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1cc1, 0x33f8),   /* ADATA IM2P33F8ABR1 1 TB */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
-               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+                               NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1344, 0x5407),   /* Micron Technology Inc NVMe SSD */
+               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1c5c, 0x1504),   /* SK Hynix PC400 */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1c5c, 0x174a),   /* SK Hynix P31 SSD */
@@ -3499,6 +3513,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e49, 0x0041),   /* ZHITAI TiPro7000 NVMe SSD */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0xc0a9, 0x540a),   /* Crucial P2 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
@@ -3519,10 +3535,6 @@ static const struct pci_device_id nvme_id_table[] = {
                                NVME_QUIRK_128_BYTES_SQES |
                                NVME_QUIRK_SHARED_TAGS |
                                NVME_QUIRK_SKIP_CID_GEN },
-       { PCI_DEVICE(0x144d, 0xa808),   /* Samsung X5 */
-               .driver_data =  NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
-                               NVME_QUIRK_NO_DEEPEST_PS |
-                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
 };
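
For reference, the NVME_QUIRK_* values OR'd into driver_data throughout this
table are bit flags that the driver tests at run time. A toy sketch with
hypothetical bit values:

#include <stdio.h>

/* bit positions are hypothetical; the kernel defines its own values */
#define QUIRK_IGNORE_DEV_SUBNQN	(1UL << 0)
#define QUIRK_BOGUS_NID		(1UL << 1)

int main(void)
{
	unsigned long driver_data = QUIRK_IGNORE_DEV_SUBNQN | QUIRK_BOGUS_NID;

	if (driver_data & QUIRK_BOGUS_NID)
		printf("quirk active: ignore reported namespace IDs\n");
	return 0;
}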