Merge tag 'nvme-6.7-2023-11-22' of git://git.infradead.org/nvme into block-6.7
author Jens Axboe <axboe@kernel.dk>
Wed, 22 Nov 2023 17:19:27 +0000 (10:19 -0700)
committer Jens Axboe <axboe@kernel.dk>
Wed, 22 Nov 2023 17:19:27 +0000 (10:19 -0700)
Pull NVMe fixes from Keith:

"nvme fixes for Linux 6.7

 - TCP TLS fixes (Hannes)
 - Authentication fixes (Mark, Hannes)
 - Properly terminate target names (Christoph)"

* tag 'nvme-6.7-2023-11-22' of git://git.infradead.org/nvme:
  nvme: move nvme_stop_keep_alive() back to original position
  nvmet-tcp: always initialize tls_handshake_tmo_work
  nvmet: nul-terminate the NQNs passed in the connect command
  nvme: blank out authentication fabrics options if not configured
  nvme: catch errors from nvme_configure_metadata()
  nvme-tcp: only evaluate 'tls' option if TLS is selected
  nvme-auth: set explanation code for failure2 msgs
  nvme-auth: unlock mutex in one place only

drivers/nvme/host/auth.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/tcp.c

index 48328e36e93bc423974f5089a4ee5fd0bbcc9a6d..72c0525c75f503bb56c7c246c733f9eea57e44ab 100644 (file)
@@ -757,12 +757,11 @@ static void nvme_queue_auth_work(struct work_struct *work)
                __func__, chap->qid);
        mutex_lock(&ctrl->dhchap_auth_mutex);
        ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+       mutex_unlock(&ctrl->dhchap_auth_mutex);
        if (ret) {
-               mutex_unlock(&ctrl->dhchap_auth_mutex);
                chap->error = ret;
                goto fail2;
        }
-       mutex_unlock(&ctrl->dhchap_auth_mutex);
 
        /* DH-HMAC-CHAP Step 3: send reply */
        dev_dbg(ctrl->device, "%s: qid %d send reply\n",
@@ -839,6 +838,8 @@ static void nvme_queue_auth_work(struct work_struct *work)
        }
 
 fail2:
+       if (chap->status == 0)
+               chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
        dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
                __func__, chap->qid, chap->status);
        tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
index 88b54cdcbd683cd3e7f0a26742aff156ad4c57b5..46a4c9c5ea9625c7feb11d970ebb95cd4ce8be2b 100644 (file)
@@ -482,7 +482,6 @@ EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
 
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
 {
-       nvme_stop_keep_alive(ctrl);
        if (ctrl->admin_tagset) {
                blk_mq_tagset_busy_iter(ctrl->admin_tagset,
                                nvme_cancel_request, ctrl);
@@ -1814,16 +1813,18 @@ set_pi:
        return ret;
 }
 
-static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
        struct nvme_ctrl *ctrl = ns->ctrl;
+       int ret;
 
-       if (nvme_init_ms(ns, id))
-               return;
+       ret = nvme_init_ms(ns, id);
+       if (ret)
+               return ret;
 
        ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
        if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
-               return;
+               return 0;
 
        if (ctrl->ops->flags & NVME_F_FABRICS) {
                /*
@@ -1832,7 +1833,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                 * remap the separate metadata buffer from the block layer.
                 */
                if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
-                       return;
+                       return 0;
 
                ns->features |= NVME_NS_EXT_LBAS;
 
@@ -1859,6 +1860,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                else
                        ns->features |= NVME_NS_METADATA_SUPPORTED;
        }
+       return 0;
 }
 
 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
@@ -2032,7 +2034,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
        ns->lba_shift = id->lbaf[lbaf].ds;
        nvme_set_queue_limits(ns->ctrl, ns->queue);
 
-       nvme_configure_metadata(ns, id);
+       ret = nvme_configure_metadata(ns, id);
+       if (ret < 0) {
+               blk_mq_unfreeze_queue(ns->disk->queue);
+               goto out;
+       }
        nvme_set_chunk_sectors(ns, id);
        nvme_update_disk_info(ns->disk, ns, id);
 
@@ -4348,6 +4354,7 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
        nvme_mpath_stop(ctrl);
        nvme_auth_stop(ctrl);
+       nvme_stop_keep_alive(ctrl);
        nvme_stop_failfast_work(ctrl);
        flush_work(&ctrl->async_event_work);
        cancel_work_sync(&ctrl->fw_act_work);
index 4673ead69c5f9d9b9e51729ece745b56363989de..aa88606a44c40f2b88c1cb8c0286d57632960755 100644 (file)
@@ -667,8 +667,10 @@ static const match_table_t opt_tokens = {
 #endif
        { NVMF_OPT_FAIL_FAST_TMO,       "fast_io_fail_tmo=%d"   },
        { NVMF_OPT_DISCOVERY,           "discovery"             },
+#ifdef CONFIG_NVME_HOST_AUTH
        { NVMF_OPT_DHCHAP_SECRET,       "dhchap_secret=%s"      },
        { NVMF_OPT_DHCHAP_CTRL_SECRET,  "dhchap_ctrl_secret=%s" },
+#endif
 #ifdef CONFIG_NVME_TCP_TLS
        { NVMF_OPT_TLS,                 "tls"                   },
 #endif
index 49c3e46eaa1eee13b1174044104072dc6390990f..9f9a3b35dc64d3ea03c6fea85599279e2b46b3da 100644 (file)
@@ -2530,12 +2530,6 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
         * clean up the admin queue. Same thing as above.
         */
        nvme_quiesce_admin_queue(&ctrl->ctrl);
-
-       /*
-        * Open-coding nvme_cancel_admin_tagset() as fc
-        * is not using nvme_cancel_request().
-        */
-       nvme_stop_keep_alive(&ctrl->ctrl);
        blk_sync_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -3138,11 +3132,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        nvme_unquiesce_admin_queue(&ctrl->ctrl);
 
        ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
-       if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
-               ret = -EIO;
        if (ret)
                goto out_disconnect_admin_queue;
-
+       if (test_bit(ASSOC_FAILED, &ctrl->flags)) {
+               ret = -EIO;
+               goto out_stop_keep_alive;
+       }
        /* sanity checks */
 
        /* FC-NVME does not have other data in the capsule */
@@ -3150,7 +3145,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
                dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
                                ctrl->ctrl.icdoff);
                ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
-               goto out_disconnect_admin_queue;
+               goto out_stop_keep_alive;
        }
 
        /* FC-NVME supports normal SGL Data Block Descriptors */
@@ -3158,7 +3153,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
                dev_err(ctrl->ctrl.device,
                        "Mandatory sgls are not supported!\n");
                ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
-               goto out_disconnect_admin_queue;
+               goto out_stop_keep_alive;
        }
 
        if (opts->queue_size > ctrl->ctrl.maxcmd) {
@@ -3205,6 +3200,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 
 out_term_aen_ops:
        nvme_fc_term_aen_ops(ctrl);
+out_stop_keep_alive:
+       nvme_stop_keep_alive(&ctrl->ctrl);
 out_disconnect_admin_queue:
        dev_warn(ctrl->ctrl.device,
                "NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
index a7fea4cbacd753cfdf59b2a71c1420c7499b561e..6d178d5559204dc522bd5513663032134da2a410 100644 (file)
@@ -1080,6 +1080,7 @@ destroy_io:
                nvme_rdma_free_io_queues(ctrl);
        }
 destroy_admin:
+       nvme_stop_keep_alive(&ctrl->ctrl);
        nvme_quiesce_admin_queue(&ctrl->ctrl);
        blk_sync_queue(ctrl->ctrl.admin_q);
        nvme_rdma_stop_queue(&ctrl->queues[0]);
index 89661a9cf850d493d0ff6e69b60a5525154cfbc4..ddcd23fb8b75d1f1b726c3b0c110bae58c1b02de 100644 (file)
@@ -1916,7 +1916,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
        int ret;
        key_serial_t pskid = 0;
 
-       if (ctrl->opts->tls) {
+       if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && ctrl->opts->tls) {
                if (ctrl->opts->tls_key)
                        pskid = key_serial(ctrl->opts->tls_key);
                else
@@ -2237,6 +2237,7 @@ destroy_io:
                nvme_tcp_destroy_io_queues(ctrl, new);
        }
 destroy_admin:
+       nvme_stop_keep_alive(ctrl);
        nvme_tcp_teardown_admin_queue(ctrl, false);
        return ret;
 }
index 43b5bd8bb6a52dc807a62cc29b2d17eb194c0ed9..d8da840a1c0ed1e9c383d59c11227f7fddfe607d 100644 (file)
@@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
                goto out;
        }
 
+       d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+       d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
        status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
                                  le32_to_cpu(c->kato), &ctrl);
        if (status)
@@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
                goto out;
        }
 
+       d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+       d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
        ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
                                   le16_to_cpu(d->cntlid), req);
        if (!ctrl) {
index 92b74d0b8686a673c7d049d52d49971f77e7be9f..4cc27856aa8fefc53d2a77044ea3a3ef927c8ba5 100644 (file)
@@ -1854,6 +1854,8 @@ static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
        }
        return ret;
 }
+#else
+static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
 #endif
 
 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1911,9 +1913,9 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
        list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);
 
-#ifdef CONFIG_NVME_TARGET_TCP_TLS
        INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
                          nvmet_tcp_tls_handshake_timeout);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
        if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
                struct sock *sk = queue->sock->sk;