Merge tag 'hid-for-linus-2024020101' of git://git.kernel.org/pub/scm/linux/kernel...
[sfrench/cifs-2.6.git] / drivers / nvme / target / tcp.c
index 4cc27856aa8fefc53d2a77044ea3a3ef927c8ba5..6a1e6bb80062d4753501e07cbcba43870fc00eeb 100644 (file)
@@ -24,6 +24,8 @@
 #include "nvmet.h"
 
 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+#define NVMET_TCP_MAXH2CDATA           0x400000 /* 4M arbitrary limit */
+#define NVMET_TCP_BACKLOG 128
 
 static int param_store_val(const char *str, int *val, int min, int max)
 {
@@ -923,7 +925,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
        icresp->hdr.pdo = 0;
        icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
        icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
-       icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
+       icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
        icresp->cpda = 0;
        if (queue->hdr_digest)
                icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -978,13 +980,13 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
 {
        struct nvme_tcp_data_pdu *data = &queue->pdu.data;
        struct nvmet_tcp_cmd *cmd;
+       unsigned int exp_data_len;
 
        if (likely(queue->nr_cmds)) {
                if (unlikely(data->ttag >= queue->nr_cmds)) {
                        pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
                                queue->idx, data->ttag, queue->nr_cmds);
-                       nvmet_tcp_fatal_error(queue);
-                       return -EPROTO;
+                       goto err_proto;
                }
                cmd = &queue->cmds[data->ttag];
        } else {
@@ -995,19 +997,32 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
                pr_err("ttag %u unexpected data offset %u (expected %u)\n",
                        data->ttag, le32_to_cpu(data->data_offset),
                        cmd->rbytes_done);
-               /* FIXME: use path and transport errors */
-               nvmet_req_complete(&cmd->req,
-                       NVME_SC_INVALID_FIELD | NVME_SC_DNR);
-               return -EPROTO;
+               goto err_proto;
        }
 
+       exp_data_len = le32_to_cpu(data->hdr.plen) -
+                       nvmet_tcp_hdgst_len(queue) -
+                       nvmet_tcp_ddgst_len(queue) -
+                       sizeof(*data);
+
        cmd->pdu_len = le32_to_cpu(data->data_length);
+       if (unlikely(cmd->pdu_len != exp_data_len ||
+                    cmd->pdu_len == 0 ||
+                    cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+               pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+               goto err_proto;
+       }
        cmd->pdu_recv = 0;
        nvmet_tcp_build_pdu_iovec(cmd);
        queue->cmd = cmd;
        queue->rcv_state = NVMET_TCP_RECV_DATA;
 
        return 0;
+
+err_proto:
+       /* FIXME: use proper transport errors */
+       nvmet_tcp_fatal_error(queue);
+       return -EPROTO;
 }
 
 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
@@ -1768,7 +1783,7 @@ static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
                 (int)sizeof(struct nvme_tcp_icreq_pdu));
        if (hdr->type == nvme_tcp_icreq &&
            hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
-           hdr->plen == (__le32)sizeof(struct nvme_tcp_icreq_pdu)) {
+           hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
                pr_debug("queue %d: icreq detected\n",
                         queue->idx);
                return len;
@@ -2053,7 +2068,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
                goto err_sock;
        }
 
-       ret = kernel_listen(port->sock, 128);
+       ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
        if (ret) {
                pr_err("failed to listen %d on port sock\n", ret);
                goto err_sock;
@@ -2119,8 +2134,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
                container_of(sq, struct nvmet_tcp_queue, nvme_sq);
 
        if (sq->qid == 0) {
-               /* Let inflight controller teardown complete */
-               flush_workqueue(nvmet_wq);
+               struct nvmet_tcp_queue *q;
+               int pending = 0;
+
+               /* Check for pending controller teardown */
+               mutex_lock(&nvmet_tcp_queue_mutex);
+               list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
+                       if (q->nvme_sq.ctrl == sq->ctrl &&
+                           q->state == NVMET_TCP_Q_DISCONNECTING)
+                               pending++;
+               }
+               mutex_unlock(&nvmet_tcp_queue_mutex);
+               if (pending > NVMET_TCP_BACKLOG)
+                       return NVME_SC_CONNECT_CTRL_BUSY;
        }
 
        queue->nr_cmds = sq->size * 2;