1 // SPDX-License-Identifier: GPL-2.0
3 * NVMe over Fabrics TCP target.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/key.h>
12 #include <linux/nvme-tcp.h>
13 #include <linux/nvme-keyring.h>
17 #include <net/tls_prot.h>
18 #include <net/handshake.h>
19 #include <linux/inet.h>
20 #include <linux/llist.h>
21 #include <crypto/hash.h>
22 #include <trace/events/sock.h>
26 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
28 static int param_store_val(const char *str, int *val, int min, int max)
32 ret = kstrtoint(str, 10, &new_val);
36 if (new_val < min || new_val > max)
43 static int set_params(const char *str, const struct kernel_param *kp)
45 return param_store_val(str, kp->arg, 0, INT_MAX);
48 static const struct kernel_param_ops set_param_ops = {
53 /* Define the socket priority to use for connections where it is desirable
54 * that the NIC consider performing optimized packet processing or filtering.
55 * A non-zero value is sufficient to indicate general consideration of any
56 * possible optimization. Making it a module param allows for alternative
57 * values that may be unique to some NIC implementations.
59 static int so_priority;
60 device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
61 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
63 /* Define the time period (in usecs) for which io_work() shall sample an
64 * activated queue before determining it to be idle. This optional module
65 * behavior can enable NIC solutions that support socket-optimized packet
66 * processing using advanced interrupt moderation techniques.
68 static int idle_poll_period_usecs;
69 device_param_cb(idle_poll_period_usecs, &set_param_ops,
70 &idle_poll_period_usecs, 0644);
71 MODULE_PARM_DESC(idle_poll_period_usecs,
72 "nvmet tcp io_work poll till idle time period in usecs: Default 0");
74 #ifdef CONFIG_NVME_TARGET_TCP_TLS
76 * TLS handshake timeout
78 static int tls_handshake_timeout = 10;
79 module_param(tls_handshake_timeout, int, 0644);
80 MODULE_PARM_DESC(tls_handshake_timeout,
81 "nvme TLS handshake timeout in seconds (default 10)");
84 #define NVMET_TCP_RECV_BUDGET 8
85 #define NVMET_TCP_SEND_BUDGET 8
86 #define NVMET_TCP_IO_WORK_BUDGET 64
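/*
 * These budgets bound how much work a single nvmet_tcp_io_work() invocation
 * may do: at most NVMET_TCP_RECV_BUDGET receive and NVMET_TCP_SEND_BUDGET
 * send operations per loop iteration, with the overall loop capped at
 * NVMET_TCP_IO_WORK_BUDGET operations before the work item is requeued.
 */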
88 enum nvmet_tcp_send_state {
89 NVMET_TCP_SEND_DATA_PDU,
93 NVMET_TCP_SEND_RESPONSE
96 enum nvmet_tcp_recv_state {
104 NVMET_TCP_F_INIT_FAILED = (1 << 0),
107 struct nvmet_tcp_cmd {
108 struct nvmet_tcp_queue *queue;
109 struct nvmet_req req;
111 struct nvme_tcp_cmd_pdu *cmd_pdu;
112 struct nvme_tcp_rsp_pdu *rsp_pdu;
113 struct nvme_tcp_data_pdu *data_pdu;
114 struct nvme_tcp_r2t_pdu *r2t_pdu;
122 char recv_cbuf[CMSG_LEN(sizeof(char))];
123 struct msghdr recv_msg;
127 struct list_head entry;
128 struct llist_node lentry;
132 struct scatterlist *cur_sg;
133 enum nvmet_tcp_send_state state;
139 enum nvmet_tcp_queue_state {
140 NVMET_TCP_Q_CONNECTING,
141 NVMET_TCP_Q_TLS_HANDSHAKE,
143 NVMET_TCP_Q_DISCONNECTING,
147 struct nvmet_tcp_queue {
149 struct nvmet_tcp_port *port;
150 struct work_struct io_work;
151 struct nvmet_cq nvme_cq;
152 struct nvmet_sq nvme_sq;
156 struct nvmet_tcp_cmd *cmds;
157 unsigned int nr_cmds;
158 struct list_head free_list;
159 struct llist_head resp_list;
160 struct list_head resp_send_list;
162 struct nvmet_tcp_cmd *snd_cmd;
167 enum nvmet_tcp_recv_state rcv_state;
168 struct nvmet_tcp_cmd *cmd;
169 union nvme_tcp_pdu pdu;
174 struct ahash_request *snd_hash;
175 struct ahash_request *rcv_hash;
178 key_serial_t tls_pskid;
179 struct delayed_work tls_handshake_tmo_work;
181 unsigned long poll_end;
183 spinlock_t state_lock;
184 enum nvmet_tcp_queue_state state;
186 struct sockaddr_storage sockaddr;
187 struct sockaddr_storage sockaddr_peer;
188 struct work_struct release_work;
191 struct list_head queue_list;
193 struct nvmet_tcp_cmd connect;
195 struct page_frag_cache pf_cache;
197 void (*data_ready)(struct sock *);
198 void (*state_change)(struct sock *);
199 void (*write_space)(struct sock *);
202 struct nvmet_tcp_port {
204 struct work_struct accept_work;
205 struct nvmet_port *nport;
206 struct sockaddr_storage addr;
207 void (*data_ready)(struct sock *);
210 static DEFINE_IDA(nvmet_tcp_queue_ida);
211 static LIST_HEAD(nvmet_tcp_queue_list);
212 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
214 static struct workqueue_struct *nvmet_tcp_wq;
215 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
216 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
217 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
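/*
 * The command tag is simply the command's index in the queue's cmds array.
 * It is advertised to the host as the transfer tag (ttag) in R2T PDUs and
 * used to look the command up again when the matching H2CData PDU arrives.
 */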
219 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
220 struct nvmet_tcp_cmd *cmd)
222 if (unlikely(!queue->nr_cmds)) {
223 /* We didn't allocate cmds yet, send 0xffff */
227 return cmd - queue->cmds;
230 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
232 return nvme_is_write(cmd->req.cmd) &&
233 cmd->rbytes_done < cmd->req.transfer_len;
236 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
238 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
241 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
243 return !nvme_is_write(cmd->req.cmd) &&
244 cmd->req.transfer_len > 0 &&
245 !cmd->req.cqe->status;
248 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
250 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
254 static inline struct nvmet_tcp_cmd *
255 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
257 struct nvmet_tcp_cmd *cmd;
259 cmd = list_first_entry_or_null(&queue->free_list,
260 struct nvmet_tcp_cmd, entry);
263 list_del_init(&cmd->entry);
265 cmd->rbytes_done = cmd->wbytes_done = 0;
273 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
275 if (unlikely(cmd == &cmd->queue->connect))
278 list_add_tail(&cmd->entry, &cmd->queue->free_list);
281 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
283 return queue->sock->sk->sk_incoming_cpu;
286 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
288 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
291 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
293 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
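/*
 * Compute the CRC32C header digest over the 'len' header bytes and store it
 * directly behind the header (at pdu + len), which is its position on the
 * wire; the same helper is used both to generate and to verify HDGST.
 */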
296 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
297 void *pdu, size_t len)
299 struct scatterlist sg;
301 sg_init_one(&sg, pdu, len);
302 ahash_request_set_crypt(hash, &sg, pdu + len, len);
303 crypto_ahash_digest(hash);
306 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
307 void *pdu, size_t len)
309 struct nvme_tcp_hdr *hdr = pdu;
313 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
314 pr_err("queue %d: header digest enabled but no header digest\n",
319 recv_digest = *(__le32 *)(pdu + hdr->hlen);
320 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
321 exp_digest = *(__le32 *)(pdu + hdr->hlen);
322 if (recv_digest != exp_digest) {
323 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
324 queue->idx, le32_to_cpu(recv_digest),
325 le32_to_cpu(exp_digest));
332 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
334 struct nvme_tcp_hdr *hdr = pdu;
335 u8 digest_len = nvmet_tcp_hdgst_len(queue);
338 len = le32_to_cpu(hdr->plen) - hdr->hlen -
339 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
341 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
342 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
349 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
352 sgl_free(cmd->req.sg);
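/*
 * Map the part of the command's scatterlist that the next data transfer
 * covers into a bvec iterator: start at the bytes already received
 * (rbytes_done) and span pdu_len bytes, so sock_recvmsg() copies straight
 * into the mapped data buffers.
 */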
357 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
359 struct bio_vec *iov = cmd->iov;
360 struct scatterlist *sg;
361 u32 length, offset, sg_offset;
364 length = cmd->pdu_len;
365 nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
366 offset = cmd->rbytes_done;
367 cmd->sg_idx = offset / PAGE_SIZE;
368 sg_offset = offset % PAGE_SIZE;
369 sg = &cmd->req.sg[cmd->sg_idx];
372 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
374 bvec_set_page(iov, sg_page(sg), iov_len,
375 sg->offset + sg_offset);
383 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
384 nr_pages, cmd->pdu_len);
387 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
389 queue->rcv_state = NVMET_TCP_RECV_ERR;
390 if (queue->nvme_sq.ctrl)
391 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
393 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
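/*
 * A socket error stops further reception. -EPIPE/-ECONNRESET mean the peer
 * already dropped the connection, so shutting the socket down is enough;
 * any other error is escalated to a controller fatal error.
 */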
396 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
398 queue->rcv_state = NVMET_TCP_RECV_ERR;
399 if (status == -EPIPE || status == -ECONNRESET)
400 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
402 nvmet_tcp_fatal_error(queue);
405 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
407 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
408 u32 len = le32_to_cpu(sgl->length);
413 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
414 NVME_SGL_FMT_OFFSET)) {
415 if (!nvme_is_write(cmd->req.cmd))
416 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
418 if (len > cmd->req.port->inline_data_size)
419 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
422 cmd->req.transfer_len += len;
424 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
426 return NVME_SC_INTERNAL;
427 cmd->cur_sg = cmd->req.sg;
429 if (nvmet_tcp_has_data_in(cmd)) {
430 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
431 sizeof(*cmd->iov), GFP_KERNEL);
438 nvmet_tcp_free_cmd_buffers(cmd);
439 return NVME_SC_INTERNAL;
442 static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
443 struct nvmet_tcp_cmd *cmd)
445 ahash_request_set_crypt(hash, cmd->req.sg,
446 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
447 crypto_ahash_digest(hash);
450 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
452 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
453 struct nvmet_tcp_queue *queue = cmd->queue;
454 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
455 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
458 cmd->state = NVMET_TCP_SEND_DATA_PDU;
460 pdu->hdr.type = nvme_tcp_c2h_data;
461 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
462 NVME_TCP_F_DATA_SUCCESS : 0);
463 pdu->hdr.hlen = sizeof(*pdu);
464 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
466 cpu_to_le32(pdu->hdr.hlen + hdgst +
467 cmd->req.transfer_len + ddgst);
468 pdu->command_id = cmd->req.cqe->command_id;
469 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
470 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
472 if (queue->data_digest) {
473 pdu->hdr.flags |= NVME_TCP_F_DDGST;
474 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
477 if (cmd->queue->hdr_digest) {
478 pdu->hdr.flags |= NVME_TCP_F_HDGST;
479 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
483 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
485 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
486 struct nvmet_tcp_queue *queue = cmd->queue;
487 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
490 cmd->state = NVMET_TCP_SEND_R2T;
492 pdu->hdr.type = nvme_tcp_r2t;
494 pdu->hdr.hlen = sizeof(*pdu);
496 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
498 pdu->command_id = cmd->req.cmd->common.command_id;
499 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
500 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
501 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
502 if (cmd->queue->hdr_digest) {
503 pdu->hdr.flags |= NVME_TCP_F_HDGST;
504 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
508 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
510 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
511 struct nvmet_tcp_queue *queue = cmd->queue;
512 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
515 cmd->state = NVMET_TCP_SEND_RESPONSE;
517 pdu->hdr.type = nvme_tcp_rsp;
519 pdu->hdr.hlen = sizeof(*pdu);
521 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
522 if (cmd->queue->hdr_digest) {
523 pdu->hdr.flags |= NVME_TCP_F_HDGST;
524 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
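/*
 * Response queueing is two-staged: completions from arbitrary context are
 * pushed onto the lock-free resp_list llist, and io_work later splices them
 * into the ordered resp_send_list from which nvmet_tcp_fetch_cmd() picks
 * the next command to transmit.
 */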
528 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
530 struct llist_node *node;
531 struct nvmet_tcp_cmd *cmd;
533 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
534 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
535 list_add(&cmd->entry, &queue->resp_send_list);
536 queue->send_list_len++;
540 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
542 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
543 struct nvmet_tcp_cmd, entry);
544 if (!queue->snd_cmd) {
545 nvmet_tcp_process_resp_list(queue);
547 list_first_entry_or_null(&queue->resp_send_list,
548 struct nvmet_tcp_cmd, entry);
549 if (unlikely(!queue->snd_cmd))
553 list_del_init(&queue->snd_cmd->entry);
554 queue->send_list_len--;
556 if (nvmet_tcp_need_data_out(queue->snd_cmd))
557 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
558 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
559 nvmet_setup_r2t_pdu(queue->snd_cmd);
561 nvmet_setup_response_pdu(queue->snd_cmd);
563 return queue->snd_cmd;
566 static void nvmet_tcp_queue_response(struct nvmet_req *req)
568 struct nvmet_tcp_cmd *cmd =
569 container_of(req, struct nvmet_tcp_cmd, req);
570 struct nvmet_tcp_queue *queue = cmd->queue;
571 struct nvme_sgl_desc *sgl;
574 if (unlikely(cmd == queue->cmd)) {
575 sgl = &cmd->req.cmd->common.dptr.sgl;
576 len = le32_to_cpu(sgl->length);
579 * Wait for inline data before processing the response.
580 * Avoid using helpers, this might happen before
581 * nvmet_req_init is completed.
583 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
584 len && len <= cmd->req.port->inline_data_size &&
585 nvme_is_write(cmd->req.cmd))
589 llist_add(&cmd->lentry, &queue->resp_list);
590 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
593 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
595 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
596 nvmet_tcp_queue_response(&cmd->req);
598 cmd->req.execute(&cmd->req);
601 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
603 struct msghdr msg = {
604 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
607 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
608 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
611 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
612 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
613 ret = sock_sendmsg(cmd->queue->sock, &msg);
623 cmd->state = NVMET_TCP_SEND_DATA;
628 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
630 struct nvmet_tcp_queue *queue = cmd->queue;
633 while (cmd->cur_sg) {
634 struct msghdr msg = {
635 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
637 struct page *page = sg_page(cmd->cur_sg);
639 u32 left = cmd->cur_sg->length - cmd->offset;
641 if ((!last_in_batch && cmd->queue->send_list_len) ||
642 cmd->wbytes_done + left < cmd->req.transfer_len ||
643 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
644 msg.msg_flags |= MSG_MORE;
646 bvec_set_page(&bvec, page, left, cmd->offset);
647 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
648 ret = sock_sendmsg(cmd->queue->sock, &msg);
653 cmd->wbytes_done += ret;
656 if (cmd->offset == cmd->cur_sg->length) {
657 cmd->cur_sg = sg_next(cmd->cur_sg);
662 if (queue->data_digest) {
663 cmd->state = NVMET_TCP_SEND_DDGST;
666 if (queue->nvme_sq.sqhd_disabled) {
667 cmd->queue->snd_cmd = NULL;
668 nvmet_tcp_put_cmd(cmd);
670 nvmet_setup_response_pdu(cmd);
674 if (queue->nvme_sq.sqhd_disabled)
675 nvmet_tcp_free_cmd_buffers(cmd);
681 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
684 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
686 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
687 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
690 if (!last_in_batch && cmd->queue->send_list_len)
691 msg.msg_flags |= MSG_MORE;
693 msg.msg_flags |= MSG_EOR;
695 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
696 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
697 ret = sock_sendmsg(cmd->queue->sock, &msg);
706 nvmet_tcp_free_cmd_buffers(cmd);
707 cmd->queue->snd_cmd = NULL;
708 nvmet_tcp_put_cmd(cmd);
712 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
714 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
716 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
717 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
720 if (!last_in_batch && cmd->queue->send_list_len)
721 msg.msg_flags |= MSG_MORE;
723 msg.msg_flags |= MSG_EOR;
725 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
726 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
727 ret = sock_sendmsg(cmd->queue->sock, &msg);
736 cmd->queue->snd_cmd = NULL;
740 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
742 struct nvmet_tcp_queue *queue = cmd->queue;
743 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
744 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
746 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
751 if (!last_in_batch && cmd->queue->send_list_len)
752 msg.msg_flags |= MSG_MORE;
754 msg.msg_flags |= MSG_EOR;
756 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
757 if (unlikely(ret <= 0))
766 if (queue->nvme_sq.sqhd_disabled) {
767 cmd->queue->snd_cmd = NULL;
768 nvmet_tcp_put_cmd(cmd);
770 nvmet_setup_response_pdu(cmd);
775 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
778 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
781 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
782 cmd = nvmet_tcp_fetch_cmd(queue);
787 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
788 ret = nvmet_try_send_data_pdu(cmd);
793 if (cmd->state == NVMET_TCP_SEND_DATA) {
794 ret = nvmet_try_send_data(cmd, last_in_batch);
799 if (cmd->state == NVMET_TCP_SEND_DDGST) {
800 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
805 if (cmd->state == NVMET_TCP_SEND_R2T) {
806 ret = nvmet_try_send_r2t(cmd, last_in_batch);
811 if (cmd->state == NVMET_TCP_SEND_RESPONSE)
812 ret = nvmet_try_send_response(cmd, last_in_batch);
824 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
825 int budget, int *sends)
829 for (i = 0; i < budget; i++) {
830 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
831 if (unlikely(ret < 0)) {
832 nvmet_tcp_socket_error(queue, ret);
834 } else if (ret == 0) {
843 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
846 queue->left = sizeof(struct nvme_tcp_hdr);
848 queue->rcv_state = NVMET_TCP_RECV_PDU;
851 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
853 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
855 ahash_request_free(queue->rcv_hash);
856 ahash_request_free(queue->snd_hash);
857 crypto_free_ahash(tfm);
860 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
862 struct crypto_ahash *tfm;
864 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
868 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
869 if (!queue->snd_hash)
871 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
873 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
874 if (!queue->rcv_hash)
876 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
880 ahash_request_free(queue->snd_hash);
882 crypto_free_ahash(tfm);
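/*
 * Connection setup: the first PDU on a new queue must be an ICReq. The
 * target validates it, negotiates header/data digests, answers with an
 * ICResp and then moves the queue to NVMET_TCP_Q_LIVE.
 */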
887 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
889 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
890 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
891 struct msghdr msg = {};
895 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
896 pr_err("bad nvme-tcp pdu length (%d)\n",
897 le32_to_cpu(icreq->hdr.plen));
898 nvmet_tcp_fatal_error(queue);
901 if (icreq->pfv != NVME_TCP_PFV_1_0) {
902 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
906 if (icreq->hpda != 0) {
907 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
912 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
913 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
914 if (queue->hdr_digest || queue->data_digest) {
915 ret = nvmet_tcp_alloc_crypto(queue);
920 memset(icresp, 0, sizeof(*icresp));
921 icresp->hdr.type = nvme_tcp_icresp;
922 icresp->hdr.hlen = sizeof(*icresp);
924 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
925 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
926 icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
928 if (queue->hdr_digest)
929 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
930 if (queue->data_digest)
931 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
933 iov.iov_base = icresp;
934 iov.iov_len = sizeof(*icresp);
935 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
937 queue->state = NVMET_TCP_Q_FAILED;
938 return ret; /* queue removal will clean up */
941 queue->state = NVMET_TCP_Q_LIVE;
942 nvmet_prepare_receive_pdu(queue);
946 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
947 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
949 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
953 * This command has not been processed yet, hence we are trying to
954 * figure out if there is still pending data left to receive. If
955 * there is not, we can simply prepare for the next pdu and bail out;
956 * otherwise we will need to prepare a buffer and receive the
957 * stale data before continuing forward.
959 if (!nvme_is_write(cmd->req.cmd) || !data_len ||
960 data_len > cmd->req.port->inline_data_size) {
961 nvmet_prepare_receive_pdu(queue);
965 ret = nvmet_tcp_map_data(cmd);
967 pr_err("queue %d: failed to map data\n", queue->idx);
968 nvmet_tcp_fatal_error(queue);
972 queue->rcv_state = NVMET_TCP_RECV_DATA;
973 nvmet_tcp_build_pdu_iovec(cmd);
974 cmd->flags |= NVMET_TCP_F_INIT_FAILED;
977 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
979 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
980 struct nvmet_tcp_cmd *cmd;
982 if (likely(queue->nr_cmds)) {
983 if (unlikely(data->ttag >= queue->nr_cmds)) {
984 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
985 queue->idx, data->ttag, queue->nr_cmds);
986 nvmet_tcp_fatal_error(queue);
989 cmd = &queue->cmds[data->ttag];
991 cmd = &queue->connect;
994 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
995 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
996 data->ttag, le32_to_cpu(data->data_offset),
998 /* FIXME: use path and transport errors */
999 nvmet_req_complete(&cmd->req,
1000 NVME_SC_INVALID_FIELD | NVME_SC_DNR);
1004 cmd->pdu_len = le32_to_cpu(data->data_length);
1006 nvmet_tcp_build_pdu_iovec(cmd);
1008 queue->rcv_state = NVMET_TCP_RECV_DATA;
1013 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
1015 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1016 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
1017 struct nvmet_req *req;
1020 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1021 if (hdr->type != nvme_tcp_icreq) {
1022 pr_err("unexpected pdu type (%d) before icreq\n",
1024 nvmet_tcp_fatal_error(queue);
1027 return nvmet_tcp_handle_icreq(queue);
1030 if (unlikely(hdr->type == nvme_tcp_icreq)) {
1031 pr_err("queue %d: received icreq pdu in state %d\n",
1032 queue->idx, queue->state);
1033 nvmet_tcp_fatal_error(queue);
1037 if (hdr->type == nvme_tcp_h2c_data) {
1038 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1044 queue->cmd = nvmet_tcp_get_cmd(queue);
1045 if (unlikely(!queue->cmd)) {
1046 /* This should never happen */
1047 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1048 queue->idx, queue->nr_cmds, queue->send_list_len,
1049 nvme_cmd->common.opcode);
1050 nvmet_tcp_fatal_error(queue);
1054 req = &queue->cmd->req;
1055 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1057 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1058 &queue->nvme_sq, &nvmet_tcp_ops))) {
1059 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
1060 req->cmd, req->cmd->common.command_id,
1061 req->cmd->common.opcode,
1062 le32_to_cpu(req->cmd->common.dptr.sgl.length));
1064 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1068 ret = nvmet_tcp_map_data(queue->cmd);
1069 if (unlikely(ret)) {
1070 pr_err("queue %d: failed to map data\n", queue->idx);
1071 if (nvmet_tcp_has_inline_data(queue->cmd))
1072 nvmet_tcp_fatal_error(queue);
1074 nvmet_req_complete(req, ret);
1079 if (nvmet_tcp_need_data_in(queue->cmd)) {
1080 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1081 queue->rcv_state = NVMET_TCP_RECV_DATA;
1082 nvmet_tcp_build_pdu_iovec(queue->cmd);
1086 nvmet_tcp_queue_response(&queue->cmd->req);
1090 queue->cmd->req.execute(&queue->cmd->req);
1092 nvmet_prepare_receive_pdu(queue);
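/*
 * Expected header lengths for the PDU types a host may legitimately send
 * (ICReq, Command, H2CData); anything else is rejected when the PDU header
 * is received in nvmet_tcp_try_recv_pdu().
 */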
1096 static const u8 nvme_tcp_pdu_sizes[] = {
1097 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
1098 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
1099 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
1102 static inline u8 nvmet_tcp_pdu_size(u8 type)
1106 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1107 nvme_tcp_pdu_sizes[idx]) ?
1108 nvme_tcp_pdu_sizes[idx] : 0;
1111 static inline bool nvmet_tcp_pdu_valid(u8 type)
1114 case nvme_tcp_icreq:
1116 case nvme_tcp_h2c_data:
1124 static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
1125 struct msghdr *msg, char *cbuf)
1127 struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
1128 u8 ctype, level, description;
1131 ctype = tls_get_record_type(queue->sock->sk, cmsg);
1135 case TLS_RECORD_TYPE_DATA:
1137 case TLS_RECORD_TYPE_ALERT:
1138 tls_alert_recv(queue->sock->sk, msg, &level, &description);
1139 if (level == TLS_ALERT_LEVEL_FATAL) {
1140 pr_err("queue %d: TLS Alert desc %u\n",
1141 queue->idx, description);
1144 pr_warn("queue %d: TLS Alert desc %u\n",
1145 queue->idx, description);
1150 /* discard this record type */
1151 pr_err("queue %d: TLS record %d unhandled\n",
1159 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1161 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1164 char cbuf[CMSG_LEN(sizeof(char))] = {};
1165 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1168 iov.iov_base = (void *)&queue->pdu + queue->offset;
1169 iov.iov_len = queue->left;
1170 if (queue->tls_pskid) {
1171 msg.msg_control = cbuf;
1172 msg.msg_controllen = sizeof(cbuf);
1174 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1175 iov.iov_len, msg.msg_flags);
1176 if (unlikely(len < 0))
1178 if (queue->tls_pskid) {
1179 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1184 queue->offset += len;
1189 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1190 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1192 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1193 pr_err("unexpected pdu type %d\n", hdr->type);
1194 nvmet_tcp_fatal_error(queue);
1198 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1199 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1203 queue->left = hdr->hlen - queue->offset + hdgst;
1207 if (queue->hdr_digest &&
1208 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1209 nvmet_tcp_fatal_error(queue); /* fatal */
1213 if (queue->data_digest &&
1214 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1215 nvmet_tcp_fatal_error(queue); /* fatal */
1219 return nvmet_tcp_done_recv_pdu(queue);
1222 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1224 struct nvmet_tcp_queue *queue = cmd->queue;
1226 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
1228 queue->left = NVME_TCP_DIGEST_LENGTH;
1229 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1232 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1234 struct nvmet_tcp_cmd *cmd = queue->cmd;
1237 while (msg_data_left(&cmd->recv_msg)) {
1238 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1239 cmd->recv_msg.msg_flags);
1242 if (queue->tls_pskid) {
1243 ret = nvmet_tcp_tls_record_ok(cmd->queue,
1244 &cmd->recv_msg, cmd->recv_cbuf);
1249 cmd->pdu_recv += len;
1250 cmd->rbytes_done += len;
1253 if (queue->data_digest) {
1254 nvmet_tcp_prep_recv_ddgst(cmd);
1258 if (cmd->rbytes_done == cmd->req.transfer_len)
1259 nvmet_tcp_execute_request(cmd);
1261 nvmet_prepare_receive_pdu(queue);
1265 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1267 struct nvmet_tcp_cmd *cmd = queue->cmd;
1269 char cbuf[CMSG_LEN(sizeof(char))] = {};
1270 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1272 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1273 .iov_len = queue->left
1276 if (queue->tls_pskid) {
1277 msg.msg_control = cbuf;
1278 msg.msg_controllen = sizeof(cbuf);
1280 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1281 iov.iov_len, msg.msg_flags);
1282 if (unlikely(len < 0))
1284 if (queue->tls_pskid) {
1285 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1290 queue->offset += len;
1295 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1296 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1297 queue->idx, cmd->req.cmd->common.command_id,
1298 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1299 le32_to_cpu(cmd->exp_ddgst));
1300 nvmet_req_uninit(&cmd->req);
1301 nvmet_tcp_free_cmd_buffers(cmd);
1302 nvmet_tcp_fatal_error(queue);
1307 if (cmd->rbytes_done == cmd->req.transfer_len)
1308 nvmet_tcp_execute_request(cmd);
1312 nvmet_prepare_receive_pdu(queue);
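/*
 * Per-queue receive state machine: first the PDU header (RECV_PDU), then
 * any command data (RECV_DATA), then the trailing data digest when enabled
 * (RECV_DDGST), after which the queue goes back to expecting the next PDU
 * header.
 */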
1316 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1320 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1323 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1324 result = nvmet_tcp_try_recv_pdu(queue);
1329 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1330 result = nvmet_tcp_try_recv_data(queue);
1335 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1336 result = nvmet_tcp_try_recv_ddgst(queue);
1343 if (result == -EAGAIN)
1350 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1351 int budget, int *recvs)
1355 for (i = 0; i < budget; i++) {
1356 ret = nvmet_tcp_try_recv_one(queue);
1357 if (unlikely(ret < 0)) {
1358 nvmet_tcp_socket_error(queue, ret);
1360 } else if (ret == 0) {
1369 static void nvmet_tcp_release_queue(struct kref *kref)
1371 struct nvmet_tcp_queue *queue =
1372 container_of(kref, struct nvmet_tcp_queue, kref);
1374 WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
1375 queue_work(nvmet_wq, &queue->release_work);
1378 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1380 spin_lock_bh(&queue->state_lock);
1381 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1382 /* Socket closed during handshake */
1383 tls_handshake_cancel(queue->sock->sk);
1385 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1386 queue->state = NVMET_TCP_Q_DISCONNECTING;
1387 kref_put(&queue->kref, nvmet_tcp_release_queue);
1389 spin_unlock_bh(&queue->state_lock);
1392 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1394 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1397 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1400 if (!idle_poll_period_usecs)
1404 nvmet_tcp_arm_queue_deadline(queue);
1406 return !time_after(jiffies, queue->poll_end);
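/*
 * The I/O worker alternates between receive and send processing, each
 * bounded by its budget, and loops while progress is being made, up to
 * NVMET_TCP_IO_WORK_BUDGET operations per invocation. It requeues itself
 * when work is still pending or the optional idle-poll deadline has not
 * expired yet.
 */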
1409 static void nvmet_tcp_io_work(struct work_struct *w)
1411 struct nvmet_tcp_queue *queue =
1412 container_of(w, struct nvmet_tcp_queue, io_work);
1419 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1425 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1431 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1434 * Requeue the worker if the idle deadline period is still in progress or
1435 * any ops activity was recorded during the do-while loop above.
1437 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1438 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1441 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1442 struct nvmet_tcp_cmd *c)
1444 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1447 c->req.port = queue->port->nport;
1449 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1450 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1453 c->req.cmd = &c->cmd_pdu->cmd;
1455 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1456 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1459 c->req.cqe = &c->rsp_pdu->cqe;
1461 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1462 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1466 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1467 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1471 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1472 c->recv_msg.msg_control = c->recv_cbuf;
1473 c->recv_msg.msg_controllen = sizeof(c->recv_cbuf);
1475 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1477 list_add_tail(&c->entry, &queue->free_list);
1481 page_frag_free(c->data_pdu);
1483 page_frag_free(c->rsp_pdu);
1485 page_frag_free(c->cmd_pdu);
1489 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1491 page_frag_free(c->r2t_pdu);
1492 page_frag_free(c->data_pdu);
1493 page_frag_free(c->rsp_pdu);
1494 page_frag_free(c->cmd_pdu);
1497 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1499 struct nvmet_tcp_cmd *cmds;
1500 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1502 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1506 for (i = 0; i < nr_cmds; i++) {
1507 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1517 nvmet_tcp_free_cmd(cmds + i);
1523 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1525 struct nvmet_tcp_cmd *cmds = queue->cmds;
1528 for (i = 0; i < queue->nr_cmds; i++)
1529 nvmet_tcp_free_cmd(cmds + i);
1531 nvmet_tcp_free_cmd(&queue->connect);
1535 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1537 struct socket *sock = queue->sock;
1539 write_lock_bh(&sock->sk->sk_callback_lock);
1540 sock->sk->sk_data_ready = queue->data_ready;
1541 sock->sk->sk_state_change = queue->state_change;
1542 sock->sk->sk_write_space = queue->write_space;
1543 sock->sk->sk_user_data = NULL;
1544 write_unlock_bh(&sock->sk->sk_callback_lock);
1547 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1549 struct nvmet_tcp_cmd *cmd = queue->cmds;
1552 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1553 if (nvmet_tcp_need_data_in(cmd))
1554 nvmet_req_uninit(&cmd->req);
1557 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1558 /* failed in connect */
1559 nvmet_req_uninit(&queue->connect.req);
1563 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1565 struct nvmet_tcp_cmd *cmd = queue->cmds;
1568 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1569 if (nvmet_tcp_need_data_in(cmd))
1570 nvmet_tcp_free_cmd_buffers(cmd);
1573 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
1574 nvmet_tcp_free_cmd_buffers(&queue->connect);
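/*
 * Queue teardown: restore the original socket callbacks first so no new
 * work gets scheduled, cancel the TLS timeout and io_work, fail commands
 * still waiting for host data, destroy the submission queue, and only then
 * release the socket and the per-queue resources.
 */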
1577 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1580 struct nvmet_tcp_queue *queue =
1581 container_of(w, struct nvmet_tcp_queue, release_work);
1583 mutex_lock(&nvmet_tcp_queue_mutex);
1584 list_del_init(&queue->queue_list);
1585 mutex_unlock(&nvmet_tcp_queue_mutex);
1587 nvmet_tcp_restore_socket_callbacks(queue);
1588 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1589 cancel_work_sync(&queue->io_work);
1590 /* stop accepting incoming data */
1591 queue->rcv_state = NVMET_TCP_RECV_ERR;
1593 nvmet_tcp_uninit_data_in_cmds(queue);
1594 nvmet_sq_destroy(&queue->nvme_sq);
1595 cancel_work_sync(&queue->io_work);
1596 nvmet_tcp_free_cmd_data_in_buffers(queue);
1597 /* ->sock will be released by fput() */
1598 fput(queue->sock->file);
1599 nvmet_tcp_free_cmds(queue);
1600 if (queue->hdr_digest || queue->data_digest)
1601 nvmet_tcp_free_crypto(queue);
1602 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1603 page = virt_to_head_page(queue->pf_cache.va);
1604 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1608 static void nvmet_tcp_data_ready(struct sock *sk)
1610 struct nvmet_tcp_queue *queue;
1612 trace_sk_data_ready(sk);
1614 read_lock_bh(&sk->sk_callback_lock);
1615 queue = sk->sk_user_data;
1616 if (likely(queue)) {
1617 if (queue->data_ready)
1618 queue->data_ready(sk);
1619 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
1620 queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
1623 read_unlock_bh(&sk->sk_callback_lock);
1626 static void nvmet_tcp_write_space(struct sock *sk)
1628 struct nvmet_tcp_queue *queue;
1630 read_lock_bh(&sk->sk_callback_lock);
1631 queue = sk->sk_user_data;
1632 if (unlikely(!queue))
1635 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1636 queue->write_space(sk);
1640 if (sk_stream_is_writeable(sk)) {
1641 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1642 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1645 read_unlock_bh(&sk->sk_callback_lock);
1648 static void nvmet_tcp_state_change(struct sock *sk)
1650 struct nvmet_tcp_queue *queue;
1652 read_lock_bh(&sk->sk_callback_lock);
1653 queue = sk->sk_user_data;
1657 switch (sk->sk_state) {
1662 case TCP_CLOSE_WAIT:
1665 nvmet_tcp_schedule_release_queue(queue);
1668 pr_warn("queue %d unhandled state %d\n",
1669 queue->idx, sk->sk_state);
1672 read_unlock_bh(&sk->sk_callback_lock);
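/*
 * Take over the socket: the original sk callbacks are saved and replaced
 * with the nvmet_tcp ones under sk_callback_lock, but only if the socket is
 * still in TCP_ESTABLISHED, so a socket that is already closing never gets
 * armed for I/O.
 */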
1675 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1677 struct socket *sock = queue->sock;
1678 struct inet_sock *inet = inet_sk(sock->sk);
1681 ret = kernel_getsockname(sock,
1682 (struct sockaddr *)&queue->sockaddr);
1686 ret = kernel_getpeername(sock,
1687 (struct sockaddr *)&queue->sockaddr_peer);
1692 * Clean up whatever is sitting in the TCP transmit queue on socket
1693 * close. This is done to prevent stale data from being sent should
1694 * the network connection be restored before TCP times out.
1696 sock_no_linger(sock->sk);
1698 if (so_priority > 0)
1699 sock_set_priority(sock->sk, so_priority);
1701 /* Set socket type of service */
1702 if (inet->rcv_tos > 0)
1703 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1706 write_lock_bh(&sock->sk->sk_callback_lock);
1707 if (sock->sk->sk_state != TCP_ESTABLISHED) {
1709 * If the socket is already closing, don't even start
1714 sock->sk->sk_user_data = queue;
1715 queue->data_ready = sock->sk->sk_data_ready;
1716 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1717 queue->state_change = sock->sk->sk_state_change;
1718 sock->sk->sk_state_change = nvmet_tcp_state_change;
1719 queue->write_space = sock->sk->sk_write_space;
1720 sock->sk->sk_write_space = nvmet_tcp_write_space;
1721 if (idle_poll_period_usecs)
1722 nvmet_tcp_arm_queue_deadline(queue);
1723 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1725 write_unlock_bh(&sock->sk->sk_callback_lock);
1730 #ifdef CONFIG_NVME_TARGET_TCP_TLS
1731 static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
1733 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1736 .iov_base = (u8 *)&queue->pdu + queue->offset,
1737 .iov_len = sizeof(struct nvme_tcp_hdr),
1739 char cbuf[CMSG_LEN(sizeof(char))] = {};
1740 struct msghdr msg = {
1741 .msg_control = cbuf,
1742 .msg_controllen = sizeof(cbuf),
1743 .msg_flags = MSG_PEEK,
1746 if (nvmet_port_secure_channel_required(queue->port->nport))
1749 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1750 iov.iov_len, msg.msg_flags);
1751 if (unlikely(len < 0)) {
1752 pr_debug("queue %d: peek error %d\n",
1757 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1761 if (len < sizeof(struct nvme_tcp_hdr)) {
1762 pr_debug("queue %d: short read, %d bytes missing\n",
1763 queue->idx, (int)iov.iov_len - len);
1766 pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
1767 queue->idx, hdr->type, hdr->hlen, hdr->plen,
1768 (int)sizeof(struct nvme_tcp_icreq_pdu));
1769 if (hdr->type == nvme_tcp_icreq &&
1770 hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
1771 hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
1772 pr_debug("queue %d: icreq detected\n",
1779 static void nvmet_tcp_tls_handshake_done(void *data, int status,
1780 key_serial_t peerid)
1782 struct nvmet_tcp_queue *queue = data;
1784 pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
1785 queue->idx, peerid, status);
1786 spin_lock_bh(&queue->state_lock);
1787 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1788 spin_unlock_bh(&queue->state_lock);
1792 queue->tls_pskid = peerid;
1793 queue->state = NVMET_TCP_Q_CONNECTING;
1795 queue->state = NVMET_TCP_Q_FAILED;
1796 spin_unlock_bh(&queue->state_lock);
1798 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1800 nvmet_tcp_schedule_release_queue(queue);
1802 nvmet_tcp_set_queue_sock(queue);
1803 kref_put(&queue->kref, nvmet_tcp_release_queue);
1806 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
1808 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
1809 struct nvmet_tcp_queue, tls_handshake_tmo_work);
1811 pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
1813 * If tls_handshake_cancel() fails, we've lost the race with
1814 * nvmet_tcp_tls_handshake_done() */
1815 if (!tls_handshake_cancel(queue->sock->sk))
1817 spin_lock_bh(&queue->state_lock);
1818 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1819 spin_unlock_bh(&queue->state_lock);
1822 queue->state = NVMET_TCP_Q_FAILED;
1823 spin_unlock_bh(&queue->state_lock);
1824 nvmet_tcp_schedule_release_queue(queue);
1825 kref_put(&queue->kref, nvmet_tcp_release_queue);
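/*
 * The TLS handshake itself is delegated to a user-space handshake agent via
 * the net/handshake upcall (tls_server_hello_psk()); the delayed work above
 * enforces tls_handshake_timeout in case that agent never completes the
 * handshake.
 */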
1828 static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
1830 int ret = -EOPNOTSUPP;
1831 struct tls_handshake_args args;
1833 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
1834 pr_warn("cannot start TLS in state %d\n", queue->state);
1838 kref_get(&queue->kref);
1839 pr_debug("queue %d: TLS ServerHello\n", queue->idx);
1840 memset(&args, 0, sizeof(args));
1841 args.ta_sock = queue->sock;
1842 args.ta_done = nvmet_tcp_tls_handshake_done;
1843 args.ta_data = queue;
1844 args.ta_keyring = key_serial(queue->port->nport->keyring);
1845 args.ta_timeout_ms = tls_handshake_timeout * 1000;
1847 ret = tls_server_hello_psk(&args, GFP_KERNEL);
1849 kref_put(&queue->kref, nvmet_tcp_release_queue);
1850 pr_err("failed to start TLS, err=%d\n", ret);
1852 queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
1853 tls_handshake_timeout * HZ);
1858 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
1861 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1862 struct socket *newsock)
1864 struct nvmet_tcp_queue *queue;
1865 struct file *sock_file = NULL;
1868 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1874 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1875 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1876 kref_init(&queue->kref);
1877 queue->sock = newsock;
1880 spin_lock_init(&queue->state_lock);
1881 if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
1882 NVMF_TCP_SECTYPE_TLS13)
1883 queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
1885 queue->state = NVMET_TCP_Q_CONNECTING;
1886 INIT_LIST_HEAD(&queue->free_list);
1887 init_llist_head(&queue->resp_list);
1888 INIT_LIST_HEAD(&queue->resp_send_list);
1890 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1891 if (IS_ERR(sock_file)) {
1892 ret = PTR_ERR(sock_file);
1893 goto out_free_queue;
1896 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1897 if (queue->idx < 0) {
1902 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1904 goto out_ida_remove;
1906 ret = nvmet_sq_init(&queue->nvme_sq);
1908 goto out_free_connect;
1910 nvmet_prepare_receive_pdu(queue);
1912 mutex_lock(&nvmet_tcp_queue_mutex);
1913 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1914 mutex_unlock(&nvmet_tcp_queue_mutex);
1916 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
1917 nvmet_tcp_tls_handshake_timeout);
1918 #ifdef CONFIG_NVME_TARGET_TCP_TLS
1919 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1920 struct sock *sk = queue->sock->sk;
1922 /* Restore the default callbacks before starting the upcall */
1923 read_lock_bh(&sk->sk_callback_lock);
1924 sk->sk_user_data = NULL;
1925 sk->sk_data_ready = port->data_ready;
1926 read_unlock_bh(&sk->sk_callback_lock);
1927 if (!nvmet_tcp_try_peek_pdu(queue)) {
1928 if (!nvmet_tcp_tls_handshake(queue))
1930 /* TLS handshake failed, terminate the connection */
1931 goto out_destroy_sq;
1933 /* Not a TLS connection, continue with normal processing */
1934 queue->state = NVMET_TCP_Q_CONNECTING;
1938 ret = nvmet_tcp_set_queue_sock(queue);
1940 goto out_destroy_sq;
1944 mutex_lock(&nvmet_tcp_queue_mutex);
1945 list_del_init(&queue->queue_list);
1946 mutex_unlock(&nvmet_tcp_queue_mutex);
1947 nvmet_sq_destroy(&queue->nvme_sq);
1949 nvmet_tcp_free_cmd(&queue->connect);
1951 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1953 fput(queue->sock->file);
1957 pr_err("failed to allocate queue, error %d\n", ret);
1959 sock_release(newsock);
1962 static void nvmet_tcp_accept_work(struct work_struct *w)
1964 struct nvmet_tcp_port *port =
1965 container_of(w, struct nvmet_tcp_port, accept_work);
1966 struct socket *newsock;
1970 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1973 pr_warn("failed to accept err=%d\n", ret);
1976 nvmet_tcp_alloc_queue(port, newsock);
1980 static void nvmet_tcp_listen_data_ready(struct sock *sk)
1982 struct nvmet_tcp_port *port;
1984 trace_sk_data_ready(sk);
1986 read_lock_bh(&sk->sk_callback_lock);
1987 port = sk->sk_user_data;
1991 if (sk->sk_state == TCP_LISTEN)
1992 queue_work(nvmet_wq, &port->accept_work);
1994 read_unlock_bh(&sk->sk_callback_lock);
1997 static int nvmet_tcp_add_port(struct nvmet_port *nport)
1999 struct nvmet_tcp_port *port;
2000 __kernel_sa_family_t af;
2003 port = kzalloc(sizeof(*port), GFP_KERNEL);
2007 switch (nport->disc_addr.adrfam) {
2008 case NVMF_ADDR_FAMILY_IP4:
2011 case NVMF_ADDR_FAMILY_IP6:
2015 pr_err("address family %d not supported\n",
2016 nport->disc_addr.adrfam);
2021 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
2022 nport->disc_addr.trsvcid, &port->addr);
2024 pr_err("malformed ip/port passed: %s:%s\n",
2025 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
2029 port->nport = nport;
2030 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
2031 if (port->nport->inline_data_size < 0)
2032 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
2034 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
2035 IPPROTO_TCP, &port->sock);
2037 pr_err("failed to create a socket\n");
2041 port->sock->sk->sk_user_data = port;
2042 port->data_ready = port->sock->sk->sk_data_ready;
2043 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
2044 sock_set_reuseaddr(port->sock->sk);
2045 tcp_sock_set_nodelay(port->sock->sk);
2046 if (so_priority > 0)
2047 sock_set_priority(port->sock->sk, so_priority);
2049 ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
2050 sizeof(port->addr));
2052 pr_err("failed to bind port socket %d\n", ret);
2056 ret = kernel_listen(port->sock, 128);
2058 pr_err("failed to listen %d on port sock\n", ret);
2063 pr_info("enabling port %d (%pISpc)\n",
2064 le16_to_cpu(nport->disc_addr.portid), &port->addr);
2069 sock_release(port->sock);
2075 static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
2077 struct nvmet_tcp_queue *queue;
2079 mutex_lock(&nvmet_tcp_queue_mutex);
2080 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2081 if (queue->port == port)
2082 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2083 mutex_unlock(&nvmet_tcp_queue_mutex);
2086 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
2088 struct nvmet_tcp_port *port = nport->priv;
2090 write_lock_bh(&port->sock->sk->sk_callback_lock);
2091 port->sock->sk->sk_data_ready = port->data_ready;
2092 port->sock->sk->sk_user_data = NULL;
2093 write_unlock_bh(&port->sock->sk->sk_callback_lock);
2094 cancel_work_sync(&port->accept_work);
2096 * Destroy the remaining queues, which do not belong to any
2099 nvmet_tcp_destroy_port_queues(port);
2101 sock_release(port->sock);
2105 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
2107 struct nvmet_tcp_queue *queue;
2109 mutex_lock(&nvmet_tcp_queue_mutex);
2110 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2111 if (queue->nvme_sq.ctrl == ctrl)
2112 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2113 mutex_unlock(&nvmet_tcp_queue_mutex);
2116 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
2118 struct nvmet_tcp_queue *queue =
2119 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
2122 /* Let inflight controller teardown complete */
2123 flush_workqueue(nvmet_wq);
2126 queue->nr_cmds = sq->size * 2;
2127 if (nvmet_tcp_alloc_cmds(queue))
2128 return NVME_SC_INTERNAL;
2132 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
2133 struct nvmet_port *nport, char *traddr)
2135 struct nvmet_tcp_port *port = nport->priv;
2137 if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
2138 struct nvmet_tcp_cmd *cmd =
2139 container_of(req, struct nvmet_tcp_cmd, req);
2140 struct nvmet_tcp_queue *queue = cmd->queue;
2142 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
2144 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
2148 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
2149 .owner = THIS_MODULE,
2150 .type = NVMF_TRTYPE_TCP,
2152 .add_port = nvmet_tcp_add_port,
2153 .remove_port = nvmet_tcp_remove_port,
2154 .queue_response = nvmet_tcp_queue_response,
2155 .delete_ctrl = nvmet_tcp_delete_ctrl,
2156 .install_queue = nvmet_tcp_install_queue,
2157 .disc_traddr = nvmet_tcp_disc_port_addr,
2160 static int __init nvmet_tcp_init(void)
2164 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
2165 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2169 ret = nvmet_register_transport(&nvmet_tcp_ops);
2175 destroy_workqueue(nvmet_tcp_wq);
2179 static void __exit nvmet_tcp_exit(void)
2181 struct nvmet_tcp_queue *queue;
2183 nvmet_unregister_transport(&nvmet_tcp_ops);
2185 flush_workqueue(nvmet_wq);
2186 mutex_lock(&nvmet_tcp_queue_mutex);
2187 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2188 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2189 mutex_unlock(&nvmet_tcp_queue_mutex);
2190 flush_workqueue(nvmet_wq);
2192 destroy_workqueue(nvmet_tcp_wq);
2195 module_init(nvmet_tcp_init);
2196 module_exit(nvmet_tcp_exit);
2198 MODULE_LICENSE("GPL v2");
2199 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */