1 // SPDX-License-Identifier: GPL-2.0
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/key.h>
12 #include <linux/nvme-tcp.h>
13 #include <linux/nvme-keyring.h>
17 #include <net/tls_prot.h>
18 #include <net/handshake.h>
19 #include <linux/blk-mq.h>
20 #include <crypto/hash.h>
21 #include <net/busy_poll.h>
22 #include <trace/events/sock.h>
27 struct nvme_tcp_queue;
29 /* Define the socket priority to use for connections where it is desirable
30 * that the NIC consider performing optimized packet processing or filtering.
31 * A non-zero value is sufficient to indicate general consideration of any
32 * possible optimization. Making it a module param allows for alternative
33 * values that may be unique for some NIC implementations.
35 static int so_priority;
36 module_param(so_priority, int, 0644);
37 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
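/*
 * Usage note (illustrative): since the parameter is 0644 it can be given at
 * module load time or, typically, changed at runtime through sysfs, e.g.
 *
 *   modprobe nvme_tcp so_priority=1
 *   echo 1 > /sys/module/nvme_tcp/parameters/so_priority
 *
 * A non-zero value is later applied to every queue socket with
 * sock_set_priority() during queue allocation.
 */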
39 #ifdef CONFIG_NVME_TCP_TLS
41 * TLS handshake timeout
43 static int tls_handshake_timeout = 10;
44 module_param(tls_handshake_timeout, int, 0644);
45 MODULE_PARM_DESC(tls_handshake_timeout,
46 "nvme TLS handshake timeout in seconds (default 10)");
49 #ifdef CONFIG_DEBUG_LOCK_ALLOC
50 /* lockdep can detect a circular dependency of the form
51 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
52 * because dependencies are tracked for both nvme-tcp and user contexts. Using
53 * a separate class prevents lockdep from conflating nvme-tcp socket use with
54 * user-space socket API use.
56 static struct lock_class_key nvme_tcp_sk_key[2];
57 static struct lock_class_key nvme_tcp_slock_key[2];
59 static void nvme_tcp_reclassify_socket(struct socket *sock)
61 struct sock *sk = sock->sk;
63 if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
66 switch (sk->sk_family) {
68 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
69 &nvme_tcp_slock_key[0],
70 "sk_lock-AF_INET-NVME",
74 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
75 &nvme_tcp_slock_key[1],
76 "sk_lock-AF_INET6-NVME",
84 static void nvme_tcp_reclassify_socket(struct socket *sock) { }
87 enum nvme_tcp_send_state {
88 NVME_TCP_SEND_CMD_PDU = 0,
89 NVME_TCP_SEND_H2C_PDU,
94 struct nvme_tcp_request {
95 struct nvme_request req;
97 struct nvme_tcp_queue *queue;
105 struct list_head entry;
106 struct llist_node lentry;
109 struct bio *curr_bio;
110 struct iov_iter iter;
115 enum nvme_tcp_send_state state;
118 enum nvme_tcp_queue_flags {
119 NVME_TCP_Q_ALLOCATED = 0,
121 NVME_TCP_Q_POLLING = 2,
124 enum nvme_tcp_recv_state {
125 NVME_TCP_RECV_PDU = 0,
130 struct nvme_tcp_ctrl;
131 struct nvme_tcp_queue {
133 struct work_struct io_work;
136 struct mutex queue_lock;
137 struct mutex send_mutex;
138 struct llist_head req_list;
139 struct list_head send_list;
145 size_t data_remaining;
146 size_t ddgst_remaining;
150 struct nvme_tcp_request *request;
153 size_t cmnd_capsule_len;
154 struct nvme_tcp_ctrl *ctrl;
160 struct ahash_request *rcv_hash;
161 struct ahash_request *snd_hash;
164 #ifdef CONFIG_NVME_TCP_TLS
165 struct completion tls_complete;
168 struct page_frag_cache pf_cache;
170 void (*state_change)(struct sock *);
171 void (*data_ready)(struct sock *);
172 void (*write_space)(struct sock *);
175 struct nvme_tcp_ctrl {
176 /* read only in the hot path */
177 struct nvme_tcp_queue *queues;
178 struct blk_mq_tag_set tag_set;
180 /* other member variables */
181 struct list_head list;
182 struct blk_mq_tag_set admin_tag_set;
183 struct sockaddr_storage addr;
184 struct sockaddr_storage src_addr;
185 struct nvme_ctrl ctrl;
187 struct work_struct err_work;
188 struct delayed_work connect_work;
189 struct nvme_tcp_request async_req;
190 u32 io_queues[HCTX_MAX_TYPES];
193 static LIST_HEAD(nvme_tcp_ctrl_list);
194 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
195 static struct workqueue_struct *nvme_tcp_wq;
196 static const struct blk_mq_ops nvme_tcp_mq_ops;
197 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
198 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
200 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
202 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
205 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
207 return queue - queue->ctrl->queues;
210 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
212 u32 queue_idx = nvme_tcp_queue_id(queue);
215 return queue->ctrl->admin_tag_set.tags[queue_idx];
216 return queue->ctrl->tag_set.tags[queue_idx - 1];
219 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
221 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
224 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
226 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
229 static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
234 static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
236 /* use the pdu space in the back for the data pdu */
237 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
238 sizeof(struct nvme_tcp_data_pdu);
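/*
 * Illustrative layout, assuming the wire sizes from <linux/nvme-tcp.h>
 * (8-byte common header, 64-byte SQE, 24-byte data PDU): the command PDU
 * allocation is 72 bytes, so the H2CData PDU is built in its last 24 bytes
 * (offset 48). That space is free to reuse because the command PDU has
 * already been sent by the time an R2T arrives.
 */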
241 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
243 if (nvme_is_fabrics(req->req.cmd))
244 return NVME_TCP_ADMIN_CCSZ;
245 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
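/*
 * Worked example (illustrative numbers): fabrics/admin commands always get
 * NVME_TCP_ADMIN_CCSZ (8k) of inline data; for I/O commands the limit is
 * the capsule minus the 64-byte SQE. A controller advertising ioccsz = 260
 * gives a 260 * 16 = 4160 byte capsule and thus 4096 bytes of inline data.
 */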
248 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
250 return req == &req->queue->ctrl->async_req;
253 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
257 if (unlikely(nvme_tcp_async_req(req)))
258 return false; /* async events don't have a request */
260 rq = blk_mq_rq_from_pdu(req);
262 return rq_data_dir(rq) == WRITE && req->data_len &&
263 req->data_len <= nvme_tcp_inline_data_size(req);
266 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
268 return req->iter.bvec->bv_page;
271 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
273 return req->iter.bvec->bv_offset + req->iter.iov_offset;
276 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
278 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
279 req->pdu_len - req->pdu_sent);
282 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
284 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
285 req->pdu_len - req->pdu_sent : 0;
288 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
291 return nvme_tcp_pdu_data_left(req) <= len;
294 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
297 struct request *rq = blk_mq_rq_from_pdu(req);
303 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
304 vec = &rq->special_vec;
306 size = blk_rq_payload_bytes(rq);
309 struct bio *bio = req->curr_bio;
313 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
315 bio_for_each_bvec(bv, bio, bi) {
318 size = bio->bi_iter.bi_size;
319 offset = bio->bi_iter.bi_bvec_done;
322 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
323 req->iter.iov_offset = offset;
326 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
329 req->data_sent += len;
330 req->pdu_sent += len;
331 iov_iter_advance(&req->iter, len);
332 if (!iov_iter_count(&req->iter) &&
333 req->data_sent < req->data_len) {
334 req->curr_bio = req->curr_bio->bi_next;
335 nvme_tcp_init_iter(req, ITER_SOURCE);
339 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
343 /* drain the send queue as much as we can... */
345 ret = nvme_tcp_try_send(queue);
349 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
351 return !list_empty(&queue->send_list) ||
352 !llist_empty(&queue->req_list);
355 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
356 bool sync, bool last)
358 struct nvme_tcp_queue *queue = req->queue;
361 empty = llist_add(&req->lentry, &queue->req_list) &&
362 list_empty(&queue->send_list) && !queue->request;
365 * If we're first on the send_list, try to send directly; otherwise
366 * queue io_work. Also, only do the direct send if we are on the same
367 * CPU, so we don't introduce contention.
369 if (queue->io_cpu == raw_smp_processor_id() &&
370 sync && empty && mutex_trylock(&queue->send_mutex)) {
371 nvme_tcp_send_all(queue);
372 mutex_unlock(&queue->send_mutex);
375 if (last && nvme_tcp_queue_more(queue))
376 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
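/*
 * Net effect: a request submitted from the queue's io_cpu, with nothing
 * else pending, is sent inline right here; otherwise the send is deferred
 * to io_work (kicked above for the last request of a batch, or from
 * nvme_tcp_commit_rqs()), which serializes against this path via
 * send_mutex.
 */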
379 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
381 struct nvme_tcp_request *req;
382 struct llist_node *node;
384 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
385 req = llist_entry(node, struct nvme_tcp_request, lentry);
386 list_add(&req->entry, &queue->send_list);
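/*
 * Ordering note: llist_del_all() hands back the entries newest-first and
 * list_add() pushes each one to the front of send_list, so the two
 * reversals cancel out. Example: queueing A, B, C yields C->B->A from the
 * llist and A, B, C on send_list, preserving submission order.
 */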
390 static inline struct nvme_tcp_request *
391 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
393 struct nvme_tcp_request *req;
395 req = list_first_entry_or_null(&queue->send_list,
396 struct nvme_tcp_request, entry);
398 nvme_tcp_process_req_list(queue);
399 req = list_first_entry_or_null(&queue->send_list,
400 struct nvme_tcp_request, entry);
405 list_del(&req->entry);
409 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
412 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
413 crypto_ahash_final(hash);
416 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
417 struct page *page, off_t off, size_t len)
419 struct scatterlist sg;
421 sg_init_table(&sg, 1);
422 sg_set_page(&sg, page, len, off);
423 ahash_request_set_crypt(hash, &sg, NULL, len);
424 crypto_ahash_update(hash);
427 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
428 void *pdu, size_t len)
430 struct scatterlist sg;
432 sg_init_one(&sg, pdu, len);
433 ahash_request_set_crypt(hash, &sg, pdu + len, len);
434 crypto_ahash_digest(hash);
437 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
438 void *pdu, size_t pdu_len)
440 struct nvme_tcp_hdr *hdr = pdu;
444 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
445 dev_err(queue->ctrl->ctrl.device,
446 "queue %d: header digest flag is cleared\n",
447 nvme_tcp_queue_id(queue));
451 recv_digest = *(__le32 *)(pdu + hdr->hlen);
452 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
453 exp_digest = *(__le32 *)(pdu + hdr->hlen);
454 if (recv_digest != exp_digest) {
455 dev_err(queue->ctrl->ctrl.device,
456 "header digest error: recv %#x expected %#x\n",
457 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
464 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
466 struct nvme_tcp_hdr *hdr = pdu;
467 u8 digest_len = nvme_tcp_hdgst_len(queue);
470 len = le32_to_cpu(hdr->plen) - hdr->hlen -
471 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
473 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
474 dev_err(queue->ctrl->ctrl.device,
475 "queue %d: data digest flag is cleared\n",
476 nvme_tcp_queue_id(queue));
479 crypto_ahash_init(queue->rcv_hash);
484 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
485 struct request *rq, unsigned int hctx_idx)
487 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
489 page_frag_free(req->pdu);
492 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
493 struct request *rq, unsigned int hctx_idx,
494 unsigned int numa_node)
496 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
497 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
498 struct nvme_tcp_cmd_pdu *pdu;
499 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
500 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
501 u8 hdgst = nvme_tcp_hdgst_len(queue);
503 req->pdu = page_frag_alloc(&queue->pf_cache,
504 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
505 GFP_KERNEL | __GFP_ZERO);
511 nvme_req(rq)->ctrl = &ctrl->ctrl;
512 nvme_req(rq)->cmd = &pdu->cmd;
517 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
518 unsigned int hctx_idx)
520 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
521 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
523 hctx->driver_data = queue;
527 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
528 unsigned int hctx_idx)
530 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
531 struct nvme_tcp_queue *queue = &ctrl->queues[0];
533 hctx->driver_data = queue;
537 static enum nvme_tcp_recv_state
538 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
540 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
541 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
545 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
547 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
548 nvme_tcp_hdgst_len(queue);
549 queue->pdu_offset = 0;
550 queue->data_remaining = -1;
551 queue->ddgst_remaining = 0;
554 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
556 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
559 dev_warn(ctrl->device, "starting error recovery\n");
560 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
563 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
564 struct nvme_completion *cqe)
566 struct nvme_tcp_request *req;
569 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
571 dev_err(queue->ctrl->ctrl.device,
572 "got bad cqe.command_id %#x on queue %d\n",
573 cqe->command_id, nvme_tcp_queue_id(queue));
574 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
578 req = blk_mq_rq_to_pdu(rq);
579 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
580 req->status = cqe->status;
582 if (!nvme_try_complete_req(rq, req->status, cqe->result))
583 nvme_complete_rq(rq);
589 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
590 struct nvme_tcp_data_pdu *pdu)
594 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
596 dev_err(queue->ctrl->ctrl.device,
597 "got bad c2hdata.command_id %#x on queue %d\n",
598 pdu->command_id, nvme_tcp_queue_id(queue));
602 if (!blk_rq_payload_bytes(rq)) {
603 dev_err(queue->ctrl->ctrl.device,
604 "queue %d tag %#x unexpected data\n",
605 nvme_tcp_queue_id(queue), rq->tag);
609 queue->data_remaining = le32_to_cpu(pdu->data_length);
611 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
612 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
613 dev_err(queue->ctrl->ctrl.device,
614 "queue %d tag %#x SUCCESS set but not last PDU\n",
615 nvme_tcp_queue_id(queue), rq->tag);
616 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
623 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
624 struct nvme_tcp_rsp_pdu *pdu)
626 struct nvme_completion *cqe = &pdu->cqe;
630 * AEN requests are special as they don't time out and can
631 * survive any kind of queue freeze and often don't respond to
632 * aborts. We don't even bother to allocate a struct request
633 * for them but rather special case them here.
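 * AENs are told apart purely by command_id: nvme_tcp_submit_async_event()
 * uses NVME_AQ_BLK_MQ_DEPTH, a value never handed out as a blk-mq tag on
 * the admin queue, and nvme_is_aen_req() checks exactly for that range.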
635 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
637 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
640 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
645 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
647 struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
648 struct nvme_tcp_queue *queue = req->queue;
649 struct request *rq = blk_mq_rq_from_pdu(req);
650 u32 h2cdata_sent = req->pdu_len;
651 u8 hdgst = nvme_tcp_hdgst_len(queue);
652 u8 ddgst = nvme_tcp_ddgst_len(queue);
654 req->state = NVME_TCP_SEND_H2C_PDU;
656 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
658 req->h2cdata_left -= req->pdu_len;
659 req->h2cdata_offset += h2cdata_sent;
661 memset(data, 0, sizeof(*data));
662 data->hdr.type = nvme_tcp_h2c_data;
663 if (!req->h2cdata_left)
664 data->hdr.flags = NVME_TCP_F_DATA_LAST;
665 if (queue->hdr_digest)
666 data->hdr.flags |= NVME_TCP_F_HDGST;
667 if (queue->data_digest)
668 data->hdr.flags |= NVME_TCP_F_DDGST;
669 data->hdr.hlen = sizeof(*data);
670 data->hdr.pdo = data->hdr.hlen + hdgst;
672 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
673 data->ttag = req->ttag;
674 data->command_id = nvme_cid(rq);
675 data->data_offset = cpu_to_le32(req->h2cdata_offset);
676 data->data_length = cpu_to_le32(req->pdu_len);
679 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
680 struct nvme_tcp_r2t_pdu *pdu)
682 struct nvme_tcp_request *req;
684 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
685 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
687 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
689 dev_err(queue->ctrl->ctrl.device,
690 "got bad r2t.command_id %#x on queue %d\n",
691 pdu->command_id, nvme_tcp_queue_id(queue));
694 req = blk_mq_rq_to_pdu(rq);
696 if (unlikely(!r2t_length)) {
697 dev_err(queue->ctrl->ctrl.device,
698 "req %d r2t len is %u, probably a bug...\n",
699 rq->tag, r2t_length);
703 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
704 dev_err(queue->ctrl->ctrl.device,
705 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
706 rq->tag, r2t_length, req->data_len, req->data_sent);
710 if (unlikely(r2t_offset < req->data_sent)) {
711 dev_err(queue->ctrl->ctrl.device,
712 "req %d unexpected r2t offset %u (expected %zu)\n",
713 rq->tag, r2t_offset, req->data_sent);
718 req->h2cdata_left = r2t_length;
719 req->h2cdata_offset = r2t_offset;
720 req->ttag = pdu->ttag;
722 nvme_tcp_setup_h2c_data_pdu(req);
723 nvme_tcp_queue_request(req, false, true);
728 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
729 unsigned int *offset, size_t *len)
731 struct nvme_tcp_hdr *hdr;
732 char *pdu = queue->pdu;
733 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
736 ret = skb_copy_bits(skb, *offset,
737 &pdu[queue->pdu_offset], rcv_len);
741 queue->pdu_remaining -= rcv_len;
742 queue->pdu_offset += rcv_len;
745 if (queue->pdu_remaining)
749 if (queue->hdr_digest) {
750 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
756 if (queue->data_digest) {
757 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
763 case nvme_tcp_c2h_data:
764 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
766 nvme_tcp_init_recv_ctx(queue);
767 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
769 nvme_tcp_init_recv_ctx(queue);
770 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
772 dev_err(queue->ctrl->ctrl.device,
773 "unsupported pdu type (%d)\n", hdr->type);
778 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
780 union nvme_result res = {};
782 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
783 nvme_complete_rq(rq);
786 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
787 unsigned int *offset, size_t *len)
789 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
791 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
792 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
797 recv_len = min_t(size_t, *len, queue->data_remaining);
801 if (!iov_iter_count(&req->iter)) {
802 req->curr_bio = req->curr_bio->bi_next;
805 * If we don't have any bios it means that the controller
806 * sent more data than we requested, hence error
808 if (!req->curr_bio) {
809 dev_err(queue->ctrl->ctrl.device,
810 "queue %d no space in request %#x",
811 nvme_tcp_queue_id(queue), rq->tag);
812 nvme_tcp_init_recv_ctx(queue);
815 nvme_tcp_init_iter(req, ITER_DEST);
818 /* we can read only from what is left in this bio */
819 recv_len = min_t(size_t, recv_len,
820 iov_iter_count(&req->iter));
822 if (queue->data_digest)
823 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
824 &req->iter, recv_len, queue->rcv_hash);
826 ret = skb_copy_datagram_iter(skb, *offset,
827 &req->iter, recv_len);
829 dev_err(queue->ctrl->ctrl.device,
830 "queue %d failed to copy request %#x data",
831 nvme_tcp_queue_id(queue), rq->tag);
837 queue->data_remaining -= recv_len;
840 if (!queue->data_remaining) {
841 if (queue->data_digest) {
842 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
843 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
845 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
846 nvme_tcp_end_request(rq,
847 le16_to_cpu(req->status));
850 nvme_tcp_init_recv_ctx(queue);
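/*
 * NVME_TCP_F_DATA_SUCCESS is the "success optimization": the controller
 * signals that no response capsule will follow this C2HData PDU, so the
 * request is completed here (or after the data digest is verified in
 * nvme_tcp_recv_ddgst()) instead of waiting for a completion PDU.
 */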
857 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
858 struct sk_buff *skb, unsigned int *offset, size_t *len)
860 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
861 char *ddgst = (char *)&queue->recv_ddgst;
862 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
863 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
866 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
870 queue->ddgst_remaining -= recv_len;
873 if (queue->ddgst_remaining)
876 if (queue->recv_ddgst != queue->exp_ddgst) {
877 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
879 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
881 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
883 dev_err(queue->ctrl->ctrl.device,
884 "data digest error: recv %#x expected %#x\n",
885 le32_to_cpu(queue->recv_ddgst),
886 le32_to_cpu(queue->exp_ddgst));
889 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
890 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
892 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
894 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
898 nvme_tcp_init_recv_ctx(queue);
902 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
903 unsigned int offset, size_t len)
905 struct nvme_tcp_queue *queue = desc->arg.data;
906 size_t consumed = len;
909 if (unlikely(!queue->rd_enabled))
913 switch (nvme_tcp_recv_state(queue)) {
914 case NVME_TCP_RECV_PDU:
915 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
917 case NVME_TCP_RECV_DATA:
918 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
920 case NVME_TCP_RECV_DDGST:
921 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
927 dev_err(queue->ctrl->ctrl.device,
928 "receive failed: %d\n", result);
929 queue->rd_enabled = false;
930 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
938 static void nvme_tcp_data_ready(struct sock *sk)
940 struct nvme_tcp_queue *queue;
942 trace_sk_data_ready(sk);
944 read_lock_bh(&sk->sk_callback_lock);
945 queue = sk->sk_user_data;
946 if (likely(queue && queue->rd_enabled) &&
947 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
948 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
949 read_unlock_bh(&sk->sk_callback_lock);
952 static void nvme_tcp_write_space(struct sock *sk)
954 struct nvme_tcp_queue *queue;
956 read_lock_bh(&sk->sk_callback_lock);
957 queue = sk->sk_user_data;
958 if (likely(queue && sk_stream_is_writeable(sk))) {
959 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
960 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
962 read_unlock_bh(&sk->sk_callback_lock);
965 static void nvme_tcp_state_change(struct sock *sk)
967 struct nvme_tcp_queue *queue;
969 read_lock_bh(&sk->sk_callback_lock);
970 queue = sk->sk_user_data;
974 switch (sk->sk_state) {
980 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
983 dev_info(queue->ctrl->ctrl.device,
984 "queue %d socket state %d\n",
985 nvme_tcp_queue_id(queue), sk->sk_state);
988 queue->state_change(sk);
990 read_unlock_bh(&sk->sk_callback_lock);
993 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
995 queue->request = NULL;
998 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
1000 if (nvme_tcp_async_req(req)) {
1001 union nvme_result res = {};
1003 nvme_complete_async_event(&req->queue->ctrl->ctrl,
1004 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
1006 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
1007 NVME_SC_HOST_PATH_ERROR);
1011 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
1013 struct nvme_tcp_queue *queue = req->queue;
1014 int req_data_len = req->data_len;
1015 u32 h2cdata_left = req->h2cdata_left;
1018 struct bio_vec bvec;
1019 struct msghdr msg = {
1020 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
1022 struct page *page = nvme_tcp_req_cur_page(req);
1023 size_t offset = nvme_tcp_req_cur_offset(req);
1024 size_t len = nvme_tcp_req_cur_length(req);
1025 bool last = nvme_tcp_pdu_last_send(req, len);
1026 int req_data_sent = req->data_sent;
1029 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
1030 msg.msg_flags |= MSG_EOR;
1032 msg.msg_flags |= MSG_MORE;
1034 if (!sendpage_ok(page))
1035 msg.msg_flags &= ~MSG_SPLICE_PAGES;
1037 bvec_set_page(&bvec, page, len, offset);
1038 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1039 ret = sock_sendmsg(queue->sock, &msg);
1043 if (queue->data_digest)
1044 nvme_tcp_ddgst_update(queue->snd_hash, page,
1048 * Update the request iterator except for the last payload send
1049 * in the request, where we don't want to modify it because we may
1050 * compete with the RX path completing the request.
1052 if (req_data_sent + ret < req_data_len)
1053 nvme_tcp_advance_req(req, ret);
1055 /* fully successful last send in current PDU */
1056 if (last && ret == len) {
1057 if (queue->data_digest) {
1058 nvme_tcp_ddgst_final(queue->snd_hash,
1060 req->state = NVME_TCP_SEND_DDGST;
1064 nvme_tcp_setup_h2c_data_pdu(req);
1066 nvme_tcp_done_send_req(queue);
1074 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1076 struct nvme_tcp_queue *queue = req->queue;
1077 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1078 struct bio_vec bvec;
1079 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
1080 bool inline_data = nvme_tcp_has_inline_data(req);
1081 u8 hdgst = nvme_tcp_hdgst_len(queue);
1082 int len = sizeof(*pdu) + hdgst - req->offset;
1085 if (inline_data || nvme_tcp_queue_more(queue))
1086 msg.msg_flags |= MSG_MORE;
1088 msg.msg_flags |= MSG_EOR;
1090 if (queue->hdr_digest && !req->offset)
1091 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1093 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1094 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1095 ret = sock_sendmsg(queue->sock, &msg);
1096 if (unlikely(ret <= 0))
1102 req->state = NVME_TCP_SEND_DATA;
1103 if (queue->data_digest)
1104 crypto_ahash_init(queue->snd_hash);
1106 nvme_tcp_done_send_req(queue);
1115 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1117 struct nvme_tcp_queue *queue = req->queue;
1118 struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1119 struct bio_vec bvec;
1120 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
1121 u8 hdgst = nvme_tcp_hdgst_len(queue);
1122 int len = sizeof(*pdu) - req->offset + hdgst;
1125 if (queue->hdr_digest && !req->offset)
1126 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1128 if (!req->h2cdata_left)
1129 msg.msg_flags |= MSG_SPLICE_PAGES;
1131 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1132 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1133 ret = sock_sendmsg(queue->sock, &msg);
1134 if (unlikely(ret <= 0))
1139 req->state = NVME_TCP_SEND_DATA;
1140 if (queue->data_digest)
1141 crypto_ahash_init(queue->snd_hash);
1149 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1151 struct nvme_tcp_queue *queue = req->queue;
1152 size_t offset = req->offset;
1153 u32 h2cdata_left = req->h2cdata_left;
1155 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1157 .iov_base = (u8 *)&req->ddgst + req->offset,
1158 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1161 if (nvme_tcp_queue_more(queue))
1162 msg.msg_flags |= MSG_MORE;
1164 msg.msg_flags |= MSG_EOR;
1166 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1167 if (unlikely(ret <= 0))
1170 if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1172 nvme_tcp_setup_h2c_data_pdu(req);
1174 nvme_tcp_done_send_req(queue);
1182 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1184 struct nvme_tcp_request *req;
1185 unsigned int noreclaim_flag;
1188 if (!queue->request) {
1189 queue->request = nvme_tcp_fetch_request(queue);
1190 if (!queue->request)
1193 req = queue->request;
1195 noreclaim_flag = memalloc_noreclaim_save();
1196 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1197 ret = nvme_tcp_try_send_cmd_pdu(req);
1200 if (!nvme_tcp_has_inline_data(req))
1204 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1205 ret = nvme_tcp_try_send_data_pdu(req);
1210 if (req->state == NVME_TCP_SEND_DATA) {
1211 ret = nvme_tcp_try_send_data(req);
1216 if (req->state == NVME_TCP_SEND_DDGST)
1217 ret = nvme_tcp_try_send_ddgst(req);
1219 if (ret == -EAGAIN) {
1221 } else if (ret < 0) {
1222 dev_err(queue->ctrl->ctrl.device,
1223 "failed to send request %d\n", ret);
1224 nvme_tcp_fail_request(queue->request);
1225 nvme_tcp_done_send_req(queue);
1228 memalloc_noreclaim_restore(noreclaim_flag);
1232 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1234 struct socket *sock = queue->sock;
1235 struct sock *sk = sock->sk;
1236 read_descriptor_t rd_desc;
1239 rd_desc.arg.data = queue;
1243 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1248 static void nvme_tcp_io_work(struct work_struct *w)
1250 struct nvme_tcp_queue *queue =
1251 container_of(w, struct nvme_tcp_queue, io_work);
1252 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1255 bool pending = false;
1258 if (mutex_trylock(&queue->send_mutex)) {
1259 result = nvme_tcp_try_send(queue);
1260 mutex_unlock(&queue->send_mutex);
1263 else if (unlikely(result < 0))
1267 result = nvme_tcp_try_recv(queue);
1270 else if (unlikely(result < 0))
1273 if (!pending || !queue->rd_enabled)
1276 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1278 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1281 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1283 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1285 ahash_request_free(queue->rcv_hash);
1286 ahash_request_free(queue->snd_hash);
1287 crypto_free_ahash(tfm);
1290 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1292 struct crypto_ahash *tfm;
1294 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1296 return PTR_ERR(tfm);
1298 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1299 if (!queue->snd_hash)
1301 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1303 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1304 if (!queue->rcv_hash)
1306 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1310 ahash_request_free(queue->snd_hash);
1312 crypto_free_ahash(tfm);
1316 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1318 struct nvme_tcp_request *async = &ctrl->async_req;
1320 page_frag_free(async->pdu);
1323 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1325 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1326 struct nvme_tcp_request *async = &ctrl->async_req;
1327 u8 hdgst = nvme_tcp_hdgst_len(queue);
1329 async->pdu = page_frag_alloc(&queue->pf_cache,
1330 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1331 GFP_KERNEL | __GFP_ZERO);
1335 async->queue = &ctrl->queues[0];
1339 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1342 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1343 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1344 unsigned int noreclaim_flag;
1346 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1349 if (queue->hdr_digest || queue->data_digest)
1350 nvme_tcp_free_crypto(queue);
1352 if (queue->pf_cache.va) {
1353 page = virt_to_head_page(queue->pf_cache.va);
1354 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1355 queue->pf_cache.va = NULL;
1358 noreclaim_flag = memalloc_noreclaim_save();
1359 /* ->sock will be released by fput() */
1360 fput(queue->sock->file);
1362 memalloc_noreclaim_restore(noreclaim_flag);
1365 mutex_destroy(&queue->send_mutex);
1366 mutex_destroy(&queue->queue_lock);
1369 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1371 struct nvme_tcp_icreq_pdu *icreq;
1372 struct nvme_tcp_icresp_pdu *icresp;
1373 char cbuf[CMSG_LEN(sizeof(char))] = {};
1375 struct msghdr msg = {};
1377 bool ctrl_hdgst, ctrl_ddgst;
1381 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1385 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1391 icreq->hdr.type = nvme_tcp_icreq;
1392 icreq->hdr.hlen = sizeof(*icreq);
1394 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1395 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1396 icreq->maxr2t = 0; /* single inflight r2t supported */
1397 icreq->hpda = 0; /* no alignment constraint */
1398 if (queue->hdr_digest)
1399 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1400 if (queue->data_digest)
1401 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1403 iov.iov_base = icreq;
1404 iov.iov_len = sizeof(*icreq);
1405 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1407 pr_warn("queue %d: failed to send icreq, error %d\n",
1408 nvme_tcp_queue_id(queue), ret);
1412 memset(&msg, 0, sizeof(msg));
1413 iov.iov_base = icresp;
1414 iov.iov_len = sizeof(*icresp);
1415 if (queue->ctrl->ctrl.opts->tls) {
1416 msg.msg_control = cbuf;
1417 msg.msg_controllen = sizeof(cbuf);
1419 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1420 iov.iov_len, msg.msg_flags);
1422 pr_warn("queue %d: failed to receive icresp, error %d\n",
1423 nvme_tcp_queue_id(queue), ret);
1427 if (queue->ctrl->ctrl.opts->tls) {
1428 ctype = tls_get_record_type(queue->sock->sk,
1429 (struct cmsghdr *)cbuf);
1430 if (ctype != TLS_RECORD_TYPE_DATA) {
1431 pr_err("queue %d: unhandled TLS record %d\n",
1432 nvme_tcp_queue_id(queue), ctype);
1437 if (icresp->hdr.type != nvme_tcp_icresp) {
1438 pr_err("queue %d: bad type returned %d\n",
1439 nvme_tcp_queue_id(queue), icresp->hdr.type);
1443 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1444 pr_err("queue %d: bad pdu length returned %d\n",
1445 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1449 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1450 pr_err("queue %d: bad pfv returned %d\n",
1451 nvme_tcp_queue_id(queue), icresp->pfv);
1455 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1456 if ((queue->data_digest && !ctrl_ddgst) ||
1457 (!queue->data_digest && ctrl_ddgst)) {
1458 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1459 nvme_tcp_queue_id(queue),
1460 queue->data_digest ? "enabled" : "disabled",
1461 ctrl_ddgst ? "enabled" : "disabled");
1465 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1466 if ((queue->hdr_digest && !ctrl_hdgst) ||
1467 (!queue->hdr_digest && ctrl_hdgst)) {
1468 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1469 nvme_tcp_queue_id(queue),
1470 queue->hdr_digest ? "enabled" : "disabled",
1471 ctrl_hdgst ? "enabled" : "disabled");
1475 if (icresp->cpda != 0) {
1476 pr_err("queue %d: unsupported cpda returned %d\n",
1477 nvme_tcp_queue_id(queue), icresp->cpda);
1481 maxh2cdata = le32_to_cpu(icresp->maxdata);
1482 if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1483 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1484 nvme_tcp_queue_id(queue), maxh2cdata);
1487 queue->maxh2cdata = maxh2cdata;
1497 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1499 return nvme_tcp_queue_id(queue) == 0;
1502 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1504 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1505 int qid = nvme_tcp_queue_id(queue);
1507 return !nvme_tcp_admin_queue(queue) &&
1508 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1511 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1513 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1514 int qid = nvme_tcp_queue_id(queue);
1516 return !nvme_tcp_admin_queue(queue) &&
1517 !nvme_tcp_default_queue(queue) &&
1518 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1519 ctrl->io_queues[HCTX_TYPE_READ];
1522 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1524 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1525 int qid = nvme_tcp_queue_id(queue);
1527 return !nvme_tcp_admin_queue(queue) &&
1528 !nvme_tcp_default_queue(queue) &&
1529 !nvme_tcp_read_queue(queue) &&
1530 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1531 ctrl->io_queues[HCTX_TYPE_READ] +
1532 ctrl->io_queues[HCTX_TYPE_POLL];
1535 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1537 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1538 int qid = nvme_tcp_queue_id(queue);
1541 if (nvme_tcp_default_queue(queue))
1543 else if (nvme_tcp_read_queue(queue))
1544 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1545 else if (nvme_tcp_poll_queue(queue))
1546 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1547 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1548 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
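/*
 * Worked example (illustrative, assuming CPUs 0..N are online): with
 * io_queues[HCTX_TYPE_DEFAULT] = 4, qid 5 is the first read queue, so
 * n = 5 - 4 - 1 = 0 and cpumask_next_wrap() pins it to the first online
 * CPU; the second read queue (qid 6) lands on the next online CPU, and
 * so on, spreading each queue class across CPUs from CPU 0 upwards.
 */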
1551 #ifdef CONFIG_NVME_TCP_TLS
1552 static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
1554 struct nvme_tcp_queue *queue = data;
1555 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1556 int qid = nvme_tcp_queue_id(queue);
1557 struct key *tls_key;
1559 dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n",
1560 qid, pskid, status);
1563 queue->tls_err = -status;
1567 tls_key = key_lookup(pskid);
1568 if (IS_ERR(tls_key)) {
1569 dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n",
1571 queue->tls_err = -ENOKEY;
1573 ctrl->ctrl.tls_key = tls_key;
1578 complete(&queue->tls_complete);
1581 static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
1582 struct nvme_tcp_queue *queue,
1585 int qid = nvme_tcp_queue_id(queue);
1587 struct tls_handshake_args args;
1588 unsigned long tmo = tls_handshake_timeout * HZ;
1589 key_serial_t keyring = nvme_keyring_id();
1591 dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n",
1593 memset(&args, 0, sizeof(args));
1594 args.ta_sock = queue->sock;
1595 args.ta_done = nvme_tcp_tls_done;
1596 args.ta_data = queue;
1597 args.ta_my_peerids[0] = pskid;
1598 args.ta_num_peerids = 1;
1599 if (nctrl->opts->keyring)
1600 keyring = key_serial(nctrl->opts->keyring);
1601 args.ta_keyring = keyring;
1602 args.ta_timeout_ms = tls_handshake_timeout * 1000;
1603 queue->tls_err = -EOPNOTSUPP;
1604 init_completion(&queue->tls_complete);
1605 ret = tls_client_hello_psk(&args, GFP_KERNEL);
1607 dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n",
1611 ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo);
1616 dev_err(nctrl->device,
1617 "queue %d: TLS handshake failed, error %d\n",
1619 tls_handshake_cancel(queue->sock->sk);
1621 dev_dbg(nctrl->device,
1622 "queue %d: TLS handshake complete, error %d\n",
1623 qid, queue->tls_err);
1624 ret = queue->tls_err;
1629 static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
1630 struct nvme_tcp_queue *queue,
1633 return -EPROTONOSUPPORT;
1637 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
1640 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1641 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1642 int ret, rcv_pdu_size;
1643 struct file *sock_file;
1645 mutex_init(&queue->queue_lock);
1647 init_llist_head(&queue->req_list);
1648 INIT_LIST_HEAD(&queue->send_list);
1649 mutex_init(&queue->send_mutex);
1650 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1653 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1655 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1656 NVME_TCP_ADMIN_CCSZ;
1658 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1659 IPPROTO_TCP, &queue->sock);
1661 dev_err(nctrl->device,
1662 "failed to create socket: %d\n", ret);
1663 goto err_destroy_mutex;
1666 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1667 if (IS_ERR(sock_file)) {
1668 ret = PTR_ERR(sock_file);
1669 goto err_destroy_mutex;
1671 nvme_tcp_reclassify_socket(queue->sock);
1673 /* Single syn retry */
1674 tcp_sock_set_syncnt(queue->sock->sk, 1);
1676 /* Set TCP no delay */
1677 tcp_sock_set_nodelay(queue->sock->sk);
1680 * Clean up whatever is sitting in the TCP transmit queue on socket
1681 * close. This is done to prevent stale data from being sent should
1682 * the network connection be restored before TCP times out.
1684 sock_no_linger(queue->sock->sk);
1686 if (so_priority > 0)
1687 sock_set_priority(queue->sock->sk, so_priority);
1689 /* Set socket type of service */
1690 if (nctrl->opts->tos >= 0)
1691 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1693 /* Set a 10 second timeout for the icresp recvmsg */
1694 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1696 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1697 queue->sock->sk->sk_use_task_frag = false;
1698 nvme_tcp_set_queue_io_cpu(queue);
1699 queue->request = NULL;
1700 queue->data_remaining = 0;
1701 queue->ddgst_remaining = 0;
1702 queue->pdu_remaining = 0;
1703 queue->pdu_offset = 0;
1704 sk_set_memalloc(queue->sock->sk);
1706 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1707 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1708 sizeof(ctrl->src_addr));
1710 dev_err(nctrl->device,
1711 "failed to bind queue %d socket %d\n",
1717 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1718 char *iface = nctrl->opts->host_iface;
1719 sockptr_t optval = KERNEL_SOCKPTR(iface);
1721 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1722 optval, strlen(iface));
1724 dev_err(nctrl->device,
1725 "failed to bind to interface %s queue %d err %d\n",
1731 queue->hdr_digest = nctrl->opts->hdr_digest;
1732 queue->data_digest = nctrl->opts->data_digest;
1733 if (queue->hdr_digest || queue->data_digest) {
1734 ret = nvme_tcp_alloc_crypto(queue);
1736 dev_err(nctrl->device,
1737 "failed to allocate queue %d crypto\n", qid);
1742 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1743 nvme_tcp_hdgst_len(queue);
1744 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1750 dev_dbg(nctrl->device, "connecting queue %d\n",
1751 nvme_tcp_queue_id(queue));
1753 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1754 sizeof(ctrl->addr), 0);
1756 dev_err(nctrl->device,
1757 "failed to connect socket: %d\n", ret);
1761 /* If PSKs are configured try to start TLS */
1763 ret = nvme_tcp_start_tls(nctrl, queue, pskid);
1765 goto err_init_connect;
1768 ret = nvme_tcp_init_connection(queue);
1770 goto err_init_connect;
1772 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1777 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1781 if (queue->hdr_digest || queue->data_digest)
1782 nvme_tcp_free_crypto(queue);
1784 /* ->sock will be released by fput() */
1785 fput(queue->sock->file);
1788 mutex_destroy(&queue->send_mutex);
1789 mutex_destroy(&queue->queue_lock);
1793 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
1795 struct socket *sock = queue->sock;
1797 write_lock_bh(&sock->sk->sk_callback_lock);
1798 sock->sk->sk_user_data = NULL;
1799 sock->sk->sk_data_ready = queue->data_ready;
1800 sock->sk->sk_state_change = queue->state_change;
1801 sock->sk->sk_write_space = queue->write_space;
1802 write_unlock_bh(&sock->sk->sk_callback_lock);
1805 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1807 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1808 nvme_tcp_restore_sock_ops(queue);
1809 cancel_work_sync(&queue->io_work);
1812 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1814 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1815 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1817 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1820 mutex_lock(&queue->queue_lock);
1821 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1822 __nvme_tcp_stop_queue(queue);
1823 mutex_unlock(&queue->queue_lock);
1826 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
1828 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1829 queue->sock->sk->sk_user_data = queue;
1830 queue->state_change = queue->sock->sk->sk_state_change;
1831 queue->data_ready = queue->sock->sk->sk_data_ready;
1832 queue->write_space = queue->sock->sk->sk_write_space;
1833 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1834 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1835 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1836 #ifdef CONFIG_NET_RX_BUSY_POLL
1837 queue->sock->sk->sk_ll_usec = 1;
1839 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1842 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1844 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1845 struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1848 queue->rd_enabled = true;
1849 nvme_tcp_init_recv_ctx(queue);
1850 nvme_tcp_setup_sock_ops(queue);
1853 ret = nvmf_connect_io_queue(nctrl, idx);
1855 ret = nvmf_connect_admin_queue(nctrl);
1858 set_bit(NVME_TCP_Q_LIVE, &queue->flags);
1860 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1861 __nvme_tcp_stop_queue(queue);
1862 dev_err(nctrl->device,
1863 "failed to connect queue: %d ret=%d\n", idx, ret);
1868 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1870 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1871 cancel_work_sync(&ctrl->async_event_work);
1872 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1873 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1876 nvme_tcp_free_queue(ctrl, 0);
1879 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1883 for (i = 1; i < ctrl->queue_count; i++)
1884 nvme_tcp_free_queue(ctrl, i);
1887 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1891 for (i = 1; i < ctrl->queue_count; i++)
1892 nvme_tcp_stop_queue(ctrl, i);
1895 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
1896 int first, int last)
1900 for (i = first; i < last; i++) {
1901 ret = nvme_tcp_start_queue(ctrl, i);
1903 goto out_stop_queues;
1909 for (i--; i >= first; i--)
1910 nvme_tcp_stop_queue(ctrl, i);
1914 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1917 key_serial_t pskid = 0;
1919 if (ctrl->opts->tls) {
1920 if (ctrl->opts->tls_key)
1921 pskid = key_serial(ctrl->opts->tls_key);
1923 pskid = nvme_tls_psk_default(ctrl->opts->keyring,
1924 ctrl->opts->host->nqn,
1925 ctrl->opts->subsysnqn);
1927 dev_err(ctrl->device, "no valid PSK found\n");
1929 goto out_free_queue;
1933 ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
1935 goto out_free_queue;
1937 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1939 goto out_free_queue;
1944 nvme_tcp_free_queue(ctrl, 0);
1948 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1952 if (ctrl->opts->tls && !ctrl->tls_key) {
1953 dev_err(ctrl->device, "no PSK negotiated\n");
1956 for (i = 1; i < ctrl->queue_count; i++) {
1957 ret = nvme_tcp_alloc_queue(ctrl, i,
1958 key_serial(ctrl->tls_key));
1960 goto out_free_queues;
1966 for (i--; i >= 1; i--)
1967 nvme_tcp_free_queue(ctrl, i);
1972 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1974 unsigned int nr_io_queues;
1977 nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
1978 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1982 if (nr_io_queues == 0) {
1983 dev_err(ctrl->device,
1984 "unable to set any I/O queues\n");
1988 ctrl->queue_count = nr_io_queues + 1;
1989 dev_info(ctrl->device,
1990 "creating %d I/O queues.\n", nr_io_queues);
1992 nvmf_set_io_queues(ctrl->opts, nr_io_queues,
1993 to_tcp_ctrl(ctrl)->io_queues);
1994 return __nvme_tcp_alloc_io_queues(ctrl);
1997 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1999 nvme_tcp_stop_io_queues(ctrl);
2001 nvme_remove_io_tag_set(ctrl);
2002 nvme_tcp_free_io_queues(ctrl);
2005 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
2009 ret = nvme_tcp_alloc_io_queues(ctrl);
2014 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
2016 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
2017 sizeof(struct nvme_tcp_request));
2019 goto out_free_io_queues;
2023 * Only start IO queues for which we have allocated the tagset
2024 * and limited it to the available queues. On reconnects, the
2025 * queue number might have changed.
2027 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
2028 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
2030 goto out_cleanup_connect_q;
2033 nvme_start_freeze(ctrl);
2034 nvme_unquiesce_io_queues(ctrl);
2035 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
2037 * If we timed out waiting for freeze we are likely to
2038 * be stuck. Fail the controller initialization just
2042 nvme_unfreeze(ctrl);
2043 goto out_wait_freeze_timed_out;
2045 blk_mq_update_nr_hw_queues(ctrl->tagset,
2046 ctrl->queue_count - 1);
2047 nvme_unfreeze(ctrl);
2051 * If the number of queues has increased (reconnect case)
2052 * start all new queues now.
2054 ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
2055 ctrl->tagset->nr_hw_queues + 1);
2057 goto out_wait_freeze_timed_out;
2061 out_wait_freeze_timed_out:
2062 nvme_quiesce_io_queues(ctrl);
2063 nvme_sync_io_queues(ctrl);
2064 nvme_tcp_stop_io_queues(ctrl);
2065 out_cleanup_connect_q:
2066 nvme_cancel_tagset(ctrl);
2068 nvme_remove_io_tag_set(ctrl);
2070 nvme_tcp_free_io_queues(ctrl);
2074 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
2076 nvme_tcp_stop_queue(ctrl, 0);
2078 nvme_remove_admin_tag_set(ctrl);
2079 nvme_tcp_free_admin_queue(ctrl);
2082 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
2086 error = nvme_tcp_alloc_admin_queue(ctrl);
2091 error = nvme_alloc_admin_tag_set(ctrl,
2092 &to_tcp_ctrl(ctrl)->admin_tag_set,
2093 &nvme_tcp_admin_mq_ops,
2094 sizeof(struct nvme_tcp_request));
2096 goto out_free_queue;
2099 error = nvme_tcp_start_queue(ctrl, 0);
2101 goto out_cleanup_tagset;
2103 error = nvme_enable_ctrl(ctrl);
2105 goto out_stop_queue;
2107 nvme_unquiesce_admin_queue(ctrl);
2109 error = nvme_init_ctrl_finish(ctrl, false);
2111 goto out_quiesce_queue;
2116 nvme_quiesce_admin_queue(ctrl);
2117 blk_sync_queue(ctrl->admin_q);
2119 nvme_tcp_stop_queue(ctrl, 0);
2120 nvme_cancel_admin_tagset(ctrl);
2123 nvme_remove_admin_tag_set(ctrl);
2125 nvme_tcp_free_admin_queue(ctrl);
2129 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2132 nvme_quiesce_admin_queue(ctrl);
2133 blk_sync_queue(ctrl->admin_q);
2134 nvme_tcp_stop_queue(ctrl, 0);
2135 nvme_cancel_admin_tagset(ctrl);
2137 nvme_unquiesce_admin_queue(ctrl);
2138 nvme_tcp_destroy_admin_queue(ctrl, remove);
2141 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2144 if (ctrl->queue_count <= 1)
2146 nvme_quiesce_admin_queue(ctrl);
2147 nvme_quiesce_io_queues(ctrl);
2148 nvme_sync_io_queues(ctrl);
2149 nvme_tcp_stop_io_queues(ctrl);
2150 nvme_cancel_tagset(ctrl);
2152 nvme_unquiesce_io_queues(ctrl);
2153 nvme_tcp_destroy_io_queues(ctrl, remove);
2156 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2158 /* If we are resetting/deleting then do nothing */
2159 if (ctrl->state != NVME_CTRL_CONNECTING) {
2160 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2161 ctrl->state == NVME_CTRL_LIVE);
2165 if (nvmf_should_reconnect(ctrl)) {
2166 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2167 ctrl->opts->reconnect_delay);
2168 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2169 ctrl->opts->reconnect_delay * HZ);
2171 dev_info(ctrl->device, "Removing controller...\n");
2172 nvme_delete_ctrl(ctrl);
2176 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2178 struct nvmf_ctrl_options *opts = ctrl->opts;
2181 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2187 dev_err(ctrl->device, "icdoff is not supported!\n");
2191 if (!nvme_ctrl_sgl_supported(ctrl)) {
2193 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2197 if (opts->queue_size > ctrl->sqsize + 1)
2198 dev_warn(ctrl->device,
2199 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2200 opts->queue_size, ctrl->sqsize + 1);
2202 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2203 dev_warn(ctrl->device,
2204 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2205 ctrl->sqsize + 1, ctrl->maxcmd);
2206 ctrl->sqsize = ctrl->maxcmd - 1;
2209 if (ctrl->queue_count > 1) {
2210 ret = nvme_tcp_configure_io_queues(ctrl, new);
2215 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2217 * state change failure is ok if we started ctrl delete,
2218 * unless we are in the middle of creating a new controller,
2219 * to avoid races with the teardown flow.
2221 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2222 ctrl->state != NVME_CTRL_DELETING_NOIO);
2228 nvme_start_ctrl(ctrl);
2232 if (ctrl->queue_count > 1) {
2233 nvme_quiesce_io_queues(ctrl);
2234 nvme_sync_io_queues(ctrl);
2235 nvme_tcp_stop_io_queues(ctrl);
2236 nvme_cancel_tagset(ctrl);
2237 nvme_tcp_destroy_io_queues(ctrl, new);
2240 nvme_tcp_teardown_admin_queue(ctrl, false);
2244 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2246 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2247 struct nvme_tcp_ctrl, connect_work);
2248 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2250 ++ctrl->nr_reconnects;
2252 if (nvme_tcp_setup_ctrl(ctrl, false))
2255 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2256 ctrl->nr_reconnects);
2258 ctrl->nr_reconnects = 0;
2263 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2264 ctrl->nr_reconnects);
2265 nvme_tcp_reconnect_or_remove(ctrl);
2268 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2270 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2271 struct nvme_tcp_ctrl, err_work);
2272 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2274 nvme_stop_keep_alive(ctrl);
2275 flush_work(&ctrl->async_event_work);
2276 nvme_tcp_teardown_io_queues(ctrl, false);
2277 /* unquiesce so that pending requests fail fast */
2278 nvme_unquiesce_io_queues(ctrl);
2279 nvme_tcp_teardown_admin_queue(ctrl, false);
2280 nvme_unquiesce_admin_queue(ctrl);
2281 nvme_auth_stop(ctrl);
2283 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2284 /* state change failure is ok if we started ctrl delete */
2285 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2286 ctrl->state != NVME_CTRL_DELETING_NOIO);
2290 nvme_tcp_reconnect_or_remove(ctrl);
2293 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2295 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2296 nvme_quiesce_admin_queue(ctrl);
2297 nvme_disable_ctrl(ctrl, shutdown);
2298 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2301 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2303 nvme_tcp_teardown_ctrl(ctrl, true);
2306 static void nvme_reset_ctrl_work(struct work_struct *work)
2308 struct nvme_ctrl *ctrl =
2309 container_of(work, struct nvme_ctrl, reset_work);
2311 nvme_stop_ctrl(ctrl);
2312 nvme_tcp_teardown_ctrl(ctrl, false);
2314 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2315 /* state change failure is ok if we started ctrl delete */
2316 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2317 ctrl->state != NVME_CTRL_DELETING_NOIO);
2321 if (nvme_tcp_setup_ctrl(ctrl, false))
2327 ++ctrl->nr_reconnects;
2328 nvme_tcp_reconnect_or_remove(ctrl);
2331 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2333 flush_work(&to_tcp_ctrl(ctrl)->err_work);
2334 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2337 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2339 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2341 if (list_empty(&ctrl->list))
2344 mutex_lock(&nvme_tcp_ctrl_mutex);
2345 list_del(&ctrl->list);
2346 mutex_unlock(&nvme_tcp_ctrl_mutex);
2348 nvmf_free_options(nctrl->opts);
2350 kfree(ctrl->queues);
2354 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2356 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2360 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2361 NVME_SGL_FMT_TRANSPORT_A;
2364 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2365 struct nvme_command *c, u32 data_len)
2367 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2369 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2370 sg->length = cpu_to_le32(data_len);
2371 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2374 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2377 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2380 sg->length = cpu_to_le32(data_len);
2381 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2382 NVME_SGL_FMT_TRANSPORT_A;
2385 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2387 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2388 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2389 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2390 struct nvme_command *cmd = &pdu->cmd;
2391 u8 hdgst = nvme_tcp_hdgst_len(queue);
2393 memset(pdu, 0, sizeof(*pdu));
2394 pdu->hdr.type = nvme_tcp_cmd;
2395 if (queue->hdr_digest)
2396 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2397 pdu->hdr.hlen = sizeof(*pdu);
2398 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2400 cmd->common.opcode = nvme_admin_async_event;
2401 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2402 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2403 nvme_tcp_set_sg_null(cmd);
2405 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2406 ctrl->async_req.offset = 0;
2407 ctrl->async_req.curr_bio = NULL;
2408 ctrl->async_req.data_len = 0;
2410 nvme_tcp_queue_request(&ctrl->async_req, true, true);
static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	nvmf_complete_timed_out_request(rq);
}

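/*
 * Request timeout handler. When the controller is not LIVE, complete the
 * request immediately so it cannot stall controller setup or teardown;
 * otherwise kick error recovery and re-arm the timer.
 */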
static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
	int qid = nvme_tcp_queue_id(req->queue);

	dev_warn(ctrl->device,
		 "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
		 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
		 opc, nvme_opcode_str(qid, opc, fctype));

	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * setup or teardown sequences:
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

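/*
 * Select the SGL descriptor for the command: a NULL descriptor when the
 * request has no payload, an inline descriptor for writes that fit in the
 * capsule, and a transport (host data) descriptor otherwise.
 */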
static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}

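/*
 * Initialize per-request send state and fill in the command PDU header:
 * digest flags, header length, data offset (pdo) and total on-wire length
 * (plen), including inline data and digests where applicable.
 */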
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->h2cdata_left = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	nvme_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}

static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);

	nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
}

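/*
 * Poll for completions on a polled queue: busy-poll the socket while the
 * receive queue is empty, then reap received PDUs and report how many
 * completions were seen.
 */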
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

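/*
 * Append the local source address of the admin queue socket to the generic
 * fabrics address string, provided the queue is live.
 */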
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
	struct sockaddr_storage src_addr;
	int ret, len;

	len = nvmf_get_address(ctrl, buf, size);

	mutex_lock(&queue->queue_lock);

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		goto done;
	ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
	if (ret > 0) {
		if (len > 0)
			len--; /* strip trailing newline */
		len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
				(len) ? "," : "", &src_addr);
	}
done:
	mutex_unlock(&queue->queue_lock);

	return len;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS | NVME_F_BLOCKING,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvme_tcp_get_address,
	.stop_ctrl		= nvme_tcp_stop_ctrl,
};

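/*
 * Check whether a controller with matching IP options already exists; used
 * to reject duplicate connects unless duplicate_connect was requested.
 */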
static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
			  NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

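/*
 * The PDU layouts are fixed by the NVMe/TCP specification; the size checks
 * below catch any accidental change to the wire structures at build time.
 */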
static int __init nvme_tcp_init_module(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");