/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
31 /* *************************** Data Structures/Defines ****************** */
34 #define NVMET_LS_CTX_COUNT 4
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
42 struct nvmet_fc_ls_iod {
43 struct nvmefc_tgt_ls_req *lsreq;
44 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
46 struct list_head ls_list; /* tgtport->ls_list */
48 struct nvmet_fc_tgtport *tgtport;
49 struct nvmet_fc_tgt_assoc *assoc;
56 struct scatterlist sg[2];
58 struct work_struct work;
59 } __aligned(sizeof(unsigned long long));
61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
62 #define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
64 enum nvmet_fcp_datadir {
71 struct nvmet_fc_fcp_iod {
72 struct nvmefc_tgt_fcp_req *fcpreq;
74 struct nvme_fc_cmd_iu cmdiubuf;
75 struct nvme_fc_ersp_iu rspiubuf;
77 struct scatterlist *data_sg;
81 enum nvmet_fcp_datadir io_dir;
89 struct work_struct work;
90 struct work_struct done_work;
92 struct nvmet_fc_tgtport *tgtport;
93 struct nvmet_fc_tgt_queue *queue;
95 struct list_head fcp_list; /* tgtport->fcp_list */
98 struct nvmet_fc_tgtport {
100 struct nvmet_fc_target_port fc_target_port;
102 struct list_head tgt_list; /* nvmet_fc_target_list */
103 struct device *dev; /* dev for dma mapping */
104 struct nvmet_fc_target_template *ops;
106 struct nvmet_fc_ls_iod *iod;
108 struct list_head ls_list;
109 struct list_head ls_busylist;
110 struct list_head assoc_list;
111 struct ida assoc_cnt;
112 struct nvmet_port *port;
117 struct nvmet_fc_defer_fcp_req {
118 struct list_head req_list;
119 struct nvmefc_tgt_fcp_req *fcp_req;
122 struct nvmet_fc_tgt_queue {
134 struct nvmet_port *port;
135 struct nvmet_cq nvme_cq;
136 struct nvmet_sq nvme_sq;
137 struct nvmet_fc_tgt_assoc *assoc;
138 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
139 struct list_head fod_list;
140 struct list_head pending_cmd_list;
141 struct list_head avail_defer_list;
142 struct workqueue_struct *work_q;
144 } __aligned(sizeof(unsigned long long));
146 struct nvmet_fc_tgt_assoc {
149 struct nvmet_fc_tgtport *tgtport;
150 struct list_head a_list;
151 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
157 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
159 return (iodptr - iodptr->tgtport->iod);
163 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
165 return (fodptr - fodptr->queue->fod);
/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
184 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
186 return (assoc->association_id | qid);
190 nvmet_fc_getassociationid(u64 connectionid)
192 return connectionid & ~NVMET_FC_QUEUEID_MASK;
196 nvmet_fc_getqueueid(u64 connectionid)
198 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
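/*
 * Illustrative example (not from the original source): with
 * BYTES_FOR_QID == 2, NVMET_FC_QUEUEID_MASK is 0xFFFF, so for a
 * hypothetical association_id of 0x123456789abc0000 and qid 3:
 *   connection id  = 0x123456789abc0003   (nvmet_fc_makeconnid)
 *   association id = 0x123456789abc0000   (nvmet_fc_getassociationid)
 *   queue id       = 0x0003               (nvmet_fc_getqueueid)
 */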
201 static inline struct nvmet_fc_tgtport *
202 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
204 return container_of(targetport, struct nvmet_fc_tgtport,
208 static inline struct nvmet_fc_fcp_iod *
209 nvmet_req_to_fod(struct nvmet_req *nvme_req)
211 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
215 /* *************************** Globals **************************** */
218 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
220 static LIST_HEAD(nvmet_fc_target_list);
221 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
224 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
225 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
226 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
227 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
228 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
229 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
230 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
231 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
232 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
233 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
234 struct nvmet_fc_fcp_iod *fod);
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
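/*
 * For example (derived from the wrappers below): with a NULL dev, as
 * fcloop passes in, fc_dma_map_single() simply returns 0 and
 * fc_dma_mapping_error() reports no error, so the buffers are used
 * directly without any real DMA mapping taking place.
 */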
255 static inline dma_addr_t
256 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
257 enum dma_data_direction dir)
259 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
263 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
265 return dev ? dma_mapping_error(dev, dma_addr) : 0;
269 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
270 enum dma_data_direction dir)
273 dma_unmap_single(dev, addr, size, dir);
277 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
278 enum dma_data_direction dir)
281 dma_sync_single_for_cpu(dev, addr, size, dir);
285 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
286 enum dma_data_direction dir)
289 dma_sync_single_for_device(dev, addr, size, dir);
292 /* pseudo dma_map_sg call */
294 fc_map_sg(struct scatterlist *sg, int nents)
296 struct scatterlist *s;
299 WARN_ON(nents == 0 || sg[0].length == 0);
301 for_each_sg(sg, s, nents, i) {
303 #ifdef CONFIG_NEED_SG_DMA_LENGTH
304 s->dma_length = s->length;
311 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
312 enum dma_data_direction dir)
314 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
318 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
319 enum dma_data_direction dir)
322 dma_unmap_sg(dev, sg, nents, dir);
326 /* *********************** FC-NVME Port Management ************************ */
330 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
332 struct nvmet_fc_ls_iod *iod;
335 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
342 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
343 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
344 iod->tgtport = tgtport;
345 list_add_tail(&iod->ls_list, &tgtport->ls_list);
347 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
352 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
354 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
355 NVME_FC_MAX_LS_BUFFER_SIZE,
357 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
365 list_del(&iod->ls_list);
366 for (iod--, i--; i >= 0; iod--, i--) {
367 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
368 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
370 list_del(&iod->ls_list);
379 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
381 struct nvmet_fc_ls_iod *iod = tgtport->iod;
384 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
385 fc_dma_unmap_single(tgtport->dev,
386 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
389 list_del(&iod->ls_list);
394 static struct nvmet_fc_ls_iod *
395 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
397 struct nvmet_fc_ls_iod *iod;
400 spin_lock_irqsave(&tgtport->lock, flags);
401 iod = list_first_entry_or_null(&tgtport->ls_list,
402 struct nvmet_fc_ls_iod, ls_list);
404 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
405 spin_unlock_irqrestore(&tgtport->lock, flags);
411 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
412 struct nvmet_fc_ls_iod *iod)
416 spin_lock_irqsave(&tgtport->lock, flags);
417 list_move(&iod->ls_list, &tgtport->ls_list);
418 spin_unlock_irqrestore(&tgtport->lock, flags);
422 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
423 struct nvmet_fc_tgt_queue *queue)
425 struct nvmet_fc_fcp_iod *fod = queue->fod;
428 for (i = 0; i < queue->sqsize; fod++, i++) {
429 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
430 INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
431 fod->tgtport = tgtport;
435 fod->aborted = false;
437 list_add_tail(&fod->fcp_list, &queue->fod_list);
438 spin_lock_init(&fod->flock);
440 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
441 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
442 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
443 list_del(&fod->fcp_list);
444 for (fod--, i--; i >= 0; fod--, i--) {
445 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
446 sizeof(fod->rspiubuf),
449 list_del(&fod->fcp_list);
458 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
459 struct nvmet_fc_tgt_queue *queue)
461 struct nvmet_fc_fcp_iod *fod = queue->fod;
464 for (i = 0; i < queue->sqsize; fod++, i++) {
466 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
467 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
471 static struct nvmet_fc_fcp_iod *
472 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
474 struct nvmet_fc_fcp_iod *fod;
476 lockdep_assert_held(&queue->qlock);
478 fod = list_first_entry_or_null(&queue->fod_list,
479 struct nvmet_fc_fcp_iod, fcp_list);
481 list_del(&fod->fcp_list);
484 * no queue reference is taken, as it was taken by the
485 * queue lookup just prior to the allocation. The iod
486 * will "inherit" that reference.
494 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
495 struct nvmet_fc_tgt_queue *queue,
496 struct nvmefc_tgt_fcp_req *fcpreq)
498 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
501 * put all admin cmds on hw queue id 0. All io commands go to
502 * the respective hw queue based on a modulo basis
504 fcpreq->hwqid = queue->qid ?
505 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
507 if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
508 queue_work_on(queue->cpu, queue->work_q, &fod->work);
510 nvmet_fc_handle_fcp_rqst(tgtport, fod);
514 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
515 struct nvmet_fc_fcp_iod *fod)
517 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
518 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
519 struct nvmet_fc_defer_fcp_req *deferfcp;
522 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
523 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
525 fcpreq->nvmet_fc_private = NULL;
529 fod->aborted = false;
530 fod->writedataactive = false;
533 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
535 spin_lock_irqsave(&queue->qlock, flags);
536 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
537 struct nvmet_fc_defer_fcp_req, req_list);
539 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
540 spin_unlock_irqrestore(&queue->qlock, flags);
542 /* Release reference taken at queue lookup and fod allocation */
543 nvmet_fc_tgt_q_put(queue);
547 /* Re-use the fod for the next pending cmd that was deferred */
548 list_del(&deferfcp->req_list);
550 fcpreq = deferfcp->fcp_req;
552 /* deferfcp can be reused for another IO at a later date */
553 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
555 spin_unlock_irqrestore(&queue->qlock, flags);
557 /* Save NVME CMD IO in fod */
558 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
560 /* Setup new fcpreq to be processed */
561 fcpreq->rspaddr = NULL;
563 fcpreq->nvmet_fc_private = fod;
564 fod->fcpreq = fcpreq;
567 /* inform LLDD IO is now being processed */
568 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
570 /* Submit deferred IO for processing */
571 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
574 * Leave the queue lookup get reference taken when
575 * fod was originally allocated.
580 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
584 if (tgtport->ops->max_hw_queues == 1)
585 return WORK_CPU_UNBOUND;
587 /* Simple cpu selection based on qid modulo active cpu count */
588 idx = !qid ? 0 : (qid - 1) % num_active_cpus();
590 /* find the n'th active cpu */
591 for (cpu = 0, cnt = 0; ; ) {
592 if (cpu_active(cpu)) {
597 cpu = (cpu + 1) % num_possible_cpus();
603 static struct nvmet_fc_tgt_queue *
604 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
607 struct nvmet_fc_tgt_queue *queue;
611 if (qid >= NVMET_NR_QUEUES)
614 queue = kzalloc((sizeof(*queue) +
615 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
620 if (!nvmet_fc_tgt_a_get(assoc))
623 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
624 assoc->tgtport->fc_target_port.port_num,
629 queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
631 queue->sqsize = sqsize;
632 queue->assoc = assoc;
633 queue->port = assoc->tgtport->port;
634 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
635 INIT_LIST_HEAD(&queue->fod_list);
636 INIT_LIST_HEAD(&queue->avail_defer_list);
637 INIT_LIST_HEAD(&queue->pending_cmd_list);
638 atomic_set(&queue->connected, 0);
639 atomic_set(&queue->sqtail, 0);
640 atomic_set(&queue->rsn, 1);
641 atomic_set(&queue->zrspcnt, 0);
642 spin_lock_init(&queue->qlock);
643 kref_init(&queue->ref);
645 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
647 ret = nvmet_sq_init(&queue->nvme_sq);
649 goto out_fail_iodlist;
651 WARN_ON(assoc->queues[qid]);
652 spin_lock_irqsave(&assoc->tgtport->lock, flags);
653 assoc->queues[qid] = queue;
654 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
659 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
660 destroy_workqueue(queue->work_q);
662 nvmet_fc_tgt_a_put(assoc);
670 nvmet_fc_tgt_queue_free(struct kref *ref)
672 struct nvmet_fc_tgt_queue *queue =
673 container_of(ref, struct nvmet_fc_tgt_queue, ref);
676 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
677 queue->assoc->queues[queue->qid] = NULL;
678 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
680 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
682 nvmet_fc_tgt_a_put(queue->assoc);
684 destroy_workqueue(queue->work_q);
690 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
692 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
696 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
698 return kref_get_unless_zero(&queue->ref);
703 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
705 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
706 struct nvmet_fc_fcp_iod *fod = queue->fod;
707 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
709 int i, writedataactive;
712 disconnect = atomic_xchg(&queue->connected, 0);
714 spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
716 for (i = 0; i < queue->sqsize; fod++, i++) {
718 spin_lock(&fod->flock);
720 writedataactive = fod->writedataactive;
721 spin_unlock(&fod->flock);
723 * only call lldd abort routine if waiting for
724 * writedata. other outstanding ops should finish
727 if (writedataactive) {
728 spin_lock(&fod->flock);
730 spin_unlock(&fod->flock);
731 tgtport->ops->fcp_abort(
732 &tgtport->fc_target_port, fod->fcpreq);
737 /* Cleanup defer'ed IOs in queue */
738 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
740 list_del(&deferfcp->req_list);
745 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
746 struct nvmet_fc_defer_fcp_req, req_list);
750 list_del(&deferfcp->req_list);
751 spin_unlock_irqrestore(&queue->qlock, flags);
753 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
756 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
759 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
764 spin_lock_irqsave(&queue->qlock, flags);
766 spin_unlock_irqrestore(&queue->qlock, flags);
768 flush_workqueue(queue->work_q);
771 nvmet_sq_destroy(&queue->nvme_sq);
773 nvmet_fc_tgt_q_put(queue);
776 static struct nvmet_fc_tgt_queue *
777 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
780 struct nvmet_fc_tgt_assoc *assoc;
781 struct nvmet_fc_tgt_queue *queue;
782 u64 association_id = nvmet_fc_getassociationid(connection_id);
783 u16 qid = nvmet_fc_getqueueid(connection_id);
786 spin_lock_irqsave(&tgtport->lock, flags);
787 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
788 if (association_id == assoc->association_id) {
789 queue = assoc->queues[qid];
791 (!atomic_read(&queue->connected) ||
792 !nvmet_fc_tgt_q_get(queue)))
794 spin_unlock_irqrestore(&tgtport->lock, flags);
798 spin_unlock_irqrestore(&tgtport->lock, flags);
802 static struct nvmet_fc_tgt_assoc *
803 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
805 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
809 bool needrandom = true;
811 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
815 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
819 if (!nvmet_fc_tgtport_get(tgtport))
822 assoc->tgtport = tgtport;
824 INIT_LIST_HEAD(&assoc->a_list);
825 kref_init(&assoc->ref);
828 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
829 ran = ran << BYTES_FOR_QID_SHIFT;
831 spin_lock_irqsave(&tgtport->lock, flags);
833 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
834 if (ran == tmpassoc->association_id) {
839 assoc->association_id = ran;
840 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
842 spin_unlock_irqrestore(&tgtport->lock, flags);
848 ida_simple_remove(&tgtport->assoc_cnt, idx);
855 nvmet_fc_target_assoc_free(struct kref *ref)
857 struct nvmet_fc_tgt_assoc *assoc =
858 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
859 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
862 spin_lock_irqsave(&tgtport->lock, flags);
863 list_del(&assoc->a_list);
864 spin_unlock_irqrestore(&tgtport->lock, flags);
865 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
867 nvmet_fc_tgtport_put(tgtport);
871 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
873 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
877 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
879 return kref_get_unless_zero(&assoc->ref);
883 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
885 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
886 struct nvmet_fc_tgt_queue *queue;
890 spin_lock_irqsave(&tgtport->lock, flags);
891 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
892 queue = assoc->queues[i];
894 if (!nvmet_fc_tgt_q_get(queue))
896 spin_unlock_irqrestore(&tgtport->lock, flags);
897 nvmet_fc_delete_target_queue(queue);
898 nvmet_fc_tgt_q_put(queue);
899 spin_lock_irqsave(&tgtport->lock, flags);
902 spin_unlock_irqrestore(&tgtport->lock, flags);
904 nvmet_fc_tgt_a_put(assoc);
907 static struct nvmet_fc_tgt_assoc *
908 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
911 struct nvmet_fc_tgt_assoc *assoc;
912 struct nvmet_fc_tgt_assoc *ret = NULL;
915 spin_lock_irqsave(&tgtport->lock, flags);
916 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
917 if (association_id == assoc->association_id) {
919 nvmet_fc_tgt_a_get(assoc);
923 spin_unlock_irqrestore(&tgtport->lock, flags);
/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, target port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
947 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
948 struct nvmet_fc_target_template *template,
950 struct nvmet_fc_target_port **portptr)
952 struct nvmet_fc_tgtport *newrec;
956 if (!template->xmt_ls_rsp || !template->fcp_op ||
957 !template->fcp_abort ||
958 !template->fcp_req_release || !template->targetport_delete ||
959 !template->max_hw_queues || !template->max_sgl_segments ||
960 !template->max_dif_sgl_segments || !template->dma_boundary) {
962 goto out_regtgt_failed;
965 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
969 goto out_regtgt_failed;
972 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
978 if (!get_device(dev) && dev) {
983 newrec->fc_target_port.node_name = pinfo->node_name;
984 newrec->fc_target_port.port_name = pinfo->port_name;
985 newrec->fc_target_port.private = &newrec[1];
986 newrec->fc_target_port.port_id = pinfo->port_id;
987 newrec->fc_target_port.port_num = idx;
988 INIT_LIST_HEAD(&newrec->tgt_list);
990 newrec->ops = template;
991 spin_lock_init(&newrec->lock);
992 INIT_LIST_HEAD(&newrec->ls_list);
993 INIT_LIST_HEAD(&newrec->ls_busylist);
994 INIT_LIST_HEAD(&newrec->assoc_list);
995 kref_init(&newrec->ref);
996 ida_init(&newrec->assoc_cnt);
997 newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
998 template->max_sgl_segments);
1000 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1003 goto out_free_newrec;
1006 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1007 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1008 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1010 *portptr = &newrec->fc_target_port;
1016 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1023 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
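/*
 * A minimal LLDD-side registration sketch (illustrative only; the
 * handler names, private struct, and WWN/port values below are
 * hypothetical, and error handling is elided):
 *
 *	static struct nvmet_fc_target_template example_tgt_template = {
 *		.targetport_delete	= example_targetport_delete,
 *		.xmt_ls_rsp		= example_xmt_ls_rsp,
 *		.fcp_op			= example_fcp_op,
 *		.fcp_abort		= example_fcp_abort,
 *		.fcp_req_release	= example_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_features	= 0,
 *		.target_priv_sz		= sizeof(struct example_tgt_priv),
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= 0x200000109b123456,
 *		.port_name	= 0x100000109b123456,
 *		.port_id	= 0x010203,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
 *					   &lldd_pci_dev->dev, &targetport);
 */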
1027 nvmet_fc_free_tgtport(struct kref *ref)
1029 struct nvmet_fc_tgtport *tgtport =
1030 container_of(ref, struct nvmet_fc_tgtport, ref);
1031 struct device *dev = tgtport->dev;
1032 unsigned long flags;
1034 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1035 list_del(&tgtport->tgt_list);
1036 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1038 nvmet_fc_free_ls_iodlist(tgtport);
1040 /* let the LLDD know we've finished tearing it down */
1041 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1043 ida_simple_remove(&nvmet_fc_tgtport_cnt,
1044 tgtport->fc_target_port.port_num);
1046 ida_destroy(&tgtport->assoc_cnt);
1054 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1056 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1060 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1062 return kref_get_unless_zero(&tgtport->ref);
1066 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1068 struct nvmet_fc_tgt_assoc *assoc, *next;
1069 unsigned long flags;
1071 spin_lock_irqsave(&tgtport->lock, flags);
1072 list_for_each_entry_safe(assoc, next,
1073 &tgtport->assoc_list, a_list) {
1074 if (!nvmet_fc_tgt_a_get(assoc))
1076 spin_unlock_irqrestore(&tgtport->lock, flags);
1077 nvmet_fc_delete_target_assoc(assoc);
1078 nvmet_fc_tgt_a_put(assoc);
1079 spin_lock_irqsave(&tgtport->lock, flags);
1081 spin_unlock_irqrestore(&tgtport->lock, flags);
1085 * nvmet layer has called to terminate an association
1088 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1090 struct nvmet_fc_tgtport *tgtport, *next;
1091 struct nvmet_fc_tgt_assoc *assoc;
1092 struct nvmet_fc_tgt_queue *queue;
1093 unsigned long flags;
1094 bool found_ctrl = false;
1096 /* this is a bit ugly, but don't want to make locks layered */
1097 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1098 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1100 if (!nvmet_fc_tgtport_get(tgtport))
1102 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1104 spin_lock_irqsave(&tgtport->lock, flags);
1105 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1106 queue = assoc->queues[0];
1107 if (queue && queue->nvme_sq.ctrl == ctrl) {
1108 if (nvmet_fc_tgt_a_get(assoc))
1113 spin_unlock_irqrestore(&tgtport->lock, flags);
1115 nvmet_fc_tgtport_put(tgtport);
1118 nvmet_fc_delete_target_assoc(assoc);
1119 nvmet_fc_tgt_a_put(assoc);
1123 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1125 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                      LLDD to deregister/remove a previously
 *                      registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
1140 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1142 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1144 /* terminate any outstanding associations */
1145 __nvmet_fc_free_assocs(tgtport);
1147 nvmet_fc_tgtport_put(tgtport);
1151 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1154 /* *********************** FC-NVME LS Handling **************************** */
1158 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1160 struct fcnvme_ls_acc_hdr *acc = buf;
1162 acc->w0.ls_cmd = ls_cmd;
1163 acc->desc_list_len = desc_len;
1164 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1165 acc->rqst.desc_len =
1166 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1167 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1171 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1172 u8 reason, u8 explanation, u8 vendor)
1174 struct fcnvme_ls_rjt *rjt = buf;
1176 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1177 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1179 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1180 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1181 rjt->rjt.reason_code = reason;
1182 rjt->rjt.reason_explanation = explanation;
1183 rjt->rjt.vendor = vendor;
1185 return sizeof(struct fcnvme_ls_rjt);
1188 /* Validation Error indexes into the string table below */
1191 VERR_CR_ASSOC_LEN = 1,
1192 VERR_CR_ASSOC_RQST_LEN = 2,
1193 VERR_CR_ASSOC_CMD = 3,
1194 VERR_CR_ASSOC_CMD_LEN = 4,
1195 VERR_ERSP_RATIO = 5,
1196 VERR_ASSOC_ALLOC_FAIL = 6,
1197 VERR_QUEUE_ALLOC_FAIL = 7,
1198 VERR_CR_CONN_LEN = 8,
1199 VERR_CR_CONN_RQST_LEN = 9,
1201 VERR_ASSOC_ID_LEN = 11,
1204 VERR_CONN_ID_LEN = 14,
1206 VERR_CR_CONN_CMD = 16,
1207 VERR_CR_CONN_CMD_LEN = 17,
1208 VERR_DISCONN_LEN = 18,
1209 VERR_DISCONN_RQST_LEN = 19,
1210 VERR_DISCONN_CMD = 20,
1211 VERR_DISCONN_CMD_LEN = 21,
1212 VERR_DISCONN_SCOPE = 22,
1214 VERR_RS_RQST_LEN = 24,
1216 VERR_RS_CMD_LEN = 26,
1221 static char *validation_errors[] = {
1223 "Bad CR_ASSOC Length",
1224 "Bad CR_ASSOC Rqst Length",
1226 "Bad CR_ASSOC Cmd Length",
1228 "Association Allocation Failed",
1229 "Queue Allocation Failed",
1230 "Bad CR_CONN Length",
1231 "Bad CR_CONN Rqst Length",
1232 "Not Association ID",
1233 "Bad Association ID Length",
1235 "Not Connection ID",
1236 "Bad Connection ID Length",
1239 "Bad CR_CONN Cmd Length",
1240 "Bad DISCONN Length",
1241 "Bad DISCONN Rqst Length",
1243 "Bad DISCONN Cmd Length",
1244 "Bad Disconnect Scope",
1246 "Bad RS Rqst Length",
1248 "Bad RS Cmd Length",
1250 "Bad RS Relative Offset",
1254 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1255 struct nvmet_fc_ls_iod *iod)
1257 struct fcnvme_ls_cr_assoc_rqst *rqst =
1258 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1259 struct fcnvme_ls_cr_assoc_acc *acc =
1260 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1261 struct nvmet_fc_tgt_queue *queue;
1264 memset(acc, 0, sizeof(*acc));
	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for Create Association Cmd descriptor
	 * list.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
	 * trailing pad length is.
	 */
1274 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1275 ret = VERR_CR_ASSOC_LEN;
1276 else if (be32_to_cpu(rqst->desc_list_len) <
1277 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1278 ret = VERR_CR_ASSOC_RQST_LEN;
1279 else if (rqst->assoc_cmd.desc_tag !=
1280 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1281 ret = VERR_CR_ASSOC_CMD;
1282 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1283 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1284 ret = VERR_CR_ASSOC_CMD_LEN;
1285 else if (!rqst->assoc_cmd.ersp_ratio ||
1286 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1287 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1288 ret = VERR_ERSP_RATIO;
1291 /* new association w/ admin queue */
1292 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1294 ret = VERR_ASSOC_ALLOC_FAIL;
1296 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1297 be16_to_cpu(rqst->assoc_cmd.sqsize));
1299 ret = VERR_QUEUE_ALLOC_FAIL;
1304 dev_err(tgtport->dev,
1305 "Create Association LS failed: %s\n",
1306 validation_errors[ret]);
1307 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1308 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1309 FCNVME_RJT_RC_LOGIC,
1310 FCNVME_RJT_EXP_NONE, 0);
1314 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1315 atomic_set(&queue->connected, 1);
1316 queue->sqhd = 0; /* best place to init value */
1318 /* format a response */
1320 iod->lsreq->rsplen = sizeof(*acc);
1322 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1324 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1325 FCNVME_LS_CREATE_ASSOCIATION);
1326 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1327 acc->associd.desc_len =
1329 sizeof(struct fcnvme_lsdesc_assoc_id));
1330 acc->associd.association_id =
1331 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1332 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1333 acc->connectid.desc_len =
1335 sizeof(struct fcnvme_lsdesc_conn_id));
1336 acc->connectid.connection_id = acc->associd.association_id;
1340 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1341 struct nvmet_fc_ls_iod *iod)
1343 struct fcnvme_ls_cr_conn_rqst *rqst =
1344 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1345 struct fcnvme_ls_cr_conn_acc *acc =
1346 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1347 struct nvmet_fc_tgt_queue *queue;
1350 memset(acc, 0, sizeof(*acc));
1352 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1353 ret = VERR_CR_CONN_LEN;
1354 else if (rqst->desc_list_len !=
1356 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1357 ret = VERR_CR_CONN_RQST_LEN;
1358 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1359 ret = VERR_ASSOC_ID;
1360 else if (rqst->associd.desc_len !=
1362 sizeof(struct fcnvme_lsdesc_assoc_id)))
1363 ret = VERR_ASSOC_ID_LEN;
1364 else if (rqst->connect_cmd.desc_tag !=
1365 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1366 ret = VERR_CR_CONN_CMD;
1367 else if (rqst->connect_cmd.desc_len !=
1369 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1370 ret = VERR_CR_CONN_CMD_LEN;
1371 else if (!rqst->connect_cmd.ersp_ratio ||
1372 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1373 be16_to_cpu(rqst->connect_cmd.sqsize)))
1374 ret = VERR_ERSP_RATIO;
1378 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1379 be64_to_cpu(rqst->associd.association_id));
1381 ret = VERR_NO_ASSOC;
1383 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1384 be16_to_cpu(rqst->connect_cmd.qid),
1385 be16_to_cpu(rqst->connect_cmd.sqsize));
1387 ret = VERR_QUEUE_ALLOC_FAIL;
1389 /* release get taken in nvmet_fc_find_target_assoc */
1390 nvmet_fc_tgt_a_put(iod->assoc);
1395 dev_err(tgtport->dev,
1396 "Create Connection LS failed: %s\n",
1397 validation_errors[ret]);
1398 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1399 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1400 (ret == VERR_NO_ASSOC) ?
1401 FCNVME_RJT_RC_INV_ASSOC :
1402 FCNVME_RJT_RC_LOGIC,
1403 FCNVME_RJT_EXP_NONE, 0);
1407 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1408 atomic_set(&queue->connected, 1);
1409 queue->sqhd = 0; /* best place to init value */
1411 /* format a response */
1413 iod->lsreq->rsplen = sizeof(*acc);
1415 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1416 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1417 FCNVME_LS_CREATE_CONNECTION);
1418 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1419 acc->connectid.desc_len =
1421 sizeof(struct fcnvme_lsdesc_conn_id));
1422 acc->connectid.connection_id =
1423 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1424 be16_to_cpu(rqst->connect_cmd.qid)));
1428 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1429 struct nvmet_fc_ls_iod *iod)
1431 struct fcnvme_ls_disconnect_rqst *rqst =
1432 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1433 struct fcnvme_ls_disconnect_acc *acc =
1434 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1435 struct nvmet_fc_tgt_queue *queue = NULL;
1436 struct nvmet_fc_tgt_assoc *assoc;
1438 bool del_assoc = false;
1440 memset(acc, 0, sizeof(*acc));
1442 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1443 ret = VERR_DISCONN_LEN;
1444 else if (rqst->desc_list_len !=
1446 sizeof(struct fcnvme_ls_disconnect_rqst)))
1447 ret = VERR_DISCONN_RQST_LEN;
1448 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1449 ret = VERR_ASSOC_ID;
1450 else if (rqst->associd.desc_len !=
1452 sizeof(struct fcnvme_lsdesc_assoc_id)))
1453 ret = VERR_ASSOC_ID_LEN;
1454 else if (rqst->discon_cmd.desc_tag !=
1455 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1456 ret = VERR_DISCONN_CMD;
1457 else if (rqst->discon_cmd.desc_len !=
1459 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1460 ret = VERR_DISCONN_CMD_LEN;
1461 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1462 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1463 ret = VERR_DISCONN_SCOPE;
1465 /* match an active association */
1466 assoc = nvmet_fc_find_target_assoc(tgtport,
1467 be64_to_cpu(rqst->associd.association_id));
1470 if (rqst->discon_cmd.scope ==
1471 FCNVME_DISCONN_CONNECTION) {
1472 queue = nvmet_fc_find_target_queue(tgtport,
1474 rqst->discon_cmd.id));
1476 nvmet_fc_tgt_a_put(assoc);
1481 ret = VERR_NO_ASSOC;
1485 dev_err(tgtport->dev,
1486 "Disconnect LS failed: %s\n",
1487 validation_errors[ret]);
1488 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1489 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1490 (ret == VERR_NO_ASSOC) ?
1491 FCNVME_RJT_RC_INV_ASSOC :
1492 (ret == VERR_NO_CONN) ?
1493 FCNVME_RJT_RC_INV_CONN :
1494 FCNVME_RJT_RC_LOGIC,
1495 FCNVME_RJT_EXP_NONE, 0);
1499 /* format a response */
1501 iod->lsreq->rsplen = sizeof(*acc);
1503 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1505 sizeof(struct fcnvme_ls_disconnect_acc)),
1506 FCNVME_LS_DISCONNECT);
1509 /* are we to delete a Connection ID (queue) */
1511 int qid = queue->qid;
1513 nvmet_fc_delete_target_queue(queue);
1515 /* release the get taken by find_target_queue */
1516 nvmet_fc_tgt_q_put(queue);
1518 /* tear association down if io queue terminated */
1523 /* release get taken in nvmet_fc_find_target_assoc */
1524 nvmet_fc_tgt_a_put(iod->assoc);
1527 nvmet_fc_delete_target_assoc(iod->assoc);
1531 /* *********************** NVME Ctrl Routines **************************** */
1534 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1536 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1539 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1541 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1542 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1544 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1545 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1546 nvmet_fc_free_ls_iod(tgtport, iod);
1547 nvmet_fc_tgtport_put(tgtport);
1551 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1552 struct nvmet_fc_ls_iod *iod)
1556 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1557 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1559 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1561 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1565 * Actual processing routine for received FC-NVME LS Requests from the LLD
1568 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1569 struct nvmet_fc_ls_iod *iod)
1571 struct fcnvme_ls_rqst_w0 *w0 =
1572 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1574 iod->lsreq->nvmet_fc_private = iod;
1575 iod->lsreq->rspbuf = iod->rspbuf;
1576 iod->lsreq->rspdma = iod->rspdma;
1577 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative: handlers will later set to a valid length */
1579 iod->lsreq->rsplen = 0;
	/*
	 * parse request input, execute the request, and format the
	 * response.
	 */
1588 switch (w0->ls_cmd) {
1589 case FCNVME_LS_CREATE_ASSOCIATION:
1590 /* Creates Association and initial Admin Queue/Connection */
1591 nvmet_fc_ls_create_association(tgtport, iod);
1593 case FCNVME_LS_CREATE_CONNECTION:
1594 /* Creates an IO Queue/Connection */
1595 nvmet_fc_ls_create_connection(tgtport, iod);
1597 case FCNVME_LS_DISCONNECT:
1598 /* Terminate a Queue/Connection or the Association */
1599 nvmet_fc_ls_disconnect(tgtport, iod);
1602 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1603 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1604 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1607 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1611 * Actual processing routine for received FC-NVME LS Requests from the LLD
1614 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1616 struct nvmet_fc_ls_iod *iod =
1617 container_of(work, struct nvmet_fc_ls_iod, work);
1618 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1620 nvmet_fc_handle_ls_rqst(tgtport, iod);
/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of an NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @lsreq:       pointer to a lsreq request structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
1642 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1643 struct nvmefc_tgt_ls_req *lsreq,
1644 void *lsreqbuf, u32 lsreqbuf_len)
1646 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1647 struct nvmet_fc_ls_iod *iod;
1649 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1652 if (!nvmet_fc_tgtport_get(tgtport))
1655 iod = nvmet_fc_alloc_ls_iod(tgtport);
1657 nvmet_fc_tgtport_put(tgtport);
1663 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1664 iod->rqstdatalen = lsreqbuf_len;
1666 schedule_work(&iod->work);
1670 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
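/*
 * Illustrative LLDD call pattern (hypothetical context names; not part of
 * the original source): on receipt of an FC-NVME LS frame the LLDD hands
 * the payload to the transport and may recycle its frame buffer
 * immediately on a zero return.
 *
 *	ret = nvmet_fc_rcv_ls_req(example_targetport, &lldd_ls_ctx->lsreq,
 *				  ls_frame_payload, ls_frame_len);
 *	if (ret)
 *		// not accepted: abort the LS exchange
 */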
1674 * **********************
1675 * Start of FCP handling
1676 * **********************
1680 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1682 struct scatterlist *sg;
1685 u32 page_len, length;
1688 length = fod->total_length;
1689 nent = DIV_ROUND_UP(length, PAGE_SIZE);
1690 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1694 sg_init_table(sg, nent);
1697 page_len = min_t(u32, length, PAGE_SIZE);
1699 page = alloc_page(GFP_KERNEL);
1701 goto out_free_pages;
1703 sg_set_page(&sg[i], page, page_len, 0);
1709 fod->data_sg_cnt = nent;
1710 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1711 ((fod->io_dir == NVMET_FCP_WRITE) ?
1712 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1713 /* note: write from initiator perspective */
1720 __free_page(sg_page(&sg[i]));
1723 fod->data_sg = NULL;
1724 fod->data_sg_cnt = 0;
1726 return NVME_SC_INTERNAL;
1730 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1732 struct scatterlist *sg;
1735 if (!fod->data_sg || !fod->data_sg_cnt)
1738 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1739 ((fod->io_dir == NVMET_FCP_WRITE) ?
1740 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1741 for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1742 __free_page(sg_page(sg));
1743 kfree(fod->data_sg);
1744 fod->data_sg = NULL;
1745 fod->data_sg_cnt = 0;
1750 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1754 /* egad, this is ugly. And sqtail is just a best guess */
1755 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1757 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1758 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
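/*
 * Worked example (illustrative): for sqsize == 32 the threshold is
 * (32 - 1) * 9 = 279, so the queue is treated as "90% full" once
 * used * 10 >= 279, i.e. once 28 or more of the 31 usable entries
 * are occupied.
 */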
1763 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1766 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1767 struct nvmet_fc_fcp_iod *fod)
1769 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1770 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1771 struct nvme_completion *cqe = &ersp->cqe;
1772 u32 *cqewd = (u32 *)cqe;
1773 bool send_ersp = false;
1774 u32 rsn, rspcnt, xfr_length;
1776 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1777 xfr_length = fod->total_length;
1779 xfr_length = fod->offset;
	/*
	 * check to see if we can send a 0's rsp.
	 *   Note: to send a 0's response, the NVME-FC host transport will
	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
	 *   seen in an ersp), and command_id. Thus it will create a
	 *   zero-filled CQE with those known fields filled in. Transport
	 *   must send an ersp for any condition where the cqe won't match
	 *   this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *     but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
1800 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1801 if (!(rspcnt % fod->queue->ersp_ratio) ||
1802 sqe->opcode == nvme_fabrics_command ||
1803 xfr_length != fod->total_length ||
1804 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1805 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1806 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1809 /* re-set the fields */
1810 fod->fcpreq->rspaddr = ersp;
1811 fod->fcpreq->rspdma = fod->rspdma;
1814 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1815 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1817 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1818 rsn = atomic_inc_return(&fod->queue->rsn);
1819 ersp->rsn = cpu_to_be32(rsn);
1820 ersp->xfrd_len = cpu_to_be32(xfr_length);
1821 fod->fcpreq->rsplen = sizeof(*ersp);
1824 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1825 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1828 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1831 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1832 struct nvmet_fc_fcp_iod *fod)
1834 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1836 /* data no longer needed */
1837 nvmet_fc_free_tgt_pgs(fod);
1840 * if an ABTS was received or we issued the fcp_abort early
1841 * don't call abort routine again.
1843 /* no need to take lock - lock was taken earlier to get here */
1845 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1847 nvmet_fc_free_fcp_iod(fod->queue, fod);
1851 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1852 struct nvmet_fc_fcp_iod *fod)
1856 fod->fcpreq->op = NVMET_FCOP_RSP;
1857 fod->fcpreq->timeout = 0;
1859 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1861 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1863 nvmet_fc_abort_op(tgtport, fod);
1867 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1868 struct nvmet_fc_fcp_iod *fod, u8 op)
1870 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1871 unsigned long flags;
1876 fcpreq->offset = fod->offset;
1877 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1879 tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
1880 (fod->total_length - fod->offset));
1881 fcpreq->transfer_length = tlen;
1882 fcpreq->transferred_length = 0;
1883 fcpreq->fcp_error = 0;
1886 fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
1887 fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
1890 * If the last READDATA request: check if LLDD supports
1891 * combined xfr with response.
1893 if ((op == NVMET_FCOP_READDATA) &&
1894 ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1895 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1896 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1897 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1900 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
		/*
		 * should be ok to set w/o lock as it's in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
1909 if (op == NVMET_FCOP_WRITEDATA) {
1910 spin_lock_irqsave(&fod->flock, flags);
1911 fod->writedataactive = false;
1912 spin_unlock_irqrestore(&fod->flock, flags);
1913 nvmet_req_complete(&fod->req,
1914 NVME_SC_FC_TRANSPORT_ERROR);
1915 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1916 fcpreq->fcp_error = ret;
1917 fcpreq->transferred_length = 0;
1918 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1924 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1926 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1927 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1929 /* if in the middle of an io and we need to tear down */
1931 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1932 nvmet_req_complete(&fod->req,
1933 NVME_SC_FC_TRANSPORT_ERROR);
1937 nvmet_fc_abort_op(tgtport, fod);
1945 * actual done handler for FCP operations when completed by the lldd
1948 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1950 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1951 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1952 unsigned long flags;
1955 spin_lock_irqsave(&fod->flock, flags);
1957 fod->writedataactive = false;
1958 spin_unlock_irqrestore(&fod->flock, flags);
1960 switch (fcpreq->op) {
1962 case NVMET_FCOP_WRITEDATA:
1963 if (__nvmet_fc_fod_op_abort(fod, abort))
1965 if (fcpreq->fcp_error ||
1966 fcpreq->transferred_length != fcpreq->transfer_length) {
1967 spin_lock(&fod->flock);
1969 spin_unlock(&fod->flock);
1971 nvmet_req_complete(&fod->req,
1972 NVME_SC_FC_TRANSPORT_ERROR);
1976 fod->offset += fcpreq->transferred_length;
1977 if (fod->offset != fod->total_length) {
1978 spin_lock_irqsave(&fod->flock, flags);
1979 fod->writedataactive = true;
1980 spin_unlock_irqrestore(&fod->flock, flags);
1982 /* transfer the next chunk */
1983 nvmet_fc_transfer_fcp_data(tgtport, fod,
1984 NVMET_FCOP_WRITEDATA);
1988 /* data transfer complete, resume with nvmet layer */
1990 fod->req.execute(&fod->req);
1994 case NVMET_FCOP_READDATA:
1995 case NVMET_FCOP_READDATA_RSP:
1996 if (__nvmet_fc_fod_op_abort(fod, abort))
1998 if (fcpreq->fcp_error ||
1999 fcpreq->transferred_length != fcpreq->transfer_length) {
2000 nvmet_fc_abort_op(tgtport, fod);
2006 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2007 /* data no longer needed */
2008 nvmet_fc_free_tgt_pgs(fod);
2009 nvmet_fc_free_fcp_iod(fod->queue, fod);
2013 fod->offset += fcpreq->transferred_length;
2014 if (fod->offset != fod->total_length) {
2015 /* transfer the next chunk */
2016 nvmet_fc_transfer_fcp_data(tgtport, fod,
2017 NVMET_FCOP_READDATA);
2021 /* data transfer complete, send response */
2023 /* data no longer needed */
2024 nvmet_fc_free_tgt_pgs(fod);
2026 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2030 case NVMET_FCOP_RSP:
2031 if (__nvmet_fc_fod_op_abort(fod, abort))
2033 nvmet_fc_free_fcp_iod(fod->queue, fod);
2042 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
2044 struct nvmet_fc_fcp_iod *fod =
2045 container_of(work, struct nvmet_fc_fcp_iod, done_work);
2047 nvmet_fc_fod_op_done(fod);
2051 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2053 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2054 struct nvmet_fc_tgt_queue *queue = fod->queue;
2056 if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
2057 /* context switch so completion is not in ISR context */
2058 queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
2060 nvmet_fc_fod_op_done(fod);
2064 * actual completion handler after execution by the nvmet layer
2067 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2068 struct nvmet_fc_fcp_iod *fod, int status)
2070 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2071 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2072 unsigned long flags;
2075 spin_lock_irqsave(&fod->flock, flags);
2077 spin_unlock_irqrestore(&fod->flock, flags);
2079 /* if we have a CQE, snoop the last sq_head value */
2081 fod->queue->sqhd = cqe->sq_head;
2084 nvmet_fc_abort_op(tgtport, fod);
2088 /* if an error handling the cmd post initial parsing */
2090 /* fudge up a failed CQE status for our transport error */
2091 memset(cqe, 0, sizeof(*cqe));
2092 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2093 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2094 cqe->command_id = sqe->command_id;
2095 cqe->status = cpu_to_le16(status);
	/*
	 * try to push the data even if the SQE status is non-zero.
	 * There may be a status where data still was intended to
	 * be moved.
	 */
2103 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2104 /* push the data over before sending rsp */
2105 nvmet_fc_transfer_fcp_data(tgtport, fod,
2106 NVMET_FCOP_READDATA);
2110 /* writes & no data - fall thru */
2113 /* data no longer needed */
2114 nvmet_fc_free_tgt_pgs(fod);
2116 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2121 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2123 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2124 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2126 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
/*
 * Actual processing routine for received FC-NVME FCP requests from the LLD
 */
2134 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2135 struct nvmet_fc_fcp_iod *fod)
2137 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both based on csn.
	 */
2149 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2151 fod->total_length = be32_to_cpu(cmdiu->data_len);
2152 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2153 fod->io_dir = NVMET_FCP_WRITE;
2154 if (!nvme_is_write(&cmdiu->sqe))
2155 goto transport_error;
2156 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2157 fod->io_dir = NVMET_FCP_READ;
2158 if (nvme_is_write(&cmdiu->sqe))
2159 goto transport_error;
2161 fod->io_dir = NVMET_FCP_NODATA;
2162 if (fod->total_length)
2163 goto transport_error;
2166 fod->req.cmd = &fod->cmdiubuf.sqe;
2167 fod->req.rsp = &fod->rspiubuf.cqe;
2168 fod->req.port = fod->queue->port;
2170 /* ensure nvmet handlers will set cmd handler callback */
2171 fod->req.execute = NULL;
2173 /* clear any response payload */
2174 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2176 fod->data_sg = NULL;
2177 fod->data_sg_cnt = 0;
2179 ret = nvmet_req_init(&fod->req,
2180 &fod->queue->nvme_cq,
2181 &fod->queue->nvme_sq,
2182 &nvmet_fc_tgt_fcp_ops);
2184 /* bad SQE content or invalid ctrl state */
2185 /* nvmet layer has already called op done to send rsp. */
2189 /* keep a running counter of tail position */
2190 atomic_inc(&fod->queue->sqtail);
2192 if (fod->total_length) {
2193 ret = nvmet_fc_alloc_tgt_pgs(fod);
2195 nvmet_req_complete(&fod->req, ret);
2199 fod->req.sg = fod->data_sg;
2200 fod->req.sg_cnt = fod->data_sg_cnt;
2203 if (fod->io_dir == NVMET_FCP_WRITE) {
2204 /* pull the data over before invoking nvmet layer */
2205 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
	/*
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */
2216 fod->req.execute(&fod->req);
2221 nvmet_fc_abort_op(tgtport, fod);
/*
 * Actual processing routine for received FC-NVME FCP requests from the LLD
 */
2228 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2230 struct nvmet_fc_fcp_iod *fod =
2231 container_of(work, struct nvmet_fc_fcp_iod, work);
2232 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2234 nvmet_fc_handle_fcp_rqst(tgtport, fod);
/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                       upon the reception of an NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, in some circumstances, due to the packetized nature of FC
 * and the api of the FC LLDD which may issue a hw command to send the
 * response, but the LLDD may not get the hw completion for that command
 * and upcall the nvmet_fc layer before a new command may be
 * asynchronously received - it's possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. It gives
 * the appearance of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD iu buffer information remembered, and the
 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
 * structure is freed, it is immediately reallocated for anything on the
 * pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to reference
 *               the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
2285 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2286 struct nvmefc_tgt_fcp_req *fcpreq,
2287 void *cmdiubuf, u32 cmdiubuf_len)
2289 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2290 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2291 struct nvmet_fc_tgt_queue *queue;
2292 struct nvmet_fc_fcp_iod *fod;
2293 struct nvmet_fc_defer_fcp_req *deferfcp;
2294 unsigned long flags;
2296 /* validate iu, so the connection id can be used to find the queue */
2297 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2298 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2299 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2300 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2303 queue = nvmet_fc_find_target_queue(tgtport,
2304 be64_to_cpu(cmdiu->connection_id));
2309 * note: reference taken by find_target_queue
2310 * After successful fod allocation, the fod will inherit the
2311 * ownership of that reference and will remove the reference
2312 * when the fod is freed.
2315 spin_lock_irqsave(&queue->qlock, flags);
2317 fod = nvmet_fc_alloc_fcp_iod(queue);
2319 spin_unlock_irqrestore(&queue->qlock, flags);
2321 fcpreq->nvmet_fc_private = fod;
2322 fod->fcpreq = fcpreq;
2324 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2326 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2331 if (!tgtport->ops->defer_rcv) {
2332 spin_unlock_irqrestore(&queue->qlock, flags);
2333 /* release the queue lookup reference */
2334 nvmet_fc_tgt_q_put(queue);
2338 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2339 struct nvmet_fc_defer_fcp_req, req_list);
2341 /* Just re-use one that was previously allocated */
2342 list_del(&deferfcp->req_list);
2344 spin_unlock_irqrestore(&queue->qlock, flags);
2346 /* Now we need to dynamically allocate one */
2347 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2349 /* release the queue lookup reference */
2350 nvmet_fc_tgt_q_put(queue);
2353 spin_lock_irqsave(&queue->qlock, flags);
2356 /* For now, use rspaddr / rsplen to save payload information */
2357 fcpreq->rspaddr = cmdiubuf;
2358 fcpreq->rsplen = cmdiubuf_len;
2359 deferfcp->fcp_req = fcpreq;
2361 /* defer processing till a fod becomes available */
2362 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2364 /* NOTE: the queue lookup reference is still valid */
2366 spin_unlock_irqrestore(&queue->qlock, flags);
2370 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
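/*
 * Illustrative LLDD-side handling of the deferral path described above
 * (hypothetical names; a sketch, not the required implementation):
 *
 *	ret = nvmet_fc_rcv_fcp_req(example_targetport, &ctx->fcpreq,
 *				   cmd_iu_buf, cmd_iu_len);
 *	if (ret == -EOVERFLOW) {
 *		// accepted, but cmd_iu_buf must stay untouched until the
 *		// transport calls the LLDD's defer_rcv() for ctx->fcpreq
 *	} else if (ret) {
 *		// not accepted: abort the exchange
 *	}
 */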
/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                       upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the nvmet_fc_rcv_fcp_req() callback to
 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
 * to the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to the fcpreq request structure that corresponds
 *               to the exchange that received the ABTS.
 */
void
2396 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2397 struct nvmefc_tgt_fcp_req *fcpreq)
2399 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2400 struct nvmet_fc_tgt_queue *queue;
2401 unsigned long flags;
2403 if (!fod || fod->fcpreq != fcpreq)
2404 /* job appears to have already completed, ignore abort */
2409 spin_lock_irqsave(&queue->qlock, flags);
2412 * mark as abort. The abort handler, invoked upon completion
2413 * of any work, will detect the aborted status and do the
2416 spin_lock(&fod->flock);
2418 fod->aborted = true;
2419 spin_unlock(&fod->flock);
2421 spin_unlock_irqrestore(&queue->qlock, flags);
2423 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2426 struct nvmet_fc_traddr {
2432 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2436 if (match_u64(sstr, &token64))
2444 * This routine validates and extracts the WWN's from the TRADDR string.
2445 * As kernel parsers need the 0x to determine number base, universally
2446 * build string to parse with 0x prefix before parsing name strings.
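/*
 * Example traddr forms accepted below (WWN values are hypothetical and
 * the exact separator is an assumption, not shown in this fragment):
 *   "nn-0x20000090fa942779:pn-0x10000090fa942779"   (with 0x prefixes)
 *   "nn-20000090fa942779:pn-10000090fa942779"       (bare hex names)
 */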
2449 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2451 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2452 substring_t wwn = { name, &name[sizeof(name)-1] };
2453 int nnoffset, pnoffset;
	/* validate the string is one of the 2 allowed formats */
2456 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2457 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2458 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2459 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2460 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2461 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2462 NVME_FC_TRADDR_OXNNLEN;
2463 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2464 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2465 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2466 "pn-", NVME_FC_TRADDR_NNLEN))) {
2467 nnoffset = NVME_FC_TRADDR_NNLEN;
2468 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2474 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2476 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2477 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2480 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2481 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2487 pr_warn("%s: bad traddr string\n", __func__);
2492 nvmet_fc_add_port(struct nvmet_port *port)
2494 struct nvmet_fc_tgtport *tgtport;
2495 struct nvmet_fc_traddr traddr = { 0L, 0L };
2496 unsigned long flags;
2499 /* validate the address info */
2500 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2501 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2504 /* map the traddr address info to a target port */
2506 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2507 sizeof(port->disc_addr.traddr));
2512 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2513 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2514 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2515 (tgtport->fc_target_port.port_name == traddr.pn)) {
2516 /* a FC port can only be 1 nvmet port id */
2517 if (!tgtport->port) {
2518 tgtport->port = port;
2519 port->priv = tgtport;
2520 nvmet_fc_tgtport_get(tgtport);
2527 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2532 nvmet_fc_remove_port(struct nvmet_port *port)
2534 struct nvmet_fc_tgtport *tgtport = port->priv;
2535 unsigned long flags;
2537 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2538 if (tgtport->port == port) {
2539 nvmet_fc_tgtport_put(tgtport);
2540 tgtport->port = NULL;
2542 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2545 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2546 .owner = THIS_MODULE,
2547 .type = NVMF_TRTYPE_FC,
2549 .add_port = nvmet_fc_add_port,
2550 .remove_port = nvmet_fc_remove_port,
2551 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2552 .delete_ctrl = nvmet_fc_delete_ctrl,
2555 static int __init nvmet_fc_init_module(void)
2557 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2560 static void __exit nvmet_fc_exit_module(void)
	/* sanity check - all targetports should be removed */
2563 if (!list_empty(&nvmet_fc_target_list))
2564 pr_warn("%s: targetport list not empty\n", __func__);
2566 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2568 ida_destroy(&nvmet_fc_tgtport_cnt);
2571 module_init(nvmet_fc_init_module);
2572 module_exit(nvmet_fc_exit_module);
2574 MODULE_LICENSE("GPL v2");