/* drivers/nvme/target/fc.c */
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		4

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				total_length;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;
	struct work_struct		done_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {

	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}

/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 *   in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
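
/*
 * Worked example (illustrative only): with a random association ID of
 * 0x123456789abc0000 (lower two bytes zero), the helpers above give,
 * for qid 3:
 *
 *	nvmet_fc_makeconnid(assoc, 3)		== 0x123456789abc0003
 *	nvmet_fc_getassociationid(connid)	== 0x123456789abc0000
 *	nvmet_fc_getqueueid(connid)		== 3
 *
 * and, per the note above, the qid 0 connection ID equals the
 * association ID itself.
 */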

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
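
/*
 * Usage sketch (illustrative only, not compiled in): the wrappers are
 * called exactly like their dma_* counterparts, but tolerate the NULL
 * device pointer fcloop passes in. The buf/buflen names below are
 * hypothetical.
 */
#if 0
	dma_addr_t dma;

	dma = fc_dma_map_single(tgtport->dev, buf, buflen, DMA_TO_DEVICE);
	if (fc_dma_mapping_error(tgtport->dev, dma))
		return -ENOMEM;
	/* ... hand "dma" to the LLDD/hardware ... */
	fc_dma_unmap_single(tgtport->dev, dma, buflen, DMA_TO_DEVICE);
#endif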


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	/* free the array base, not the decremented iterator */
	kfree(tgtport->iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
		queue_work_on(queue->cpu, queue->work_q, &fod->work);
	else
		nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
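
/*
 * Worked example (illustrative only): with tgtport->ops->max_hw_queues
 * of 4, io queues map as qid 1 -> hwqid 0, qid 2 -> hwqid 1, ...,
 * qid 5 -> hwqid 0 again, while admin commands (qid 0) always land on
 * hwqid 0.
 */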

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Release reference taken at queue lookup and fod allocation */
		nvmet_fc_tgt_q_put(queue);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen  = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

	/*
	 * Leave in place the queue lookup reference that was taken when
	 * the fod was originally allocated; the deferred command that
	 * now owns the fod inherits it.
	 */
}

static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}
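
/*
 * Worked example (illustrative only): with 4 active cpus (0-3) and
 * qid 6, idx = (6 - 1) % 4 = 1, so the loop above returns the second
 * active cpu (cpu 1 if none are offline); qid 0 always selects the
 * first active cpu.
 */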

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
				GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i, writedataactive;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			writedataactive = fod->writedataactive;
			spin_unlock(&fod->flock);
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (writedataactive) {
				spin_lock(&fod->flock);
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			}
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
					template->max_sgl_segments);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
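
/*
 * Usage sketch (illustrative only, not compiled in): how an LLDD might
 * call the registration entry point. The example_* names and the
 * wwnn/wwpn/port_id values are hypothetical placeholders.
 */
#if 0
static struct nvmet_fc_target_port *example_tgtport;

static int
example_lldd_bind_port(struct device *dev,
		struct nvmet_fc_target_template *example_template)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name	= 0x20000090fa000001ULL,
		.port_name	= 0x10000090fa000001ULL,
		.port_id	= 0x012345,
	};

	return nvmet_fc_register_targetport(&pinfo, example_template,
					dev, &example_tgtport);
}
#endif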


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			nvmet_fc_delete_target_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LS_RJT,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}
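
/*
 * Usage sketch (illustrative only): rejecting an LS whose payload failed
 * validation, mirroring how the handlers below use this helper.
 */
#if 0
	iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
			NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
			FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
#endif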

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};

static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes: initiators have sent varying amounts of
	 * padding because the pad length of the Create Association Cmd
	 * descriptor was specified incorrectly in early spec revisions.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), and ignore how long
	 * the trailing pad is.
	 */
1277         if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1278                 ret = VERR_CR_ASSOC_LEN;
1279         else if (be32_to_cpu(rqst->desc_list_len) <
1280                         FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1281                 ret = VERR_CR_ASSOC_RQST_LEN;
1282         else if (rqst->assoc_cmd.desc_tag !=
1283                         cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1284                 ret = VERR_CR_ASSOC_CMD;
1285         else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1286                         FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1287                 ret = VERR_CR_ASSOC_CMD_LEN;
1288         else if (!rqst->assoc_cmd.ersp_ratio ||
1289                  (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1290                                 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1291                 ret = VERR_ERSP_RATIO;
1292
1293         else {
1294                 /* new association w/ admin queue */
1295                 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1296                 if (!iod->assoc)
1297                         ret = VERR_ASSOC_ALLOC_FAIL;
1298                 else {
1299                         queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1300                                         be16_to_cpu(rqst->assoc_cmd.sqsize));
1301                         if (!queue)
1302                                 ret = VERR_QUEUE_ALLOC_FAIL;
1303                 }
1304         }
1305
1306         if (ret) {
1307                 dev_err(tgtport->dev,
1308                         "Create Association LS failed: %s\n",
1309                         validation_errors[ret]);
1310                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1311                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1312                                 FCNVME_RJT_RC_LOGIC,
1313                                 FCNVME_RJT_EXP_NONE, 0);
1314                 return;
1315         }
1316
1317         queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1318         atomic_set(&queue->connected, 1);
1319         queue->sqhd = 0;        /* best place to init value */
1320
1321         /* format a response */
1322
1323         iod->lsreq->rsplen = sizeof(*acc);
1324
1325         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1326                         fcnvme_lsdesc_len(
1327                                 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1328                         FCNVME_LS_CREATE_ASSOCIATION);
1329         acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1330         acc->associd.desc_len =
1331                         fcnvme_lsdesc_len(
1332                                 sizeof(struct fcnvme_lsdesc_assoc_id));
1333         acc->associd.association_id =
1334                         cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1335         acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1336         acc->connectid.desc_len =
1337                         fcnvme_lsdesc_len(
1338                                 sizeof(struct fcnvme_lsdesc_conn_id));
1339         acc->connectid.connection_id = acc->associd.association_id;
1340 }
1341
1342 static void
1343 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1344                         struct nvmet_fc_ls_iod *iod)
1345 {
1346         struct fcnvme_ls_cr_conn_rqst *rqst =
1347                                 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1348         struct fcnvme_ls_cr_conn_acc *acc =
1349                                 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1350         struct nvmet_fc_tgt_queue *queue;
1351         int ret = 0;
1352
1353         memset(acc, 0, sizeof(*acc));
1354
1355         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1356                 ret = VERR_CR_CONN_LEN;
1357         else if (rqst->desc_list_len !=
1358                         fcnvme_lsdesc_len(
1359                                 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1360                 ret = VERR_CR_CONN_RQST_LEN;
1361         else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1362                 ret = VERR_ASSOC_ID;
1363         else if (rqst->associd.desc_len !=
1364                         fcnvme_lsdesc_len(
1365                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
1366                 ret = VERR_ASSOC_ID_LEN;
1367         else if (rqst->connect_cmd.desc_tag !=
1368                         cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1369                 ret = VERR_CR_CONN_CMD;
1370         else if (rqst->connect_cmd.desc_len !=
1371                         fcnvme_lsdesc_len(
1372                                 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1373                 ret = VERR_CR_CONN_CMD_LEN;
1374         else if (!rqst->connect_cmd.ersp_ratio ||
1375                  (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1376                                 be16_to_cpu(rqst->connect_cmd.sqsize)))
1377                 ret = VERR_ERSP_RATIO;
1378
1379         else {
1380                 /* new io queue */
1381                 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1382                                 be64_to_cpu(rqst->associd.association_id));
1383                 if (!iod->assoc)
1384                         ret = VERR_NO_ASSOC;
1385                 else {
1386                         queue = nvmet_fc_alloc_target_queue(iod->assoc,
1387                                         be16_to_cpu(rqst->connect_cmd.qid),
1388                                         be16_to_cpu(rqst->connect_cmd.sqsize));
1389                         if (!queue)
1390                                 ret = VERR_QUEUE_ALLOC_FAIL;
1391
1392                         /* release get taken in nvmet_fc_find_target_assoc */
1393                         nvmet_fc_tgt_a_put(iod->assoc);
1394                 }
1395         }
1396
1397         if (ret) {
1398                 dev_err(tgtport->dev,
1399                         "Create Connection LS failed: %s\n",
1400                         validation_errors[ret]);
1401                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1402                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1403                                 (ret == VERR_NO_ASSOC) ?
1404                                         FCNVME_RJT_RC_INV_ASSOC :
1405                                         FCNVME_RJT_RC_LOGIC,
1406                                 FCNVME_RJT_EXP_NONE, 0);
1407                 return;
1408         }
1409
1410         queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1411         atomic_set(&queue->connected, 1);
1412         queue->sqhd = 0;        /* best place to init value */
1413
1414         /* format a response */
1415
1416         iod->lsreq->rsplen = sizeof(*acc);
1417
1418         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1419                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1420                         FCNVME_LS_CREATE_CONNECTION);
1421         acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1422         acc->connectid.desc_len =
1423                         fcnvme_lsdesc_len(
1424                                 sizeof(struct fcnvme_lsdesc_conn_id));
1425         acc->connectid.connection_id =
1426                         cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1427                                 be16_to_cpu(rqst->connect_cmd.qid)));
1428 }
1429
1430 static void
1431 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1432                         struct nvmet_fc_ls_iod *iod)
1433 {
1434         struct fcnvme_ls_disconnect_rqst *rqst =
1435                         (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1436         struct fcnvme_ls_disconnect_acc *acc =
1437                         (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1438         struct nvmet_fc_tgt_queue *queue = NULL;
1439         struct nvmet_fc_tgt_assoc *assoc;
1440         int ret = 0;
1441         bool del_assoc = false;
1442
1443         memset(acc, 0, sizeof(*acc));
1444
1445         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1446                 ret = VERR_DISCONN_LEN;
1447         else if (rqst->desc_list_len !=
1448                         fcnvme_lsdesc_len(
1449                                 sizeof(struct fcnvme_ls_disconnect_rqst)))
1450                 ret = VERR_DISCONN_RQST_LEN;
1451         else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1452                 ret = VERR_ASSOC_ID;
1453         else if (rqst->associd.desc_len !=
1454                         fcnvme_lsdesc_len(
1455                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
1456                 ret = VERR_ASSOC_ID_LEN;
1457         else if (rqst->discon_cmd.desc_tag !=
1458                         cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1459                 ret = VERR_DISCONN_CMD;
1460         else if (rqst->discon_cmd.desc_len !=
1461                         fcnvme_lsdesc_len(
1462                                 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1463                 ret = VERR_DISCONN_CMD_LEN;
1464         else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1465                         (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1466                 ret = VERR_DISCONN_SCOPE;
1467         else {
1468                 /* match an active association */
1469                 assoc = nvmet_fc_find_target_assoc(tgtport,
1470                                 be64_to_cpu(rqst->associd.association_id));
1471                 iod->assoc = assoc;
1472                 if (assoc) {
1473                         if (rqst->discon_cmd.scope ==
1474                                         FCNVME_DISCONN_CONNECTION) {
1475                                 queue = nvmet_fc_find_target_queue(tgtport,
1476                                                 be64_to_cpu(
1477                                                         rqst->discon_cmd.id));
1478                                 if (!queue) {
1479                                         nvmet_fc_tgt_a_put(assoc);
1480                                         ret = VERR_NO_CONN;
1481                                 }
1482                         }
1483                 } else
1484                         ret = VERR_NO_ASSOC;
1485         }
1486
1487         if (ret) {
1488                 dev_err(tgtport->dev,
1489                         "Disconnect LS failed: %s\n",
1490                         validation_errors[ret]);
1491                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1492                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1493                                 (ret == VERR_NO_ASSOC) ?
1494                                         FCNVME_RJT_RC_INV_ASSOC :
1495                                         (ret == VERR_NO_CONN) ?
1496                                                 FCNVME_RJT_RC_INV_CONN :
1497                                                 FCNVME_RJT_RC_LOGIC,
1498                                 FCNVME_RJT_EXP_NONE, 0);
1499                 return;
1500         }
1501
1502         /* format a response */
1503
1504         iod->lsreq->rsplen = sizeof(*acc);
1505
1506         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1507                         fcnvme_lsdesc_len(
1508                                 sizeof(struct fcnvme_ls_disconnect_acc)),
1509                         FCNVME_LS_DISCONNECT);
1510
1511
1512         /* are we to delete a Connection ID (queue)? */
1513         if (queue) {
1514                 int qid = queue->qid;
1515
1516                 nvmet_fc_delete_target_queue(queue);
1517
1518                 /* release the get taken by find_target_queue */
1519                 nvmet_fc_tgt_q_put(queue);
1520
1521                 /* tear association down if the admin queue (qid 0) terminated */
1522                 if (!qid)
1523                         del_assoc = true;
1524         }
1525
1526         /* release get taken in nvmet_fc_find_target_assoc */
1527         nvmet_fc_tgt_a_put(iod->assoc);
1528
1529         if (del_assoc)
1530                 nvmet_fc_delete_target_assoc(iod->assoc);
1531 }
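
/*
 * Note: per the handler above, a Disconnect with connection scope that
 * names the admin queue (qid 0) deletes the queue and then tears down
 * the whole association, since a controller cannot outlive its admin
 * queue.
 */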
1532
1533
1534 /* *********************** NVME Ctrl Routines **************************** */
1535
1536
1537 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1538
1539 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1540
1541 static void
1542 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1543 {
1544         struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1545         struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1546
1547         fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1548                                 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1549         nvmet_fc_free_ls_iod(tgtport, iod);
1550         nvmet_fc_tgtport_put(tgtport);
1551 }
1552
1553 static void
1554 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1555                                 struct nvmet_fc_ls_iod *iod)
1556 {
1557         int ret;
1558
1559         fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1560                                   NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1561
1562         ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1563         if (ret)
1564                 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1565 }
1566
1567 /*
1568  * Actual processing routine for received FC-NVME LS requests from the LLDD
1569  */
1570 static void
1571 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1572                         struct nvmet_fc_ls_iod *iod)
1573 {
1574         struct fcnvme_ls_rqst_w0 *w0 =
1575                         (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1576
1577         iod->lsreq->nvmet_fc_private = iod;
1578         iod->lsreq->rspbuf = iod->rspbuf;
1579         iod->lsreq->rspdma = iod->rspdma;
1580         iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1581         /* Be preventive: handlers will later set a valid length */
1582         iod->lsreq->rsplen = 0;
1583
1584         iod->assoc = NULL;
1585
1586         /*
1587          * handlers:
1588          *   parse request input, execute the request, and format the
1589          *   LS response
1590          */
1591         switch (w0->ls_cmd) {
1592         case FCNVME_LS_CREATE_ASSOCIATION:
1593                 /* Creates Association and initial Admin Queue/Connection */
1594                 nvmet_fc_ls_create_association(tgtport, iod);
1595                 break;
1596         case FCNVME_LS_CREATE_CONNECTION:
1597                 /* Creates an IO Queue/Connection */
1598                 nvmet_fc_ls_create_connection(tgtport, iod);
1599                 break;
1600         case FCNVME_LS_DISCONNECT:
1601                 /* Terminate a Queue/Connection or the Association */
1602                 nvmet_fc_ls_disconnect(tgtport, iod);
1603                 break;
1604         default:
1605                 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1606                                 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1607                                 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1608         }
1609
1610         nvmet_fc_xmt_ls_rsp(tgtport, iod);
1611 }
1612
1613 /*
1614  * Work-queue wrapper: runs the LS request handler in process context
1615  */
1616 static void
1617 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1618 {
1619         struct nvmet_fc_ls_iod *iod =
1620                 container_of(work, struct nvmet_fc_ls_iod, work);
1621         struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1622
1623         nvmet_fc_handle_ls_rqst(tgtport, iod);
1624 }
1625
1626
1627 /**
1628  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1629  *                       upon the reception of an NVME LS request.
1630  *
1631  * The nvmet-fc layer will copy payload to an internal structure for
1632  * processing.  As such, upon completion of the routine, the LLDD may
1633  * immediately free/reuse the LS request buffer passed in the call.
1634  *
1635  * If this routine returns error, the LLDD should abort the exchange.
1636  *
1637  * @target_port: pointer to the (registered) target port the LS was
1638  *              received on.
1639  * @lsreq:      pointer to a lsreq request structure to be used to reference
1640  *              the exchange corresponding to the LS.
1641  * @lsreqbuf:   pointer to the buffer containing the LS Request
1642  * @lsreqbuf_len: length, in bytes, of the received LS request
1643  */
1644 int
1645 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1646                         struct nvmefc_tgt_ls_req *lsreq,
1647                         void *lsreqbuf, u32 lsreqbuf_len)
1648 {
1649         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1650         struct nvmet_fc_ls_iod *iod;
1651
1652         if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1653                 return -E2BIG;
1654
1655         if (!nvmet_fc_tgtport_get(tgtport))
1656                 return -ESHUTDOWN;
1657
1658         iod = nvmet_fc_alloc_ls_iod(tgtport);
1659         if (!iod) {
1660                 nvmet_fc_tgtport_put(tgtport);
1661                 return -ENOENT;
1662         }
1663
1664         iod->lsreq = lsreq;
1665         iod->fcpreq = NULL;
1666         memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1667         iod->rqstdatalen = lsreqbuf_len;
1668
1669         schedule_work(&iod->work);
1670
1671         return 0;
1672 }
1673 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
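
/*
 * A minimal sketch (compiled out) of the LLDD side of the entry point
 * above. The example_lldd_abort_exchange() helper is hypothetical; an
 * LLDD would substitute its own exchange-teardown routine.
 */
#if 0	/* illustrative only, not built */
static void example_lldd_rcv_ls(struct nvmet_fc_target_port *tport,
				struct nvmefc_tgt_ls_req *lsreq,
				void *buf, u32 len)
{
	/* the payload is copied internally; buf may be reused on return */
	if (nvmet_fc_rcv_ls_req(tport, lsreq, buf, len))
		example_lldd_abort_exchange(lsreq);	/* hypothetical */
}
#endif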
1674
1675
1676 /*
1677  * **********************
1678  * Start of FCP handling
1679  * **********************
1680  */
1681
1682 static int
1683 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1684 {
1685         struct scatterlist *sg;
1686         struct page *page;
1687         unsigned int nent;
1688         u32 page_len, length;
1689         int i = 0;
1690
1691         length = fod->total_length;
1692         nent = DIV_ROUND_UP(length, PAGE_SIZE);
1693         sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1694         if (!sg)
1695                 goto out;
1696
1697         sg_init_table(sg, nent);
1698
1699         while (length) {
1700                 page_len = min_t(u32, length, PAGE_SIZE);
1701
1702                 page = alloc_page(GFP_KERNEL);
1703                 if (!page)
1704                         goto out_free_pages;
1705
1706                 sg_set_page(&sg[i], page, page_len, 0);
1707                 length -= page_len;
1708                 i++;
1709         }
1710
1711         fod->data_sg = sg;
1712         fod->data_sg_cnt = nent;
1713         fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1714                                 ((fod->io_dir == NVMET_FCP_WRITE) ?
1715                                         DMA_FROM_DEVICE : DMA_TO_DEVICE));
1716                                 /* note: write from initiator perspective */
1717
1718         return 0;
1719
1720 out_free_pages:
1721         while (i > 0) {
1722                 i--;
1723                 __free_page(sg_page(&sg[i]));
1724         }
1725         kfree(sg);
1726         fod->data_sg = NULL;
1727         fod->data_sg_cnt = 0;
1728 out:
1729         return NVME_SC_INTERNAL;
1730 }
1731
1732 static void
1733 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1734 {
1735         struct scatterlist *sg;
1736         int count;
1737
1738         if (!fod->data_sg || !fod->data_sg_cnt)
1739                 return;
1740
1741         fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1742                                 ((fod->io_dir == NVMET_FCP_WRITE) ?
1743                                         DMA_FROM_DEVICE : DMA_TO_DEVICE));
1744         for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1745                 __free_page(sg_page(sg));
1746         kfree(fod->data_sg);
1747         fod->data_sg = NULL;
1748         fod->data_sg_cnt = 0;
1749 }
1750
1751
1752 static bool
1753 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1754 {
1755         u32 sqtail, used;
1756
1757         /* egad, this is ugly. And sqtail is just a best guess */
1758         sqtail = atomic_read(&q->sqtail) % q->sqsize;
1759
1760         used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1761         return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1762 }
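
/*
 * Worked example for the check above: with sqsize = 32, sqhd = 30 and
 * sqtail = 2, used = 2 + 32 - 30 = 4, and 4 * 10 < 31 * 9, so the queue
 * is not yet "90% full"; used must reach 28 of the 31 usable slots
 * before the check forces an ersp.
 */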
1763
1764 /*
1765  * Prep RSP payload.
1766  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1767  */
1768 static void
1769 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1770                                 struct nvmet_fc_fcp_iod *fod)
1771 {
1772         struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1773         struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1774         struct nvme_completion *cqe = &ersp->cqe;
1775         u32 *cqewd = (u32 *)cqe;
1776         bool send_ersp = false;
1777         u32 rsn, rspcnt, xfr_length;
1778
1779         if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1780                 xfr_length = fod->total_length;
1781         else
1782                 xfr_length = fod->offset;
1783
1784         /*
1785          * check to see if we can send a 0's rsp.
1786          *   Note: to send a 0's response, the NVME-FC host transport will
1787          *   recreate the CQE. The host transport knows: sq id, SQHD (last
1788          *   seen in an ersp), and command_id. Thus it will create a
1789          *   zero-filled CQE with those known fields filled in. Transport
1790          *   must send an ersp for any condition where the cqe won't match
1791          *   this.
1792          *
1793          * Here are the FC-NVME mandated cases where we must send an ersp:
1794          *  every N responses, where N=ersp_ratio
1795          *  force fabric commands to send ersp's (not in FC-NVME but good
1796          *    practice)
1797          *  normal cmds: any time status is non-zero, or status is zero
1798          *     but words 0 or 1 are non-zero.
1799          *  the SQ is 90% or more full
1800          *  the cmd is a fused command
1801          *  transferred data length not equal to cmd iu length
1802          */
1803         rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1804         if (!(rspcnt % fod->queue->ersp_ratio) ||
1805             sqe->opcode == nvme_fabrics_command ||
1806             xfr_length != fod->total_length ||
1807             (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1808             (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1809             queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1810                 send_ersp = true;
1811
1812         /* re-set the fields */
1813         fod->fcpreq->rspaddr = ersp;
1814         fod->fcpreq->rspdma = fod->rspdma;
1815
1816         if (!send_ersp) {
1817                 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1818                 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1819         } else {
1820                 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1821                 rsn = atomic_inc_return(&fod->queue->rsn);
1822                 ersp->rsn = cpu_to_be32(rsn);
1823                 ersp->xfrd_len = cpu_to_be32(xfr_length);
1824                 fod->fcpreq->rsplen = sizeof(*ersp);
1825         }
1826
1827         fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1828                                   sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1829 }
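
/*
 * Worked example for the decision above: with ersp_ratio 8, at least
 * every 8th response on a queue is a full ersp. In between, a read
 * that moved exactly the cmd iu data length, with a zero status and
 * zero cqe words 0/1, on a queue under 90% full, goes out as the
 * short zero-filled response instead.
 */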
1830
1831 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1832
1833 static void
1834 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1835                                 struct nvmet_fc_fcp_iod *fod)
1836 {
1837         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1838
1839         /* data no longer needed */
1840         nvmet_fc_free_tgt_pgs(fod);
1841
1842         /*
1843          * if an ABTS was received or we issued the fcp_abort early,
1844          * don't call the abort routine again.
1845          */
1846         /* no need to take lock - lock was taken earlier to get here */
1847         if (!fod->aborted)
1848                 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1849
1850         nvmet_fc_free_fcp_iod(fod->queue, fod);
1851 }
1852
1853 static void
1854 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1855                                 struct nvmet_fc_fcp_iod *fod)
1856 {
1857         int ret;
1858
1859         fod->fcpreq->op = NVMET_FCOP_RSP;
1860         fod->fcpreq->timeout = 0;
1861
1862         nvmet_fc_prep_fcp_rsp(tgtport, fod);
1863
1864         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1865         if (ret)
1866                 nvmet_fc_abort_op(tgtport, fod);
1867 }
1868
1869 static void
1870 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1871                                 struct nvmet_fc_fcp_iod *fod, u8 op)
1872 {
1873         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1874         unsigned long flags;
1875         u32 tlen;
1876         int ret;
1877
1878         fcpreq->op = op;
1879         fcpreq->offset = fod->offset;
1880         fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1881
1882         tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
1883                         (fod->total_length - fod->offset));
1884         fcpreq->transfer_length = tlen;
1885         fcpreq->transferred_length = 0;
1886         fcpreq->fcp_error = 0;
1887         fcpreq->rsplen = 0;
1888
1889         fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
1890         fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
1891
1892         /*
1893          * If the last READDATA request: check if LLDD supports
1894          * combined xfr with response.
1895          */
1896         if ((op == NVMET_FCOP_READDATA) &&
1897             ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1898             (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1899                 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1900                 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1901         }
1902
1903         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1904         if (ret) {
1905                  * should be ok to set w/o lock as it's in the thread of
1906                  * should be ok to set w/o lock as its in the thread of
1907                  * execution (not an async timer routine) and doesn't
1908                  * contend with any clearing action
1909                  */
1910                 fod->abort = true;
1911
1912                 if (op == NVMET_FCOP_WRITEDATA) {
1913                         spin_lock_irqsave(&fod->flock, flags);
1914                         fod->writedataactive = false;
1915                         spin_unlock_irqrestore(&fod->flock, flags);
1916                         nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1917                 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1918                         fcpreq->fcp_error = ret;
1919                         fcpreq->transferred_length = 0;
1920                         nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1921                 }
1922         }
1923 }
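
/*
 * Example of the chunking above (illustrative values): with 4K pages,
 * a max_sg_cnt of 256 and a 2MB read, each pass moves
 * min(256 * 4096, remaining) = 1MB, so the io takes two
 * NVMET_FCOP_READDATA ops, the second collapsing into
 * NVMET_FCOP_READDATA_RSP if the LLDD advertises that feature.
 */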
1924
1925 static inline bool
1926 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1927 {
1928         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1929         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1930
1931         /* if in the middle of an io and we need to tear down */
1932         if (abort) {
1933                 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1934                         nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1935                         return true;
1936                 }
1937
1938                 nvmet_fc_abort_op(tgtport, fod);
1939                 return true;
1940         }
1941
1942         return false;
1943 }
1944
1945 /*
1946  * actual done handler for FCP operations when completed by the lldd
1947  */
1948 static void
1949 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1950 {
1951         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1952         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1953         unsigned long flags;
1954         bool abort;
1955
1956         spin_lock_irqsave(&fod->flock, flags);
1957         abort = fod->abort;
1958         fod->writedataactive = false;
1959         spin_unlock_irqrestore(&fod->flock, flags);
1960
1961         switch (fcpreq->op) {
1962
1963         case NVMET_FCOP_WRITEDATA:
1964                 if (__nvmet_fc_fod_op_abort(fod, abort))
1965                         return;
1966                 if (fcpreq->fcp_error ||
1967                     fcpreq->transferred_length != fcpreq->transfer_length) {
1968                         spin_lock(&fod->flock);
1969                         fod->abort = true;
1970                         spin_unlock(&fod->flock);
1971
1972                         nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1973                         return;
1974                 }
1975
1976                 fod->offset += fcpreq->transferred_length;
1977                 if (fod->offset != fod->total_length) {
1978                         spin_lock_irqsave(&fod->flock, flags);
1979                         fod->writedataactive = true;
1980                         spin_unlock_irqrestore(&fod->flock, flags);
1981
1982                         /* transfer the next chunk */
1983                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1984                                                 NVMET_FCOP_WRITEDATA);
1985                         return;
1986                 }
1987
1988                 /* data transfer complete, resume with nvmet layer */
1989
1990                 fod->req.execute(&fod->req);
1991
1992                 break;
1993
1994         case NVMET_FCOP_READDATA:
1995         case NVMET_FCOP_READDATA_RSP:
1996                 if (__nvmet_fc_fod_op_abort(fod, abort))
1997                         return;
1998                 if (fcpreq->fcp_error ||
1999                     fcpreq->transferred_length != fcpreq->transfer_length) {
2000                         nvmet_fc_abort_op(tgtport, fod);
2001                         return;
2002                 }
2003
2004                 /* success */
2005
2006                 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2007                         /* data no longer needed */
2008                         nvmet_fc_free_tgt_pgs(fod);
2009                         nvmet_fc_free_fcp_iod(fod->queue, fod);
2010                         return;
2011                 }
2012
2013                 fod->offset += fcpreq->transferred_length;
2014                 if (fod->offset != fod->total_length) {
2015                         /* transfer the next chunk */
2016                         nvmet_fc_transfer_fcp_data(tgtport, fod,
2017                                                 NVMET_FCOP_READDATA);
2018                         return;
2019                 }
2020
2021                 /* data transfer complete, send response */
2022
2023                 /* data no longer needed */
2024                 nvmet_fc_free_tgt_pgs(fod);
2025
2026                 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2027
2028                 break;
2029
2030         case NVMET_FCOP_RSP:
2031                 if (__nvmet_fc_fod_op_abort(fod, abort))
2032                         return;
2033                 nvmet_fc_free_fcp_iod(fod->queue, fod);
2034                 break;
2035
2036         default:
2037                 break;
2038         }
2039 }
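
/*
 * Summary of the op sequences driven by the handler above:
 *
 *   write:   WRITEDATA ... WRITEDATA -> req.execute() -> RSP
 *   read:    req.execute() -> READDATA ... -> RSP (or READDATA_RSP)
 *   no data: req.execute() -> RSP
 *
 * with errors or aborts at any step diverting into nvmet_fc_abort_op()
 * or nvmet_req_complete().
 */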
2040
2041 static void
2042 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
2043 {
2044         struct nvmet_fc_fcp_iod *fod =
2045                 container_of(work, struct nvmet_fc_fcp_iod, done_work);
2046
2047         nvmet_fc_fod_op_done(fod);
2048 }
2049
2050 static void
2051 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2052 {
2053         struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2054         struct nvmet_fc_tgt_queue *queue = fod->queue;
2055
2056         if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
2057                 /* context switch so completion is not in ISR context */
2058                 queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
2059         else
2060                 nvmet_fc_fod_op_done(fod);
2061 }
2062
2063 /*
2064  * actual completion handler after execution by the nvmet layer
2065  */
2066 static void
2067 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2068                         struct nvmet_fc_fcp_iod *fod, int status)
2069 {
2070         struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2071         struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2072         unsigned long flags;
2073         bool abort;
2074
2075         spin_lock_irqsave(&fod->flock, flags);
2076         abort = fod->abort;
2077         spin_unlock_irqrestore(&fod->flock, flags);
2078
2079         /* if we have a CQE, snoop the last sq_head value */
2080         if (!status)
2081                 fod->queue->sqhd = cqe->sq_head;
2082
2083         if (abort) {
2084                 nvmet_fc_abort_op(tgtport, fod);
2085                 return;
2086         }
2087
2088         /* if there was an error handling the cmd after initial parsing */
2089         if (status) {
2090                 /* fudge up a failed CQE status for our transport error */
2091                 memset(cqe, 0, sizeof(*cqe));
2092                 cqe->sq_head = fod->queue->sqhd;        /* echo last cqe sqhd */
2093                 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2094                 cqe->command_id = sqe->command_id;
2095                 cqe->status = cpu_to_le16(status);
2096         } else {
2097
2098                 /*
2099                  * try to push the data even if the SQE status is non-zero.
2100                  * There may be statuses for which data was still intended
2101                  * to be moved
2102                  */
2103                 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2104                         /* push the data over before sending rsp */
2105                         nvmet_fc_transfer_fcp_data(tgtport, fod,
2106                                                 NVMET_FCOP_READDATA);
2107                         return;
2108                 }
2109
2110                 /* writes & no data - fall thru */
2111         }
2112
2113         /* data no longer needed */
2114         nvmet_fc_free_tgt_pgs(fod);
2115
2116         nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2117 }
2118
2119
2120 static void
2121 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2122 {
2123         struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2124         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2125
2126         __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2127 }
2128
2129
2130 /*
2131  * Actual processing routine for received FC-NVME FCP CMD IUs from the LLDD
2132  */
2133 static void
2134 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2135                         struct nvmet_fc_fcp_iod *fod)
2136 {
2137         struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2138         int ret;
2139
2140         /*
2141          * Fused commands are currently not supported in the Linux
2142          * implementation.
2143          *
2144          * As such, the FC transport makes no attempt to pair fused
2145          * commands or to hold delivery to the upper layer until both
2146          * halves have arrived, ordered by csn.
2147          */
2148
2149         fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2150
2151         fod->total_length = be32_to_cpu(cmdiu->data_len);
2152         if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2153                 fod->io_dir = NVMET_FCP_WRITE;
2154                 if (!nvme_is_write(&cmdiu->sqe))
2155                         goto transport_error;
2156         } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2157                 fod->io_dir = NVMET_FCP_READ;
2158                 if (nvme_is_write(&cmdiu->sqe))
2159                         goto transport_error;
2160         } else {
2161                 fod->io_dir = NVMET_FCP_NODATA;
2162                 if (fod->total_length)
2163                         goto transport_error;
2164         }
2165
2166         fod->req.cmd = &fod->cmdiubuf.sqe;
2167         fod->req.rsp = &fod->rspiubuf.cqe;
2168         fod->req.port = fod->queue->port;
2169
2170         /* ensure nvmet handlers will set cmd handler callback */
2171         fod->req.execute = NULL;
2172
2173         /* clear any response payload */
2174         memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2175
2176         fod->data_sg = NULL;
2177         fod->data_sg_cnt = 0;
2178
2179         ret = nvmet_req_init(&fod->req,
2180                                 &fod->queue->nvme_cq,
2181                                 &fod->queue->nvme_sq,
2182                                 &nvmet_fc_tgt_fcp_ops);
2183         if (!ret) {
2184                 /* bad SQE content or invalid ctrl state */
2185                 /* nvmet layer has already called op done to send rsp. */
2186                 return;
2187         }
2188
2189         /* keep a running counter of tail position */
2190         atomic_inc(&fod->queue->sqtail);
2191
2192         if (fod->total_length) {
2193                 ret = nvmet_fc_alloc_tgt_pgs(fod);
2194                 if (ret) {
2195                         nvmet_req_complete(&fod->req, ret);
2196                         return;
2197                 }
2198         }
2199         fod->req.sg = fod->data_sg;
2200         fod->req.sg_cnt = fod->data_sg_cnt;
2201         fod->offset = 0;
2202
2203         if (fod->io_dir == NVMET_FCP_WRITE) {
2204                 /* pull the data over before invoking nvmet layer */
2205                 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2206                 return;
2207         }
2208
2209         /*
2210          * Reads or no data:
2211          *
2212          * can invoke the nvmet layer now. If there is read data, cmd completion will
2213          * push the data
2214          */
2215
2216         fod->req.execute(&fod->req);
2217
2218         return;
2219
2220 transport_error:
2221         nvmet_fc_abort_op(tgtport, fod);
2222 }
2223
2224 /*
2225  * Work-queue wrapper: runs the FCP request handler in process context
2226  */
2227 static void
2228 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2229 {
2230         struct nvmet_fc_fcp_iod *fod =
2231                 container_of(work, struct nvmet_fc_fcp_iod, work);
2232         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2233
2234         nvmet_fc_handle_fcp_rqst(tgtport, fod);
2235 }
2236
2237 /**
2238  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2239  *                       upon the reception of an NVME FCP CMD IU.
2240  *
2241  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2242  * layer for processing.
2243  *
2244  * The nvmet_fc layer allocates a local job structure (struct
2245  * nvmet_fc_fcp_iod) from the queue for the io and copies the
2246  * CMD IU buffer to the job structure. As such, on a successful
2247  * completion (returns 0), the LLDD may immediately free/reuse
2248  * the CMD IU buffer passed in the call.
2249  *
2250  * However, because FC is packetized and the FC LLDD api is
2251  * asynchronous (the LLDD may issue a hw command to send the response
2252  * but not see its hw completion, and thus not upcall the nvmet_fc
2253  * layer, before a new command arrives), it's possible for a command
2254  * to be received before the LLDD and nvmet_fc have recycled the job
2255  * structure. This gives the appearance of more commands received
2256  * than fit in the sq.
2257  * To alleviate this scenario, a temporary queue is maintained in the
2258  * transport for pending LLDD requests waiting for a queue job
2259  * structure. In these "overrun" cases, a temporary queue element is
2260  * allocated, the LLDD request and CMD IU buffer information are
2261  * remembered, and the routine returns -EOVERFLOW. Subsequently, when
2262  * a queue job structure is freed, it is immediately reallocated for
2263  * anything on the pending request list. The LLDD's defer_rcv()
2264  * callback is called, informing the LLDD that it may reuse the CMD
2265  * IU buffer, and the io is then started normally with the transport.
2266  *
2267  * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2268  * the completion as successful but must not reuse the CMD IU buffer
2269  * until the LLDD's defer_rcv() callback has been called for the
2270  * corresponding struct nvmefc_tgt_fcp_req pointer.
2271  *
2272  * If there is any other condition in which an error occurs, the
2273  * transport will return a non-zero status indicating the error.
2274  * In all cases other than -EOVERFLOW, the transport has not accepted the
2275  * request and the LLDD should abort the exchange.
2276  *
2277  * @target_port: pointer to the (registered) target port the FCP CMD IU
2278  *              was received on.
2279  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2280  *              the exchange corresponding to the FCP Exchange.
2281  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2282  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2283  */
2284 int
2285 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2286                         struct nvmefc_tgt_fcp_req *fcpreq,
2287                         void *cmdiubuf, u32 cmdiubuf_len)
2288 {
2289         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2290         struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2291         struct nvmet_fc_tgt_queue *queue;
2292         struct nvmet_fc_fcp_iod *fod;
2293         struct nvmet_fc_defer_fcp_req *deferfcp;
2294         unsigned long flags;
2295
2296         /* validate iu, so the connection id can be used to find the queue */
2297         if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2298                         (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2299                         (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2300                         (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2301                 return -EIO;
2302
2303         queue = nvmet_fc_find_target_queue(tgtport,
2304                                 be64_to_cpu(cmdiu->connection_id));
2305         if (!queue)
2306                 return -ENOTCONN;
2307
2308         /*
2309          * note: reference taken by find_target_queue
2310          * After successful fod allocation, the fod will inherit the
2311          * ownership of that reference and will remove the reference
2312          * when the fod is freed.
2313          */
2314
2315         spin_lock_irqsave(&queue->qlock, flags);
2316
2317         fod = nvmet_fc_alloc_fcp_iod(queue);
2318         if (fod) {
2319                 spin_unlock_irqrestore(&queue->qlock, flags);
2320
2321                 fcpreq->nvmet_fc_private = fod;
2322                 fod->fcpreq = fcpreq;
2323
2324                 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2325
2326                 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2327
2328                 return 0;
2329         }
2330
2331         if (!tgtport->ops->defer_rcv) {
2332                 spin_unlock_irqrestore(&queue->qlock, flags);
2333                 /* release the queue lookup reference */
2334                 nvmet_fc_tgt_q_put(queue);
2335                 return -ENOENT;
2336         }
2337
2338         deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2339                         struct nvmet_fc_defer_fcp_req, req_list);
2340         if (deferfcp) {
2341                 /* Just re-use one that was previously allocated */
2342                 list_del(&deferfcp->req_list);
2343         } else {
2344                 spin_unlock_irqrestore(&queue->qlock, flags);
2345
2346                 /* Now we need to dynamically allocate one */
2347                 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2348                 if (!deferfcp) {
2349                         /* release the queue lookup reference */
2350                         nvmet_fc_tgt_q_put(queue);
2351                         return -ENOMEM;
2352                 }
2353                 spin_lock_irqsave(&queue->qlock, flags);
2354         }
2355
2356         /* For now, use rspaddr / rsplen to save payload information */
2357         fcpreq->rspaddr = cmdiubuf;
2358         fcpreq->rsplen  = cmdiubuf_len;
2359         deferfcp->fcp_req = fcpreq;
2360
2361         /* defer processing till a fod becomes available */
2362         list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2363
2364         /* NOTE: the queue lookup reference is still valid */
2365
2366         spin_unlock_irqrestore(&queue->qlock, flags);
2367
2368         return -EOVERFLOW;
2369 }
2370 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
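
/*
 * A minimal sketch (compiled out) of the LLDD side of the -EOVERFLOW
 * contract described above. The example_lldd_* helpers are hypothetical
 * placeholders for an LLDD's own routines.
 */
#if 0	/* illustrative only, not built */
static void example_lldd_rcv_cmd(struct nvmet_fc_target_port *tport,
				 struct nvmefc_tgt_fcp_req *fcpreq,
				 void *cmdiu, u32 len)
{
	int ret = nvmet_fc_rcv_fcp_req(tport, fcpreq, cmdiu, len);

	if (!ret)
		return;				/* cmdiu may be reused now */
	if (ret == -EOVERFLOW) {
		/* hold cmdiu untouched until ->defer_rcv() is upcalled */
		example_lldd_hold_buffer(cmdiu);	/* hypothetical */
		return;
	}
	example_lldd_abort_exchange(fcpreq);		/* hypothetical */
}
#endif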
2371
2372 /**
2373  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2374  *                       upon the reception of an ABTS for a FCP command
2375  *
2376  * Notify the transport that an ABTS has been received for a FCP command
2377  * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2378  * LLDD believes the command is still being worked on
2379  * (template_ops->fcp_req_release() has not been called).
2380  *
2381  * The transport will wait for any outstanding work (an op to the LLDD,
2382  * which the lldd should complete with error due to the ABTS; or the
2383  * completion from the nvmet layer of the nvme command), then will
2384  * stop processing and call the LLDD's fcp_req_release() callback to
2385  * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
2386  * to the ABTS either after return from this function (assuming any
2387  * outstanding op work has been terminated) or upon the callback being
2388  * called.
2389  *
2390  * @target_port: pointer to the (registered) target port the FCP CMD IU
2391  *              was received on.
2392  * @fcpreq:     pointer to the fcpreq request structure that corresponds
2393  *              to the exchange that received the ABTS.
2394  */
2395 void
2396 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2397                         struct nvmefc_tgt_fcp_req *fcpreq)
2398 {
2399         struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2400         struct nvmet_fc_tgt_queue *queue;
2401         unsigned long flags;
2402
2403         if (!fod || fod->fcpreq != fcpreq)
2404                 /* job appears to have already completed, ignore abort */
2405                 return;
2406
2407         queue = fod->queue;
2408
2409         spin_lock_irqsave(&queue->qlock, flags);
2410         if (fod->active) {
2411                 /*
2412                  * mark as abort. The abort handler, invoked upon completion
2413                  * of any work, will detect the aborted status and do the
2414                  * callback.
2415                  */
2416                 spin_lock(&fod->flock);
2417                 fod->abort = true;
2418                 fod->aborted = true;
2419                 spin_unlock(&fod->flock);
2420         }
2421         spin_unlock_irqrestore(&queue->qlock, flags);
2422 }
2423 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2424
2425
2426 struct nvmet_fc_traddr {
2427         u64     nn;
2428         u64     pn;
2429 };
2430
2431 static int
2432 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2433 {
2434         u64 token64;
2435
2436         if (match_u64(sstr, &token64))
2437                 return -EINVAL;
2438         *val = token64;
2439
2440         return 0;
2441 }
2442
2443 /*
2444  * This routine validates and extracts the WWNs from the TRADDR string.
2445  * As kernel parsers need the 0x prefix to determine the number base,
2446  * always build the string to parse with a 0x prefix before parsing.
2447  */
2448 static int
2449 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2450 {
2451         char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2452         substring_t wwn = { name, &name[sizeof(name)-1] };
2453         int nnoffset, pnoffset;
2454
2455         /* validate the string is one of the 2 allowed formats */
2456         if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2457                         !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2458                         !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2459                                 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2460                 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2461                 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2462                                                 NVME_FC_TRADDR_OXNNLEN;
2463         } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2464                         !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2465                         !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2466                                 "pn-", NVME_FC_TRADDR_NNLEN))) {
2467                 nnoffset = NVME_FC_TRADDR_NNLEN;
2468                 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2469         } else
2470                 goto out_einval;
2471
2472         name[0] = '0';
2473         name[1] = 'x';
2474         name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2475
2476         memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2477         if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2478                 goto out_einval;
2479
2480         memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2481         if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2482                 goto out_einval;
2483
2484         return 0;
2485
2486 out_einval:
2487         pr_warn("%s: bad traddr string\n", __func__);
2488         return -EINVAL;
2489 }
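
/*
 * Examples (illustrative values) of the two accepted traddr forms:
 *
 *   nn-0x20000090fac7e6c9:pn-0x10000090fac7e6c9
 *   nn-20000090fac7e6c9:pn-10000090fac7e6c9
 *
 * Either way, the names are re-assembled with a 0x prefix before
 * __nvme_fc_parse_u64() sees them.
 */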
2490
2491 static int
2492 nvmet_fc_add_port(struct nvmet_port *port)
2493 {
2494         struct nvmet_fc_tgtport *tgtport;
2495         struct nvmet_fc_traddr traddr = { 0L, 0L };
2496         unsigned long flags;
2497         int ret;
2498
2499         /* validate the address info */
2500         if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2501             (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2502                 return -EINVAL;
2503
2504         /* map the traddr address info to a target port */
2505
2506         ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2507                         sizeof(port->disc_addr.traddr));
2508         if (ret)
2509                 return ret;
2510
2511         ret = -ENXIO;
2512         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2513         list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2514                 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2515                     (tgtport->fc_target_port.port_name == traddr.pn)) {
2516                         /* an FC port can map to only one nvmet port id */
2517                         if (!tgtport->port) {
2518                                 tgtport->port = port;
2519                                 port->priv = tgtport;
2520                                 nvmet_fc_tgtport_get(tgtport);
2521                                 ret = 0;
2522                         } else
2523                                 ret = -EALREADY;
2524                         break;
2525                 }
2526         }
2527         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2528         return ret;
2529 }
2530
2531 static void
2532 nvmet_fc_remove_port(struct nvmet_port *port)
2533 {
2534         struct nvmet_fc_tgtport *tgtport = port->priv;
2535         unsigned long flags;
2536         bool matched = false;
2537
2538         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2539         if (tgtport->port == port) {
2540                 matched = true;
2541                 tgtport->port = NULL;
2542         }
2543         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2544
2545         if (matched)
2546                 nvmet_fc_tgtport_put(tgtport);
2547 }
2548
2549 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2550         .owner                  = THIS_MODULE,
2551         .type                   = NVMF_TRTYPE_FC,
2552         .msdbd                  = 1,
2553         .add_port               = nvmet_fc_add_port,
2554         .remove_port            = nvmet_fc_remove_port,
2555         .queue_response         = nvmet_fc_fcp_nvme_cmd_done,
2556         .delete_ctrl            = nvmet_fc_delete_ctrl,
2557 };
2558
2559 static int __init nvmet_fc_init_module(void)
2560 {
2561         return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2562 }
2563
2564 static void __exit nvmet_fc_exit_module(void)
2565 {
2566         /* sanity check - all targetports should be removed */
2567         if (!list_empty(&nvmet_fc_target_list))
2568                 pr_warn("%s: targetport list not empty\n", __func__);
2569
2570         nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2571
2572         ida_destroy(&nvmet_fc_tgtport_cnt);
2573 }
2574
2575 module_init(nvmet_fc_init_module);
2576 module_exit(nvmet_fc_exit_module);
2577
2578 MODULE_LICENSE("GPL v2");