[sfrench/cifs-2.6.git] drivers/nvme/host/rdma.c
1 /*
2  * NVMe over Fabrics RDMA host code.
3  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/err.h>
19 #include <linux/string.h>
20 #include <linux/atomic.h>
21 #include <linux/blk-mq.h>
22 #include <linux/types.h>
23 #include <linux/list.h>
24 #include <linux/mutex.h>
25 #include <linux/scatterlist.h>
26 #include <linux/nvme.h>
27 #include <asm/unaligned.h>
28
29 #include <rdma/ib_verbs.h>
30 #include <rdma/rdma_cm.h>
31 #include <linux/nvme-rdma.h>
32
33 #include "nvme.h"
34 #include "fabrics.h"
35
36
37 #define NVME_RDMA_CONNECT_TIMEOUT_MS    1000            /* 1 second */
38
39 #define NVME_RDMA_MAX_SEGMENT_SIZE      0xffffff        /* 24-bit SGL field */
40
41 #define NVME_RDMA_MAX_SEGMENTS          256
42
43 #define NVME_RDMA_MAX_INLINE_SEGMENTS   1
44
45 static const char *const nvme_rdma_cm_status_strs[] = {
46         [NVME_RDMA_CM_INVALID_LEN]      = "invalid length",
47         [NVME_RDMA_CM_INVALID_RECFMT]   = "invalid record format",
48         [NVME_RDMA_CM_INVALID_QID]      = "invalid queue ID",
49         [NVME_RDMA_CM_INVALID_HSQSIZE]  = "invalid host SQ size",
50         [NVME_RDMA_CM_INVALID_HRQSIZE]  = "invalid host RQ size",
51         [NVME_RDMA_CM_NO_RSC]           = "resource not found",
52         [NVME_RDMA_CM_INVALID_IRD]      = "invalid IRD",
53         [NVME_RDMA_CM_INVALID_ORD]      = "invalid ORD",
54 };
55
56 static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
57 {
58         size_t index = status;
59
60         if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) &&
61             nvme_rdma_cm_status_strs[index])
62                 return nvme_rdma_cm_status_strs[index];
63         else
64                 return "unrecognized reason";
65 }
66
67 /*
68  * We handle AEN commands ourselves and don't even let the
69  * block layer know about them.
70  */
71 #define NVME_RDMA_NR_AEN_COMMANDS      1
72 #define NVME_RDMA_AQ_BLKMQ_DEPTH       \
73         (NVMF_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
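
/*
 * Worked example of the arithmetic above, assuming NVMF_AQ_DEPTH is 32
 * as defined in fabrics.h at the time: the admin blk-mq tagset is sized
 * 32 - 1 = 31 (tags 0..30) and the driver-private AEN command uses
 * command_id 31, so any completion carrying a command_id >=
 * NVME_RDMA_AQ_BLKMQ_DEPTH can only belong to the AEN.  This is how
 * __nvme_rdma_recv_done() below tells the two apart.
 */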
74
75 struct nvme_rdma_device {
76         struct ib_device       *dev;
77         struct ib_pd           *pd;
78         struct kref             ref;
79         struct list_head        entry;
80 };
81
82 struct nvme_rdma_qe {
83         struct ib_cqe           cqe;
84         void                    *data;
85         u64                     dma;
86 };
87
88 struct nvme_rdma_queue;
89 struct nvme_rdma_request {
90         struct nvme_request     req;
91         struct ib_mr            *mr;
92         struct nvme_rdma_qe     sqe;
93         struct ib_sge           sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
94         u32                     num_sge;
95         int                     nents;
96         bool                    inline_data;
97         struct ib_reg_wr        reg_wr;
98         struct ib_cqe           reg_cqe;
99         struct nvme_rdma_queue  *queue;
100         struct sg_table         sg_table;
101         struct scatterlist      first_sgl[];
102 };
103
104 enum nvme_rdma_queue_flags {
105         NVME_RDMA_Q_CONNECTED = (1 << 0),
106         NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
107         NVME_RDMA_Q_DELETING = (1 << 2),
108         NVME_RDMA_Q_LIVE = (1 << 3),
109 };
110
111 struct nvme_rdma_queue {
112         struct nvme_rdma_qe     *rsp_ring;
113         u8                      sig_count;
114         int                     queue_size;
115         size_t                  cmnd_capsule_len;
116         struct nvme_rdma_ctrl   *ctrl;
117         struct nvme_rdma_device *device;
118         struct ib_cq            *ib_cq;
119         struct ib_qp            *qp;
120
121         unsigned long           flags;
122         struct rdma_cm_id       *cm_id;
123         int                     cm_error;
124         struct completion       cm_done;
125 };
126
127 struct nvme_rdma_ctrl {
128         /* read and written in the hot path */
129         spinlock_t              lock;
130
131         /* read only in the hot path */
132         struct nvme_rdma_queue  *queues;
133         u32                     queue_count;
134
135         /* other member variables */
136         struct blk_mq_tag_set   tag_set;
137         struct work_struct      delete_work;
138         struct work_struct      reset_work;
139         struct work_struct      err_work;
140
141         struct nvme_rdma_qe     async_event_sqe;
142
143         int                     reconnect_delay;
144         struct delayed_work     reconnect_work;
145
146         struct list_head        list;
147
148         struct blk_mq_tag_set   admin_tag_set;
149         struct nvme_rdma_device *device;
150
151         u64                     cap;
152         u32                     max_fr_pages;
153
154         union {
155                 struct sockaddr addr;
156                 struct sockaddr_in addr_in;
157         };
158
159         struct nvme_ctrl        ctrl;
160 };
161
162 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
163 {
164         return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
165 }
166
167 static LIST_HEAD(device_list);
168 static DEFINE_MUTEX(device_list_mutex);
169
170 static LIST_HEAD(nvme_rdma_ctrl_list);
171 static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
172
173 static struct workqueue_struct *nvme_rdma_wq;
174
175 /*
176  * Disabling this option makes small I/O go faster, but is fundamentally
177  * unsafe.  With it turned off we will have to register a global rkey that
178  * allows read and write access to all physical memory.
179  */
180 static bool register_always = true;
181 module_param(register_always, bool, 0444);
182 MODULE_PARM_DESC(register_always,
183          "Use memory registration even for contiguous memory regions");
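
/*
 * Illustrative usage, not part of this file: since the parameter is
 * mode 0444 it can only be set at load time, e.g.
 *
 *     modprobe nvme-rdma register_always=N
 *
 * which lets single-segment transfers skip per-I/O memory registration
 * and use the PD's unsafe global rkey instead; see the single-segment
 * path in nvme_rdma_map_data() below.
 */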
184
185 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
186                 struct rdma_cm_event *event);
187 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
188
189 /* XXX: really should move to a generic header sooner or later.. */
190 static inline void put_unaligned_le24(u32 val, u8 *p)
191 {
192         *p++ = val;
193         *p++ = val >> 8;
194         *p++ = val >> 16;
195 }
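
/*
 * Example for the helper above: put_unaligned_le24(0x123456, buf)
 * stores buf[0] = 0x56, buf[1] = 0x34, buf[2] = 0x12, i.e. a 3-byte
 * little-endian value.  It is used below to fill the 24-bit length
 * field of keyed SGL descriptors (struct nvme_keyed_sgl_desc.length).
 */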
196
197 static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
198 {
199         return queue - queue->ctrl->queues;
200 }
201
202 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
203 {
204         return queue->cmnd_capsule_len - sizeof(struct nvme_command);
205 }
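
/*
 * Worked example: for I/O queues the capsule length is ioccsz * 16
 * (set in nvme_rdma_init_queue()), so a target advertising ioccsz = 8
 * gives a 128-byte capsule and 128 - 64 = 64 bytes of inline data
 * after the 64-byte SQE, while ioccsz = 4 leaves no inline room at
 * all.  The numbers here are illustrative, not driver requirements.
 */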
206
207 static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
208                 size_t capsule_size, enum dma_data_direction dir)
209 {
210         ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
211         kfree(qe->data);
212 }
213
214 static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
215                 size_t capsule_size, enum dma_data_direction dir)
216 {
217         qe->data = kzalloc(capsule_size, GFP_KERNEL);
218         if (!qe->data)
219                 return -ENOMEM;
220
221         qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
222         if (ib_dma_mapping_error(ibdev, qe->dma)) {
223                 kfree(qe->data);
224                 return -ENOMEM;
225         }
226
227         return 0;
228 }
229
230 static void nvme_rdma_free_ring(struct ib_device *ibdev,
231                 struct nvme_rdma_qe *ring, size_t ib_queue_size,
232                 size_t capsule_size, enum dma_data_direction dir)
233 {
234         int i;
235
236         for (i = 0; i < ib_queue_size; i++)
237                 nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
238         kfree(ring);
239 }
240
241 static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
242                 size_t ib_queue_size, size_t capsule_size,
243                 enum dma_data_direction dir)
244 {
245         struct nvme_rdma_qe *ring;
246         int i;
247
248         ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
249         if (!ring)
250                 return NULL;
251
252         for (i = 0; i < ib_queue_size; i++) {
253                 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
254                         goto out_free_ring;
255         }
256
257         return ring;
258
259 out_free_ring:
260         nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
261         return NULL;
262 }
263
264 static void nvme_rdma_qp_event(struct ib_event *event, void *context)
265 {
266         pr_debug("QP event %s (%d)\n",
267                  ib_event_msg(event->event), event->event);
268
269 }
270
271 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
272 {
273         wait_for_completion_interruptible_timeout(&queue->cm_done,
274                         msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
275         return queue->cm_error;
276 }
277
278 static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
279 {
280         struct nvme_rdma_device *dev = queue->device;
281         struct ib_qp_init_attr init_attr;
282         int ret;
283
284         memset(&init_attr, 0, sizeof(init_attr));
285         init_attr.event_handler = nvme_rdma_qp_event;
286         /* +1 for drain */
287         init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
288         /* +1 for drain */
289         init_attr.cap.max_recv_wr = queue->queue_size + 1;
290         init_attr.cap.max_recv_sge = 1;
291         init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
292         init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
293         init_attr.qp_type = IB_QPT_RC;
294         init_attr.send_cq = queue->ib_cq;
295         init_attr.recv_cq = queue->ib_cq;
296
297         ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
298
299         queue->qp = queue->cm_id->qp;
300         return ret;
301 }
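
/*
 * Sizing sketch for the queue pair above: the only caller passes
 * factor = 3 (send_wr_factor in nvme_rdma_create_queue_ib(), one
 * REG_MR + SEND + LOCAL_INV work request per command), so a queue of
 * depth 128 gets 3 * 128 + 1 = 385 send WRs and 128 + 1 recv WRs, the
 * +1 in each case covering the drain WR posted by ib_drain_qp().
 */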
302
303 static int nvme_rdma_reinit_request(void *data, struct request *rq)
304 {
305         struct nvme_rdma_ctrl *ctrl = data;
306         struct nvme_rdma_device *dev = ctrl->device;
307         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
308         int ret = 0;
309
310         if (!req->mr->need_inval)
311                 goto out;
312
313         ib_dereg_mr(req->mr);
314
315         req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
316                         ctrl->max_fr_pages);
317         if (IS_ERR(req->mr)) {
318                 ret = PTR_ERR(req->mr);
319                 req->mr = NULL;
320                 goto out;
321         }
322
323         req->mr->need_inval = false;
324
325 out:
326         return ret;
327 }
328
329 static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
330                 struct request *rq, unsigned int queue_idx)
331 {
332         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
333         struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
334         struct nvme_rdma_device *dev = queue->device;
335
336         if (req->mr)
337                 ib_dereg_mr(req->mr);
338
339         nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
340                         DMA_TO_DEVICE);
341 }
342
343 static void nvme_rdma_exit_request(void *data, struct request *rq,
344                                 unsigned int hctx_idx, unsigned int rq_idx)
345 {
346         return __nvme_rdma_exit_request(data, rq, hctx_idx + 1);
347 }
348
349 static void nvme_rdma_exit_admin_request(void *data, struct request *rq,
350                                 unsigned int hctx_idx, unsigned int rq_idx)
351 {
352         return __nvme_rdma_exit_request(data, rq, 0);
353 }
354
355 static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
356                 struct request *rq, unsigned int queue_idx)
357 {
358         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
359         struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
360         struct nvme_rdma_device *dev = queue->device;
361         struct ib_device *ibdev = dev->dev;
362         int ret;
363
364         BUG_ON(queue_idx >= ctrl->queue_count);
365
366         ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
367                         DMA_TO_DEVICE);
368         if (ret)
369                 return ret;
370
371         req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
372                         ctrl->max_fr_pages);
373         if (IS_ERR(req->mr)) {
374                 ret = PTR_ERR(req->mr);
375                 goto out_free_qe;
376         }
377
378         req->queue = queue;
379
380         return 0;
381
382 out_free_qe:
383         nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
384                         DMA_TO_DEVICE);
385         return ret;
386 }
387
388 static int nvme_rdma_init_request(void *data, struct request *rq,
389                                 unsigned int hctx_idx, unsigned int rq_idx,
390                                 unsigned int numa_node)
391 {
392         return __nvme_rdma_init_request(data, rq, hctx_idx + 1);
393 }
394
395 static int nvme_rdma_init_admin_request(void *data, struct request *rq,
396                                 unsigned int hctx_idx, unsigned int rq_idx,
397                                 unsigned int numa_node)
398 {
399         return __nvme_rdma_init_request(data, rq, 0);
400 }
401
402 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
403                 unsigned int hctx_idx)
404 {
405         struct nvme_rdma_ctrl *ctrl = data;
406         struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
407
408         BUG_ON(hctx_idx >= ctrl->queue_count);
409
410         hctx->driver_data = queue;
411         return 0;
412 }
413
414 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
415                 unsigned int hctx_idx)
416 {
417         struct nvme_rdma_ctrl *ctrl = data;
418         struct nvme_rdma_queue *queue = &ctrl->queues[0];
419
420         BUG_ON(hctx_idx != 0);
421
422         hctx->driver_data = queue;
423         return 0;
424 }
425
426 static void nvme_rdma_free_dev(struct kref *ref)
427 {
428         struct nvme_rdma_device *ndev =
429                 container_of(ref, struct nvme_rdma_device, ref);
430
431         mutex_lock(&device_list_mutex);
432         list_del(&ndev->entry);
433         mutex_unlock(&device_list_mutex);
434
435         ib_dealloc_pd(ndev->pd);
436         kfree(ndev);
437 }
438
439 static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
440 {
441         kref_put(&dev->ref, nvme_rdma_free_dev);
442 }
443
444 static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
445 {
446         return kref_get_unless_zero(&dev->ref);
447 }
448
449 static struct nvme_rdma_device *
450 nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
451 {
452         struct nvme_rdma_device *ndev;
453
454         mutex_lock(&device_list_mutex);
455         list_for_each_entry(ndev, &device_list, entry) {
456                 if (ndev->dev->node_guid == cm_id->device->node_guid &&
457                     nvme_rdma_dev_get(ndev))
458                         goto out_unlock;
459         }
460
461         ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
462         if (!ndev)
463                 goto out_err;
464
465         ndev->dev = cm_id->device;
466         kref_init(&ndev->ref);
467
468         ndev->pd = ib_alloc_pd(ndev->dev,
469                 register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
470         if (IS_ERR(ndev->pd))
471                 goto out_free_dev;
472
473         if (!(ndev->dev->attrs.device_cap_flags &
474               IB_DEVICE_MEM_MGT_EXTENSIONS)) {
475                 dev_err(&ndev->dev->dev,
476                         "Memory registrations not supported.\n");
477                 goto out_free_pd;
478         }
479
480         list_add(&ndev->entry, &device_list);
481 out_unlock:
482         mutex_unlock(&device_list_mutex);
483         return ndev;
484
485 out_free_pd:
486         ib_dealloc_pd(ndev->pd);
487 out_free_dev:
488         kfree(ndev);
489 out_err:
490         mutex_unlock(&device_list_mutex);
491         return NULL;
492 }
493
494 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
495 {
496         struct nvme_rdma_device *dev;
497         struct ib_device *ibdev;
498
499         if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
500                 return;
501
502         dev = queue->device;
503         ibdev = dev->dev;
504         rdma_destroy_qp(queue->cm_id);
505         ib_free_cq(queue->ib_cq);
506
507         nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
508                         sizeof(struct nvme_completion), DMA_FROM_DEVICE);
509
510         nvme_rdma_dev_put(dev);
511 }
512
513 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
514                 struct nvme_rdma_device *dev)
515 {
516         struct ib_device *ibdev = dev->dev;
517         const int send_wr_factor = 3;                   /* MR, SEND, INV */
518         const int cq_factor = send_wr_factor + 1;       /* + RECV */
519         int comp_vector, idx = nvme_rdma_queue_idx(queue);
520
521         int ret;
522
523         queue->device = dev;
524
525         /*
526          * The admin queue is barely used once the controller is live, so don't
527          * bother to spread it out.
528          */
529         if (idx == 0)
530                 comp_vector = 0;
531         else
532                 comp_vector = idx % ibdev->num_comp_vectors;
533
534
535         /* +1 for drain */
536         queue->ib_cq = ib_alloc_cq(dev->dev, queue,
537                                 cq_factor * queue->queue_size + 1, comp_vector,
538                                 IB_POLL_SOFTIRQ);
539         if (IS_ERR(queue->ib_cq)) {
540                 ret = PTR_ERR(queue->ib_cq);
541                 goto out;
542         }
543
544         ret = nvme_rdma_create_qp(queue, send_wr_factor);
545         if (ret)
546                 goto out_destroy_ib_cq;
547
548         queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
549                         sizeof(struct nvme_completion), DMA_FROM_DEVICE);
550         if (!queue->rsp_ring) {
551                 ret = -ENOMEM;
552                 goto out_destroy_qp;
553         }
554         set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
555
556         return 0;
557
558 out_destroy_qp:
559         ib_destroy_qp(queue->qp);
560 out_destroy_ib_cq:
561         ib_free_cq(queue->ib_cq);
562 out:
563         return ret;
564 }
565
566 static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
567                 int idx, size_t queue_size)
568 {
569         struct nvme_rdma_queue *queue;
570         int ret;
571
572         queue = &ctrl->queues[idx];
573         queue->ctrl = ctrl;
574         init_completion(&queue->cm_done);
575
576         if (idx > 0)
577                 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
578         else
579                 queue->cmnd_capsule_len = sizeof(struct nvme_command);
580
581         queue->queue_size = queue_size;
582
583         queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
584                         RDMA_PS_TCP, IB_QPT_RC);
585         if (IS_ERR(queue->cm_id)) {
586                 dev_info(ctrl->ctrl.device,
587                         "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
588                 return PTR_ERR(queue->cm_id);
589         }
590
591         queue->cm_error = -ETIMEDOUT;
592         ret = rdma_resolve_addr(queue->cm_id, NULL, &ctrl->addr,
593                         NVME_RDMA_CONNECT_TIMEOUT_MS);
594         if (ret) {
595                 dev_info(ctrl->ctrl.device,
596                         "rdma_resolve_addr failed (%d).\n", ret);
597                 goto out_destroy_cm_id;
598         }
599
600         ret = nvme_rdma_wait_for_cm(queue);
601         if (ret) {
602                 dev_info(ctrl->ctrl.device,
603                         "rdma_resolve_addr wait failed (%d).\n", ret);
604                 goto out_destroy_cm_id;
605         }
606
607         clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
608         set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
609
610         return 0;
611
612 out_destroy_cm_id:
613         nvme_rdma_destroy_queue_ib(queue);
614         rdma_destroy_id(queue->cm_id);
615         return ret;
616 }
617
618 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
619 {
620         rdma_disconnect(queue->cm_id);
621         ib_drain_qp(queue->qp);
622 }
623
624 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
625 {
626         nvme_rdma_destroy_queue_ib(queue);
627         rdma_destroy_id(queue->cm_id);
628 }
629
630 static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
631 {
632         if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
633                 return;
634         nvme_rdma_stop_queue(queue);
635         nvme_rdma_free_queue(queue);
636 }
637
638 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
639 {
640         int i;
641
642         for (i = 1; i < ctrl->queue_count; i++)
643                 nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
644 }
645
646 static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
647 {
648         int i, ret = 0;
649
650         for (i = 1; i < ctrl->queue_count; i++) {
651                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
652                 if (ret) {
653                         dev_info(ctrl->ctrl.device,
654                                 "failed to connect i/o queue: %d\n", ret);
655                         goto out_free_queues;
656                 }
657                 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
658         }
659
660         return 0;
661
662 out_free_queues:
663         nvme_rdma_free_io_queues(ctrl);
664         return ret;
665 }
666
667 static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
668 {
669         int i, ret;
670
671         for (i = 1; i < ctrl->queue_count; i++) {
672                 ret = nvme_rdma_init_queue(ctrl, i,
673                                            ctrl->ctrl.opts->queue_size);
674                 if (ret) {
675                         dev_info(ctrl->ctrl.device,
676                                 "failed to initialize i/o queue: %d\n", ret);
677                         goto out_free_queues;
678                 }
679         }
680
681         return 0;
682
683 out_free_queues:
684         for (i--; i >= 1; i--)
685                 nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
686
687         return ret;
688 }
689
690 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
691 {
692         nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
693                         sizeof(struct nvme_command), DMA_TO_DEVICE);
694         nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
695         blk_cleanup_queue(ctrl->ctrl.admin_q);
696         blk_mq_free_tag_set(&ctrl->admin_tag_set);
697         nvme_rdma_dev_put(ctrl->device);
698 }
699
700 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
701 {
702         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
703
704         if (list_empty(&ctrl->list))
705                 goto free_ctrl;
706
707         mutex_lock(&nvme_rdma_ctrl_mutex);
708         list_del(&ctrl->list);
709         mutex_unlock(&nvme_rdma_ctrl_mutex);
710
711         kfree(ctrl->queues);
712         nvmf_free_options(nctrl->opts);
713 free_ctrl:
714         kfree(ctrl);
715 }
716
717 static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
718 {
719         struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
720                         struct nvme_rdma_ctrl, reconnect_work);
721         bool changed;
722         int ret;
723
724         if (ctrl->queue_count > 1) {
725                 nvme_rdma_free_io_queues(ctrl);
726
727                 ret = blk_mq_reinit_tagset(&ctrl->tag_set);
728                 if (ret)
729                         goto requeue;
730         }
731
732         nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
733
734         ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
735         if (ret)
736                 goto requeue;
737
738         ret = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
739         if (ret)
740                 goto requeue;
741
742         blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
743
744         ret = nvmf_connect_admin_queue(&ctrl->ctrl);
745         if (ret)
746                 goto stop_admin_q;
747
748         set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
749
750         ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
751         if (ret)
752                 goto stop_admin_q;
753
754         nvme_start_keep_alive(&ctrl->ctrl);
755
756         if (ctrl->queue_count > 1) {
757                 ret = nvme_rdma_init_io_queues(ctrl);
758                 if (ret)
759                         goto stop_admin_q;
760
761                 ret = nvme_rdma_connect_io_queues(ctrl);
762                 if (ret)
763                         goto stop_admin_q;
764         }
765
766         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
767         WARN_ON_ONCE(!changed);
768
769         if (ctrl->queue_count > 1) {
770                 nvme_start_queues(&ctrl->ctrl);
771                 nvme_queue_scan(&ctrl->ctrl);
772                 nvme_queue_async_events(&ctrl->ctrl);
773         }
774
775         dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
776
777         return;
778
779 stop_admin_q:
780         blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
781 requeue:
782         /* Make sure we are not resetting/deleting */
783         if (ctrl->ctrl.state == NVME_CTRL_RECONNECTING) {
784                 dev_info(ctrl->ctrl.device,
785                         "Failed reconnect attempt, requeueing...\n");
786                 queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
787                                         ctrl->reconnect_delay * HZ);
788         }
789 }
790
791 static void nvme_rdma_error_recovery_work(struct work_struct *work)
792 {
793         struct nvme_rdma_ctrl *ctrl = container_of(work,
794                         struct nvme_rdma_ctrl, err_work);
795         int i;
796
797         nvme_stop_keep_alive(&ctrl->ctrl);
798
799         for (i = 0; i < ctrl->queue_count; i++) {
800                 clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
801                 clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
802         }
803
804         if (ctrl->queue_count > 1)
805                 nvme_stop_queues(&ctrl->ctrl);
806         blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
807
808         /* We must fast-fail or requeue all of our inflight requests */
809         if (ctrl->queue_count > 1)
810                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
811                                         nvme_cancel_request, &ctrl->ctrl);
812         blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
813                                 nvme_cancel_request, &ctrl->ctrl);
814
815         dev_info(ctrl->ctrl.device, "reconnecting in %d seconds\n",
816                 ctrl->reconnect_delay);
817
818         queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
819                                 ctrl->reconnect_delay * HZ);
820 }
821
822 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
823 {
824         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
825                 return;
826
827         queue_work(nvme_rdma_wq, &ctrl->err_work);
828 }
829
830 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
831                 const char *op)
832 {
833         struct nvme_rdma_queue *queue = cq->cq_context;
834         struct nvme_rdma_ctrl *ctrl = queue->ctrl;
835
836         if (ctrl->ctrl.state == NVME_CTRL_LIVE)
837                 dev_info(ctrl->ctrl.device,
838                              "%s for CQE 0x%p failed with status %s (%d)\n",
839                              op, wc->wr_cqe,
840                              ib_wc_status_msg(wc->status), wc->status);
841         nvme_rdma_error_recovery(ctrl);
842 }
843
844 static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
845 {
846         if (unlikely(wc->status != IB_WC_SUCCESS))
847                 nvme_rdma_wr_error(cq, wc, "MEMREG");
848 }
849
850 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
851 {
852         if (unlikely(wc->status != IB_WC_SUCCESS))
853                 nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
854 }
855
856 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
857                 struct nvme_rdma_request *req)
858 {
859         struct ib_send_wr *bad_wr;
860         struct ib_send_wr wr = {
861                 .opcode             = IB_WR_LOCAL_INV,
862                 .next               = NULL,
863                 .num_sge            = 0,
864                 .send_flags         = 0,
865                 .ex.invalidate_rkey = req->mr->rkey,
866         };
867
868         req->reg_cqe.done = nvme_rdma_inv_rkey_done;
869         wr.wr_cqe = &req->reg_cqe;
870
871         return ib_post_send(queue->qp, &wr, &bad_wr);
872 }
873
874 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
875                 struct request *rq)
876 {
877         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
878         struct nvme_rdma_ctrl *ctrl = queue->ctrl;
879         struct nvme_rdma_device *dev = queue->device;
880         struct ib_device *ibdev = dev->dev;
881         int res;
882
883         if (!blk_rq_bytes(rq))
884                 return;
885
886         if (req->mr->need_inval) {
887                 res = nvme_rdma_inv_rkey(queue, req);
888                 if (res < 0) {
889                         dev_err(ctrl->ctrl.device,
890                                 "Queueing INV WR for rkey %#x failed (%d)\n",
891                                 req->mr->rkey, res);
892                         nvme_rdma_error_recovery(queue->ctrl);
893                 }
894         }
895
896         ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
897                         req->nents, rq_data_dir(rq) ==
898                                     WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
899
900         nvme_cleanup_cmd(rq);
901         sg_free_table_chained(&req->sg_table, true);
902 }
903
904 static int nvme_rdma_set_sg_null(struct nvme_command *c)
905 {
906         struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
907
908         sg->addr = 0;
909         put_unaligned_le24(0, sg->length);
910         put_unaligned_le32(0, sg->key);
911         sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
912         return 0;
913 }
914
915 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
916                 struct nvme_rdma_request *req, struct nvme_command *c)
917 {
918         struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
919
920         req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
921         req->sge[1].length = sg_dma_len(req->sg_table.sgl);
922         req->sge[1].lkey = queue->device->pd->local_dma_lkey;
923
924         sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
925         sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
926         sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
927
928         req->inline_data = true;
929         req->num_sge++;
930         return 0;
931 }
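
/*
 * Sketch of the resulting wire transfer: the command capsule SEND now
 * carries two SGEs -- sge[0], the 64-byte SQE filled in by
 * nvme_rdma_post_send(), and sge[1], the payload mapped above -- so
 * with icdoff = 0 the target finds the write data immediately after
 * the SQE inside the capsule and needs no RDMA READ to fetch it.
 */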
932
933 static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
934                 struct nvme_rdma_request *req, struct nvme_command *c)
935 {
936         struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
937
938         sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
939         put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
940         put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
941         sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
942         return 0;
943 }
944
945 static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
946                 struct nvme_rdma_request *req, struct nvme_command *c,
947                 int count)
948 {
949         struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
950         int nr;
951
952         nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
953         if (nr < count) {
954                 if (nr < 0)
955                         return nr;
956                 return -EINVAL;
957         }
958
959         ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
960
961         req->reg_cqe.done = nvme_rdma_memreg_done;
962         memset(&req->reg_wr, 0, sizeof(req->reg_wr));
963         req->reg_wr.wr.opcode = IB_WR_REG_MR;
964         req->reg_wr.wr.wr_cqe = &req->reg_cqe;
965         req->reg_wr.wr.num_sge = 0;
966         req->reg_wr.mr = req->mr;
967         req->reg_wr.key = req->mr->rkey;
968         req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
969                              IB_ACCESS_REMOTE_READ |
970                              IB_ACCESS_REMOTE_WRITE;
971
972         req->mr->need_inval = true;
973
974         sg->addr = cpu_to_le64(req->mr->iova);
975         put_unaligned_le24(req->mr->length, sg->length);
976         put_unaligned_le32(req->mr->rkey, sg->key);
977         sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
978                         NVME_SGL_FMT_INVALIDATE;
979
980         return 0;
981 }
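
/*
 * Minimal sketch of the rkey lifecycle assumed by the code above:
 * ib_inc_rkey() bumps only the low 8 "key" bits, e.g. rkey 0x00001234
 * becomes 0x00001235, so a late access with a stale key faults at the
 * HCA once the MR is reused.  The fresh rkey travels to the target in
 * sg->key and may come back in an IB_WC_WITH_INVALIDATE completion,
 * in which case the LOCAL_INV in nvme_rdma_unmap_data() is skipped.
 */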
982
983 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
984                 struct request *rq, struct nvme_command *c)
985 {
986         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
987         struct nvme_rdma_device *dev = queue->device;
988         struct ib_device *ibdev = dev->dev;
989         int count, ret;
990
991         req->num_sge = 1;
992         req->inline_data = false;
993         req->mr->need_inval = false;
994
995         c->common.flags |= NVME_CMD_SGL_METABUF;
996
997         if (!blk_rq_bytes(rq))
998                 return nvme_rdma_set_sg_null(c);
999
1000         req->sg_table.sgl = req->first_sgl;
1001         ret = sg_alloc_table_chained(&req->sg_table,
1002                         blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
1003         if (ret)
1004                 return -ENOMEM;
1005
1006         req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
1007
1008         count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
1009                     rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1010         if (unlikely(count <= 0)) {
1011                 sg_free_table_chained(&req->sg_table, true);
1012                 return -EIO;
1013         }
1014
1015         if (count == 1) {
1016                 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
1017                     blk_rq_payload_bytes(rq) <=
1018                                 nvme_rdma_inline_data_size(queue))
1019                         return nvme_rdma_map_sg_inline(queue, req, c);
1020
1021                 if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
1022                         return nvme_rdma_map_sg_single(queue, req, c);
1023         }
1024
1025         return nvme_rdma_map_sg_fr(queue, req, c, count);
1026 }
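
/*
 * Decision summary for nvme_rdma_map_data() above: a single DMA
 * segment that is a write on an I/O queue and fits within the inline
 * capsule space goes inline; a single segment with an unsafe global
 * rkey available goes as a plain keyed SGL; everything else goes
 * through fast-registration memory registration (nvme_rdma_map_sg_fr()).
 */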
1027
1028 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1029 {
1030         if (unlikely(wc->status != IB_WC_SUCCESS))
1031                 nvme_rdma_wr_error(cq, wc, "SEND");
1032 }
1033
1034 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1035                 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
1036                 struct ib_send_wr *first, bool flush)
1037 {
1038         struct ib_send_wr wr, *bad_wr;
1039         int ret;
1040
1041         sge->addr   = qe->dma;
1042         sge->length = sizeof(struct nvme_command);
1043         sge->lkey   = queue->device->pd->local_dma_lkey;
1044
1045         qe->cqe.done = nvme_rdma_send_done;
1046
1047         wr.next       = NULL;
1048         wr.wr_cqe     = &qe->cqe;
1049         wr.sg_list    = sge;
1050         wr.num_sge    = num_sge;
1051         wr.opcode     = IB_WR_SEND;
1052         wr.send_flags = 0;
1053
1054         /*
1055          * Unsignalled send completions are another giant disaster in the
1056          * IB Verbs spec:  if we don't regularly post signalled sends
1057          * the send queue will fill up and only a QP reset will rescue us.
1058          * It would have been way too obvious to handle this in hardware
1059          * or at least in the RDMA stack.
1060          *
1061          * This messy and racy code snippet is copied and pasted from the
1062          * iSER initiator, and the magic '32' comes from there as well.
1063          *
1064          * Always signal the flushes. The magic request used for the flush
1065          * sequencer is not allocated in our driver's tagset and it is
1066          * instead freed by blk_cleanup_queue(). So we need to always mark
1067          * it as signaled to ensure that the "wr_cqe", which is embedded
1068          * in the request's payload, is not freed when __ib_process_cq()
1069          * calls wr_cqe->done().
1070          */
1071         if ((++queue->sig_count % 32) == 0 || flush)
1072                 wr.send_flags |= IB_SEND_SIGNALED;
1073
1074         if (first)
1075                 first->next = &wr;
1076         else
1077                 first = &wr;
1078
1079         ret = ib_post_send(queue->qp, first, &bad_wr);
1080         if (ret) {
1081                 dev_err(queue->ctrl->ctrl.device,
1082                              "%s failed with error code %d\n", __func__, ret);
1083         }
1084         return ret;
1085 }
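
/*
 * Example of the signalling cadence above, assuming a fresh queue:
 * sends 1..31 are posted unsignalled, send 32 sets IB_SEND_SIGNALED
 * because ++sig_count % 32 == 0, and on an RC QP its completion
 * implicitly retires the 31 unsignalled WRs posted before it;
 * sig_count is a u8 and simply wraps.
 */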
1086
1087 static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
1088                 struct nvme_rdma_qe *qe)
1089 {
1090         struct ib_recv_wr wr, *bad_wr;
1091         struct ib_sge list;
1092         int ret;
1093
1094         list.addr   = qe->dma;
1095         list.length = sizeof(struct nvme_completion);
1096         list.lkey   = queue->device->pd->local_dma_lkey;
1097
1098         qe->cqe.done = nvme_rdma_recv_done;
1099
1100         wr.next     = NULL;
1101         wr.wr_cqe   = &qe->cqe;
1102         wr.sg_list  = &list;
1103         wr.num_sge  = 1;
1104
1105         ret = ib_post_recv(queue->qp, &wr, &bad_wr);
1106         if (ret) {
1107                 dev_err(queue->ctrl->ctrl.device,
1108                         "%s failed with error code %d\n", __func__, ret);
1109         }
1110         return ret;
1111 }
1112
1113 static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
1114 {
1115         u32 queue_idx = nvme_rdma_queue_idx(queue);
1116
1117         if (queue_idx == 0)
1118                 return queue->ctrl->admin_tag_set.tags[queue_idx];
1119         return queue->ctrl->tag_set.tags[queue_idx - 1];
1120 }
1121
1122 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
1123 {
1124         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
1125         struct nvme_rdma_queue *queue = &ctrl->queues[0];
1126         struct ib_device *dev = queue->device->dev;
1127         struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
1128         struct nvme_command *cmd = sqe->data;
1129         struct ib_sge sge;
1130         int ret;
1131
1132         if (WARN_ON_ONCE(aer_idx != 0))
1133                 return;
1134
1135         ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
1136
1137         memset(cmd, 0, sizeof(*cmd));
1138         cmd->common.opcode = nvme_admin_async_event;
1139         cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
1140         cmd->common.flags |= NVME_CMD_SGL_METABUF;
1141         nvme_rdma_set_sg_null(cmd);
1142
1143         ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
1144                         DMA_TO_DEVICE);
1145
1146         ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
1147         WARN_ON_ONCE(ret);
1148 }
1149
1150 static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
1151                 struct nvme_completion *cqe, struct ib_wc *wc, int tag)
1152 {
1153         struct request *rq;
1154         struct nvme_rdma_request *req;
1155         int ret = 0;
1156
1157         rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
1158         if (!rq) {
1159                 dev_err(queue->ctrl->ctrl.device,
1160                         "tag 0x%x on QP %#x not found\n",
1161                         cqe->command_id, queue->qp->qp_num);
1162                 nvme_rdma_error_recovery(queue->ctrl);
1163                 return ret;
1164         }
1165         req = blk_mq_rq_to_pdu(rq);
1166
1167         if (rq->tag == tag)
1168                 ret = 1;
1169
1170         if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
1171             wc->ex.invalidate_rkey == req->mr->rkey)
1172                 req->mr->need_inval = false;
1173
1174         req->req.result = cqe->result;
1175         blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
1176         return ret;
1177 }
1178
1179 static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
1180 {
1181         struct nvme_rdma_qe *qe =
1182                 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
1183         struct nvme_rdma_queue *queue = cq->cq_context;
1184         struct ib_device *ibdev = queue->device->dev;
1185         struct nvme_completion *cqe = qe->data;
1186         const size_t len = sizeof(struct nvme_completion);
1187         int ret = 0;
1188
1189         if (unlikely(wc->status != IB_WC_SUCCESS)) {
1190                 nvme_rdma_wr_error(cq, wc, "RECV");
1191                 return 0;
1192         }
1193
1194         ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1195         /*
1196          * AEN requests are special as they don't time out and can
1197          * survive any kind of queue freeze and often don't respond to
1198          * aborts.  We don't even bother to allocate a struct request
1199          * for them but rather special case them here.
1200          */
1201         if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
1202                         cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
1203                 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
1204                                 &cqe->result);
1205         else
1206                 ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
1207         ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1208
1209         nvme_rdma_post_recv(queue, qe);
1210         return ret;
1211 }
1212
1213 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1214 {
1215         __nvme_rdma_recv_done(cq, wc, -1);
1216 }
1217
1218 static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
1219 {
1220         int ret, i;
1221
1222         for (i = 0; i < queue->queue_size; i++) {
1223                 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
1224                 if (ret)
1225                         goto out_destroy_queue_ib;
1226         }
1227
1228         return 0;
1229
1230 out_destroy_queue_ib:
1231         nvme_rdma_destroy_queue_ib(queue);
1232         return ret;
1233 }
1234
1235 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
1236                 struct rdma_cm_event *ev)
1237 {
1238         struct rdma_cm_id *cm_id = queue->cm_id;
1239         int status = ev->status;
1240         const char *rej_msg;
1241         const struct nvme_rdma_cm_rej *rej_data;
1242         u8 rej_data_len;
1243
1244         rej_msg = rdma_reject_msg(cm_id, status);
1245         rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
1246
1247         if (rej_data && rej_data_len >= sizeof(u16)) {
1248                 u16 sts = le16_to_cpu(rej_data->sts);
1249
1250                 dev_err(queue->ctrl->ctrl.device,
1251                       "Connect rejected: status %d (%s) nvme status %d (%s).\n",
1252                       status, rej_msg, sts, nvme_rdma_cm_msg(sts));
1253         } else {
1254                 dev_err(queue->ctrl->ctrl.device,
1255                         "Connect rejected: status %d (%s).\n", status, rej_msg);
1256         }
1257
1258         return -ECONNRESET;
1259 }
1260
1261 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
1262 {
1263         struct nvme_rdma_device *dev;
1264         int ret;
1265
1266         dev = nvme_rdma_find_get_device(queue->cm_id);
1267         if (!dev) {
1268                 dev_err(queue->cm_id->device->dma_device,
1269                         "no client data found!\n");
1270                 return -ECONNREFUSED;
1271         }
1272
1273         ret = nvme_rdma_create_queue_ib(queue, dev);
1274         if (ret) {
1275                 nvme_rdma_dev_put(dev);
1276                 goto out;
1277         }
1278
1279         ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
1280         if (ret) {
1281                 dev_err(queue->ctrl->ctrl.device,
1282                         "rdma_resolve_route failed (%d).\n",
1283                         queue->cm_error);
1284                 goto out_destroy_queue;
1285         }
1286
1287         return 0;
1288
1289 out_destroy_queue:
1290         nvme_rdma_destroy_queue_ib(queue);
1291 out:
1292         return ret;
1293 }
1294
1295 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1296 {
1297         struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1298         struct rdma_conn_param param = { };
1299         struct nvme_rdma_cm_req priv = { };
1300         int ret;
1301
1302         param.qp_num = queue->qp->qp_num;
1303         param.flow_control = 1;
1304
1305         param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
1306         /* maximum retry count */
1307         param.retry_count = 7;
1308         param.rnr_retry_count = 7;
1309         param.private_data = &priv;
1310         param.private_data_len = sizeof(priv);
1311
1312         priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1313         priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
1314         /*
1315          * set the admin queue depth to the minimum size
1316          * specified by the Fabrics standard.
1317          */
1318         if (priv.qid == 0) {
1319                 priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
1320                 priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
1321         } else {
1322                 /*
1323                  * The current interpretation of the fabrics spec is
1324                  * that, at a minimum, hrqsize is sqsize+1, i.e. the
1325                  * 1's-based representation of sqsize.
1326                  */
1327                 priv.hrqsize = cpu_to_le16(queue->queue_size);
1328                 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
1329         }
1330
1331         ret = rdma_connect(queue->cm_id, &param);
1332         if (ret) {
1333                 dev_err(ctrl->ctrl.device,
1334                         "rdma_connect failed (%d).\n", ret);
1335                 goto out_destroy_queue_ib;
1336         }
1337
1338         return 0;
1339
1340 out_destroy_queue_ib:
1341         nvme_rdma_destroy_queue_ib(queue);
1342         return ret;
1343 }
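
/*
 * Worked example for the private data above, under the assumption that
 * ctrl->ctrl.sqsize holds the 0's-based queue depth: an I/O queue
 * created with queue_size 128 advertises hrqsize = 128 and
 * hsqsize = 127, while the admin queue always advertises
 * hrqsize = NVMF_AQ_DEPTH and hsqsize = NVMF_AQ_DEPTH - 1 regardless
 * of its actual size.
 */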
1344
1345 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
1346                 struct rdma_cm_event *ev)
1347 {
1348         struct nvme_rdma_queue *queue = cm_id->context;
1349         int cm_error = 0;
1350
1351         dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
1352                 rdma_event_msg(ev->event), ev->event,
1353                 ev->status, cm_id);
1354
1355         switch (ev->event) {
1356         case RDMA_CM_EVENT_ADDR_RESOLVED:
1357                 cm_error = nvme_rdma_addr_resolved(queue);
1358                 break;
1359         case RDMA_CM_EVENT_ROUTE_RESOLVED:
1360                 cm_error = nvme_rdma_route_resolved(queue);
1361                 break;
1362         case RDMA_CM_EVENT_ESTABLISHED:
1363                 queue->cm_error = nvme_rdma_conn_established(queue);
1364                 /* complete cm_done regardless of success/failure */
1365                 complete(&queue->cm_done);
1366                 return 0;
1367         case RDMA_CM_EVENT_REJECTED:
1368                 cm_error = nvme_rdma_conn_rejected(queue, ev);
1369                 break;
1370         case RDMA_CM_EVENT_ADDR_ERROR:
1371         case RDMA_CM_EVENT_ROUTE_ERROR:
1372         case RDMA_CM_EVENT_CONNECT_ERROR:
1373         case RDMA_CM_EVENT_UNREACHABLE:
1374                 dev_dbg(queue->ctrl->ctrl.device,
1375                         "CM error event %d\n", ev->event);
1376                 cm_error = -ECONNRESET;
1377                 break;
1378         case RDMA_CM_EVENT_DISCONNECTED:
1379         case RDMA_CM_EVENT_ADDR_CHANGE:
1380         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1381                 dev_dbg(queue->ctrl->ctrl.device,
1382                         "disconnect received - connection closed\n");
1383                 nvme_rdma_error_recovery(queue->ctrl);
1384                 break;
1385         case RDMA_CM_EVENT_DEVICE_REMOVAL:
1386                 /* device removal is handled via the ib_client API */
1387                 break;
1388         default:
1389                 dev_err(queue->ctrl->ctrl.device,
1390                         "Unexpected RDMA CM event (%d)\n", ev->event);
1391                 nvme_rdma_error_recovery(queue->ctrl);
1392                 break;
1393         }
1394
1395         if (cm_error) {
1396                 queue->cm_error = cm_error;
1397                 complete(&queue->cm_done);
1398         }
1399
1400         return 0;
1401 }
1402
1403 static enum blk_eh_timer_return
1404 nvme_rdma_timeout(struct request *rq, bool reserved)
1405 {
1406         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1407
1408         /* queue error recovery */
1409         nvme_rdma_error_recovery(req->queue->ctrl);
1410
1411         /* fail with DNR on cmd timeout */
1412         rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
1413
1414         return BLK_EH_HANDLED;
1415 }
1416
1417 /*
1418  * We cannot accept any other command until the Connect command has completed.
1419  */
1420 static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1421                 struct request *rq)
1422 {
1423         if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
1424                 struct nvme_command *cmd = nvme_req(rq)->cmd;
1425
1426                 if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
1427                     cmd->common.opcode != nvme_fabrics_command ||
1428                     cmd->fabrics.fctype != nvme_fabrics_type_connect)
1429                         return false;
1430         }
1431
1432         return true;
1433 }
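
/*
 * Put differently: until the fabrics Connect completes and
 * NVME_RDMA_Q_LIVE is set, the only command this helper lets through
 * is the Connect capsule itself (opcode nvme_fabrics_command, fctype
 * nvme_fabrics_type_connect); nvme_rdma_queue_rq() bounces everything
 * else back to blk-mq as BLK_MQ_RQ_QUEUE_BUSY.
 */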
1434
1435 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1436                 const struct blk_mq_queue_data *bd)
1437 {
1438         struct nvme_ns *ns = hctx->queue->queuedata;
1439         struct nvme_rdma_queue *queue = hctx->driver_data;
1440         struct request *rq = bd->rq;
1441         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1442         struct nvme_rdma_qe *sqe = &req->sqe;
1443         struct nvme_command *c = sqe->data;
1444         bool flush = false;
1445         struct ib_device *dev;
1446         int ret;
1447
1448         WARN_ON_ONCE(rq->tag < 0);
1449
1450         if (!nvme_rdma_queue_is_ready(queue, rq))
1451                 return BLK_MQ_RQ_QUEUE_BUSY;
1452
1453         dev = queue->device->dev;
1454         ib_dma_sync_single_for_cpu(dev, sqe->dma,
1455                         sizeof(struct nvme_command), DMA_TO_DEVICE);
1456
1457         ret = nvme_setup_cmd(ns, rq, c);
1458         if (ret != BLK_MQ_RQ_QUEUE_OK)
1459                 return ret;
1460
1461         blk_mq_start_request(rq);
1462
1463         ret = nvme_rdma_map_data(queue, rq, c);
1464         if (ret < 0) {
1465                 dev_err(queue->ctrl->ctrl.device,
1466                              "Failed to map data (%d)\n", ret);
1467                 nvme_cleanup_cmd(rq);
1468                 goto err;
1469         }
1470
1471         ib_dma_sync_single_for_device(dev, sqe->dma,
1472                         sizeof(struct nvme_command), DMA_TO_DEVICE);
1473
1474         if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
1475                 flush = true;
1476         ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
1477                         req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
1478         if (ret) {
1479                 nvme_rdma_unmap_data(queue, rq);
1480                 goto err;
1481         }
1482
1483         return BLK_MQ_RQ_QUEUE_OK;
1484 err:
1485         return (ret == -ENOMEM || ret == -EAGAIN) ?
1486                 BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
1487 }
1488
1489 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
1490 {
1491         struct nvme_rdma_queue *queue = hctx->driver_data;
1492         struct ib_cq *cq = queue->ib_cq;
1493         struct ib_wc wc;
1494         int found = 0;
1495
1496         ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1497         while (ib_poll_cq(cq, 1, &wc) > 0) {
1498                 struct ib_cqe *cqe = wc.wr_cqe;
1499
1500                 if (cqe) {
1501                         if (cqe->done == nvme_rdma_recv_done)
1502                                 found |= __nvme_rdma_recv_done(cq, &wc, tag);
1503                         else
1504                                 cqe->done(cq, &wc);
1505                 }
1506         }
1507
1508         return found;
1509 }
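
/*
 * Sketch of the polling contract assumed here: blk-mq calls ->poll
 * with the tag it is waiting on, __nvme_rdma_recv_done() returns 1
 * only when the completion it just handled belongs to that tag, and
 * the accumulated "found" value tells blk-mq whether its request
 * completed during this poll pass.
 */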
1510
1511 static void nvme_rdma_complete_rq(struct request *rq)
1512 {
1513         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1514         struct nvme_rdma_queue *queue = req->queue;
1515         int error = 0;
1516
1517         nvme_rdma_unmap_data(queue, rq);
1518
1519         if (unlikely(rq->errors)) {
1520                 if (nvme_req_needs_retry(rq, rq->errors)) {
1521                         nvme_requeue_req(rq);
1522                         return;
1523                 }
1524
1525                 if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
1526                         error = rq->errors;
1527                 else
1528                         error = nvme_error_status(rq->errors);
1529         }
1530
1531         blk_mq_end_request(rq, error);
1532 }
1533
1534 static struct blk_mq_ops nvme_rdma_mq_ops = {
1535         .queue_rq       = nvme_rdma_queue_rq,
1536         .complete       = nvme_rdma_complete_rq,
1537         .init_request   = nvme_rdma_init_request,
1538         .exit_request   = nvme_rdma_exit_request,
1539         .reinit_request = nvme_rdma_reinit_request,
1540         .init_hctx      = nvme_rdma_init_hctx,
1541         .poll           = nvme_rdma_poll,
1542         .timeout        = nvme_rdma_timeout,
1543 };
1544
1545 static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
1546         .queue_rq       = nvme_rdma_queue_rq,
1547         .complete       = nvme_rdma_complete_rq,
1548         .init_request   = nvme_rdma_init_admin_request,
1549         .exit_request   = nvme_rdma_exit_admin_request,
1550         .reinit_request = nvme_rdma_reinit_request,
1551         .init_hctx      = nvme_rdma_init_admin_hctx,
1552         .timeout        = nvme_rdma_timeout,
1553 };
1554
static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
        int error;

        error = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
        if (error)
                return error;

        ctrl->device = ctrl->queues[0].device;

        /*
         * We need a reference on the device as long as the tag_set is alive,
         * as the MRs in the request structures need a valid ib_device.
         */
        error = -EINVAL;
        if (!nvme_rdma_dev_get(ctrl->device))
                goto out_free_queue;

        ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
                ctrl->device->dev->attrs.max_fast_reg_page_list_len);

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

        error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (error)
                goto out_put_dev;

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                error = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_free_tagset;
        }

        error = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);

        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
                        "prop_get NVME_REG_CAP failed\n");
                goto out_cleanup_queue;
        }

        ctrl->ctrl.sqsize =
                min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
                goto out_cleanup_queue;

        ctrl->ctrl.max_hw_sectors =
                (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);

        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
                        &ctrl->async_event_sqe, sizeof(struct nvme_command),
                        DMA_TO_DEVICE);
        if (error)
                goto out_cleanup_queue;

        nvme_start_keep_alive(&ctrl->ctrl);

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
        /* disconnect and drain the queue before freeing the tagset */
        nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_put_dev:
        nvme_rdma_dev_put(ctrl->device);
out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
}

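/*
 * Orderly teardown: stop keep-alive and cancel recovery work first,
 * then quiesce the I/O queues and fail all outstanding requests before
 * freeing them.  The NVMe shutdown sequence is only issued if the admin
 * queue actually reached the connected state.
 */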
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
{
        nvme_stop_keep_alive(&ctrl->ctrl);
        cancel_work_sync(&ctrl->err_work);
        cancel_delayed_work_sync(&ctrl->reconnect_work);

        if (ctrl->queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_free_io_queues(ctrl);
        }

        if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
                nvme_shutdown_ctrl(&ctrl->ctrl);

        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        nvme_rdma_destroy_admin_queue(ctrl);
}

static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
        nvme_uninit_ctrl(&ctrl->ctrl);
        if (shutdown)
                nvme_rdma_shutdown_ctrl(ctrl);

        if (ctrl->ctrl.tagset) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                blk_mq_free_tag_set(&ctrl->tag_set);
                nvme_rdma_dev_put(ctrl->device);
        }

        nvme_put_ctrl(&ctrl->ctrl);
}

static void nvme_rdma_del_ctrl_work(struct work_struct *work)
{
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                struct nvme_rdma_ctrl, delete_work);

        __nvme_rdma_remove_ctrl(ctrl, true);
}

static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
{
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return -EBUSY;

        if (!queue_work(nvme_rdma_wq, &ctrl->delete_work))
                return -EBUSY;

        return 0;
}

static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
        int ret = 0;

        /*
         * Keep a reference until all work is flushed since
         * __nvme_rdma_del_ctrl can free the ctrl mem
         */
        if (!kref_get_unless_zero(&ctrl->ctrl.kref))
                return -EBUSY;
        ret = __nvme_rdma_del_ctrl(ctrl);
        if (!ret)
                flush_work(&ctrl->delete_work);
        nvme_put_ctrl(&ctrl->ctrl);
        return ret;
}

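/*
 * Variant of the delete work for a controller that has already been
 * shut down (e.g. after a failed reset): skip the shutdown step and
 * just release the remaining resources.
 */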
static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
{
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                struct nvme_rdma_ctrl, delete_work);

        __nvme_rdma_remove_ctrl(ctrl, false);
}

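/*
 * Controller reset: full shutdown followed by a fresh bring-up of the
 * admin queue and, if configured, the I/O queues.  If any step fails
 * the controller cannot be recovered, so it is handed to the delete
 * work using the "remove" variant above, as everything is already
 * shut down.
 */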
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                        struct nvme_rdma_ctrl, reset_work);
        int ret;
        bool changed;

        nvme_rdma_shutdown_ctrl(ctrl);

        ret = nvme_rdma_configure_admin_queue(ctrl);
        if (ret) {
                /* ctrl is already shutdown, just remove the ctrl */
                INIT_WORK(&ctrl->delete_work, nvme_rdma_remove_ctrl_work);
                goto del_dead_ctrl;
        }

        if (ctrl->queue_count > 1) {
                ret = blk_mq_reinit_tagset(&ctrl->tag_set);
                if (ret)
                        goto del_dead_ctrl;

                ret = nvme_rdma_init_io_queues(ctrl);
                if (ret)
                        goto del_dead_ctrl;

                ret = nvme_rdma_connect_io_queues(ctrl);
                if (ret)
                        goto del_dead_ctrl;
        }

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
                nvme_queue_async_events(&ctrl->ctrl);
        }

        return;

del_dead_ctrl:
        /* Deleting this dead controller... */
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
        WARN_ON(!queue_work(nvme_rdma_wq, &ctrl->delete_work));
}

static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
                return -EBUSY;

        if (!queue_work(nvme_rdma_wq, &ctrl->reset_work))
                return -EBUSY;

        flush_work(&ctrl->reset_work);

        return 0;
}

static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .name                   = "rdma",
        .module                 = THIS_MODULE,
        .is_fabrics             = true,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .reset_ctrl             = nvme_rdma_reset_ctrl,
        .free_ctrl              = nvme_rdma_free_ctrl,
        .submit_async_event     = nvme_rdma_submit_async_event,
        .delete_ctrl            = nvme_rdma_del_ctrl,
        .get_subsysnqn          = nvmf_get_subsysnqn,
        .get_address            = nvmf_get_address,
};

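/*
 * I/O queue bring-up at create time: negotiate the queue count with the
 * controller, create the RDMA queues, then allocate the shared tag set
 * and connect_q before issuing the per-queue fabrics Connect commands.
 */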
static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        int ret;

        ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
        if (ret)
                return ret;

        ctrl->queue_count = opts->nr_io_queues + 1;
        if (ctrl->queue_count < 2)
                return 0;

        dev_info(ctrl->ctrl.device,
                "creating %d I/O queues.\n", opts->nr_io_queues);

        ret = nvme_rdma_init_io_queues(ctrl);
        if (ret)
                return ret;

        /*
         * We need a reference on the device as long as the tag_set is alive,
         * as the MRs in the request structures need a valid ib_device.
         */
        ret = -EINVAL;
        if (!nvme_rdma_dev_get(ctrl->device))
                goto out_free_io_queues;

        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_rdma_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

        ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
        if (ret)
                goto out_put_dev;
        ctrl->ctrl.tagset = &ctrl->tag_set;

        ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
        if (IS_ERR(ctrl->ctrl.connect_q)) {
                ret = PTR_ERR(ctrl->ctrl.connect_q);
                goto out_free_tag_set;
        }

        ret = nvme_rdma_connect_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        return 0;

out_cleanup_connect_q:
        blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
        blk_mq_free_tag_set(&ctrl->tag_set);
out_put_dev:
        nvme_rdma_dev_put(ctrl->device);
out_free_io_queues:
        nvme_rdma_free_io_queues(ctrl);
        return ret;
}

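/* Parse the connect traddr as a dotted-quad IPv4 address. */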
static int nvme_rdma_parse_ipaddr(struct sockaddr_in *in_addr, char *p)
{
        u8 *addr = (u8 *)&in_addr->sin_addr.s_addr;
        size_t buflen = strlen(p);

        /* XXX: handle IPv6 addresses */

        if (buflen > INET_ADDRSTRLEN)
                return -EINVAL;
        if (in4_pton(p, buflen, addr, '\0', NULL) == 0)
                return -EINVAL;
        in_addr->sin_family = AF_INET;
        return 0;
}

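/*
 * Transport create_ctrl entry point: parse the address options, set up
 * the controller state and work items, bring up the admin queue, sanity
 * check the controller capabilities, and finally create the I/O queues
 * and mark the controller live.
 */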
static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_rdma_ctrl *ctrl;
        int ret;
        bool changed;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);

        ret = nvme_rdma_parse_ipaddr(&ctrl->addr_in, opts->traddr);
        if (ret) {
                pr_err("malformed IP address passed: %s\n", opts->traddr);
                goto out_free_ctrl;
        }

        if (opts->mask & NVMF_OPT_TRSVCID) {
                u16 port;

                ret = kstrtou16(opts->trsvcid, 0, &port);
                if (ret)
                        goto out_free_ctrl;

                ctrl->addr_in.sin_port = cpu_to_be16(port);
        } else {
                ctrl->addr_in.sin_port = cpu_to_be16(NVME_RDMA_IP_PORT);
        }

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
        if (ret)
                goto out_free_ctrl;

        ctrl->reconnect_delay = opts->reconnect_delay;
        INIT_DELAYED_WORK(&ctrl->reconnect_work,
                        nvme_rdma_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
        INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
        INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
        spin_lock_init(&ctrl->lock);

        ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;

        ret = -ENOMEM;
        ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues),
                                GFP_KERNEL);
        if (!ctrl->queues)
                goto out_uninit_ctrl;

        ret = nvme_rdma_configure_admin_queue(ctrl);
        if (ret)
                goto out_kfree_queues;

        /*
         * Sanity checks.  Take these error paths with an explicit error
         * code: ret is 0 here, and out_uninit_ctrl only converts
         * positive NVMe status values, so jumping with ret == 0 would
         * return ERR_PTR(0), i.e. NULL, to the caller.
         */
        if (ctrl->ctrl.icdoff) {
                dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
                ret = -EINVAL;
                goto out_remove_admin_queue;
        }

        /* keyed SGLs are mandatory for RDMA */
        if (!(ctrl->ctrl.sgls & (1 << 20))) {
                dev_err(ctrl->ctrl.device,
                        "Mandatory keyed sgls are not supported!\n");
                ret = -EINVAL;
                goto out_remove_admin_queue;
        }

        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn if maxcmd is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }

        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
                /* warn if sqsize is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.sqsize + 1);
                opts->queue_size = ctrl->ctrl.sqsize + 1;
        }

        if (opts->nr_io_queues) {
                ret = nvme_rdma_create_io_queues(ctrl);
                if (ret)
                        goto out_remove_admin_queue;
        }

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
                ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

        kref_get(&ctrl->ctrl.kref);

        mutex_lock(&nvme_rdma_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);

        if (opts->nr_io_queues) {
                nvme_queue_scan(&ctrl->ctrl);
                nvme_queue_async_events(&ctrl->ctrl);
        }

        return &ctrl->ctrl;

out_remove_admin_queue:
        nvme_stop_keep_alive(&ctrl->ctrl);
        nvme_rdma_destroy_admin_queue(ctrl);
out_kfree_queues:
        kfree(ctrl->queues);
out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
out_free_ctrl:
        kfree(ctrl);
        return ERR_PTR(ret);
}

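/*
 * Only traddr is mandatory; trsvcid (the IP port, defaulting to
 * NVME_RDMA_IP_PORT) and the reconnect delay are optional.
 */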
static struct nvmf_transport_ops nvme_rdma_transport = {
        .name           = "rdma",
        .required_opts  = NVMF_OPT_TRADDR,
        .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
        .create_ctrl    = nvme_rdma_create_ctrl,
};

static void nvme_rdma_add_one(struct ib_device *ib_device)
{
}

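/*
 * IB device removal: delete every controller created on top of the
 * departing device and wait for the delete work to drain, so that no
 * queue pair outlives the ib_device.
 */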
static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
        struct nvme_rdma_ctrl *ctrl;

        /* Delete all controllers using this device */
        mutex_lock(&nvme_rdma_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
                if (ctrl->device->dev != ib_device)
                        continue;
                dev_info(ctrl->ctrl.device,
                        "Removing ctrl: NQN \"%s\", addr %pISp\n",
                        ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
                __nvme_rdma_del_ctrl(ctrl);
        }
        mutex_unlock(&nvme_rdma_ctrl_mutex);

        flush_workqueue(nvme_rdma_wq);
}

static struct ib_client nvme_rdma_ib_client = {
        .name   = "nvme_rdma",
        .add    = nvme_rdma_add_one,
        .remove = nvme_rdma_remove_one,
};

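/*
 * The workqueue must exist before the IB client is registered: a device
 * removal callback can queue delete work and flush the workqueue.
 */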
static int __init nvme_rdma_init_module(void)
{
        int ret;

        nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
        if (!nvme_rdma_wq)
                return -ENOMEM;

        ret = ib_register_client(&nvme_rdma_ib_client);
        if (ret) {
                destroy_workqueue(nvme_rdma_wq);
                return ret;
        }

        nvmf_register_transport(&nvme_rdma_transport);
        return 0;
}

static void __exit nvme_rdma_cleanup_module(void)
{
        nvmf_unregister_transport(&nvme_rdma_transport);
        ib_unregister_client(&nvme_rdma_ib_client);
        destroy_workqueue(nvme_rdma_wq);
}

module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);

MODULE_LICENSE("GPL v2");