drivers/nvme/target/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * a host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
        u16 status;

        switch (errno) {
        case 0:
                status = NVME_SC_SUCCESS;
                break;
        case -ENOSPC:
                req->error_loc = offsetof(struct nvme_rw_command, length);
                status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
                break;
        case -EREMOTEIO:
                req->error_loc = offsetof(struct nvme_rw_command, slba);
                status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
                break;
        case -EOPNOTSUPP:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                switch (req->cmd->common.opcode) {
                case nvme_cmd_dsm:
                case nvme_cmd_write_zeroes:
                        status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
                        break;
                default:
                        status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
                break;
        case -ENODATA:
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                status = NVME_SC_ACCESS_DENIED;
                break;
        case -EIO:
                fallthrough;
        default:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
        }

        return status;
}

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
        pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
                 req->sq->qid);

        req->error_loc = offsetof(struct nvme_common_command, opcode);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
                const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
                size_t len)
{
        if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
        }
        return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
        if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
        }
        return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
        if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
        }
        return 0;
}

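/*
 * Find the highest NSID currently in use.  This relies on xa_for_each()
 * visiting entries in ascending index order, so the last namespace seen
 * carries the largest NSID.
 */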
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
        unsigned long nsid = 0;
        struct nvmet_ns *cur;
        unsigned long idx;

        xa_for_each(&subsys->namespaces, idx, cur)
                nsid = cur->nsid;

        return nsid;
}

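/*
 * Build the AEN completion result dword: the event type goes in the low
 * byte, the event information in the next byte, and the associated log
 * page identifier in the third byte, matching the layout the NVMe spec
 * defines for Asynchronous Event Request completions.
 */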
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
        return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
        u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
        struct nvmet_req *req;

        mutex_lock(&ctrl->lock);
        while (ctrl->nr_async_event_cmds) {
                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
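                /*
                 * Drop ctrl->lock around nvmet_req_complete(): completing
                 * the request calls into the transport, which may sleep or
                 * re-enter code that takes this lock.
                 */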
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, status);
                mutex_lock(&ctrl->lock);
        }
        mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
        struct nvmet_async_event *aen;
        struct nvmet_req *req;

        mutex_lock(&ctrl->lock);
        while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
                aen = list_first_entry(&ctrl->async_events,
                                       struct nvmet_async_event, entry);
                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
                nvmet_set_result(req, nvmet_async_event_result(aen));

                list_del(&aen->entry);
                kfree(aen);

                mutex_unlock(&ctrl->lock);
                trace_nvmet_async_event(ctrl, req->cqe->result.u32);
                nvmet_req_complete(req, 0);
                mutex_lock(&ctrl->lock);
        }
        mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
        struct nvmet_async_event *aen, *tmp;

        mutex_lock(&ctrl->lock);
        list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
                list_del(&aen->entry);
                kfree(aen);
        }
        mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl =
                container_of(work, struct nvmet_ctrl, async_event_work);

        nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
                u8 event_info, u8 log_page)
{
        struct nvmet_async_event *aen;

        aen = kmalloc(sizeof(*aen), GFP_KERNEL);
        if (!aen)
                return;

        aen->event_type = event_type;
        aen->event_info = event_info;
        aen->log_page = log_page;

        mutex_lock(&ctrl->lock);
        list_add_tail(&aen->entry, &ctrl->async_events);
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
        u32 i;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
                goto out_unlock;

        for (i = 0; i < ctrl->nr_changed_ns; i++) {
                if (ctrl->changed_ns_list[i] == nsid)
                        goto out_unlock;
        }

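        /*
         * On overflow the NVMe spec requires the Changed Namespace List
         * log to contain a single entry of 0xffffffff; pin nr_changed_ns
         * at U32_MAX so further NSIDs are dropped until the host reads
         * and clears the log page.
         */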
        if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
                ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
                ctrl->nr_changed_ns = U32_MAX;
                goto out_unlock;
        }

        ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
        mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
        struct nvmet_ctrl *ctrl;

        lockdep_assert_held(&subsys->lock);

        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
                if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
                        continue;
                nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
                                NVME_AER_NOTICE_NS_CHANGED,
                                NVME_LOG_CHANGED_NS);
        }
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
                struct nvmet_port *port)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                if (port && ctrl->port != port)
                        continue;
                if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
                        continue;
                nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
                                NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
        }
        mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
        struct nvmet_subsys_link *p;

        down_read(&nvmet_config_sem);
        list_for_each_entry(p, &port->subsystems, entry)
                nvmet_send_ana_event(p->subsys, port);
        up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
        int ret = 0;

        down_write(&nvmet_config_sem);
        if (nvmet_transports[ops->type])
                ret = -EINVAL;
        else
                nvmet_transports[ops->type] = ops;
        up_write(&nvmet_config_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
        down_write(&nvmet_config_sem);
        nvmet_transports[ops->type] = NULL;
        up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                if (ctrl->port == port)
                        ctrl->ops->delete_ctrl(ctrl);
        }
        mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
        const struct nvmet_fabrics_ops *ops;
        int ret;

        lockdep_assert_held(&nvmet_config_sem);

        ops = nvmet_transports[port->disc_addr.trtype];
        if (!ops) {
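                /*
                 * Drop the config semaphore while loading the transport
                 * module: its init path calls nvmet_register_transport(),
                 * which takes the same semaphore for writing.
                 */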
                up_write(&nvmet_config_sem);
                request_module("nvmet-transport-%d", port->disc_addr.trtype);
                down_write(&nvmet_config_sem);
                ops = nvmet_transports[port->disc_addr.trtype];
                if (!ops) {
                        pr_err("transport type %d not supported\n",
                                port->disc_addr.trtype);
                        return -EINVAL;
                }
        }

        if (!try_module_get(ops->owner))
                return -EINVAL;

        /*
         * If the user requested PI support and the transport isn't pi capable,
         * don't enable the port.
         */
        if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
                pr_err("T10-PI is not supported by transport type %d\n",
                       port->disc_addr.trtype);
                ret = -EINVAL;
                goto out_put;
        }

        ret = ops->add_port(port);
        if (ret)
                goto out_put;

        /* If the transport didn't set inline_data_size, then disable it. */
        if (port->inline_data_size < 0)
                port->inline_data_size = 0;

        port->enabled = true;
        port->tr_ops = ops;
        return 0;

out_put:
        module_put(ops->owner);
        return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
        const struct nvmet_fabrics_ops *ops;

        lockdep_assert_held(&nvmet_config_sem);

        port->enabled = false;
        port->tr_ops = NULL;

        ops = nvmet_transports[port->disc_addr.trtype];
        ops->remove_port(port);
        module_put(ops->owner);
}

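/*
 * Keep-alive expiry handler.  With traffic based keep-alive, any command
 * seen during the last period counts as a sign of life, so the timer is
 * simply re-armed instead of declaring the controller dead.
 */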
static void nvmet_keep_alive_timer(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvmet_ctrl, ka_work);
        bool cmd_seen = ctrl->cmd_seen;

        ctrl->cmd_seen = false;
        if (cmd_seen) {
                pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
                        ctrl->cntlid);
                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
                return;
        }

        pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
                ctrl->cntlid, ctrl->kato);

        nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
        if (unlikely(ctrl->kato == 0))
                return;

        pr_debug("ctrl %d start keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);

        INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
        if (unlikely(ctrl->kato == 0))
                return;

        pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

        cancel_delayed_work_sync(&ctrl->ka_work);
}

u16 nvmet_req_find_ns(struct nvmet_req *req)
{
        u32 nsid = le32_to_cpu(req->cmd->common.nsid);

        req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }

        percpu_ref_get(&req->ns->ref);
        return NVME_SC_SUCCESS;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
        struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

        complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
        percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
        nvmet_bdev_ns_disable(ns);
        nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
        int ret;
        struct pci_dev *p2p_dev;

        if (!ns->use_p2pmem)
                return 0;

        if (!ns->bdev) {
                pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
                return -EINVAL;
        }

        if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
                pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
                       ns->device_path);
                return -EINVAL;
        }

        if (ns->p2p_dev) {
                ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
                if (ret < 0)
                        return -EINVAL;
        } else {
                /*
                 * Right now we just check that there is p2pmem available so
                 * we can report an error to the user right away if there
                 * is not. We'll find the actual device to use once we
                 * setup the controller when the port's device is available.
                 */

                p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
                if (!p2p_dev) {
                        pr_err("no peer-to-peer memory is available for %s\n",
                               ns->device_path);
                        return -EINVAL;
                }

                pci_dev_put(p2p_dev);
        }

        return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
                                    struct nvmet_ns *ns)
{
        struct device *clients[2];
        struct pci_dev *p2p_dev;
        int ret;

        if (!ctrl->p2p_client || !ns->use_p2pmem)
                return;

        if (ns->p2p_dev) {
                ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
                if (ret < 0)
                        return;

                p2p_dev = pci_dev_get(ns->p2p_dev);
        } else {
                clients[0] = ctrl->p2p_client;
                clients[1] = nvmet_ns_dev(ns);

                p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
                if (!p2p_dev) {
                        pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
                               dev_name(ctrl->p2p_client), ns->device_path);
                        return;
                }
        }

        ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
        if (ret < 0)
                pci_dev_put(p2p_dev);

        pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
                ns->nsid);
}

void nvmet_ns_revalidate(struct nvmet_ns *ns)
{
        loff_t oldsize = ns->size;

        if (ns->bdev)
                nvmet_bdev_ns_revalidate(ns);
        else
                nvmet_file_ns_revalidate(ns);

        if (oldsize != ns->size)
                nvmet_ns_changed(ns->subsys, ns->nsid);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
        struct nvmet_subsys *subsys = ns->subsys;
        struct nvmet_ctrl *ctrl;
        int ret;

        mutex_lock(&subsys->lock);
        ret = 0;

        if (nvmet_passthru_ctrl(subsys)) {
                pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
                goto out_unlock;
        }

        if (ns->enabled)
                goto out_unlock;

        ret = -EMFILE;
        if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
                goto out_unlock;

        ret = nvmet_bdev_ns_enable(ns);
        if (ret == -ENOTBLK)
                ret = nvmet_file_ns_enable(ns);
        if (ret)
                goto out_unlock;

        ret = nvmet_p2pmem_ns_enable(ns);
        if (ret)
                goto out_dev_disable;

        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);

        ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
                                0, GFP_KERNEL);
        if (ret)
                goto out_dev_put;

        if (ns->nsid > subsys->max_nsid)
                subsys->max_nsid = ns->nsid;

        ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
        if (ret)
                goto out_restore_subsys_maxnsid;

        subsys->nr_namespaces++;

        nvmet_ns_changed(subsys, ns->nsid);
        ns->enabled = true;
        ret = 0;
out_unlock:
        mutex_unlock(&subsys->lock);
        return ret;

out_restore_subsys_maxnsid:
        subsys->max_nsid = nvmet_max_nsid(subsys);
        percpu_ref_exit(&ns->ref);
out_dev_put:
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
        nvmet_ns_dev_disable(ns);
        goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
        struct nvmet_subsys *subsys = ns->subsys;
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        if (!ns->enabled)
                goto out_unlock;

        ns->enabled = false;
        xa_erase(&ns->subsys->namespaces, ns->nsid);
        if (ns->nsid == subsys->max_nsid)
                subsys->max_nsid = nvmet_max_nsid(subsys);

        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

        mutex_unlock(&subsys->lock);

        /*
         * Now that we removed the namespaces from the lookup list, we
         * can kill the per_cpu ref and wait for any remaining references
         * to be dropped, as well as a RCU grace period for anyone only
         * using the namespace under rcu_read_lock().  Note that we can't
         * use call_rcu here as we need to ensure the namespaces have
         * been fully destroyed before unloading the module.
         */
        percpu_ref_kill(&ns->ref);
        synchronize_rcu();
        wait_for_completion(&ns->disable_done);
        percpu_ref_exit(&ns->ref);

        mutex_lock(&subsys->lock);

        subsys->nr_namespaces--;
        nvmet_ns_changed(subsys, ns->nsid);
        nvmet_ns_dev_disable(ns);
out_unlock:
        mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
        nvmet_ns_disable(ns);

        down_write(&nvmet_ana_sem);
        nvmet_ana_group_enabled[ns->anagrpid]--;
        up_write(&nvmet_ana_sem);

        kfree(ns->device_path);
        kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
        struct nvmet_ns *ns;

        ns = kzalloc(sizeof(*ns), GFP_KERNEL);
        if (!ns)
                return NULL;

        init_completion(&ns->disable_done);

        ns->nsid = nsid;
        ns->subsys = subsys;

        down_write(&nvmet_ana_sem);
        ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
        nvmet_ana_group_enabled[ns->anagrpid]++;
        up_write(&nvmet_ana_sem);

        uuid_gen(&ns->uuid);
        ns->buffered_io = false;

        return ns;
}

static void nvmet_update_sq_head(struct nvmet_req *req)
{
        if (req->sq->size) {
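                /*
                 * Advance sqhd with a lockless cmpxchg() loop so that
                 * completions racing on the same queue each move the head
                 * forward exactly once.
                 */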
                u32 old_sqhd, new_sqhd;

                do {
                        old_sqhd = req->sq->sqhd;
                        new_sqhd = (old_sqhd + 1) % req->sq->size;
                } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
                                        old_sqhd);
        }
        req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_error_slot *new_error_slot;
        unsigned long flags;

        req->cqe->status = cpu_to_le16(status << 1);

        if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
                return;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        ctrl->err_counter++;
        new_error_slot =
                &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

        new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
        new_error_slot->sqid = cpu_to_le16(req->sq->qid);
        new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
        new_error_slot->status_field = cpu_to_le16(status << 1);
        new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
        new_error_slot->lba = cpu_to_le64(req->error_slba);
        new_error_slot->nsid = req->cmd->common.nsid;
        spin_unlock_irqrestore(&ctrl->error_lock, flags);

        /*
         * Set the More bit (bit 14 of the status field) for this request:
         * further details are available in the Error Information log.
         */
        req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
        if (!req->sq->sqhd_disabled)
                nvmet_update_sq_head(req);
        req->cqe->sq_id = cpu_to_le16(req->sq->qid);
        req->cqe->command_id = req->cmd->common.command_id;

        if (unlikely(status))
                nvmet_set_error(req, status);

        trace_nvmet_req_complete(req);

        if (req->ns)
                nvmet_put_namespace(req->ns);
        req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
        __nvmet_req_complete(req, status);
        percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
                u16 qid, u16 size)
{
        cq->qid = qid;
        cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
                u16 qid, u16 size)
{
        sq->sqhd = 0;
        sq->qid = qid;
        sq->size = size;

        ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
        struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

        complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
        struct nvmet_ctrl *ctrl = sq->ctrl;

        /*
         * If this is the admin queue, complete all AERs so that our
         * queue doesn't have outstanding requests on it.
         */
        if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
                nvmet_async_events_failall(ctrl);
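        /*
         * Two handshakes here: ->confirm_done fires once the ref is
         * confirmed killed (no new requests can take a reference),
         * ->free_done once the last in-flight request has dropped its
         * reference in nvmet_req_complete().
         */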
        percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
        wait_for_completion(&sq->confirm_done);
        wait_for_completion(&sq->free_done);
        percpu_ref_exit(&sq->ref);

        if (ctrl) {
                nvmet_ctrl_put(ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
        struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

        complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
        int ret;

        ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
        if (ret) {
                pr_err("percpu_ref init failed!\n");
                return ret;
        }
        init_completion(&sq->free_done);
        init_completion(&sq->confirm_done);

        return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
                struct nvmet_ns *ns)
{
        enum nvme_ana_state state = port->ana_state[ns->anagrpid];

        if (unlikely(state == NVME_ANA_INACCESSIBLE))
                return NVME_SC_ANA_INACCESSIBLE;
        if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
                return NVME_SC_ANA_PERSISTENT_LOSS;
        if (unlikely(state == NVME_ANA_CHANGE))
                return NVME_SC_ANA_TRANSITION;
        return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
        if (unlikely(req->ns->readonly)) {
                switch (req->cmd->common.opcode) {
                case nvme_cmd_read:
                case nvme_cmd_flush:
                        break;
                default:
                        return NVME_SC_NS_WRITE_PROTECTED;
                }
        }

        return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        if (nvmet_req_passthru_ctrl(req))
                return nvmet_parse_passthru_io_cmd(req);

        ret = nvmet_req_find_ns(req);
        if (unlikely(ret))
                return ret;

        ret = nvmet_check_ana_state(req->port, req->ns);
        if (unlikely(ret)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return ret;
        }
        ret = nvmet_io_cmd_check_access(req);
        if (unlikely(ret)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return ret;
        }

        if (req->ns->file)
                return nvmet_file_parse_io_cmd(req);

        return nvmet_bdev_parse_io_cmd(req);
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
        u8 flags = req->cmd->common.flags;
        u16 status;

        req->cq = cq;
        req->sq = sq;
        req->ops = ops;
        req->sg = NULL;
        req->metadata_sg = NULL;
        req->sg_cnt = 0;
        req->metadata_sg_cnt = 0;
        req->transfer_len = 0;
        req->metadata_len = 0;
        req->cqe->status = 0;
        req->cqe->sq_head = 0;
        req->ns = NULL;
        req->error_loc = NVMET_NO_ERROR_LOC;
        req->error_slba = 0;

        /* no support for fused commands yet */
        if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
                req->error_loc = offsetof(struct nvme_common_command, flags);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        /*
         * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
         * contains an address of a single contiguous physical buffer that is
         * byte aligned.
         */
        if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
                req->error_loc = offsetof(struct nvme_common_command, flags);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        if (unlikely(!req->sq->ctrl))
                /* will return an error for any non-connect command: */
                status = nvmet_parse_connect_cmd(req);
        else if (likely(req->sq->qid != 0))
                status = nvmet_parse_io_cmd(req);
        else
                status = nvmet_parse_admin_cmd(req);

        if (status)
                goto fail;

        trace_nvmet_req_init(req, req->cmd);

        if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }

        if (sq->ctrl)
                sq->ctrl->cmd_seen = true;

        return true;

fail:
        __nvmet_req_complete(req, status);
        return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
        percpu_ref_put(&req->sq->ref);
        if (req->ns)
                nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
        if (unlikely(len != req->transfer_len)) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
                return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
        if (unlikely(data_len > req->transfer_len)) {
                req->error_loc = offsetof(struct nvme_common_command, dptr);
                nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
                return false;
        }

        return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
        return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
{
        req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
                        nvmet_data_transfer_len(req));
        if (!req->sg)
                goto out_err;

        if (req->metadata_len) {
                req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
                                &req->metadata_sg_cnt, req->metadata_len);
                if (!req->metadata_sg)
                        goto out_free_sg;
        }
        return 0;
out_free_sg:
        pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
        return -ENOMEM;
}

static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
        if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
                return false;

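        /* Only I/O queue commands with a resolved namespace may use P2P memory. */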
        if (req->sq->ctrl && req->sq->qid && req->ns) {
                req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
                                                 req->ns->nsid);
                if (req->p2p_dev)
                        return true;
        }

        req->p2p_dev = NULL;
        return false;
}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
        if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
                return 0;

        req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
                            &req->sg_cnt);
        if (unlikely(!req->sg))
                goto out;

        if (req->metadata_len) {
                req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
                                             &req->metadata_sg_cnt);
                if (unlikely(!req->metadata_sg))
                        goto out_free;
        }

        return 0;
out_free:
        sgl_free(req->sg);
out:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
        if (req->p2p_dev) {
                pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
                if (req->metadata_sg)
                        pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
        } else {
                sgl_free(req->sg);
                if (req->metadata_sg)
                        sgl_free(req->metadata_sg);
        }

        req->sg = NULL;
        req->metadata_sg = NULL;
        req->sg_cnt = 0;
        req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);

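/*
 * Field accessors for the Controller Configuration (CC) register, using
 * the shift and width definitions from the NVMe specification.
 */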
static inline bool nvmet_cc_en(u32 cc)
{
        return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
        return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
        return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
        return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
        return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
        return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
        return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
        lockdep_assert_held(&ctrl->lock);

        if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
            nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
            nvmet_cc_mps(ctrl->cc) != 0 ||
            nvmet_cc_ams(ctrl->cc) != 0 ||
            nvmet_cc_css(ctrl->cc) != 0) {
                ctrl->csts = NVME_CSTS_CFS;
                return;
        }

        ctrl->csts = NVME_CSTS_RDY;

        /*
         * Controllers that are not yet enabled should not really enforce the
         * keep alive timeout, but we still want to track a timeout and cleanup
         * in case a host died before it enabled the controller.  Hence, simply
         * reset the keep alive timer when the controller is enabled.
         */
        if (ctrl->kato)
                mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
        lockdep_assert_held(&ctrl->lock);

        /* XXX: tear down queues? */
        ctrl->csts &= ~NVME_CSTS_RDY;
        ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
        u32 old;

        mutex_lock(&ctrl->lock);
        old = ctrl->cc;
        ctrl->cc = new;

        if (nvmet_cc_en(new) && !nvmet_cc_en(old))
                nvmet_start_ctrl(ctrl);
        if (!nvmet_cc_en(new) && nvmet_cc_en(old))
                nvmet_clear_ctrl(ctrl);
        if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
                nvmet_clear_ctrl(ctrl);
                ctrl->csts |= NVME_CSTS_SHST_CMPLT;
        }
        if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
                ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
        mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
        /* command sets supported: NVMe command set: */
        ctrl->cap = (1ULL << 37);
        /* CC.EN timeout in 500msec units: */
        ctrl->cap |= (15ULL << 24);
        /* maximum queue entries supported (MQES is a 0's based value): */
        ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
                struct nvmet_req *req, struct nvmet_ctrl **ret)
{
        struct nvmet_subsys *subsys;
        struct nvmet_ctrl *ctrl;
        u16 status = 0;

        subsys = nvmet_find_get_subsys(req->port, subsysnqn);
        if (!subsys) {
                pr_warn("connect request for invalid subsystem %s!\n",
                        subsysnqn);
                req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
                return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        }

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
                if (ctrl->cntlid == cntlid) {
                        if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
                                pr_warn("hostnqn mismatch.\n");
                                continue;
                        }
                        if (!kref_get_unless_zero(&ctrl->ref))
                                continue;

                        *ret = ctrl;
                        goto out;
                }
        }

        pr_warn("could not find controller %d for subsys %s / host %s\n",
                cntlid, subsysnqn, hostnqn);
        req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
        mutex_unlock(&subsys->lock);
        nvmet_subsys_put(subsys);
        return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
        if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
                pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
                       cmd->common.opcode, req->sq->qid);
                return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
        }

        if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
                pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
                       cmd->common.opcode, req->sq->qid);
                return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
        }
        return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
        struct nvmet_host_link *p;

        lockdep_assert_held(&nvmet_config_sem);

        if (subsys->allow_any_host)
                return true;

        if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
                return true;

        list_for_each_entry(p, &subsys->hosts, entry) {
                if (!strcmp(nvmet_host_name(p->host), hostnqn))
                        return true;
        }

        return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
                struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        unsigned long idx;

        if (!req->p2p_client)
                return;

        ctrl->p2p_client = get_device(req->p2p_client);

        xa_for_each(&ctrl->subsys->namespaces, idx, ns)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
                pci_dev_put(radix_tree_deref_slot(slot));

        put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
        struct nvmet_ctrl *ctrl =
                        container_of(work, struct nvmet_ctrl, fatal_err_work);

        pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
        ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
                struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
        struct nvmet_subsys *subsys;
        struct nvmet_ctrl *ctrl;
        int ret;
        u16 status;

        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        subsys = nvmet_find_get_subsys(req->port, subsysnqn);
        if (!subsys) {
                pr_warn("connect request for invalid subsystem %s!\n",
                        subsysnqn);
                req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
                goto out;
        }

        status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        down_read(&nvmet_config_sem);
        if (!nvmet_host_allowed(subsys, hostnqn)) {
                pr_info("connect by host %s for subsystem %s not allowed\n",
                        hostnqn, subsysnqn);
                req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
                up_read(&nvmet_config_sem);
                status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
                goto out_put_subsystem;
        }
        up_read(&nvmet_config_sem);

        status = NVME_SC_INTERNAL;
        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                goto out_put_subsystem;
        mutex_init(&ctrl->lock);

        nvmet_init_cap(ctrl);

        ctrl->port = req->port;

        INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
        INIT_LIST_HEAD(&ctrl->async_events);
        INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
        INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

        memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
        memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

        kref_init(&ctrl->ref);
        ctrl->subsys = subsys;
        WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

        ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
                        sizeof(__le32), GFP_KERNEL);
        if (!ctrl->changed_ns_list)
                goto out_free_ctrl;

        ctrl->sqs = kcalloc(subsys->max_qid + 1,
                        sizeof(struct nvmet_sq *),
                        GFP_KERNEL);
        if (!ctrl->sqs)
                goto out_free_changed_ns_list;

        if (subsys->cntlid_min > subsys->cntlid_max)
                goto out_free_changed_ns_list;

        ret = ida_simple_get(&cntlid_ida,
                             subsys->cntlid_min, subsys->cntlid_max,
                             GFP_KERNEL);
        if (ret < 0) {
                status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
                goto out_free_sqs;
        }
        ctrl->cntlid = ret;

        ctrl->ops = req->ops;

        /*
         * Discovery controllers may use some arbitrary high value
         * in order to cleanup stale discovery sessions
         */
        if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
                kato = NVMET_DISC_KATO_MS;

        /* keep-alive timeout in seconds */
        ctrl->kato = DIV_ROUND_UP(kato, 1000);

        ctrl->err_counter = 0;
        spin_lock_init(&ctrl->error_lock);

        nvmet_start_keep_alive_timer(ctrl);

        mutex_lock(&subsys->lock);
        list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
        nvmet_setup_p2p_ns_map(ctrl, req);
        mutex_unlock(&subsys->lock);

        *ctrlp = ctrl;
        return 0;

out_free_sqs:
        kfree(ctrl->sqs);
out_free_changed_ns_list:
        kfree(ctrl->changed_ns_list);
out_free_ctrl:
        kfree(ctrl);
out_put_subsystem:
        nvmet_subsys_put(subsys);
out:
        return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
        struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
        struct nvmet_subsys *subsys = ctrl->subsys;

        mutex_lock(&subsys->lock);
        nvmet_release_p2p_ns_map(ctrl);
        list_del(&ctrl->subsys_entry);
        mutex_unlock(&subsys->lock);

        nvmet_stop_keep_alive_timer(ctrl);

        flush_work(&ctrl->async_event_work);
        cancel_work_sync(&ctrl->fatal_err_work);

        ida_simple_remove(&cntlid_ida, ctrl->cntlid);

        nvmet_async_events_free(ctrl);
        kfree(ctrl->sqs);
        kfree(ctrl->changed_ns_list);
        kfree(ctrl);

        nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
        kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
        mutex_lock(&ctrl->lock);
        if (!(ctrl->csts & NVME_CSTS_CFS)) {
                ctrl->csts |= NVME_CSTS_CFS;
                schedule_work(&ctrl->fatal_err_work);
        }
        mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
                const char *subsysnqn)
{
        struct nvmet_subsys_link *p;

        if (!port)
                return NULL;

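        /* The well-known discovery NQN always maps to the discovery subsystem. */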
        if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
                if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
                        return NULL;
                return nvmet_disc_subsys;
        }

        down_read(&nvmet_config_sem);
        list_for_each_entry(p, &port->subsystems, entry) {
                if (!strncmp(p->subsys->subsysnqn, subsysnqn,
                                NVMF_NQN_SIZE)) {
                        if (!kref_get_unless_zero(&p->subsys->ref))
                                break;
                        up_read(&nvmet_config_sem);
                        return p->subsys;
                }
        }
        up_read(&nvmet_config_sem);
        return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                enum nvme_subsys_type type)
{
        struct nvmet_subsys *subsys;

        subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
        if (!subsys)
                return ERR_PTR(-ENOMEM);

        subsys->ver = NVMET_DEFAULT_VS;
        /* generate a random serial number as our controllers are ephemeral: */
        get_random_bytes(&subsys->serial, sizeof(subsys->serial));

        switch (type) {
        case NVME_NQN_NVME:
                subsys->max_qid = NVMET_NR_QUEUES;
                break;
        case NVME_NQN_DISC:
                subsys->max_qid = 0;
                break;
        default:
                pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
                kfree(subsys);
                return ERR_PTR(-EINVAL);
        }
        subsys->type = type;
        subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
                        GFP_KERNEL);
        if (!subsys->subsysnqn) {
                kfree(subsys);
                return ERR_PTR(-ENOMEM);
        }
        subsys->cntlid_min = NVME_CNTLID_MIN;
        subsys->cntlid_max = NVME_CNTLID_MAX;
        kref_init(&subsys->ref);

        mutex_init(&subsys->lock);
        xa_init(&subsys->namespaces);
        INIT_LIST_HEAD(&subsys->ctrls);
        INIT_LIST_HEAD(&subsys->hosts);

        return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
        struct nvmet_subsys *subsys =
                container_of(ref, struct nvmet_subsys, ref);

        WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

        xa_destroy(&subsys->namespaces);
        nvmet_passthru_subsys_free(subsys);

        kfree(subsys->subsysnqn);
        kfree_rcu(subsys->model, rcuhead);
        kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&subsys->lock);
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                ctrl->ops->delete_ctrl(ctrl);
        mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
        kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
        int error;

        nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

        buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
                        WQ_MEM_RECLAIM, 0);
        if (!buffered_io_wq) {
                error = -ENOMEM;
                goto out;
        }

        error = nvmet_init_discovery();
        if (error)
                goto out_free_work_queue;

        error = nvmet_init_configfs();
        if (error)
                goto out_exit_discovery;
        return 0;

out_exit_discovery:
        nvmet_exit_discovery();
out_free_work_queue:
        destroy_workqueue(buffered_io_wq);
out:
        return error;
}

static void __exit nvmet_exit(void)
{
        nvmet_exit_configfs();
        nvmet_exit_discovery();
        ida_destroy(&cntlid_ida);
        destroy_workqueue(buffered_io_wq);

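        /* Discovery log page header and entries are fixed 1024-byte records. */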
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");