// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"
20 struct kmem_cache *nvmet_bvec_cache;
21 struct workqueue_struct *buffered_io_wq;
22 struct workqueue_struct *zbd_wq;
23 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
24 static DEFINE_IDA(cntlid_ida);
26 struct workqueue_struct *nvmet_wq;
27 EXPORT_SYMBOL_GPL(nvmet_wq);
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * obtained, while readers (populating the discovery log page or checking a
 * host-subsystem link) take the read lock to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
47 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
49 DECLARE_RWSEM(nvmet_ana_sem);
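
/*
 * Translate an errno returned by a backend into an NVMe status code and,
 * where it is meaningful, record the offending command field in
 * req->error_loc so it shows up in the error log page.
 */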
51 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
55 return NVME_SC_SUCCESS;
57 req->error_loc = offsetof(struct nvme_rw_command, length);
58 return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
60 req->error_loc = offsetof(struct nvme_rw_command, slba);
61 return NVME_SC_LBA_RANGE | NVME_SC_DNR;
63 req->error_loc = offsetof(struct nvme_common_command, opcode);
64 switch (req->cmd->common.opcode) {
66 case nvme_cmd_write_zeroes:
67 return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
69 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
73 req->error_loc = offsetof(struct nvme_rw_command, nsid);
74 return NVME_SC_ACCESS_DENIED;
78 req->error_loc = offsetof(struct nvme_common_command, opcode);
79 return NVME_SC_INTERNAL | NVME_SC_DNR;
83 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
85 pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
88 req->error_loc = offsetof(struct nvme_common_command, opcode);
89 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
92 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
93 const char *subsysnqn);
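
/*
 * Bounce-buffer helpers: copy data to or from the command's data SGL at a
 * byte offset, or zero a range of it.  A short copy is reported as an SGL
 * data error with the Do Not Retry bit set.
 */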
95 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
98 if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
99 req->error_loc = offsetof(struct nvme_common_command, dptr);
100 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
105 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
107 if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
108 req->error_loc = offsetof(struct nvme_common_command, dptr);
109 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
114 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
116 if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
117 req->error_loc = offsetof(struct nvme_common_command, dptr);
118 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
123 static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
125 struct nvmet_ns *cur;
129 xa_for_each(&subsys->namespaces, idx, cur)
135 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
137 return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
140 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
142 struct nvmet_req *req;
144 mutex_lock(&ctrl->lock);
145 while (ctrl->nr_async_event_cmds) {
146 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
147 mutex_unlock(&ctrl->lock);
148 nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
149 mutex_lock(&ctrl->lock);
151 mutex_unlock(&ctrl->lock);
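
/*
 * Deliver queued AENs: pair each pending event with an outstanding AER
 * command, oldest event first.  The controller lock is dropped around
 * nvmet_req_complete() for each delivery.
 */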
154 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
156 struct nvmet_async_event *aen;
157 struct nvmet_req *req;
159 mutex_lock(&ctrl->lock);
160 while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
161 aen = list_first_entry(&ctrl->async_events,
162 struct nvmet_async_event, entry);
163 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
164 nvmet_set_result(req, nvmet_async_event_result(aen));
166 list_del(&aen->entry);
169 mutex_unlock(&ctrl->lock);
170 trace_nvmet_async_event(ctrl, req->cqe->result.u32);
171 nvmet_req_complete(req, 0);
172 mutex_lock(&ctrl->lock);
174 mutex_unlock(&ctrl->lock);
177 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
179 struct nvmet_async_event *aen, *tmp;
181 mutex_lock(&ctrl->lock);
182 list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
183 list_del(&aen->entry);
186 mutex_unlock(&ctrl->lock);
189 static void nvmet_async_event_work(struct work_struct *work)
191 struct nvmet_ctrl *ctrl =
192 container_of(work, struct nvmet_ctrl, async_event_work);
194 nvmet_async_events_process(ctrl);
197 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
198 u8 event_info, u8 log_page)
200 struct nvmet_async_event *aen;
202 aen = kmalloc(sizeof(*aen), GFP_KERNEL);
206 aen->event_type = event_type;
207 aen->event_info = event_info;
208 aen->log_page = log_page;
210 mutex_lock(&ctrl->lock);
211 list_add_tail(&aen->entry, &ctrl->async_events);
212 mutex_unlock(&ctrl->lock);
214 queue_work(nvmet_wq, &ctrl->async_event_work);
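
/*
 * Record a namespace ID in the controller's Changed Namespace List log.
 * Once more than NVME_MAX_CHANGED_NAMESPACES IDs have accumulated, the list
 * collapses to the single entry 0xffffffff, telling the host to rescan all
 * namespaces.
 */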
217 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
221 mutex_lock(&ctrl->lock);
222 if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
225 for (i = 0; i < ctrl->nr_changed_ns; i++) {
226 if (ctrl->changed_ns_list[i] == nsid)
230 if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
231 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
232 ctrl->nr_changed_ns = U32_MAX;
236 ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
238 mutex_unlock(&ctrl->lock);
241 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
243 struct nvmet_ctrl *ctrl;
245 lockdep_assert_held(&subsys->lock);
247 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
248 nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
249 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
251 nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
252 NVME_AER_NOTICE_NS_CHANGED,
253 NVME_LOG_CHANGED_NS);
257 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
258 struct nvmet_port *port)
260 struct nvmet_ctrl *ctrl;
262 mutex_lock(&subsys->lock);
263 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
264 if (port && ctrl->port != port)
266 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
268 nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
269 NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
271 mutex_unlock(&subsys->lock);
274 void nvmet_port_send_ana_event(struct nvmet_port *port)
276 struct nvmet_subsys_link *p;
278 down_read(&nvmet_config_sem);
279 list_for_each_entry(p, &port->subsystems, entry)
280 nvmet_send_ana_event(p->subsys, port);
281 up_read(&nvmet_config_sem);
284 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
288 down_write(&nvmet_config_sem);
289 if (nvmet_transports[ops->type])
292 nvmet_transports[ops->type] = ops;
293 up_write(&nvmet_config_sem);
297 EXPORT_SYMBOL_GPL(nvmet_register_transport);
299 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
301 down_write(&nvmet_config_sem);
302 nvmet_transports[ops->type] = NULL;
303 up_write(&nvmet_config_sem);
305 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
307 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
309 struct nvmet_ctrl *ctrl;
311 mutex_lock(&subsys->lock);
312 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
313 if (ctrl->port == port)
314 ctrl->ops->delete_ctrl(ctrl);
316 mutex_unlock(&subsys->lock);
319 int nvmet_enable_port(struct nvmet_port *port)
321 const struct nvmet_fabrics_ops *ops;
324 lockdep_assert_held(&nvmet_config_sem);
326 ops = nvmet_transports[port->disc_addr.trtype];
328 up_write(&nvmet_config_sem);
329 request_module("nvmet-transport-%d", port->disc_addr.trtype);
330 down_write(&nvmet_config_sem);
331 ops = nvmet_transports[port->disc_addr.trtype];
333 pr_err("transport type %d not supported\n",
334 port->disc_addr.trtype);
339 if (!try_module_get(ops->owner))
	/*
	 * If the user requested PI support and the transport isn't PI capable,
	 * don't enable the port.
	 */
346 if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
347 pr_err("T10-PI is not supported by transport type %d\n",
348 port->disc_addr.trtype);
353 ret = ops->add_port(port);
357 /* If the transport didn't set inline_data_size, then disable it. */
358 if (port->inline_data_size < 0)
359 port->inline_data_size = 0;
361 port->enabled = true;
366 module_put(ops->owner);
370 void nvmet_disable_port(struct nvmet_port *port)
372 const struct nvmet_fabrics_ops *ops;
374 lockdep_assert_held(&nvmet_config_sem);
376 port->enabled = false;
379 ops = nvmet_transports[port->disc_addr.trtype];
380 ops->remove_port(port);
381 module_put(ops->owner);
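
/*
 * Keep-alive expiration handler.  If command traffic was seen since the
 * last expiration (traffic based keep-alive), treat it as an implicit
 * keep-alive and re-arm the timer; otherwise escalate to a fatal controller
 * error so the transport tears the association down.
 */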
384 static void nvmet_keep_alive_timer(struct work_struct *work)
386 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
387 struct nvmet_ctrl, ka_work);
388 bool reset_tbkas = ctrl->reset_tbkas;
390 ctrl->reset_tbkas = false;
392 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
394 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
398 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
399 ctrl->cntlid, ctrl->kato);
401 nvmet_ctrl_fatal_error(ctrl);
404 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
406 if (unlikely(ctrl->kato == 0))
409 pr_debug("ctrl %d start keep-alive timer for %d secs\n",
410 ctrl->cntlid, ctrl->kato);
412 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
415 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
417 if (unlikely(ctrl->kato == 0))
420 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
422 cancel_delayed_work_sync(&ctrl->ka_work);
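
/*
 * Look up the namespace addressed by the command's NSID and take a
 * reference on it; the reference is dropped in __nvmet_req_complete().
 */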
425 u16 nvmet_req_find_ns(struct nvmet_req *req)
427 u32 nsid = le32_to_cpu(req->cmd->common.nsid);
429 req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
430 if (unlikely(!req->ns)) {
431 req->error_loc = offsetof(struct nvme_common_command, nsid);
432 return NVME_SC_INVALID_NS | NVME_SC_DNR;
435 percpu_ref_get(&req->ns->ref);
436 return NVME_SC_SUCCESS;
439 static void nvmet_destroy_namespace(struct percpu_ref *ref)
441 struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
443 complete(&ns->disable_done);
446 void nvmet_put_namespace(struct nvmet_ns *ns)
448 percpu_ref_put(&ns->ref);
451 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
453 nvmet_bdev_ns_disable(ns);
454 nvmet_file_ns_disable(ns);
457 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
460 struct pci_dev *p2p_dev;
466 pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
470 if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
471 pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
477 ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
	/*
	 * Right now we just check that there is p2pmem available so we can
	 * report an error to the user right away if there is not.  We'll find
	 * the actual device to use once we set up the controller when the
	 * port's device is available.
	 */
488 p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
490 pr_err("no peer-to-peer memory is available for %s\n",
495 pci_dev_put(p2p_dev);
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
504 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
507 struct device *clients[2];
508 struct pci_dev *p2p_dev;
511 if (!ctrl->p2p_client || !ns->use_p2pmem)
515 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
519 p2p_dev = pci_dev_get(ns->p2p_dev);
521 clients[0] = ctrl->p2p_client;
522 clients[1] = nvmet_ns_dev(ns);
524 p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
526 pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
527 dev_name(ctrl->p2p_client), ns->device_path);
532 ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
534 pci_dev_put(p2p_dev);
536 pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
540 bool nvmet_ns_revalidate(struct nvmet_ns *ns)
542 loff_t oldsize = ns->size;
545 nvmet_bdev_ns_revalidate(ns);
547 nvmet_file_ns_revalidate(ns);
549 return oldsize != ns->size;
552 int nvmet_ns_enable(struct nvmet_ns *ns)
554 struct nvmet_subsys *subsys = ns->subsys;
555 struct nvmet_ctrl *ctrl;
558 mutex_lock(&subsys->lock);
561 if (nvmet_is_passthru_subsys(subsys)) {
562 pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
570 if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
573 ret = nvmet_bdev_ns_enable(ns);
575 ret = nvmet_file_ns_enable(ns);
579 ret = nvmet_p2pmem_ns_enable(ns);
581 goto out_dev_disable;
583 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
584 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
586 ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
591 if (ns->nsid > subsys->max_nsid)
592 subsys->max_nsid = ns->nsid;
594 ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
596 goto out_restore_subsys_maxnsid;
598 subsys->nr_namespaces++;
600 nvmet_ns_changed(subsys, ns->nsid);
604 mutex_unlock(&subsys->lock);
607 out_restore_subsys_maxnsid:
608 subsys->max_nsid = nvmet_max_nsid(subsys);
609 percpu_ref_exit(&ns->ref);
611 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
612 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
614 nvmet_ns_dev_disable(ns);
618 void nvmet_ns_disable(struct nvmet_ns *ns)
620 struct nvmet_subsys *subsys = ns->subsys;
621 struct nvmet_ctrl *ctrl;
623 mutex_lock(&subsys->lock);
628 xa_erase(&ns->subsys->namespaces, ns->nsid);
629 if (ns->nsid == subsys->max_nsid)
630 subsys->max_nsid = nvmet_max_nsid(subsys);
632 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
633 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
635 mutex_unlock(&subsys->lock);
	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
645 percpu_ref_kill(&ns->ref);
647 wait_for_completion(&ns->disable_done);
648 percpu_ref_exit(&ns->ref);
650 mutex_lock(&subsys->lock);
652 subsys->nr_namespaces--;
653 nvmet_ns_changed(subsys, ns->nsid);
654 nvmet_ns_dev_disable(ns);
656 mutex_unlock(&subsys->lock);
659 void nvmet_ns_free(struct nvmet_ns *ns)
661 nvmet_ns_disable(ns);
663 down_write(&nvmet_ana_sem);
664 nvmet_ana_group_enabled[ns->anagrpid]--;
665 up_write(&nvmet_ana_sem);
667 kfree(ns->device_path);
671 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
675 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
679 init_completion(&ns->disable_done);
684 down_write(&nvmet_ana_sem);
685 ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
686 nvmet_ana_group_enabled[ns->anagrpid]++;
687 up_write(&nvmet_ana_sem);
690 ns->buffered_io = false;
691 ns->csi = NVME_CSI_NVM;
696 static void nvmet_update_sq_head(struct nvmet_req *req)
699 u32 old_sqhd, new_sqhd;
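		/*
		 * Advance sqhd without a lock; if another completion raced
		 * with us, recompute from the updated value and retry.
		 */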
701 old_sqhd = READ_ONCE(req->sq->sqhd);
703 new_sqhd = (old_sqhd + 1) % req->sq->size;
704 } while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
706 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
709 static void nvmet_set_error(struct nvmet_req *req, u16 status)
711 struct nvmet_ctrl *ctrl = req->sq->ctrl;
712 struct nvme_error_slot *new_error_slot;
715 req->cqe->status = cpu_to_le16(status << 1);
717 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
720 spin_lock_irqsave(&ctrl->error_lock, flags);
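	/* Error log entries live in a circular buffer indexed by err_counter. */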
723 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
725 new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
726 new_error_slot->sqid = cpu_to_le16(req->sq->qid);
727 new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
728 new_error_slot->status_field = cpu_to_le16(status << 1);
729 new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
730 new_error_slot->lba = cpu_to_le64(req->error_slba);
731 new_error_slot->nsid = req->cmd->common.nsid;
732 spin_unlock_irqrestore(&ctrl->error_lock, flags);
734 /* set the more bit for this request */
735 req->cqe->status |= cpu_to_le16(1 << 14);
738 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
740 struct nvmet_ns *ns = req->ns;
742 if (!req->sq->sqhd_disabled)
743 nvmet_update_sq_head(req);
744 req->cqe->sq_id = cpu_to_le16(req->sq->qid);
745 req->cqe->command_id = req->cmd->common.command_id;
747 if (unlikely(status))
748 nvmet_set_error(req, status);
750 trace_nvmet_req_complete(req);
752 req->ops->queue_response(req);
754 nvmet_put_namespace(ns);
757 void nvmet_req_complete(struct nvmet_req *req, u16 status)
759 struct nvmet_sq *sq = req->sq;
761 __nvmet_req_complete(req, status);
762 percpu_ref_put(&sq->ref);
764 EXPORT_SYMBOL_GPL(nvmet_req_complete);
766 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
773 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
783 static void nvmet_confirm_sq(struct percpu_ref *ref)
785 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
787 complete(&sq->confirm_done);
790 void nvmet_sq_destroy(struct nvmet_sq *sq)
792 struct nvmet_ctrl *ctrl = sq->ctrl;
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
798 if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
799 nvmet_async_events_failall(ctrl);
800 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
801 wait_for_completion(&sq->confirm_done);
802 wait_for_completion(&sq->free_done);
803 percpu_ref_exit(&sq->ref);
804 nvmet_auth_sq_free(sq);
		/*
		 * The teardown flow may take some time, and the host may not
		 * send us keep-alive during this period, hence reset the
		 * traffic based keep-alive timer so we don't trigger a
		 * controller teardown as a result of a keep-alive expiration.
		 */
813 ctrl->reset_tbkas = true;
814 sq->ctrl->sqs[sq->qid] = NULL;
815 nvmet_ctrl_put(ctrl);
816 sq->ctrl = NULL; /* allows reusing the queue later */
819 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
821 static void nvmet_sq_free(struct percpu_ref *ref)
823 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
825 complete(&sq->free_done);
828 int nvmet_sq_init(struct nvmet_sq *sq)
832 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
834 pr_err("percpu_ref init failed!\n");
837 init_completion(&sq->free_done);
838 init_completion(&sq->confirm_done);
839 nvmet_auth_sq_init(sq);
843 EXPORT_SYMBOL_GPL(nvmet_sq_init);
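
/*
 * Fail I/O directed at a namespace whose ANA group is not usable through
 * this port: inaccessible, persistent-loss and change states are reported
 * back to the host as the corresponding ANA status codes.
 */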
845 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
848 enum nvme_ana_state state = port->ana_state[ns->anagrpid];
850 if (unlikely(state == NVME_ANA_INACCESSIBLE))
851 return NVME_SC_ANA_INACCESSIBLE;
852 if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
853 return NVME_SC_ANA_PERSISTENT_LOSS;
854 if (unlikely(state == NVME_ANA_CHANGE))
855 return NVME_SC_ANA_TRANSITION;
859 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
861 if (unlikely(req->ns->readonly)) {
862 switch (req->cmd->common.opcode) {
867 return NVME_SC_NS_WRITE_PROTECTED;
874 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
876 struct nvme_command *cmd = req->cmd;
879 if (nvme_is_fabrics(cmd))
880 return nvmet_parse_fabrics_io_cmd(req);
882 if (unlikely(!nvmet_check_auth_status(req)))
883 return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
885 ret = nvmet_check_ctrl_status(req);
889 if (nvmet_is_passthru_req(req))
890 return nvmet_parse_passthru_io_cmd(req);
892 ret = nvmet_req_find_ns(req);
896 ret = nvmet_check_ana_state(req->port, req->ns);
898 req->error_loc = offsetof(struct nvme_common_command, nsid);
901 ret = nvmet_io_cmd_check_access(req);
903 req->error_loc = offsetof(struct nvme_common_command, nsid);
907 switch (req->ns->csi) {
910 return nvmet_file_parse_io_cmd(req);
911 return nvmet_bdev_parse_io_cmd(req);
913 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
914 return nvmet_bdev_zns_parse_io_cmd(req);
915 return NVME_SC_INVALID_IO_CMD_SET;
917 return NVME_SC_INVALID_IO_CMD_SET;
921 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
922 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
924 u8 flags = req->cmd->common.flags;
931 req->metadata_sg = NULL;
933 req->metadata_sg_cnt = 0;
934 req->transfer_len = 0;
935 req->metadata_len = 0;
936 req->cqe->status = 0;
937 req->cqe->sq_head = 0;
939 req->error_loc = NVMET_NO_ERROR_LOC;
942 /* no support for fused commands yet */
943 if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
944 req->error_loc = offsetof(struct nvme_common_command, flags);
945 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	/*
	 * For fabrics, the PSDT field shall describe a metadata pointer (MPTR)
	 * that contains the address of a single contiguous physical buffer
	 * that is byte aligned.
	 */
954 if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
955 req->error_loc = offsetof(struct nvme_common_command, flags);
956 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
960 if (unlikely(!req->sq->ctrl))
961 /* will return an error for any non-connect command: */
962 status = nvmet_parse_connect_cmd(req);
963 else if (likely(req->sq->qid != 0))
964 status = nvmet_parse_io_cmd(req);
966 status = nvmet_parse_admin_cmd(req);
971 trace_nvmet_req_init(req, req->cmd);
973 if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
974 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
979 sq->ctrl->reset_tbkas = true;
984 __nvmet_req_complete(req, status);
987 EXPORT_SYMBOL_GPL(nvmet_req_init);
989 void nvmet_req_uninit(struct nvmet_req *req)
991 percpu_ref_put(&req->sq->ref);
993 nvmet_put_namespace(req->ns);
995 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
997 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
999 if (unlikely(len != req->transfer_len)) {
1000 req->error_loc = offsetof(struct nvme_common_command, dptr);
1001 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
1007 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
1009 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
1011 if (unlikely(data_len > req->transfer_len)) {
1012 req->error_loc = offsetof(struct nvme_common_command, dptr);
1013 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
1020 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
1022 return req->transfer_len - req->metadata_len;
1025 static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
1026 struct nvmet_req *req)
1028 req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
1029 nvmet_data_transfer_len(req));
1033 if (req->metadata_len) {
1034 req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
1035 &req->metadata_sg_cnt, req->metadata_len);
1036 if (!req->metadata_sg)
1040 req->p2p_dev = p2p_dev;
1044 pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
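
/*
 * P2P memory is only used for I/O queue commands on controllers that
 * registered a peer-to-peer client device, and only for namespaces that
 * were mapped into the controller's p2p_ns_map at connect time.
 */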
1049 static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
1051 if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
1052 !req->sq->ctrl || !req->sq->qid || !req->ns)
1054 return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
1057 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1059 struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
1061 if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
1064 req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1066 if (unlikely(!req->sg))
1069 if (req->metadata_len) {
1070 req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1071 &req->metadata_sg_cnt);
1072 if (unlikely(!req->metadata_sg))
1082 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1084 void nvmet_req_free_sgls(struct nvmet_req *req)
1087 pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1088 if (req->metadata_sg)
1089 pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1090 req->p2p_dev = NULL;
1093 if (req->metadata_sg)
1094 sgl_free(req->metadata_sg);
1098 req->metadata_sg = NULL;
1100 req->metadata_sg_cnt = 0;
1102 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
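
/*
 * Helpers that extract individual fields (EN, CSS, MPS, AMS, SHN, IOSQES,
 * IOCQES) from the Controller Configuration register value written by the
 * host.
 */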
1104 static inline bool nvmet_cc_en(u32 cc)
1106 return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1109 static inline u8 nvmet_cc_css(u32 cc)
1111 return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1114 static inline u8 nvmet_cc_mps(u32 cc)
1116 return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1119 static inline u8 nvmet_cc_ams(u32 cc)
1121 return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1124 static inline u8 nvmet_cc_shn(u32 cc)
1126 return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1129 static inline u8 nvmet_cc_iosqes(u32 cc)
1131 return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1134 static inline u8 nvmet_cc_iocqes(u32 cc)
1136 return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1139 static inline bool nvmet_css_supported(u8 cc_css)
1141 switch (cc_css << NVME_CC_CSS_SHIFT) {
1142 case NVME_CC_CSS_NVM:
1143 case NVME_CC_CSS_CSI:
1150 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1152 lockdep_assert_held(&ctrl->lock);
	/*
	 * Only I/O controllers should verify iosqes and iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify that iosqes and iocqes are zeroed; however, that
	 * would break backwards compatibility, so don't enforce it.
	 */
1160 if (!nvmet_is_disc_subsys(ctrl->subsys) &&
1161 (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1162 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1163 ctrl->csts = NVME_CSTS_CFS;
1167 if (nvmet_cc_mps(ctrl->cc) != 0 ||
1168 nvmet_cc_ams(ctrl->cc) != 0 ||
1169 !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
1170 ctrl->csts = NVME_CSTS_CFS;
1174 ctrl->csts = NVME_CSTS_RDY;
	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and clean up
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
1183 mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
1186 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1188 lockdep_assert_held(&ctrl->lock);
1190 /* XXX: tear down queues? */
1191 ctrl->csts &= ~NVME_CSTS_RDY;
1195 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1199 mutex_lock(&ctrl->lock);
1203 if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1204 nvmet_start_ctrl(ctrl);
1205 if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1206 nvmet_clear_ctrl(ctrl);
1207 if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1208 nvmet_clear_ctrl(ctrl);
1209 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1211 if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1212 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1213 mutex_unlock(&ctrl->lock);
1216 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1218 /* command sets supported: NVMe command set: */
1219 ctrl->cap = (1ULL << 37);
1220 /* Controller supports one or more I/O Command Sets */
1221 ctrl->cap |= (1ULL << 43);
1222 /* CC.EN timeout in 500msec units: */
1223 ctrl->cap |= (15ULL << 24);
1224 /* maximum queue entries supported: */
1225 if (ctrl->ops->get_max_queue_size)
1226 ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
1228 ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1230 if (nvmet_is_passthru_subsys(ctrl->subsys))
1231 nvmet_passthrough_override_cap(ctrl);
1234 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1235 const char *hostnqn, u16 cntlid,
1236 struct nvmet_req *req)
1238 struct nvmet_ctrl *ctrl = NULL;
1239 struct nvmet_subsys *subsys;
1241 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1243 pr_warn("connect request for invalid subsystem %s!\n",
1245 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1249 mutex_lock(&subsys->lock);
1250 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1251 if (ctrl->cntlid == cntlid) {
1252 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1253 pr_warn("hostnqn mismatch.\n");
1256 if (!kref_get_unless_zero(&ctrl->ref))
1264 ctrl = NULL; /* ctrl not found */
1265 pr_warn("could not find controller %d for subsys %s / host %s\n",
1266 cntlid, subsysnqn, hostnqn);
1267 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1270 mutex_unlock(&subsys->lock);
1271 nvmet_subsys_put(subsys);
1276 u16 nvmet_check_ctrl_status(struct nvmet_req *req)
1278 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1279 pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1280 req->cmd->common.opcode, req->sq->qid);
1281 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1284 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1285 pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1286 req->cmd->common.opcode, req->sq->qid);
1287 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1290 if (unlikely(!nvmet_check_auth_status(req))) {
1291 pr_warn("qid %d not authenticated\n", req->sq->qid);
1292 return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
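
/*
 * A host may connect when the subsystem accepts any host, when it is the
 * discovery subsystem, or when the host NQN appears on the subsystem's
 * allowed hosts list.  Must be called with nvmet_config_sem held.
 */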
1297 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1299 struct nvmet_host_link *p;
1301 lockdep_assert_held(&nvmet_config_sem);
1303 if (subsys->allow_any_host)
1306 if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
1309 list_for_each_entry(p, &subsys->hosts, entry) {
1310 if (!strcmp(nvmet_host_name(p->host), hostnqn))
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
1320 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1321 struct nvmet_req *req)
1323 struct nvmet_ns *ns;
1326 if (!req->p2p_client)
1329 ctrl->p2p_client = get_device(req->p2p_client);
1331 xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1332 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
1338 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1340 struct radix_tree_iter iter;
1343 radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1344 pci_dev_put(radix_tree_deref_slot(slot));
1346 put_device(ctrl->p2p_client);
1349 static void nvmet_fatal_error_handler(struct work_struct *work)
1351 struct nvmet_ctrl *ctrl =
1352 container_of(work, struct nvmet_ctrl, fatal_err_work);
1354 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1355 ctrl->ops->delete_ctrl(ctrl);
1358 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1359 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1361 struct nvmet_subsys *subsys;
1362 struct nvmet_ctrl *ctrl;
1366 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1367 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1369 pr_warn("connect request for invalid subsystem %s!\n",
1371 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1372 req->error_loc = offsetof(struct nvme_common_command, dptr);
1376 down_read(&nvmet_config_sem);
1377 if (!nvmet_host_allowed(subsys, hostnqn)) {
1378 pr_info("connect by host %s for subsystem %s not allowed\n",
1379 hostnqn, subsysnqn);
1380 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1381 up_read(&nvmet_config_sem);
1382 status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1383 req->error_loc = offsetof(struct nvme_common_command, dptr);
1384 goto out_put_subsystem;
1386 up_read(&nvmet_config_sem);
1388 status = NVME_SC_INTERNAL;
1389 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1391 goto out_put_subsystem;
1392 mutex_init(&ctrl->lock);
1394 ctrl->port = req->port;
1395 ctrl->ops = req->ops;
1397 #ifdef CONFIG_NVME_TARGET_PASSTHRU
	/* Set loop targets to clear IDS by default */
1399 if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
1400 subsys->clear_ids = 1;
1403 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1404 INIT_LIST_HEAD(&ctrl->async_events);
1405 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1406 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1407 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1409 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1410 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1412 kref_init(&ctrl->ref);
1413 ctrl->subsys = subsys;
1414 nvmet_init_cap(ctrl);
1415 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1417 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1418 sizeof(__le32), GFP_KERNEL);
1419 if (!ctrl->changed_ns_list)
1422 ctrl->sqs = kcalloc(subsys->max_qid + 1,
1423 sizeof(struct nvmet_sq *),
1426 goto out_free_changed_ns_list;
1428 ret = ida_alloc_range(&cntlid_ida,
1429 subsys->cntlid_min, subsys->cntlid_max,
1432 status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	/*
	 * Discovery controllers may use some arbitrarily high value in order
	 * to clean up stale discovery sessions.
	 */
1441 if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
1442 kato = NVMET_DISC_KATO_MS;
1444 /* keep-alive timeout in seconds */
1445 ctrl->kato = DIV_ROUND_UP(kato, 1000);
1447 ctrl->err_counter = 0;
1448 spin_lock_init(&ctrl->error_lock);
1450 nvmet_start_keep_alive_timer(ctrl);
1452 mutex_lock(&subsys->lock);
1453 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1454 nvmet_setup_p2p_ns_map(ctrl, req);
1455 mutex_unlock(&subsys->lock);
1462 out_free_changed_ns_list:
1463 kfree(ctrl->changed_ns_list);
1467 nvmet_subsys_put(subsys);
1472 static void nvmet_ctrl_free(struct kref *ref)
1474 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1475 struct nvmet_subsys *subsys = ctrl->subsys;
1477 mutex_lock(&subsys->lock);
1478 nvmet_release_p2p_ns_map(ctrl);
1479 list_del(&ctrl->subsys_entry);
1480 mutex_unlock(&subsys->lock);
1482 nvmet_stop_keep_alive_timer(ctrl);
1484 flush_work(&ctrl->async_event_work);
1485 cancel_work_sync(&ctrl->fatal_err_work);
1487 nvmet_destroy_auth(ctrl);
1489 ida_free(&cntlid_ida, ctrl->cntlid);
1491 nvmet_async_events_free(ctrl);
1493 kfree(ctrl->changed_ns_list);
1496 nvmet_subsys_put(subsys);
1499 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1501 kref_put(&ctrl->ref, nvmet_ctrl_free);
1504 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1506 mutex_lock(&ctrl->lock);
1507 if (!(ctrl->csts & NVME_CSTS_CFS)) {
1508 ctrl->csts |= NVME_CSTS_CFS;
1509 queue_work(nvmet_wq, &ctrl->fatal_err_work);
1511 mutex_unlock(&ctrl->lock);
1513 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1515 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1516 const char *subsysnqn)
1518 struct nvmet_subsys_link *p;
1523 if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1524 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1526 return nvmet_disc_subsys;
1529 down_read(&nvmet_config_sem);
1530 list_for_each_entry(p, &port->subsystems, entry) {
1531 if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1533 if (!kref_get_unless_zero(&p->subsys->ref))
1535 up_read(&nvmet_config_sem);
1539 up_read(&nvmet_config_sem);
1543 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1544 enum nvme_subsys_type type)
1546 struct nvmet_subsys *subsys;
1547 char serial[NVMET_SN_MAX_SIZE / 2];
1550 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1552 return ERR_PTR(-ENOMEM);
1554 subsys->ver = NVMET_DEFAULT_VS;
1555 /* generate a random serial number as our controllers are ephemeral: */
1556 get_random_bytes(&serial, sizeof(serial));
1557 bin2hex(subsys->serial, &serial, sizeof(serial));
1559 subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
1560 if (!subsys->model_number) {
1565 subsys->ieee_oui = 0;
1567 subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
1568 if (!subsys->firmware_rev) {
1575 subsys->max_qid = NVMET_NR_QUEUES;
1579 subsys->max_qid = 0;
1582 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1586 subsys->type = type;
1587 subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1589 if (!subsys->subsysnqn) {
1593 subsys->cntlid_min = NVME_CNTLID_MIN;
1594 subsys->cntlid_max = NVME_CNTLID_MAX;
1595 kref_init(&subsys->ref);
1597 mutex_init(&subsys->lock);
1598 xa_init(&subsys->namespaces);
1599 INIT_LIST_HEAD(&subsys->ctrls);
1600 INIT_LIST_HEAD(&subsys->hosts);
1605 kfree(subsys->firmware_rev);
1607 kfree(subsys->model_number);
1610 return ERR_PTR(ret);
1613 static void nvmet_subsys_free(struct kref *ref)
1615 struct nvmet_subsys *subsys =
1616 container_of(ref, struct nvmet_subsys, ref);
1618 WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1620 xa_destroy(&subsys->namespaces);
1621 nvmet_passthru_subsys_free(subsys);
1623 kfree(subsys->subsysnqn);
1624 kfree(subsys->model_number);
1625 kfree(subsys->firmware_rev);
1629 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1631 struct nvmet_ctrl *ctrl;
1633 mutex_lock(&subsys->lock);
1634 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1635 ctrl->ops->delete_ctrl(ctrl);
1636 mutex_unlock(&subsys->lock);
1639 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1641 kref_put(&subsys->ref, nvmet_subsys_free);
1644 static int __init nvmet_init(void)
1646 int error = -ENOMEM;
1648 nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1650 nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
1651 NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
1652 SLAB_HWCACHE_ALIGN, NULL);
1653 if (!nvmet_bvec_cache)
1656 zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
1658 goto out_destroy_bvec_cache;
1660 buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1662 if (!buffered_io_wq)
1663 goto out_free_zbd_work_queue;
1665 nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
1667 goto out_free_buffered_work_queue;
1669 error = nvmet_init_discovery();
1671 goto out_free_nvmet_work_queue;
1673 error = nvmet_init_configfs();
1675 goto out_exit_discovery;
1679 nvmet_exit_discovery();
1680 out_free_nvmet_work_queue:
1681 destroy_workqueue(nvmet_wq);
1682 out_free_buffered_work_queue:
1683 destroy_workqueue(buffered_io_wq);
1684 out_free_zbd_work_queue:
1685 destroy_workqueue(zbd_wq);
1686 out_destroy_bvec_cache:
1687 kmem_cache_destroy(nvmet_bvec_cache);
1691 static void __exit nvmet_exit(void)
1693 nvmet_exit_configfs();
1694 nvmet_exit_discovery();
1695 ida_destroy(&cntlid_ida);
1696 destroy_workqueue(nvmet_wq);
1697 destroy_workqueue(buffered_io_wq);
1698 destroy_workqueue(zbd_wq);
1699 kmem_cache_destroy(nvmet_bvec_cache);
1701 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1702 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1705 module_init(nvmet_init);
1706 module_exit(nvmet_exit);
1708 MODULE_DESCRIPTION("NVMe target core framework");
1709 MODULE_LICENSE("GPL v2");