// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
{
	/*
	 * Multiple command set support can only be declared if the underlying
	 * controller actually supports it.
	 */
	if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
		ctrl->cap &= ~(1ULL << 43);
}
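
/*
 * The Identify Controller data returned by the passthru device describes
 * the underlying (e.g. PCIe) controller, so the fabric- and target-specific
 * fields (cntlid, ver, mdts, kas, sgls, subnqn, ioccsz/iorcsz, msdbd, cmic,
 * oncs, ...) are overridden below before the data is copied back to the
 * host.
 */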
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	unsigned int max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * ensure mdts is limited to the pages equal to the number of segments.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	/*
	 * nvmet_passthru_map_sg is limited to using a single bio so limit
	 * the mdts based on BIO_MAX_VECS as well
	 */
	max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
				      max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	/*
	 * mdts is a power of two in units of the controller's minimum
	 * memory page size (CAP.MPSMIN), hence the conversion from
	 * 512-byte sectors via ilog2(max_hw_sectors) + 9 - page_shift.
	 */
	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

	/* zero-based: allow up to 4 outstanding abort commands */
	id->acl = 3;
	/*
	 * We export the aerl limit of the fabrics controller; update this
	 * when passthru-based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas as most PCIe ctrls don't have support for kas */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;

	/* clamp to the 64-byte SQEs and 16-byte CQEs that fabrics requires */
	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fused commands */
	id->fuses = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When a passthru controller is set up using the nvme-loop transport
	 * it would export the passthru ctrl's subsysnqn (i.e. that of the
	 * PCIe NVMe ctrl) and would then fail in nvme/host/core.c in the
	 * nvme_init_subsystem()->nvme_active_ctrl() code path with a
	 * duplicate ctrl subsysnqn. To prevent that, mask the passthru-ctrl
	 * subsysnqn with the target ctrl subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}
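
/*
 * Identify Namespace data is rewritten as well: LBA formats that carry
 * metadata are masked out because the passthru target does not support
 * per-block metadata.
 */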
static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

	/* zero out any LBA format that carries metadata */
	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	/* clear the extended-LBA (metadata at end of LBA) bit */
	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
	id->mc = 0;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}
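
/*
 * Work item used by nvmet_passthru_execute_cmd() for commands that have
 * effects or need their result overridden: nvme_passthru_end() may sleep,
 * so such commands must execute in process context rather than in the
 * request completion callback.
 */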
static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	int status;

	status = nvme_execute_passthru_rq(rq);

	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		}
	} else if (status < 0)
		status = NVME_SC_INTERNAL;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);
}

static void nvmet_passthru_req_done(struct request *rq,
				    blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
}
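
/*
 * Map the nvmet request's scatterlist onto a single bio attached to the
 * passthru request. Using one bio keeps the mapping simple but limits a
 * command to BIO_MAX_VECS segments, which is why mdts is clamped in
 * nvmet_passthru_override_id_ctrl().
 */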
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	struct scatterlist *sg;
	struct bio *bio;
	int i;

	if (req->sg_cnt > BIO_MAX_VECS)
		return -EINVAL;

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->p.inline_bio;
		bio_init(bio, NULL, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), req_op(rq));
	} else {
		bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
				GFP_KERNEL);
		bio->bi_end_io = bio_put;
	}

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			nvmet_req_bio_put(req, bio);
			return -EINVAL;
		}
	}

	blk_rq_bio_prep(rq, bio, req->sg_cnt);

	return 0;
}
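
/*
 * Build and issue the passthru request: I/O commands go to the
 * namespace's queue, admin commands to the admin queue. Completion is
 * either inline via blk_execute_rq_nowait() or, for commands with
 * effects, deferred to nvmet_passthru_execute_cmd_work().
 */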
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	unsigned int timeout;
	u32 effects;
	u16 status;
	int ret;

	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
		timeout = nvmet_req_subsys(req)->io_timeout;
	} else {
		timeout = nvmet_req_subsys(req)->admin_timeout;
	}

	rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}
	nvme_init_request(rq, req->cmd);

	if (timeout)
		rq->timeout = timeout;

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If there are effects for the command we are about to execute, or
	 * an end_req function, we need to use nvme_execute_passthru_rq()
	 * synchronously in a work item, since the end_req function and
	 * nvme_passthru_end() can't be called from the request done callback,
	 * which typically runs in interrupt context.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue || effects) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		schedule_work(&req->p.work);
	} else {
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * We need to emulate Set Features for host behaviour to ensure that any
 * behaviour requested by the target's host matches the behaviour requested
 * by the device's host, and fail otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

	/* two copies: host[0] = device's current value, host[1] = host's request */
	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}
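
/*
 * Default dispatch for a passed-through command: execute it on the
 * underlying controller directly, without the workqueue detour.
 */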
static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-SGL flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * reservations.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list
 * below are passed down to the controller. This function implements the
 * allowlist for both Get Features and Set Features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
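
/*
 * Admin commands are checked against an allowlist: opcodes are either
 * emulated locally, passed through, or rejected, since an unknown admin
 * command could change controller state in ways the target cannot track.
 */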
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-SGL flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Pass through all vendor-specific commands.
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep-alive cmd, so route
		 * keep-alive to the non-passthru mode. Change this code once
		 * PCIe ctrls with keep-alive support become available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
	case nvme_admin_identify:
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_CTRL:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_NS:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return nvmet_report_invalid_opcode(req);
	}
}
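
/*
 * Called when a passthru subsystem is enabled via configfs: resolve the
 * nvme controller behind passthru_ctrl_path and bind it to the subsystem.
 * The xarray keyed by cntlid ensures a controller is claimed by at most
 * one passthru subsystem at a time.
 */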
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	struct file *file;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
		goto out_unlock;
	}

	file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_unlock;
	}

	ctrl = nvme_ctrl_from_file(file);
	if (!ctrl) {
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);
		goto out_put_file;
	}

	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_file;
	}

	if (old)
		goto out_put_file;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}
	nvme_get_ctrl(ctrl);
	__module_get(subsys->passthru_ctrl->ops->module);
	ret = 0;

out_put_file:
	filp_close(file, NULL);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
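
/*
 * Undo nvmet_passthru_ctrl_enable(): drop the xarray claim, the module
 * pin and the controller reference. Caller must hold subsys->lock.
 */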
static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		module_put(subsys->passthru_ctrl->ops->module);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
	kfree(subsys->passthru_ctrl_path);
	subsys->passthru_ctrl_path = NULL;
}