// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"
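
/*
 * Transfer length of a Get Log Page command: NUMDU/NUMDL form a 0's based
 * dword count, so e.g. NUMDU = 0 and NUMDL = 0x3ff yield
 * (0x3ff + 1) * 4 = 4096 bytes.
 */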
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}
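
/*
 * Error Information log page: copy up to NVMET_ERROR_LOG_SLOTS entries out of
 * the per-controller ring, starting at the current slot and walking backwards
 * so that the most recently recorded error is reported first.
 */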
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}
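
/*
 * SMART data for NSID 0xffffffff: aggregate the block device statistics of
 * every namespace in the subsystem.  Data units follow the SMART log
 * convention of 512-byte sectors transferred, divided by 1000 and rounded up.
 */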
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}
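
/*
 * Changed Namespace List log page: report the NSIDs recorded since the log
 * was last read (or a single 0xffffffff entry if the list overflowed),
 * zero-fill the rest of the page, then reset the list and clear the
 * namespace attribute AEN bit.
 */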
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}
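
/*
 * Fill a single ANA group descriptor.  Unless the host asked for "return
 * groups only" (RGO), list the NSIDs of all namespaces that belong to the
 * group.  Returns the size of the descriptor in bytes.
 */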
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}
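
/*
 * ANA log page: emit one descriptor per enabled ANA group.  The header is
 * copied last, once the final group count is known.
 */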
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
			GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
		 req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}
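
/*
 * Identify Controller: most fields are synthesized from subsystem and
 * transport state; values we have no useful backing for (vendor IDs, IEEE
 * OUI, power states) are left as zeroes or faked up.
 */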
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
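
/*
 * Identify Namespace.  If the NSID does not map to an active namespace the
 * command still succeeds and returns an all-zeroes data structure.
 */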
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}
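
/*
 * Append a single Namespace Identification Descriptor (header plus
 * identifier payload) at *off and advance the offset past it.  Used below to
 * build the Namespace Identification Descriptor list.
 */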
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
	switch (req->cmd->identify.csi) {
	case NVME_CSI_NVM:
		nvmet_execute_identify_desclist(req);
		return true;
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			nvmet_execute_identify_desclist(req);
			return true;
		}
		return false;
	default:
		return false;
	}
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ns(req);
		}
		break;
	case NVME_ID_CNS_CS_NS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ns(req);
			}
		}
		break;
	case NVME_ID_CNS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ctrl(req);
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ctrl(req);
			}
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_nslist(req);
		}
		break;
	case NVME_ID_CNS_NS_DESC_LIST:
		if (nvmet_handle_identify_desclist(req) == true)
			return;
		break;
	}

	nvmet_req_cns_error_complete(req);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
 * wasn't successful.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}
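
/*
 * KATO is carried in milliseconds in CDW11; store it rounded up to seconds
 * and restart the keep-alive timer with the new value.
 */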
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
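
/*
 * Async Event Request: stash the command until an event fires.  At most
 * NVMET_ASYNC_EVENTS commands may be outstanding per controller; any extra
 * request is failed with ASYNC_LIMIT.
 */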
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}
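
/*
 * Admin command dispatch: fabrics and discovery commands have their own
 * parsers, passthru controllers forward to the passthru handler, and
 * everything else is matched against the opcodes implemented above.
 */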
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}