Merge tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b20b8d0a114416b3e9643dcc7bbf65ea6a01e910..ac7210a3ea1c54c734c83cef69d9c6604191490f 100644
@@ -16,6 +16,8 @@
 #include "nvmet.h"
 
 struct workqueue_struct *buffered_io_wq;
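+/* workqueue for deferred zone management work done by the ZNS backend */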
+struct workqueue_struct *zbd_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
@@ -43,43 +44,34 @@ DECLARE_RWSEM(nvmet_ana_sem);
 
 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
 {
-       u16 status;
-
        switch (errno) {
        case 0:
-               status = NVME_SC_SUCCESS;
-               break;
+               return NVME_SC_SUCCESS;
        case -ENOSPC:
                req->error_loc = offsetof(struct nvme_rw_command, length);
-               status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
-               break;
+               return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
        case -EREMOTEIO:
                req->error_loc = offsetof(struct nvme_rw_command, slba);
-               status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
-               break;
+               return NVME_SC_LBA_RANGE | NVME_SC_DNR;
        case -EOPNOTSUPP:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                switch (req->cmd->common.opcode) {
                case nvme_cmd_dsm:
                case nvme_cmd_write_zeroes:
-                       status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
-                       break;
+                       return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
                default:
-                       status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+                       return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
                break;
        case -ENODATA:
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
-               status = NVME_SC_ACCESS_DENIED;
-               break;
+               return NVME_SC_ACCESS_DENIED;
        case -EIO:
                fallthrough;
        default:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
-               status = NVME_SC_INTERNAL | NVME_SC_DNR;
+               return NVME_SC_INTERNAL | NVME_SC_DNR;
        }
-
-       return status;
 }
 
 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
@@ -122,11 +114,11 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
        return 0;
 }
 
-static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
+static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
 {
-       unsigned long nsid = 0;
        struct nvmet_ns *cur;
        unsigned long idx;
+       u32 nsid = 0;
 
        xa_for_each(&subsys->namespaces, idx, cur)
                nsid = cur->nsid;
@@ -141,14 +133,13 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
 
 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
 {
-       u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
        struct nvmet_req *req;
 
        mutex_lock(&ctrl->lock);
        while (ctrl->nr_async_event_cmds) {
                req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
                mutex_unlock(&ctrl->lock);
-               nvmet_req_complete(req, status);
+               nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
                mutex_lock(&ctrl->lock);
        }
        mutex_unlock(&ctrl->lock);
@@ -412,7 +403,6 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
        pr_debug("ctrl %d start keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);
 
-       INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 
@@ -693,6 +683,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 
        uuid_gen(&ns->uuid);
        ns->buffered_io = false;
+       ns->csi = NVME_CSI_NVM;
 
        return ns;
 }
@@ -895,10 +886,19 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
                return ret;
        }
 
-       if (req->ns->file)
-               return nvmet_file_parse_io_cmd(req);
-
-       return nvmet_bdev_parse_io_cmd(req);
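+       /* parsing is dispatched on the namespace's command set identifier (CSI) */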
+       switch (req->ns->csi) {
+       case NVME_CSI_NVM:
+               if (req->ns->file)
+                       return nvmet_file_parse_io_cmd(req);
+               return nvmet_bdev_parse_io_cmd(req);
+       case NVME_CSI_ZNS:
+               if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+                       return nvmet_bdev_zns_parse_io_cmd(req);
+               return NVME_SC_INVALID_IO_CMD_SET;
+       default:
+               return NVME_SC_INVALID_IO_CMD_SET;
+       }
 }
 
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
@@ -1119,6 +1118,18 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
        return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
 }
 
+static inline bool nvmet_css_supported(u8 cc_css)
+{
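+       /* the CSS field is passed in unshifted; shift it back to match NVME_CC_CSS_* */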
+       switch (cc_css << NVME_CC_CSS_SHIFT) {
+       case NVME_CC_CSS_NVM:
+       case NVME_CC_CSS_CSI:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
 {
        lockdep_assert_held(&ctrl->lock);
@@ -1138,7 +1148,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
 
        if (nvmet_cc_mps(ctrl->cc) != 0 ||
            nvmet_cc_ams(ctrl->cc) != 0 ||
-           nvmet_cc_css(ctrl->cc) != 0) {
+           !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
                ctrl->csts = NVME_CSTS_CFS;
                return;
        }
@@ -1189,6 +1199,8 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
 {
        /* command sets supported: NVMe command set: */
        ctrl->cap = (1ULL << 37);
+       /* Controller supports one or more I/O Command Sets */
+       ctrl->cap |= (1ULL << 43);
        /* CC.EN timeout in 500msec units: */
        ctrl->cap |= (15ULL << 24);
        /* maximum queue entries supported: */
@@ -1358,6 +1370,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        INIT_LIST_HEAD(&ctrl->async_events);
        INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
        INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
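+       /* ka_work is set up once here and armed by nvmet_start_keep_alive_timer() */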
+       INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
 
        memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
        memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1499,6 +1512,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                enum nvme_subsys_type type)
 {
        struct nvmet_subsys *subsys;
+       char serial[NVMET_SN_MAX_SIZE / 2];
+       int ret;
 
        subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
        if (!subsys)
@@ -1506,7 +1521,15 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 
        subsys->ver = NVMET_DEFAULT_VS;
        /* generate a random serial number as our controllers are ephemeral: */
-       get_random_bytes(&subsys->serial, sizeof(subsys->serial));
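+       /* hex-encode the random bytes to keep the serial number printable ASCII */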
+       get_random_bytes(&serial, sizeof(serial));
+       bin2hex(subsys->serial, &serial, sizeof(serial));
+
+       subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
+       if (!subsys->model_number) {
+               ret = -ENOMEM;
+               goto free_subsys;
+       }
 
        switch (type) {
        case NVME_NQN_NVME:
@@ -1517,15 +1539,15 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                break;
        default:
                pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
-               kfree(subsys);
-               return ERR_PTR(-EINVAL);
+               ret = -EINVAL;
+               goto free_mn;
        }
        subsys->type = type;
        subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
                        GFP_KERNEL);
        if (!subsys->subsysnqn) {
-               kfree(subsys);
-               return ERR_PTR(-ENOMEM);
+               ret = -ENOMEM;
+               goto free_mn;
        }
        subsys->cntlid_min = NVME_CNTLID_MIN;
        subsys->cntlid_max = NVME_CNTLID_MAX;
@@ -1537,6 +1559,12 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
        INIT_LIST_HEAD(&subsys->hosts);
 
        return subsys;
+
+free_mn:
+       kfree(subsys->model_number);
+free_subsys:
+       kfree(subsys);
+       return ERR_PTR(ret);
 }
 
 static void nvmet_subsys_free(struct kref *ref)
@@ -1575,11 +1603,15 @@ static int __init nvmet_init(void)
 
        nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
 
+       zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
+       if (!zbd_wq)
+               return -ENOMEM;
+
        buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
                        WQ_MEM_RECLAIM, 0);
        if (!buffered_io_wq) {
                error = -ENOMEM;
-               goto out;
+               goto out_free_zbd_work_queue;
        }
 
        error = nvmet_init_discovery();
@@ -1595,7 +1627,8 @@ out_exit_discovery:
        nvmet_exit_discovery();
 out_free_work_queue:
        destroy_workqueue(buffered_io_wq);
-out:
+out_free_zbd_work_queue:
+       destroy_workqueue(zbd_wq);
        return error;
 }
 
@@ -1605,6 +1638,7 @@ static void __exit nvmet_exit(void)
        nvmet_exit_discovery();
        ida_destroy(&cntlid_ida);
        destroy_workqueue(buffered_io_wq);
+       destroy_workqueue(zbd_wq);
 
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
        BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);