// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;
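
/*
 * Generation counter for the discovery log page. It is mirrored into the
 * GENCTR field of the log page header so hosts can detect that the log
 * changed between reads. Bumped under nvmet_config_sem whenever a port or
 * subsystem change invalidates previously returned log pages.
 */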
static u64 nvmet_genctr;

static void __nvmet_disc_changed(struct nvmet_port *port,
                                 struct nvmet_ctrl *ctrl)
{
        if (ctrl->port != port)
                return;

        if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
                return;

        nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
                              NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}
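
/*
 * Notify all connected discovery controllers that the configuration visible
 * through @port changed, optionally restricted to the hosts allowed by
 * @subsys.
 */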
void nvmet_port_disc_changed(struct nvmet_port *port,
                             struct nvmet_subsys *subsys)
{
        struct nvmet_ctrl *ctrl;

        lockdep_assert_held(&nvmet_config_sem);
        nvmet_genctr++;

        mutex_lock(&nvmet_disc_subsys->lock);
        list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
                if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
                        continue;

                __nvmet_disc_changed(port, ctrl);
        }
        mutex_unlock(&nvmet_disc_subsys->lock);

        /* If transport can signal change, notify transport */
        if (port->tr_ops && port->tr_ops->discovery_chg)
                port->tr_ops->discovery_chg(port);
}

static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
                                        struct nvmet_subsys *subsys,
                                        struct nvmet_host *host)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&nvmet_disc_subsys->lock);
        list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
                if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
                        continue;

                __nvmet_disc_changed(port, ctrl);
        }
        mutex_unlock(&nvmet_disc_subsys->lock);
}
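
/*
 * Notify discovery controllers that @subsys changed, on every port that
 * exports it, optionally restricted to the single host @host.
 */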
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
                               struct nvmet_host *host)
{
        struct nvmet_port *port;
        struct nvmet_subsys_link *s;

        lockdep_assert_held(&nvmet_config_sem);
        nvmet_genctr++;

        list_for_each_entry(port, nvmet_ports, global_entry)
                list_for_each_entry(s, &port->subsystems, entry) {
                        if (s->subsys != subsys)
                                continue;
                        __nvmet_subsys_disc_changed(port, subsys, host);
                }
}
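
/*
 * Referrals point hosts at additional discovery ports; each enabled
 * referral is reported in the discovery log as an entry of its own.
 */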
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
        down_write(&nvmet_config_sem);
        if (list_empty(&port->entry)) {
                list_add_tail(&port->entry, &parent->referrals);
                port->enabled = true;
                nvmet_port_disc_changed(parent, NULL);
        }
        up_write(&nvmet_config_sem);
}

void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
        down_write(&nvmet_config_sem);
        if (!list_empty(&port->entry)) {
                port->enabled = false;
                list_del_init(&port->entry);
                nvmet_port_disc_changed(parent, NULL);
        }
        up_write(&nvmet_config_sem);
}
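
/*
 * Fill one discovery log page entry. Each entry describes a single
 * (subsystem NQN, transport address) pair a host may connect to;
 * @numrec is the zero-based index of the entry being written.
 */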
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
                struct nvmet_port *port, char *subsys_nqn, char *traddr,
                u8 type, u32 numrec)
{
        struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

        e->trtype = port->disc_addr.trtype;
        e->adrfam = port->disc_addr.adrfam;
        e->treq = port->disc_addr.treq;
        e->portid = port->disc_addr.portid;
        /* we support only dynamic controllers */
        e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
        e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
        e->subtype = type;
        memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
        memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
        memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
        strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g RDMA) can listen on "any" ipv4/ipv6 addresses
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it. This callback will set the discovery traddr
 * from the req->port address in case the port in question listens
 * on an "any" IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
                char *traddr)
{
        if (req->ops->disc_traddr)
                req->ops->disc_traddr(req, port, traddr);
        else
                memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
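
/*
 * Count the log entries the current host is entitled to see: one for the
 * discovery subsystem itself, one per allowed subsystem on this port, and
 * one per referral.
 */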
static size_t discovery_log_entries(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_subsys_link *p;
        struct nvmet_port *r;
        size_t entries = 1;

        list_for_each_entry(p, &req->port->subsystems, entry) {
                if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
                        continue;
                entries++;
        }
        list_for_each_entry(r, &req->port->referrals, entry)
                entries++;
        return entries;
}
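
/*
 * Get Log Page (Discovery) handler: validate the request, build the full
 * log under nvmet_config_sem, then copy the host-requested window
 * (offset/length) out of the kernel buffer.
 */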
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
        const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmf_disc_rsp_page_hdr *hdr;
        u64 offset = nvmet_get_log_page_offset(req->cmd);
        size_t data_len = nvmet_get_log_page_len(req->cmd);
        size_t alloc_len;
        struct nvmet_subsys_link *p;
        struct nvmet_port *r;
        u32 numrec = 0;
        u16 status = 0;
        void *buffer;
        char traddr[NVMF_TRADDR_SIZE];

        if (!nvmet_check_transfer_len(req, data_len))
                return;

        if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
                req->error_loc =
                        offsetof(struct nvme_get_log_page_command, lid);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out;
        }

        /* Spec requires dword aligned offsets */
        if (offset & 0x3) {
                req->error_loc =
                        offsetof(struct nvme_get_log_page_command, lpo);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out;
        }

        /*
         * Make sure we're passing at least a buffer of response header size.
         * If host provided data len is less than the header size, only the
         * number of bytes requested by host will be sent to host.
         */
        down_read(&nvmet_config_sem);
        alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
        buffer = kzalloc(alloc_len, GFP_KERNEL);
        if (!buffer) {
                up_read(&nvmet_config_sem);
                status = NVME_SC_INTERNAL;
                goto out;
        }
        hdr = buffer;

        nvmet_set_disc_traddr(req, req->port, traddr);

        nvmet_format_discovery_entry(hdr, req->port,
                                     nvmet_disc_subsys->subsysnqn,
                                     traddr, NVME_NQN_CURR, numrec);
        numrec++;

        list_for_each_entry(p, &req->port->subsystems, entry) {
                if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
                        continue;

                nvmet_format_discovery_entry(hdr, req->port,
                                p->subsys->subsysnqn, traddr,
                                NVME_NQN_NVME, numrec);
                numrec++;
        }

        list_for_each_entry(r, &req->port->referrals, entry) {
                nvmet_format_discovery_entry(hdr, r,
                                NVME_DISC_SUBSYS_NAME,
                                r->disc_addr.traddr,
                                NVME_NQN_DISC, numrec);
                numrec++;
        }

        hdr->genctr = cpu_to_le64(nvmet_genctr);
        hdr->numrec = cpu_to_le64(numrec);
        hdr->recfmt = cpu_to_le16(0);

        nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

        up_read(&nvmet_config_sem);

        status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
        kfree(buffer);
out:
        nvmet_req_complete(req, status);
}
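
/*
 * Identify Controller handler. A discovery controller reports itself
 * with CNTRLTYPE "discovery" and only supports the controller data
 * structure (CNS 0x01).
 */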
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u16 status = 0;

        if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
                return;

        if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
                req->error_loc = offsetof(struct nvme_identify, cns);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
        memset(id->fr, ' ', sizeof(id->fr));
        memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
                       strlen(ctrl->subsys->model_number), ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        id->cntrltype = NVME_CTRL_DISC;

        /* no limit on data transfer sizes for now */
        id->mdts = 0;
        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);
        id->lpa = (1 << 2);     /* Get Log Page with offset (LPO) supported */

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->flags & NVMF_KEYED_SGLS)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

        strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}
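
/*
 * Discovery controllers only implement the Keep Alive Timer and
 * Asynchronous Event Configuration features.
 */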
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 stat;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_KATO:
                stat = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                stat = nvmet_set_feat_async_event(req,
                                                  NVMET_DISC_AEN_CFG_OPTIONAL);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, stat);
}

static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 stat = 0;

        if (!nvmet_check_transfer_len(req, 0))
                return;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, stat);
}
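
/*
 * Admin command parser for discovery controllers. Only the minimal
 * command set required by the spec is accepted; anything else fails
 * with Invalid Opcode, as does any command issued before the
 * controller is enabled (CSTS.RDY).
 */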
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
                pr_err("got cmd %d while not ready\n",
                       cmd->common.opcode);
                req->error_loc =
                        offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        switch (cmd->common.opcode) {
        case nvme_admin_set_features:
                req->execute = nvmet_execute_disc_set_features;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_disc_get_features;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                return 0;
        case nvme_admin_get_log_page:
                req->execute = nvmet_execute_disc_get_log_page;
                return 0;
        case nvme_admin_identify:
                req->execute = nvmet_execute_disc_identify;
                return 0;
        default:
                pr_debug("unhandled cmd %d\n", cmd->common.opcode);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}

int __init nvmet_init_discovery(void)
{
        nvmet_disc_subsys =
                nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
        return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

void nvmet_exit_discovery(void)
{
        nvmet_subsys_put(nvmet_disc_subsys);
}