Merge tag 'nvme-6.9-2024-03-21' of git://git.infradead.org/nvme into block-6.9
drivers/nvme/host/core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/compat.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/hdreg.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/slab.h>
18 #include <linux/types.h>
19 #include <linux/pr.h>
20 #include <linux/ptrace.h>
21 #include <linux/nvme_ioctl.h>
22 #include <linux/pm_qos.h>
23 #include <linux/ratelimit.h>
24 #include <asm/unaligned.h>
25
26 #include "nvme.h"
27 #include "fabrics.h"
28 #include <linux/nvme-auth.h>
29
30 #define CREATE_TRACE_POINTS
31 #include "trace.h"
32
33 #define NVME_MINORS             (1U << MINORBITS)
34
35 struct nvme_ns_info {
36         struct nvme_ns_ids ids;
37         u32 nsid;
38         __le32 anagrpid;
39         bool is_shared;
40         bool is_readonly;
41         bool is_ready;
42         bool is_removed;
43 };
44
45 unsigned int admin_timeout = 60;
46 module_param(admin_timeout, uint, 0644);
47 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
48 EXPORT_SYMBOL_GPL(admin_timeout);
49
50 unsigned int nvme_io_timeout = 30;
51 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
52 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
53 EXPORT_SYMBOL_GPL(nvme_io_timeout);
54
55 static unsigned char shutdown_timeout = 5;
56 module_param(shutdown_timeout, byte, 0644);
57 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
58
59 static u8 nvme_max_retries = 5;
60 module_param_named(max_retries, nvme_max_retries, byte, 0644);
61 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
62
63 static unsigned long default_ps_max_latency_us = 100000;
64 module_param(default_ps_max_latency_us, ulong, 0644);
65 MODULE_PARM_DESC(default_ps_max_latency_us,
66                  "max power saving latency for new devices; use PM QOS to change per device");
67
68 static bool force_apst;
69 module_param(force_apst, bool, 0644);
70 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
71
72 static unsigned long apst_primary_timeout_ms = 100;
73 module_param(apst_primary_timeout_ms, ulong, 0644);
74 MODULE_PARM_DESC(apst_primary_timeout_ms,
75         "primary APST timeout in ms");
76
77 static unsigned long apst_secondary_timeout_ms = 2000;
78 module_param(apst_secondary_timeout_ms, ulong, 0644);
79 MODULE_PARM_DESC(apst_secondary_timeout_ms,
80         "secondary APST timeout in ms");
81
82 static unsigned long apst_primary_latency_tol_us = 15000;
83 module_param(apst_primary_latency_tol_us, ulong, 0644);
84 MODULE_PARM_DESC(apst_primary_latency_tol_us,
85         "primary APST latency tolerance in us");
86
87 static unsigned long apst_secondary_latency_tol_us = 100000;
88 module_param(apst_secondary_latency_tol_us, ulong, 0644);
89 MODULE_PARM_DESC(apst_secondary_latency_tol_us,
90         "secondary APST latency tolerance in us");
91
92 /*
93  * nvme_wq - hosts nvme related works that are not reset or delete
94  * nvme_reset_wq - hosts nvme reset works
95  * nvme_delete_wq - hosts nvme delete works
96  *
97  * nvme_wq will host works such as scan, aen handling, fw activation,
98  * keep-alive, periodic reconnects etc. nvme_reset_wq
99  * runs reset works, which also flush works hosted on nvme_wq for
 100  * serialization purposes. nvme_delete_wq hosts controller deletion
 101  * works, which flush reset works for serialization.
102  */
103 struct workqueue_struct *nvme_wq;
104 EXPORT_SYMBOL_GPL(nvme_wq);
105
106 struct workqueue_struct *nvme_reset_wq;
107 EXPORT_SYMBOL_GPL(nvme_reset_wq);
108
109 struct workqueue_struct *nvme_delete_wq;
110 EXPORT_SYMBOL_GPL(nvme_delete_wq);
111
112 static LIST_HEAD(nvme_subsystems);
113 static DEFINE_MUTEX(nvme_subsystems_lock);
114
115 static DEFINE_IDA(nvme_instance_ida);
116 static dev_t nvme_ctrl_base_chr_devt;
117 static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
118 static const struct class nvme_class = {
119         .name = "nvme",
120         .dev_uevent = nvme_class_uevent,
121 };
122
123 static const struct class nvme_subsys_class = {
124         .name = "nvme-subsystem",
125 };
126
127 static DEFINE_IDA(nvme_ns_chr_minor_ida);
128 static dev_t nvme_ns_chr_devt;
129 static const struct class nvme_ns_chr_class = {
130         .name = "nvme-generic",
131 };
132
133 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
134 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
135                                            unsigned nsid);
136 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
137                                    struct nvme_command *cmd);
138
139 void nvme_queue_scan(struct nvme_ctrl *ctrl)
140 {
141         /*
142          * Only queue new scan work when admin and IO queues are both alive
143          */
144         if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
145                 queue_work(nvme_wq, &ctrl->scan_work);
146 }
147
148 /*
149  * Use this function to proceed with scheduling reset_work for a controller
150  * that had previously been set to the resetting state. This is intended for
151  * code paths that can't be interrupted by other reset attempts. A hot removal
152  * may prevent this from succeeding.
153  */
154 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
155 {
156         if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
157                 return -EBUSY;
158         if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
159                 return -EBUSY;
160         return 0;
161 }
162 EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
163
164 static void nvme_failfast_work(struct work_struct *work)
165 {
166         struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
167                         struct nvme_ctrl, failfast_work);
168
169         if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
170                 return;
171
172         set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
173         dev_info(ctrl->device, "failfast expired\n");
174         nvme_kick_requeue_lists(ctrl);
175 }
176
177 static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
178 {
179         if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
180                 return;
181
182         schedule_delayed_work(&ctrl->failfast_work,
183                               ctrl->opts->fast_io_fail_tmo * HZ);
184 }
185
186 static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
187 {
188         if (!ctrl->opts)
189                 return;
190
191         cancel_delayed_work_sync(&ctrl->failfast_work);
192         clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
193 }
194
195
196 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
197 {
198         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
199                 return -EBUSY;
200         if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
201                 return -EBUSY;
202         return 0;
203 }
204 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
205
206 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
207 {
208         int ret;
209
210         ret = nvme_reset_ctrl(ctrl);
211         if (!ret) {
212                 flush_work(&ctrl->reset_work);
213                 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
214                         ret = -ENETRESET;
215         }
216
217         return ret;
218 }
219
220 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
221 {
222         dev_info(ctrl->device,
223                  "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
224
225         flush_work(&ctrl->reset_work);
226         nvme_stop_ctrl(ctrl);
227         nvme_remove_namespaces(ctrl);
228         ctrl->ops->delete_ctrl(ctrl);
229         nvme_uninit_ctrl(ctrl);
230 }
231
232 static void nvme_delete_ctrl_work(struct work_struct *work)
233 {
234         struct nvme_ctrl *ctrl =
235                 container_of(work, struct nvme_ctrl, delete_work);
236
237         nvme_do_delete_ctrl(ctrl);
238 }
239
240 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
241 {
242         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
243                 return -EBUSY;
244         if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
245                 return -EBUSY;
246         return 0;
247 }
248 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
249
250 void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
251 {
252         /*
253          * Keep a reference until nvme_do_delete_ctrl() completes,
254          * since ->delete_ctrl can free the controller.
255          */
256         nvme_get_ctrl(ctrl);
257         if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
258                 nvme_do_delete_ctrl(ctrl);
259         nvme_put_ctrl(ctrl);
260 }
261
262 static blk_status_t nvme_error_status(u16 status)
263 {
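        /* look only at the SCT and SC fields; CRD, MORE and DNR are masked off */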
264         switch (status & 0x7ff) {
265         case NVME_SC_SUCCESS:
266                 return BLK_STS_OK;
267         case NVME_SC_CAP_EXCEEDED:
268                 return BLK_STS_NOSPC;
269         case NVME_SC_LBA_RANGE:
270         case NVME_SC_CMD_INTERRUPTED:
271         case NVME_SC_NS_NOT_READY:
272                 return BLK_STS_TARGET;
273         case NVME_SC_BAD_ATTRIBUTES:
274         case NVME_SC_ONCS_NOT_SUPPORTED:
275         case NVME_SC_INVALID_OPCODE:
276         case NVME_SC_INVALID_FIELD:
277         case NVME_SC_INVALID_NS:
278                 return BLK_STS_NOTSUPP;
279         case NVME_SC_WRITE_FAULT:
280         case NVME_SC_READ_ERROR:
281         case NVME_SC_UNWRITTEN_BLOCK:
282         case NVME_SC_ACCESS_DENIED:
283         case NVME_SC_READ_ONLY:
284         case NVME_SC_COMPARE_FAILED:
285                 return BLK_STS_MEDIUM;
286         case NVME_SC_GUARD_CHECK:
287         case NVME_SC_APPTAG_CHECK:
288         case NVME_SC_REFTAG_CHECK:
289         case NVME_SC_INVALID_PI:
290                 return BLK_STS_PROTECTION;
291         case NVME_SC_RESERVATION_CONFLICT:
292                 return BLK_STS_RESV_CONFLICT;
293         case NVME_SC_HOST_PATH_ERROR:
294                 return BLK_STS_TRANSPORT;
295         case NVME_SC_ZONE_TOO_MANY_ACTIVE:
296                 return BLK_STS_ZONE_ACTIVE_RESOURCE;
297         case NVME_SC_ZONE_TOO_MANY_OPEN:
298                 return BLK_STS_ZONE_OPEN_RESOURCE;
299         default:
300                 return BLK_STS_IOERR;
301         }
302 }
303
304 static void nvme_retry_req(struct request *req)
305 {
306         unsigned long delay = 0;
307         u16 crd;
308
309         /* The mask and shift result must be <= 3 */
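        /* ctrl->crdt[] values are specified in units of 100 milliseconds */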
310         crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
311         if (crd)
312                 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
313
314         nvme_req(req)->retries++;
315         blk_mq_requeue_request(req, false);
316         blk_mq_delay_kick_requeue_list(req->q, delay);
317 }
318
319 static void nvme_log_error(struct request *req)
320 {
321         struct nvme_ns *ns = req->q->queuedata;
322         struct nvme_request *nr = nvme_req(req);
323
324         if (ns) {
325                 pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
326                        ns->disk ? ns->disk->disk_name : "?",
327                        nvme_get_opcode_str(nr->cmd->common.opcode),
328                        nr->cmd->common.opcode,
329                        nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
330                        blk_rq_bytes(req) >> ns->head->lba_shift,
331                        nvme_get_error_status_str(nr->status),
332                        nr->status >> 8 & 7,     /* Status Code Type */
333                        nr->status & 0xff,       /* Status Code */
334                        nr->status & NVME_SC_MORE ? "MORE " : "",
335                        nr->status & NVME_SC_DNR  ? "DNR "  : "");
336                 return;
337         }
338
339         pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
340                            dev_name(nr->ctrl->device),
341                            nvme_get_admin_opcode_str(nr->cmd->common.opcode),
342                            nr->cmd->common.opcode,
343                            nvme_get_error_status_str(nr->status),
344                            nr->status >> 8 & 7, /* Status Code Type */
345                            nr->status & 0xff,   /* Status Code */
346                            nr->status & NVME_SC_MORE ? "MORE " : "",
347                            nr->status & NVME_SC_DNR  ? "DNR "  : "");
348 }
349
350 static void nvme_log_err_passthru(struct request *req)
351 {
352         struct nvme_ns *ns = req->q->queuedata;
353         struct nvme_request *nr = nvme_req(req);
354
355         pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
356                 "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
357                 ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
358                 ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
359                      nvme_get_admin_opcode_str(nr->cmd->common.opcode),
360                 nr->cmd->common.opcode,
361                 nvme_get_error_status_str(nr->status),
362                 nr->status >> 8 & 7,    /* Status Code Type */
363                 nr->status & 0xff,      /* Status Code */
364                 nr->status & NVME_SC_MORE ? "MORE " : "",
365                 nr->status & NVME_SC_DNR  ? "DNR "  : "",
366                 nr->cmd->common.cdw10,
367                 nr->cmd->common.cdw11,
368                 nr->cmd->common.cdw12,
369                 nr->cmd->common.cdw13,
370                 nr->cmd->common.cdw14,
371                 nr->cmd->common.cdw15);
372 }
373
374 enum nvme_disposition {
375         COMPLETE,
376         RETRY,
377         FAILOVER,
378         AUTHENTICATE,
379 };
380
381 static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
382 {
383         if (likely(nvme_req(req)->status == 0))
384                 return COMPLETE;
385
386         if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
387                 return AUTHENTICATE;
388
389         if (blk_noretry_request(req) ||
390             (nvme_req(req)->status & NVME_SC_DNR) ||
391             nvme_req(req)->retries >= nvme_max_retries)
392                 return COMPLETE;
393
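        /*
         * Only requests submitted through the multipath node can fail over to
         * another path; other requests on a dying queue are simply completed.
         */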
394         if (req->cmd_flags & REQ_NVME_MPATH) {
395                 if (nvme_is_path_error(nvme_req(req)->status) ||
396                     blk_queue_dying(req->q))
397                         return FAILOVER;
398         } else {
399                 if (blk_queue_dying(req->q))
400                         return COMPLETE;
401         }
402
403         return RETRY;
404 }
405
406 static inline void nvme_end_req_zoned(struct request *req)
407 {
408         if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
409             req_op(req) == REQ_OP_ZONE_APPEND) {
410                 struct nvme_ns *ns = req->q->queuedata;
411
412                 req->__sector = nvme_lba_to_sect(ns->head,
413                         le64_to_cpu(nvme_req(req)->result.u64));
414         }
415 }
416
417 static inline void nvme_end_req(struct request *req)
418 {
419         blk_status_t status = nvme_error_status(nvme_req(req)->status);
420
421         if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
422                 if (blk_rq_is_passthrough(req))
423                         nvme_log_err_passthru(req);
424                 else
425                         nvme_log_error(req);
426         }
427         nvme_end_req_zoned(req);
428         nvme_trace_bio_complete(req);
429         if (req->cmd_flags & REQ_NVME_MPATH)
430                 nvme_mpath_end_request(req);
431         blk_mq_end_request(req, status);
432 }
433
434 void nvme_complete_rq(struct request *req)
435 {
436         struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
437
438         trace_nvme_complete_rq(req);
439         nvme_cleanup_cmd(req);
440
441         /*
442          * Completions of long-running commands should not be able to
443          * defer sending of periodic keep alives, since the controller
444          * may have completed processing such commands a long time ago
445          * (arbitrarily close to command submission time).
446          * req->deadline - req->timeout is the command submission time
447          * in jiffies.
448          */
449         if (ctrl->kas &&
450             req->deadline - req->timeout >= ctrl->ka_last_check_time)
451                 ctrl->comp_seen = true;
452
453         switch (nvme_decide_disposition(req)) {
454         case COMPLETE:
455                 nvme_end_req(req);
456                 return;
457         case RETRY:
458                 nvme_retry_req(req);
459                 return;
460         case FAILOVER:
461                 nvme_failover_req(req);
462                 return;
463         case AUTHENTICATE:
464 #ifdef CONFIG_NVME_HOST_AUTH
465                 queue_work(nvme_wq, &ctrl->dhchap_auth_work);
466                 nvme_retry_req(req);
467 #else
468                 nvme_end_req(req);
469 #endif
470                 return;
471         }
472 }
473 EXPORT_SYMBOL_GPL(nvme_complete_rq);
474
475 void nvme_complete_batch_req(struct request *req)
476 {
477         trace_nvme_complete_rq(req);
478         nvme_cleanup_cmd(req);
479         nvme_end_req_zoned(req);
480 }
481 EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
482
483 /*
484  * Called to unwind from ->queue_rq on a failed command submission so that the
485  * multipathing code gets called to potentially failover to another path.
486  * The caller needs to unwind all transport specific resource allocations and
487  * must propagate the return value.
488  */
489 blk_status_t nvme_host_path_error(struct request *req)
490 {
491         nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
492         blk_mq_set_request_complete(req);
493         nvme_complete_rq(req);
494         return BLK_STS_OK;
495 }
496 EXPORT_SYMBOL_GPL(nvme_host_path_error);
497
498 bool nvme_cancel_request(struct request *req, void *data)
499 {
500         dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
501                                 "Cancelling I/O %d", req->tag);
502
503         /* don't abort a completed or idle request */
504         if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
505                 return true;
506
507         nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
508         nvme_req(req)->flags |= NVME_REQ_CANCELLED;
509         blk_mq_complete_request(req);
510         return true;
511 }
512 EXPORT_SYMBOL_GPL(nvme_cancel_request);
513
514 void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
515 {
516         if (ctrl->tagset) {
517                 blk_mq_tagset_busy_iter(ctrl->tagset,
518                                 nvme_cancel_request, ctrl);
519                 blk_mq_tagset_wait_completed_request(ctrl->tagset);
520         }
521 }
522 EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
523
524 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
525 {
526         if (ctrl->admin_tagset) {
527                 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
528                                 nvme_cancel_request, ctrl);
529                 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
530         }
531 }
532 EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
533
534 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
535                 enum nvme_ctrl_state new_state)
536 {
537         enum nvme_ctrl_state old_state;
538         unsigned long flags;
539         bool changed = false;
540
541         spin_lock_irqsave(&ctrl->lock, flags);
542
543         old_state = nvme_ctrl_state(ctrl);
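        /*
         * Only the old states listed under each new_state case below permit
         * the transition; any other combination leaves ctrl->state unchanged
         * and this function returns false.
         */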
544         switch (new_state) {
545         case NVME_CTRL_LIVE:
546                 switch (old_state) {
547                 case NVME_CTRL_NEW:
548                 case NVME_CTRL_RESETTING:
549                 case NVME_CTRL_CONNECTING:
550                         changed = true;
551                         fallthrough;
552                 default:
553                         break;
554                 }
555                 break;
556         case NVME_CTRL_RESETTING:
557                 switch (old_state) {
558                 case NVME_CTRL_NEW:
559                 case NVME_CTRL_LIVE:
560                         changed = true;
561                         fallthrough;
562                 default:
563                         break;
564                 }
565                 break;
566         case NVME_CTRL_CONNECTING:
567                 switch (old_state) {
568                 case NVME_CTRL_NEW:
569                 case NVME_CTRL_RESETTING:
570                         changed = true;
571                         fallthrough;
572                 default:
573                         break;
574                 }
575                 break;
576         case NVME_CTRL_DELETING:
577                 switch (old_state) {
578                 case NVME_CTRL_LIVE:
579                 case NVME_CTRL_RESETTING:
580                 case NVME_CTRL_CONNECTING:
581                         changed = true;
582                         fallthrough;
583                 default:
584                         break;
585                 }
586                 break;
587         case NVME_CTRL_DELETING_NOIO:
588                 switch (old_state) {
589                 case NVME_CTRL_DELETING:
590                 case NVME_CTRL_DEAD:
591                         changed = true;
592                         fallthrough;
593                 default:
594                         break;
595                 }
596                 break;
597         case NVME_CTRL_DEAD:
598                 switch (old_state) {
599                 case NVME_CTRL_DELETING:
600                         changed = true;
601                         fallthrough;
602                 default:
603                         break;
604                 }
605                 break;
606         default:
607                 break;
608         }
609
610         if (changed) {
611                 WRITE_ONCE(ctrl->state, new_state);
612                 wake_up_all(&ctrl->state_wq);
613         }
614
615         spin_unlock_irqrestore(&ctrl->lock, flags);
616         if (!changed)
617                 return false;
618
619         if (new_state == NVME_CTRL_LIVE) {
620                 if (old_state == NVME_CTRL_CONNECTING)
621                         nvme_stop_failfast_work(ctrl);
622                 nvme_kick_requeue_lists(ctrl);
623         } else if (new_state == NVME_CTRL_CONNECTING &&
624                 old_state == NVME_CTRL_RESETTING) {
625                 nvme_start_failfast_work(ctrl);
626         }
627         return changed;
628 }
629 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
630
631 /*
632  * Returns true for sink states that can't ever transition back to live.
633  */
634 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
635 {
636         switch (nvme_ctrl_state(ctrl)) {
637         case NVME_CTRL_NEW:
638         case NVME_CTRL_LIVE:
639         case NVME_CTRL_RESETTING:
640         case NVME_CTRL_CONNECTING:
641                 return false;
642         case NVME_CTRL_DELETING:
643         case NVME_CTRL_DELETING_NOIO:
644         case NVME_CTRL_DEAD:
645                 return true;
646         default:
647                 WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
648                 return true;
649         }
650 }
651
652 /*
653  * Waits for the controller state to be resetting, or returns false if it is
654  * not possible to ever transition to that state.
655  */
656 bool nvme_wait_reset(struct nvme_ctrl *ctrl)
657 {
658         wait_event(ctrl->state_wq,
659                    nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
660                    nvme_state_terminal(ctrl));
661         return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
662 }
663 EXPORT_SYMBOL_GPL(nvme_wait_reset);
664
665 static void nvme_free_ns_head(struct kref *ref)
666 {
667         struct nvme_ns_head *head =
668                 container_of(ref, struct nvme_ns_head, ref);
669
670         nvme_mpath_remove_disk(head);
671         ida_free(&head->subsys->ns_ida, head->instance);
672         cleanup_srcu_struct(&head->srcu);
673         nvme_put_subsystem(head->subsys);
674         kfree(head);
675 }
676
677 bool nvme_tryget_ns_head(struct nvme_ns_head *head)
678 {
679         return kref_get_unless_zero(&head->ref);
680 }
681
682 void nvme_put_ns_head(struct nvme_ns_head *head)
683 {
684         kref_put(&head->ref, nvme_free_ns_head);
685 }
686
687 static void nvme_free_ns(struct kref *kref)
688 {
689         struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
690
691         put_disk(ns->disk);
692         nvme_put_ns_head(ns->head);
693         nvme_put_ctrl(ns->ctrl);
694         kfree(ns);
695 }
696
697 static inline bool nvme_get_ns(struct nvme_ns *ns)
698 {
699         return kref_get_unless_zero(&ns->kref);
700 }
701
702 void nvme_put_ns(struct nvme_ns *ns)
703 {
704         kref_put(&ns->kref, nvme_free_ns);
705 }
706 EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
707
708 static inline void nvme_clear_nvme_request(struct request *req)
709 {
710         nvme_req(req)->status = 0;
711         nvme_req(req)->retries = 0;
712         nvme_req(req)->flags = 0;
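        /*
         * Setting RQF_DONTPREP prevents nvme_setup_cmd() from clearing this
         * state again if the request is requeued, preserving the retry count.
         */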
713         req->rq_flags |= RQF_DONTPREP;
714 }
715
716 /* initialize a passthrough request */
717 void nvme_init_request(struct request *req, struct nvme_command *cmd)
718 {
719         struct nvme_request *nr = nvme_req(req);
720         bool logging_enabled;
721
722         if (req->q->queuedata) {
723                 struct nvme_ns *ns = req->q->disk->private_data;
724
725                 logging_enabled = ns->head->passthru_err_log_enabled;
726                 req->timeout = NVME_IO_TIMEOUT;
727         } else { /* no queuedata implies admin queue */
728                 logging_enabled = nr->ctrl->passthru_err_log_enabled;
729                 req->timeout = NVME_ADMIN_TIMEOUT;
730         }
731
732         if (!logging_enabled)
733                 req->rq_flags |= RQF_QUIET;
734
735         /* passthru commands should let the driver set the SGL flags */
736         cmd->common.flags &= ~NVME_CMD_SGL_ALL;
737
738         req->cmd_flags |= REQ_FAILFAST_DRIVER;
739         if (req->mq_hctx->type == HCTX_TYPE_POLL)
740                 req->cmd_flags |= REQ_POLLED;
741         nvme_clear_nvme_request(req);
742         memcpy(nr->cmd, cmd, sizeof(*cmd));
743 }
744 EXPORT_SYMBOL_GPL(nvme_init_request);
745
746 /*
747  * For something we're not in a state to send to the device the default action
748  * is to busy it and retry it after the controller state is recovered.  However,
749  * if the controller is deleting or if anything is marked for failfast or
750  * nvme multipath it is immediately failed.
751  *
752  * Note: commands used to initialize the controller will be marked for failfast.
753  * Note: nvme cli/ioctl commands are marked for failfast.
754  */
755 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
756                 struct request *rq)
757 {
758         enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
759
760         if (state != NVME_CTRL_DELETING_NOIO &&
761             state != NVME_CTRL_DELETING &&
762             state != NVME_CTRL_DEAD &&
763             !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
764             !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
765                 return BLK_STS_RESOURCE;
766         return nvme_host_path_error(rq);
767 }
768 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
769
770 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
771                 bool queue_live, enum nvme_ctrl_state state)
772 {
773         struct nvme_request *req = nvme_req(rq);
774
775         /*
776          * currently we have a problem sending passthru commands
777          * on the admin_q if the controller is not LIVE because we can't
778          * make sure that they are going out after the admin connect,
779          * sequence. Until the controller is LIVE, fail with
780          * sequence. until the controller will be LIVE, fail with
781          * BLK_STS_RESOURCE so that they will be rescheduled.
782          */
783         if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
784                 return false;
785
786         if (ctrl->ops->flags & NVME_F_FABRICS) {
787                 /*
788                  * Only allow commands on a live queue, except for the connect
789                  * command, which is required to set the queue live in the
790                  * appropriate states.
791                  */
792                 switch (state) {
793                 case NVME_CTRL_CONNECTING:
794                         if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
795                             (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
796                              req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
797                              req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
798                                 return true;
799                         break;
800                 default:
801                         break;
802                 case NVME_CTRL_DEAD:
803                         return false;
804                 }
805         }
806
807         return queue_live;
808 }
809 EXPORT_SYMBOL_GPL(__nvme_check_ready);
810
811 static inline void nvme_setup_flush(struct nvme_ns *ns,
812                 struct nvme_command *cmnd)
813 {
814         memset(cmnd, 0, sizeof(*cmnd));
815         cmnd->common.opcode = nvme_cmd_flush;
816         cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
817 }
818
819 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
820                 struct nvme_command *cmnd)
821 {
822         unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
823         struct nvme_dsm_range *range;
824         struct bio *bio;
825
826         /*
827          * Some devices do not consider the DSM 'Number of Ranges' field when
828          * determining how much data to DMA. Always allocate memory for maximum
829          * number of segments to prevent device reading beyond end of buffer.
830          */
831         static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
832
833         range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
834         if (!range) {
835                 /*
836                  * If we fail to allocate our range, fall back to the controller
837                  * discard page. If that's also busy, it's safe to return
838                  * busy, as we know we can make progress once that's freed.
839                  */
840                 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
841                         return BLK_STS_RESOURCE;
842
843                 range = page_address(ns->ctrl->discard_page);
844         }
845
846         if (queue_max_discard_segments(req->q) == 1) {
847                 u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req));
848                 u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);
849
850                 range[0].cattr = cpu_to_le32(0);
851                 range[0].nlb = cpu_to_le32(nlb);
852                 range[0].slba = cpu_to_le64(slba);
853                 n = 1;
854         } else {
855                 __rq_for_each_bio(bio, req) {
856                         u64 slba = nvme_sect_to_lba(ns->head,
857                                                     bio->bi_iter.bi_sector);
858                         u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
859
860                         if (n < segments) {
861                                 range[n].cattr = cpu_to_le32(0);
862                                 range[n].nlb = cpu_to_le32(nlb);
863                                 range[n].slba = cpu_to_le64(slba);
864                         }
865                         n++;
866                 }
867         }
868
869         if (WARN_ON_ONCE(n != segments)) {
870                 if (virt_to_page(range) == ns->ctrl->discard_page)
871                         clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
872                 else
873                         kfree(range);
874                 return BLK_STS_IOERR;
875         }
876
877         memset(cmnd, 0, sizeof(*cmnd));
878         cmnd->dsm.opcode = nvme_cmd_dsm;
879         cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
880         cmnd->dsm.nr = cpu_to_le32(segments - 1);
881         cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
882
883         bvec_set_virt(&req->special_vec, range, alloc_size);
884         req->rq_flags |= RQF_SPECIAL_PAYLOAD;
885
886         return BLK_STS_OK;
887 }
888
889 static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
890                               struct request *req)
891 {
892         u32 upper, lower;
893         u64 ref48;
894
895         /* both rw and write zeroes share the same reftag format */
896         switch (ns->head->guard_type) {
897         case NVME_NVM_NS_16B_GUARD:
898                 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
899                 break;
900         case NVME_NVM_NS_64B_GUARD:
901                 ref48 = ext_pi_ref_tag(req);
902                 lower = lower_32_bits(ref48);
903                 upper = upper_32_bits(ref48);
904
905                 cmnd->rw.reftag = cpu_to_le32(lower);
906                 cmnd->rw.cdw3 = cpu_to_le32(upper);
907                 break;
908         default:
909                 break;
910         }
911 }
912
913 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
914                 struct request *req, struct nvme_command *cmnd)
915 {
916         memset(cmnd, 0, sizeof(*cmnd));
917
918         if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
919                 return nvme_setup_discard(ns, req, cmnd);
920
921         cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
922         cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
923         cmnd->write_zeroes.slba =
924                 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
925         cmnd->write_zeroes.length =
926                 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
927
928         if (!(req->cmd_flags & REQ_NOUNMAP) &&
929             (ns->head->features & NVME_NS_DEAC))
930                 cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
931
932         if (nvme_ns_has_pi(ns->head)) {
933                 cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
934
935                 switch (ns->head->pi_type) {
936                 case NVME_NS_DPS_PI_TYPE1:
937                 case NVME_NS_DPS_PI_TYPE2:
938                         nvme_set_ref_tag(ns, cmnd, req);
939                         break;
940                 }
941         }
942
943         return BLK_STS_OK;
944 }
945
946 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
947                 struct request *req, struct nvme_command *cmnd,
948                 enum nvme_opcode op)
949 {
950         u16 control = 0;
951         u32 dsmgmt = 0;
952
953         if (req->cmd_flags & REQ_FUA)
954                 control |= NVME_RW_FUA;
955         if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
956                 control |= NVME_RW_LR;
957
958         if (req->cmd_flags & REQ_RAHEAD)
959                 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
960
961         cmnd->rw.opcode = op;
962         cmnd->rw.flags = 0;
963         cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
964         cmnd->rw.cdw2 = 0;
965         cmnd->rw.cdw3 = 0;
966         cmnd->rw.metadata = 0;
967         cmnd->rw.slba =
968                 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
969         cmnd->rw.length =
970                 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
971         cmnd->rw.reftag = 0;
972         cmnd->rw.apptag = 0;
973         cmnd->rw.appmask = 0;
974
975         if (ns->head->ms) {
976                 /*
977                  * If formatted with metadata, the block layer always provides a
978                  * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
979                  * we enable the PRACT bit for protection information or set the
980                  * namespace capacity to zero to prevent any I/O.
981                  */
982                 if (!blk_integrity_rq(req)) {
983                         if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
984                                 return BLK_STS_NOTSUPP;
985                         control |= NVME_RW_PRINFO_PRACT;
986                 }
987
988                 switch (ns->head->pi_type) {
989                 case NVME_NS_DPS_PI_TYPE3:
990                         control |= NVME_RW_PRINFO_PRCHK_GUARD;
991                         break;
992                 case NVME_NS_DPS_PI_TYPE1:
993                 case NVME_NS_DPS_PI_TYPE2:
994                         control |= NVME_RW_PRINFO_PRCHK_GUARD |
995                                         NVME_RW_PRINFO_PRCHK_REF;
996                         if (op == nvme_cmd_zone_append)
997                                 control |= NVME_RW_APPEND_PIREMAP;
998                         nvme_set_ref_tag(ns, cmnd, req);
999                         break;
1000                 }
1001         }
1002
1003         cmnd->rw.control = cpu_to_le16(control);
1004         cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
1005         return 0;
1006 }
1007
1008 void nvme_cleanup_cmd(struct request *req)
1009 {
1010         if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
1011                 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
1012
1013                 if (req->special_vec.bv_page == ctrl->discard_page)
1014                         clear_bit_unlock(0, &ctrl->discard_page_busy);
1015                 else
1016                         kfree(bvec_virt(&req->special_vec));
1017         }
1018 }
1019 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
1020
1021 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
1022 {
1023         struct nvme_command *cmd = nvme_req(req)->cmd;
1024         blk_status_t ret = BLK_STS_OK;
1025
1026         if (!(req->rq_flags & RQF_DONTPREP))
1027                 nvme_clear_nvme_request(req);
1028
1029         switch (req_op(req)) {
1030         case REQ_OP_DRV_IN:
1031         case REQ_OP_DRV_OUT:
1032                 /* these are setup prior to execution in nvme_init_request() */
1033                 break;
1034         case REQ_OP_FLUSH:
1035                 nvme_setup_flush(ns, cmd);
1036                 break;
1037         case REQ_OP_ZONE_RESET_ALL:
1038         case REQ_OP_ZONE_RESET:
1039                 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
1040                 break;
1041         case REQ_OP_ZONE_OPEN:
1042                 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
1043                 break;
1044         case REQ_OP_ZONE_CLOSE:
1045                 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
1046                 break;
1047         case REQ_OP_ZONE_FINISH:
1048                 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
1049                 break;
1050         case REQ_OP_WRITE_ZEROES:
1051                 ret = nvme_setup_write_zeroes(ns, req, cmd);
1052                 break;
1053         case REQ_OP_DISCARD:
1054                 ret = nvme_setup_discard(ns, req, cmd);
1055                 break;
1056         case REQ_OP_READ:
1057                 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
1058                 break;
1059         case REQ_OP_WRITE:
1060                 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
1061                 break;
1062         case REQ_OP_ZONE_APPEND:
1063                 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
1064                 break;
1065         default:
1066                 WARN_ON_ONCE(1);
1067                 return BLK_STS_IOERR;
1068         }
1069
1070         cmd->common.command_id = nvme_cid(req);
1071         trace_nvme_setup_cmd(req, cmd);
1072         return ret;
1073 }
1074 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
1075
1076 /*
1077  * Return values:
1078  * 0:  success
1079  * >0: nvme controller's cqe status response
1080  * <0: kernel error in lieu of controller response
1081  */
1082 int nvme_execute_rq(struct request *rq, bool at_head)
1083 {
1084         blk_status_t status;
1085
1086         status = blk_execute_rq(rq, at_head);
1087         if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
1088                 return -EINTR;
1089         if (nvme_req(rq)->status)
1090                 return nvme_req(rq)->status;
1091         return blk_status_to_errno(status);
1092 }
1093 EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
1094
1095 /*
1096  * Returns 0 on success.  If the result is negative, it's a Linux error code;
1097  * if the result is positive, it's an NVM Express status code
1098  */
1099 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
1100                 union nvme_result *result, void *buffer, unsigned bufflen,
1101                 int qid, nvme_submit_flags_t flags)
1102 {
1103         struct request *req;
1104         int ret;
1105         blk_mq_req_flags_t blk_flags = 0;
1106
1107         if (flags & NVME_SUBMIT_NOWAIT)
1108                 blk_flags |= BLK_MQ_REQ_NOWAIT;
1109         if (flags & NVME_SUBMIT_RESERVED)
1110                 blk_flags |= BLK_MQ_REQ_RESERVED;
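        /*
         * NVMe I/O queue IDs start at 1, while blk-mq hctx indices start at
         * 0, hence the qid - 1 below when targeting a specific queue.
         */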
1111         if (qid == NVME_QID_ANY)
1112                 req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
1113         else
1114                 req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
1115                                                 qid - 1);
1116
1117         if (IS_ERR(req))
1118                 return PTR_ERR(req);
1119         nvme_init_request(req, cmd);
1120         if (flags & NVME_SUBMIT_RETRY)
1121                 req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
1122
1123         if (buffer && bufflen) {
1124                 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
1125                 if (ret)
1126                         goto out;
1127         }
1128
1129         ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
1130         if (result && ret >= 0)
1131                 *result = nvme_req(req)->result;
1132  out:
1133         blk_mq_free_request(req);
1134         return ret;
1135 }
1136 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
1137
1138 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
1139                 void *buffer, unsigned bufflen)
1140 {
1141         return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
1142                         NVME_QID_ANY, 0);
1143 }
1144 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
1145
1146 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1147 {
1148         u32 effects = 0;
1149
1150         if (ns) {
1151                 effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
1152                 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1153                         dev_warn_once(ctrl->device,
1154                                 "IO command:%02x has unusual effects:%08x\n",
1155                                 opcode, effects);
1156
1157                 /*
1158                  * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
1159                  * which would deadlock when done on an I/O command.  Note that
1160                  * we already warn about an unusual effect above.
1161                  */
1162                 effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
1163         } else {
1164                 effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1165
1166                 /* Ignore execution restrictions if any relaxation bits are set */
1167                 if (effects & NVME_CMD_EFFECTS_CSER_MASK)
1168                         effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
1169         }
1170
1171         return effects;
1172 }
1173 EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
1174
1175 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1176 {
1177         u32 effects = nvme_command_effects(ctrl, ns, opcode);
1178
1179         /*
1180          * For simplicity, IO to all namespaces is quiesced even if the command
1181          * effects say only one namespace is affected.
1182          */
1183         if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1184                 mutex_lock(&ctrl->scan_lock);
1185                 mutex_lock(&ctrl->subsys->lock);
1186                 nvme_mpath_start_freeze(ctrl->subsys);
1187                 nvme_mpath_wait_freeze(ctrl->subsys);
1188                 nvme_start_freeze(ctrl);
1189                 nvme_wait_freeze(ctrl);
1190         }
1191         return effects;
1192 }
1193 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
1194
1195 void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
1196                        struct nvme_command *cmd, int status)
1197 {
1198         if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1199                 nvme_unfreeze(ctrl);
1200                 nvme_mpath_unfreeze(ctrl->subsys);
1201                 mutex_unlock(&ctrl->subsys->lock);
1202                 mutex_unlock(&ctrl->scan_lock);
1203         }
1204         if (effects & NVME_CMD_EFFECTS_CCC) {
1205                 if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
1206                                       &ctrl->flags)) {
1207                         dev_info(ctrl->device,
1208 "controller capabilities changed, reset may be required to take effect.\n");
1209                 }
1210         }
1211         if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
1212                 nvme_queue_scan(ctrl);
1213                 flush_work(&ctrl->scan_work);
1214         }
1215         if (ns)
1216                 return;
1217
1218         switch (cmd->common.opcode) {
1219         case nvme_admin_set_features:
1220                 switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
1221                 case NVME_FEAT_KATO:
1222                         /*
1223                          * The keep alive command interval on the host should be
1224                          * updated when KATO is modified by Set Features
1225                          * commands.
1226                          */
1227                         if (!status)
1228                                 nvme_update_keep_alive(ctrl, cmd);
1229                         break;
1230                 default:
1231                         break;
1232                 }
1233                 break;
1234         default:
1235                 break;
1236         }
1237 }
1238 EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
1239
1240 /*
1241  * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
1242  * 
1243  *   The host should send Keep Alive commands at half of the Keep Alive Timeout
1244  *   accounting for transport roundtrip times [..].
1245  */
1246 static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
1247 {
1248         unsigned long delay = ctrl->kato * HZ / 2;
1249
1250         /*
1251          * When using Traffic Based Keep Alive, we need to run
1252          * nvme_keep_alive_work at twice the normal frequency, as one
1253          * command completion can postpone sending a keep alive command
1254          * by up to twice the delay between runs.
1255          */
1256         if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
1257                 delay /= 2;
1258         return delay;
1259 }
1260
1261 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
1262 {
1263         unsigned long now = jiffies;
1264         unsigned long delay = nvme_keep_alive_work_period(ctrl);
1265         unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;
1266
1267         if (time_after(now, ka_next_check_tm))
1268                 delay = 0;
1269         else
1270                 delay = ka_next_check_tm - now;
1271
1272         queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1273 }
1274
1275 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
1276                                                  blk_status_t status)
1277 {
1278         struct nvme_ctrl *ctrl = rq->end_io_data;
1279         unsigned long flags;
1280         bool startka = false;
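        /* rq->deadline - rq->timeout is the command submission time in jiffies */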
1281         unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
1282         unsigned long delay = nvme_keep_alive_work_period(ctrl);
1283
1284         /*
1285          * Subtract off the keepalive RTT so nvme_keep_alive_work runs
1286          * at the desired frequency.
1287          */
1288         if (rtt <= delay) {
1289                 delay -= rtt;
1290         } else {
1291                 dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
1292                          jiffies_to_msecs(rtt));
1293                 delay = 0;
1294         }
1295
1296         blk_mq_free_request(rq);
1297
1298         if (status) {
1299                 dev_err(ctrl->device,
1300                         "failed nvme_keep_alive_end_io error=%d\n",
1301                                 status);
1302                 return RQ_END_IO_NONE;
1303         }
1304
1305         ctrl->ka_last_check_time = jiffies;
1306         ctrl->comp_seen = false;
1307         spin_lock_irqsave(&ctrl->lock, flags);
1308         if (ctrl->state == NVME_CTRL_LIVE ||
1309             ctrl->state == NVME_CTRL_CONNECTING)
1310                 startka = true;
1311         spin_unlock_irqrestore(&ctrl->lock, flags);
1312         if (startka)
1313                 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1314         return RQ_END_IO_NONE;
1315 }
1316
1317 static void nvme_keep_alive_work(struct work_struct *work)
1318 {
1319         struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
1320                         struct nvme_ctrl, ka_work);
1321         bool comp_seen = ctrl->comp_seen;
1322         struct request *rq;
1323
1324         ctrl->ka_last_check_time = jiffies;
1325
1326         if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
1327                 dev_dbg(ctrl->device,
1328                         "reschedule traffic based keep-alive timer\n");
1329                 ctrl->comp_seen = false;
1330                 nvme_queue_keep_alive_work(ctrl);
1331                 return;
1332         }
1333
1334         rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
1335                                   BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
1336         if (IS_ERR(rq)) {
1337                 /* allocation failure, reset the controller */
1338                 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1339                 nvme_reset_ctrl(ctrl);
1340                 return;
1341         }
1342         nvme_init_request(rq, &ctrl->ka_cmd);
1343
1344         rq->timeout = ctrl->kato * HZ;
1345         rq->end_io = nvme_keep_alive_end_io;
1346         rq->end_io_data = ctrl;
1347         blk_execute_rq_nowait(rq, false);
1348 }
1349
1350 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1351 {
1352         if (unlikely(ctrl->kato == 0))
1353                 return;
1354
1355         nvme_queue_keep_alive_work(ctrl);
1356 }
1357
1358 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1359 {
1360         if (unlikely(ctrl->kato == 0))
1361                 return;
1362
1363         cancel_delayed_work_sync(&ctrl->ka_work);
1364 }
1365 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
1366
1367 static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
1368                                    struct nvme_command *cmd)
1369 {
1370         unsigned int new_kato =
1371                 DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
1372
1373         dev_info(ctrl->device,
1374                  "keep alive interval updated from %u ms to %u ms\n",
1375                  ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
1376
1377         nvme_stop_keep_alive(ctrl);
1378         ctrl->kato = new_kato;
1379         nvme_start_keep_alive(ctrl);
1380 }
1381
1382 /*
1383  * In NVMe 1.0 the CNS field was just a binary controller or namespace
1384  * flag, thus sending any new CNS opcodes has a big chance of not working.
1385  * Qemu unfortunately had that bug after reporting a 1.1 version compliance
1386  * (but not for any later version).
1387  */
1388 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
1389 {
1390         if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
1391                 return ctrl->vs < NVME_VS(1, 2, 0);
1392         return ctrl->vs < NVME_VS(1, 1, 0);
1393 }
1394
1395 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1396 {
1397         struct nvme_command c = { };
1398         int error;
1399
1400         /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1401         c.identify.opcode = nvme_admin_identify;
1402         c.identify.cns = NVME_ID_CNS_CTRL;
1403
1404         *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1405         if (!*id)
1406                 return -ENOMEM;
1407
1408         error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1409                         sizeof(struct nvme_id_ctrl));
1410         if (error) {
1411                 kfree(*id);
1412                 *id = NULL;
1413         }
1414         return error;
1415 }
1416
1417 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1418                 struct nvme_ns_id_desc *cur, bool *csi_seen)
1419 {
1420         const char *warn_str = "ctrl returned bogus length:";
1421         void *data = cur;
1422
1423         switch (cur->nidt) {
1424         case NVME_NIDT_EUI64:
1425                 if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1426                         dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1427                                  warn_str, cur->nidl);
1428                         return -1;
1429                 }
1430                 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1431                         return NVME_NIDT_EUI64_LEN;
1432                 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1433                 return NVME_NIDT_EUI64_LEN;
1434         case NVME_NIDT_NGUID:
1435                 if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1436                         dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1437                                  warn_str, cur->nidl);
1438                         return -1;
1439                 }
1440                 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1441                         return NVME_NIDT_NGUID_LEN;
1442                 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1443                 return NVME_NIDT_NGUID_LEN;
1444         case NVME_NIDT_UUID:
1445                 if (cur->nidl != NVME_NIDT_UUID_LEN) {
1446                         dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1447                                  warn_str, cur->nidl);
1448                         return -1;
1449                 }
1450                 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1451                         return NVME_NIDT_UUID_LEN;
1452                 uuid_copy(&ids->uuid, data + sizeof(*cur));
1453                 return NVME_NIDT_UUID_LEN;
1454         case NVME_NIDT_CSI:
1455                 if (cur->nidl != NVME_NIDT_CSI_LEN) {
1456                         dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
1457                                  warn_str, cur->nidl);
1458                         return -1;
1459                 }
1460                 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
1461                 *csi_seen = true;
1462                 return NVME_NIDT_CSI_LEN;
1463         default:
1464                 /* Skip unknown types */
1465                 return cur->nidl;
1466         }
1467 }
1468
1469 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
1470                 struct nvme_ns_info *info)
1471 {
1472         struct nvme_command c = { };
1473         bool csi_seen = false;
1474         int status, pos, len;
1475         void *data;
1476
1477         if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
1478                 return 0;
1479         if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
1480                 return 0;
1481
1482         c.identify.opcode = nvme_admin_identify;
1483         c.identify.nsid = cpu_to_le32(info->nsid);
1484         c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1485
1486         data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1487         if (!data)
1488                 return -ENOMEM;
1489
1490         status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1491                                       NVME_IDENTIFY_DATA_SIZE);
1492         if (status) {
1493                 dev_warn(ctrl->device,
1494                         "Identify Descriptors failed (nsid=%u, status=0x%x)\n",
1495                         info->nsid, status);
1496                 goto free_data;
1497         }
1498
1499         for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1500                 struct nvme_ns_id_desc *cur = data + pos;
1501
1502                 if (cur->nidl == 0)
1503                         break;
1504
1505                 len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
1506                 if (len < 0)
1507                         break;
1508
1509                 len += sizeof(*cur);
1510         }
1511
1512         if (nvme_multi_css(ctrl) && !csi_seen) {
1513                 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
1514                          info->nsid);
1515                 status = -EINVAL;
1516         }
1517
1518 free_data:
1519         kfree(data);
1520         return status;
1521 }
1522
1523 int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
1524                         struct nvme_id_ns **id)
1525 {
1526         struct nvme_command c = { };
1527         int error;
1528
1529         /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1530         c.identify.opcode = nvme_admin_identify;
1531         c.identify.nsid = cpu_to_le32(nsid);
1532         c.identify.cns = NVME_ID_CNS_NS;
1533
1534         *id = kmalloc(sizeof(**id), GFP_KERNEL);
1535         if (!*id)
1536                 return -ENOMEM;
1537
1538         error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1539         if (error) {
1540                 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1541                 kfree(*id);
1542                 *id = NULL;
1543         }
1544         return error;
1545 }
1546
1547 static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
1548                 struct nvme_ns_info *info)
1549 {
1550         struct nvme_ns_ids *ids = &info->ids;
1551         struct nvme_id_ns *id;
1552         int ret;
1553
1554         ret = nvme_identify_ns(ctrl, info->nsid, &id);
1555         if (ret)
1556                 return ret;
1557
1558         if (id->ncap == 0) {
1559                 /* namespace not allocated or attached */
1560                 info->is_removed = true;
1561                 ret = -ENODEV;
1562                 goto error;
1563         }
1564
1565         info->anagrpid = id->anagrpid;
1566         info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1567         info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1568         info->is_ready = true;
1569         if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
1570                 dev_info(ctrl->device,
1571                          "Ignoring bogus Namespace Identifiers\n");
1572         } else {
1573                 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1574                     !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1575                         memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
1576                 if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1577                     !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1578                         memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
1579         }
1580
1581 error:
1582         kfree(id);
1583         return ret;
1584 }
1585
1586 static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
1587                 struct nvme_ns_info *info)
1588 {
1589         struct nvme_id_ns_cs_indep *id;
1590         struct nvme_command c = {
1591                 .identify.opcode        = nvme_admin_identify,
1592                 .identify.nsid          = cpu_to_le32(info->nsid),
1593                 .identify.cns           = NVME_ID_CNS_NS_CS_INDEP,
1594         };
1595         int ret;
1596
1597         id = kmalloc(sizeof(*id), GFP_KERNEL);
1598         if (!id)
1599                 return -ENOMEM;
1600
1601         ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1602         if (!ret) {
1603                 info->anagrpid = id->anagrpid;
1604                 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1605                 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1606                 info->is_ready = id->nstat & NVME_NSTAT_NRDY;
1607         }
1608         kfree(id);
1609         return ret;
1610 }
1611
1612 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1613                 unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1614 {
1615         union nvme_result res = { 0 };
1616         struct nvme_command c = { };
1617         int ret;
1618
1619         c.features.opcode = op;
1620         c.features.fid = cpu_to_le32(fid);
1621         c.features.dword11 = cpu_to_le32(dword11);
1622
1623         ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1624                         buffer, buflen, NVME_QID_ANY, 0);
1625         if (ret >= 0 && result)
1626                 *result = le32_to_cpu(res.u32);
1627         return ret;
1628 }
1629
1630 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1631                       unsigned int dword11, void *buffer, size_t buflen,
1632                       u32 *result)
1633 {
1634         return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1635                              buflen, result);
1636 }
1637 EXPORT_SYMBOL_GPL(nvme_set_features);
1638
1639 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1640                       unsigned int dword11, void *buffer, size_t buflen,
1641                       u32 *result)
1642 {
1643         return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1644                              buflen, result);
1645 }
1646 EXPORT_SYMBOL_GPL(nvme_get_features);
1647
1648 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1649 {
1650         u32 q_count = (*count - 1) | ((*count - 1) << 16);
1651         u32 result;
1652         int status, nr_io_queues;
1653
1654         status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1655                         &result);
1656         if (status < 0)
1657                 return status;
1658
1659         /*
1660          * Degraded controllers might return an error when setting the queue
1661          * count.  We still want to be able to bring them online and offer
1662          * access to the admin queue, as that might be the only way to fix them up.
1663          */
1664         if (status > 0) {
1665                 dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1666                 *count = 0;
1667         } else {
1668                 nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1669                 *count = min(*count, nr_io_queues);
1670         }
1671
1672         return 0;
1673 }
1674 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
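/*
 * Minimal usage sketch for nvme_set_queue_count() (kept under #if 0, never
 * compiled; the 8-queue request and the 0x00030007 completion value are
 * made-up numbers).  NSQR/NCQR in dword11 and NSQA/NCQA in the completion
 * result are all zero-based 16-bit fields.
 */
#if 0
static int nvme_set_queue_count_example(struct nvme_ctrl *ctrl)
{
        int count = 8;  /* want 8 I/O queue pairs */
        u32 result = 0;

        /* dword11 == 0x00070007: NCQR in bits 31:16, NSQR in bits 15:0 */
        nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES,
                          (count - 1) | ((count - 1) << 16), NULL, 0, &result);

        /*
         * If the controller completes with result == 0x00030007 it granted
         * 4 completion and 8 submission queues (NCQA/NSQA are zero-based),
         * so min(NSQA, NCQA) + 1 reports 4 usable I/O queue pairs.
         */
        return min(result & 0xffff, result >> 16) + 1;
}
#endif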
1675
1676 #define NVME_AEN_SUPPORTED \
1677         (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1678          NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1679
1680 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1681 {
1682         u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1683         int status;
1684
1685         if (!supported_aens)
1686                 return;
1687
1688         status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1689                         NULL, 0, &result);
1690         if (status)
1691                 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1692                          supported_aens);
1693
1694         queue_work(nvme_wq, &ctrl->async_event_work);
1695 }
1696
1697 static int nvme_ns_open(struct nvme_ns *ns)
1698 {
1699
1700         /* should never be called due to GENHD_FL_HIDDEN */
1701         if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1702                 goto fail;
1703         if (!nvme_get_ns(ns))
1704                 goto fail;
1705         if (!try_module_get(ns->ctrl->ops->module))
1706                 goto fail_put_ns;
1707
1708         return 0;
1709
1710 fail_put_ns:
1711         nvme_put_ns(ns);
1712 fail:
1713         return -ENXIO;
1714 }
1715
1716 static void nvme_ns_release(struct nvme_ns *ns)
1717 {
1718
1719         module_put(ns->ctrl->ops->module);
1720         nvme_put_ns(ns);
1721 }
1722
1723 static int nvme_open(struct gendisk *disk, blk_mode_t mode)
1724 {
1725         return nvme_ns_open(disk->private_data);
1726 }
1727
1728 static void nvme_release(struct gendisk *disk)
1729 {
1730         nvme_ns_release(disk->private_data);
1731 }
1732
1733 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1734 {
1735         /* some standard values */
1736         geo->heads = 1 << 6;
1737         geo->sectors = 1 << 5;
1738         geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1739         return 0;
1740 }
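/*
 * With the fake geometry above, one cylinder is 64 heads * 32 sectors ==
 * 2048 sectors, which is why the cylinder count is the disk capacity
 * shifted right by 11 (2^11 == 2048).
 */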
1741
1742 static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
1743 {
1744         struct blk_integrity integrity = { };
1745
1746         blk_integrity_unregister(disk);
1747
1748         if (!head->ms)
1749                 return true;
1750
1751         /*
1752          * PI can always be supported as we can ask the controller to simply
1753          * insert/strip it, which is not possible for other kinds of metadata.
1754          */
1755         if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ||
1756             !(head->features & NVME_NS_METADATA_SUPPORTED))
1757                 return nvme_ns_has_pi(head);
1758
1759         switch (head->pi_type) {
1760         case NVME_NS_DPS_PI_TYPE3:
1761                 switch (head->guard_type) {
1762                 case NVME_NVM_NS_16B_GUARD:
1763                         integrity.profile = &t10_pi_type3_crc;
1764                         integrity.tag_size = sizeof(u16) + sizeof(u32);
1765                         integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1766                         break;
1767                 case NVME_NVM_NS_64B_GUARD:
1768                         integrity.profile = &ext_pi_type3_crc64;
1769                         integrity.tag_size = sizeof(u16) + 6;
1770                         integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1771                         break;
1772                 default:
1773                         integrity.profile = NULL;
1774                         break;
1775                 }
1776                 break;
1777         case NVME_NS_DPS_PI_TYPE1:
1778         case NVME_NS_DPS_PI_TYPE2:
1779                 switch (head->guard_type) {
1780                 case NVME_NVM_NS_16B_GUARD:
1781                         integrity.profile = &t10_pi_type1_crc;
1782                         integrity.tag_size = sizeof(u16);
1783                         integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1784                         break;
1785                 case NVME_NVM_NS_64B_GUARD:
1786                         integrity.profile = &ext_pi_type1_crc64;
1787                         integrity.tag_size = sizeof(u16);
1788                         integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1789                         break;
1790                 default:
1791                         integrity.profile = NULL;
1792                         break;
1793                 }
1794                 break;
1795         default:
1796                 integrity.profile = NULL;
1797                 break;
1798         }
1799
1800         integrity.tuple_size = head->ms;
1801         integrity.pi_offset = head->pi_offset;
1802         blk_integrity_register(disk, &integrity);
1803         return true;
1804 }
1805
1806 static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
1807 {
1808         struct nvme_ctrl *ctrl = ns->ctrl;
1809
1810         if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
1811                 lim->max_hw_discard_sectors =
1812                         nvme_lba_to_sect(ns->head, ctrl->dmrsl);
1813         else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
1814                 lim->max_hw_discard_sectors = UINT_MAX;
1815         else
1816                 lim->max_hw_discard_sectors = 0;
1817
1818         lim->discard_granularity = lim->logical_block_size;
1819
1820         if (ctrl->dmrl)
1821                 lim->max_discard_segments = ctrl->dmrl;
1822         else
1823                 lim->max_discard_segments = NVME_DSM_MAX_RANGES;
1824 }
1825
1826 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1827 {
1828         return uuid_equal(&a->uuid, &b->uuid) &&
1829                 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1830                 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
1831                 a->csi == b->csi;
1832 }
1833
1834 static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
1835                 struct nvme_id_ns_nvm **nvmp)
1836 {
1837         struct nvme_command c = {
1838                 .identify.opcode        = nvme_admin_identify,
1839                 .identify.nsid          = cpu_to_le32(nsid),
1840                 .identify.cns           = NVME_ID_CNS_CS_NS,
1841                 .identify.csi           = NVME_CSI_NVM,
1842         };
1843         struct nvme_id_ns_nvm *nvm;
1844         int ret;
1845
1846         nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
1847         if (!nvm)
1848                 return -ENOMEM;
1849
1850         ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
1851         if (ret)
1852                 kfree(nvm);
1853         else
1854                 *nvmp = nvm;
1855         return ret;
1856 }
1857
1858 static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
1859                 struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
1860 {
1861         u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);
1862
1863         /* no support for storage tag formats right now */
1864         if (nvme_elbaf_sts(elbaf))
1865                 return;
1866
1867         head->guard_type = nvme_elbaf_guard_type(elbaf);
1868         switch (head->guard_type) {
1869         case NVME_NVM_NS_64B_GUARD:
1870                 head->pi_size = sizeof(struct crc64_pi_tuple);
1871                 break;
1872         case NVME_NVM_NS_16B_GUARD:
1873                 head->pi_size = sizeof(struct t10_pi_tuple);
1874                 break;
1875         default:
1876                 break;
1877         }
1878 }
1879
1880 static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
1881                 struct nvme_ns_head *head, struct nvme_id_ns *id,
1882                 struct nvme_id_ns_nvm *nvm)
1883 {
1884         head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
1885         head->pi_type = 0;
1886         head->pi_size = 0;
1887         head->pi_offset = 0;
1888         head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
1889         if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1890                 return;
1891
1892         if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
1893                 nvme_configure_pi_elbas(head, id, nvm);
1894         } else {
1895                 head->pi_size = sizeof(struct t10_pi_tuple);
1896                 head->guard_type = NVME_NVM_NS_16B_GUARD;
1897         }
1898
1899         if (head->pi_size && head->ms >= head->pi_size)
1900                 head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1901         if (!(id->dps & NVME_NS_DPS_PI_FIRST))
1902                 head->pi_offset = head->ms - head->pi_size;
1903
1904         if (ctrl->ops->flags & NVME_F_FABRICS) {
1905                 /*
1906                  * The NVMe over Fabrics specification only supports metadata as
1907                  * part of the extended data LBA.  We rely on HCA/HBA support to
1908                  * remap the separate metadata buffer from the block layer.
1909                  */
1910                 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
1911                         return;
1912
1913                 head->features |= NVME_NS_EXT_LBAS;
1914
1915                 /*
1916                  * The current fabrics transport drivers support namespace
1917                  * metadata formats only if nvme_ns_has_pi() returns true.
1918                  * Suppress support for all other formats so the namespace will
1919                  * have a 0 capacity and not be usable through the block stack.
1920                  *
1921                  * Note, this check will need to be modified if any drivers
1922                  * gain the ability to use other metadata formats.
1923                  */
1924                 if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
1925                         head->features |= NVME_NS_METADATA_SUPPORTED;
1926         } else {
1927                 /*
1928                  * For PCIe controllers, we can't easily remap the separate
1929                  * metadata buffer from the block layer and thus require a
1930                  * separate metadata buffer for block layer metadata/PI support.
1931                  * We allow extended LBAs for the passthrough interface, though.
1932                  */
1933                 if (id->flbas & NVME_NS_FLBAS_META_EXT)
1934                         head->features |= NVME_NS_EXT_LBAS;
1935                 else
1936                         head->features |= NVME_NS_METADATA_SUPPORTED;
1937         }
1938 }
1939
1940 static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
1941 {
1942         return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
1943 }
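/*
 * Example: with the driver's 4KiB controller page size each page spans 8
 * sectors, so a max_hw_sectors limit of 256 (128KiB) gives 256 / 8 + 1 == 33
 * segments; the extra segment allows for a data buffer whose first page is
 * not aligned to NVME_CTRL_PAGE_SIZE.
 */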
1944
1945 static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
1946                 struct queue_limits *lim)
1947 {
1948         lim->max_hw_sectors = ctrl->max_hw_sectors;
1949         lim->max_segments = min_t(u32, USHRT_MAX,
1950                 min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
1951         lim->max_integrity_segments = ctrl->max_integrity_segments;
1952         lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
1953         lim->max_segment_size = UINT_MAX;
1954         lim->dma_alignment = 3;
1955 }
1956
1957 static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
1958                 struct queue_limits *lim)
1959 {
1960         struct nvme_ns_head *head = ns->head;
1961         u32 bs = 1U << head->lba_shift;
1962         u32 atomic_bs, phys_bs, io_opt = 0;
1963         bool valid = true;
1964
1965         /*
1966          * The block layer can't support LBA sizes larger than the page size
1967          * or smaller than a sector size yet, so catch this early and don't
1968          * allow block I/O.
1969          */
1970         if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) {
1971                 bs = (1 << 9);
1972                 valid = false;
1973         }
1974
1975         atomic_bs = phys_bs = bs;
1976         if (id->nabo == 0) {
1977                 /*
1978                  * Bit 1 indicates whether NAWUPF is defined for this namespace
1979                  * and whether it should be used instead of AWUPF. If NAWUPF ==
1980                  * 0 then AWUPF must be used instead.
1981                  */
1982                 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1983                         atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1984                 else
1985                         atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1986         }
1987
1988         if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1989                 /* NPWG = Namespace Preferred Write Granularity */
1990                 phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1991                 /* NOWS = Namespace Optimal Write Size */
1992                 io_opt = bs * (1 + le16_to_cpu(id->nows));
1993         }
1994
1995         /*
1996          * Linux filesystems assume writing a single physical block is
1997          * an atomic operation. Hence limit the physical block size to the
1998          * value of the Atomic Write Unit Power Fail parameter.
1999          */
2000         lim->logical_block_size = bs;
2001         lim->physical_block_size = min(phys_bs, atomic_bs);
2002         lim->io_min = phys_bs;
2003         lim->io_opt = io_opt;
2004         if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
2005                 lim->max_write_zeroes_sectors = UINT_MAX;
2006         else
2007                 lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
2008         return valid;
2009 }
2010
2011 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
2012 {
2013         return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
2014 }
2015
2016 static inline bool nvme_first_scan(struct gendisk *disk)
2017 {
2018         /* nvme_alloc_ns() scans the disk prior to adding it */
2019         return !disk_live(disk);
2020 }
2021
2022 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id,
2023                 struct queue_limits *lim)
2024 {
2025         struct nvme_ctrl *ctrl = ns->ctrl;
2026         u32 iob;
2027
2028         if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
2029             is_power_of_2(ctrl->max_hw_sectors))
2030                 iob = ctrl->max_hw_sectors;
2031         else
2032                 iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob));
2033
2034         if (!iob)
2035                 return;
2036
2037         if (!is_power_of_2(iob)) {
2038                 if (nvme_first_scan(ns->disk))
2039                         pr_warn("%s: ignoring unaligned IO boundary:%u\n",
2040                                 ns->disk->disk_name, iob);
2041                 return;
2042         }
2043
2044         if (blk_queue_is_zoned(ns->disk->queue)) {
2045                 if (nvme_first_scan(ns->disk))
2046                         pr_warn("%s: ignoring zoned namespace IO boundary\n",
2047                                 ns->disk->disk_name);
2048                 return;
2049         }
2050
2051         lim->chunk_sectors = iob;
2052 }
2053
2054 static int nvme_update_ns_info_generic(struct nvme_ns *ns,
2055                 struct nvme_ns_info *info)
2056 {
2057         struct queue_limits lim;
2058         int ret;
2059
2060         blk_mq_freeze_queue(ns->disk->queue);
2061         lim = queue_limits_start_update(ns->disk->queue);
2062         nvme_set_ctrl_limits(ns->ctrl, &lim);
2063         ret = queue_limits_commit_update(ns->disk->queue, &lim);
2064         set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2065         blk_mq_unfreeze_queue(ns->disk->queue);
2066
2067         /* Hide the block-interface for these devices */
2068         if (!ret)
2069                 ret = -ENODEV;
2070         return ret;
2071 }
2072
2073 static int nvme_update_ns_info_block(struct nvme_ns *ns,
2074                 struct nvme_ns_info *info)
2075 {
2076         bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
2077         struct queue_limits lim;
2078         struct nvme_id_ns_nvm *nvm = NULL;
2079         struct nvme_id_ns *id;
2080         sector_t capacity;
2081         unsigned lbaf;
2082         int ret;
2083
2084         ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
2085         if (ret)
2086                 return ret;
2087
2088         if (id->ncap == 0) {
2089                 /* namespace not allocated or attached */
2090                 info->is_removed = true;
2091                 ret = -ENODEV;
2092                 goto out;
2093         }
2094
2095         if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
2096                 ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
2097                 if (ret < 0)
2098                         goto out;
2099         }
2100
2101         blk_mq_freeze_queue(ns->disk->queue);
2102         lbaf = nvme_lbaf_index(id->flbas);
2103         ns->head->lba_shift = id->lbaf[lbaf].ds;
2104         ns->head->nuse = le64_to_cpu(id->nuse);
2105         capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
2106
2107         lim = queue_limits_start_update(ns->disk->queue);
2108         nvme_set_ctrl_limits(ns->ctrl, &lim);
2109         nvme_configure_metadata(ns->ctrl, ns->head, id, nvm);
2110         nvme_set_chunk_sectors(ns, id, &lim);
2111         if (!nvme_update_disk_info(ns, id, &lim))
2112                 capacity = 0;
2113         nvme_config_discard(ns, &lim);
2114         if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
2115             ns->head->ids.csi == NVME_CSI_ZNS) {
2116                 ret = nvme_update_zone_info(ns, lbaf, &lim);
2117                 if (ret) {
2118                         blk_mq_unfreeze_queue(ns->disk->queue);
2119                         goto out;
2120                 }
2121         }
2122         ret = queue_limits_commit_update(ns->disk->queue, &lim);
2123         if (ret) {
2124                 blk_mq_unfreeze_queue(ns->disk->queue);
2125                 goto out;
2126         }
2127
2128         /*
2129          * Register a metadata profile for PI, or the plain non-integrity NVMe
2130          * metadata masquerading as Type 0 if supported; otherwise, reject block
2131          * I/O to namespaces with metadata, except when the namespace supports
2132          * PI, as the controller can strip/insert it in that case.
2133          */
2134         if (!nvme_init_integrity(ns->disk, ns->head))
2135                 capacity = 0;
2136
2137         set_capacity_and_notify(ns->disk, capacity);
2138
2139         /*
2140          * Only set the DEAC bit if the device guarantees that reads from
2141          * deallocated data return zeroes.  While the DEAC bit does not
2142          * require that, it must be a no-op if reads from deallocated data
2143          * do not return zeroes.
2144          */
2145         if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
2146                 ns->head->features |= NVME_NS_DEAC;
2147         set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2148         blk_queue_write_cache(ns->disk->queue, vwc, vwc);
2149         set_bit(NVME_NS_READY, &ns->flags);
2150         blk_mq_unfreeze_queue(ns->disk->queue);
2151
2152         if (blk_queue_is_zoned(ns->queue)) {
2153                 ret = blk_revalidate_disk_zones(ns->disk, NULL);
2154                 if (ret && !nvme_first_scan(ns->disk))
2155                         goto out;
2156         }
2157
2158         ret = 0;
2159 out:
2160         kfree(nvm);
2161         kfree(id);
2162         return ret;
2163 }
2164
2165 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
2166 {
2167         bool unsupported = false;
2168         int ret;
2169
2170         switch (info->ids.csi) {
2171         case NVME_CSI_ZNS:
2172                 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
2173                         dev_info(ns->ctrl->device,
2174         "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
2175                                 info->nsid);
2176                         ret = nvme_update_ns_info_generic(ns, info);
2177                         break;
2178                 }
2179                 ret = nvme_update_ns_info_block(ns, info);
2180                 break;
2181         case NVME_CSI_NVM:
2182                 ret = nvme_update_ns_info_block(ns, info);
2183                 break;
2184         default:
2185                 dev_info(ns->ctrl->device,
2186                         "block device for nsid %u not supported (csi %u)\n",
2187                         info->nsid, info->ids.csi);
2188                 ret = nvme_update_ns_info_generic(ns, info);
2189                 break;
2190         }
2191
2192         /*
2193          * If probing fails due to an unsupported feature, hide the block device,
2194          * but still allow other access.
2195          */
2196         if (ret == -ENODEV) {
2197                 ns->disk->flags |= GENHD_FL_HIDDEN;
2198                 set_bit(NVME_NS_READY, &ns->flags);
2199                 unsupported = true;
2200                 ret = 0;
2201         }
2202
2203         if (!ret && nvme_ns_head_multipath(ns->head)) {
2204                 struct queue_limits lim;
2205
2206                 blk_mq_freeze_queue(ns->head->disk->queue);
2207                 if (unsupported)
2208                         ns->head->disk->flags |= GENHD_FL_HIDDEN;
2209                 else
2210                         nvme_init_integrity(ns->head->disk, ns->head);
2211                 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
2212                 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
2213                 nvme_mpath_revalidate_paths(ns);
2214
2215                 lim = queue_limits_start_update(ns->head->disk->queue);
2216                 queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
2217                                         ns->head->disk->disk_name);
2218                 ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
2219                 blk_mq_unfreeze_queue(ns->head->disk->queue);
2220         }
2221
2222         return ret;
2223 }
2224
2225 #ifdef CONFIG_BLK_SED_OPAL
2226 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
2227                 bool send)
2228 {
2229         struct nvme_ctrl *ctrl = data;
2230         struct nvme_command cmd = { };
2231
2232         if (send)
2233                 cmd.common.opcode = nvme_admin_security_send;
2234         else
2235                 cmd.common.opcode = nvme_admin_security_recv;
2236         cmd.common.nsid = 0;
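        /* cdw10: Security Protocol (SECP) in bits 31:24, SPSP in bits 23:08 */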
2237         cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
2238         cmd.common.cdw11 = cpu_to_le32(len);
2239
2240         return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2241                         NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
2242 }
2243
2244 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2245 {
2246         if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
2247                 if (!ctrl->opal_dev)
2248                         ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
2249                 else if (was_suspended)
2250                         opal_unlock_from_suspend(ctrl->opal_dev);
2251         } else {
2252                 free_opal_dev(ctrl->opal_dev);
2253                 ctrl->opal_dev = NULL;
2254         }
2255 }
2256 #else
2257 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2258 {
2259 }
2260 #endif /* CONFIG_BLK_SED_OPAL */
2261
2262 #ifdef CONFIG_BLK_DEV_ZONED
2263 static int nvme_report_zones(struct gendisk *disk, sector_t sector,
2264                 unsigned int nr_zones, report_zones_cb cb, void *data)
2265 {
2266         return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
2267                         data);
2268 }
2269 #else
2270 #define nvme_report_zones       NULL
2271 #endif /* CONFIG_BLK_DEV_ZONED */
2272
2273 const struct block_device_operations nvme_bdev_ops = {
2274         .owner          = THIS_MODULE,
2275         .ioctl          = nvme_ioctl,
2276         .compat_ioctl   = blkdev_compat_ptr_ioctl,
2277         .open           = nvme_open,
2278         .release        = nvme_release,
2279         .getgeo         = nvme_getgeo,
2280         .report_zones   = nvme_report_zones,
2281         .pr_ops         = &nvme_pr_ops,
2282 };
2283
2284 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
2285                 u32 timeout, const char *op)
2286 {
2287         unsigned long timeout_jiffies = jiffies + timeout * HZ;
2288         u32 csts;
2289         int ret;
2290
2291         while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
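                /* a read of all-ones usually means the device dropped off the bus */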
2292                 if (csts == ~0)
2293                         return -ENODEV;
2294                 if ((csts & mask) == val)
2295                         break;
2296
2297                 usleep_range(1000, 2000);
2298                 if (fatal_signal_pending(current))
2299                         return -EINTR;
2300                 if (time_after(jiffies, timeout_jiffies)) {
2301                         dev_err(ctrl->device,
2302                                 "Device not ready; aborting %s, CSTS=0x%x\n",
2303                                 op, csts);
2304                         return -ENODEV;
2305                 }
2306         }
2307
2308         return ret;
2309 }
2310
2311 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2312 {
2313         int ret;
2314
2315         ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2316         if (shutdown)
2317                 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2318         else
2319                 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2320
2321         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2322         if (ret)
2323                 return ret;
2324
2325         if (shutdown) {
2326                 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
2327                                        NVME_CSTS_SHST_CMPLT,
2328                                        ctrl->shutdown_timeout, "shutdown");
2329         }
2330         if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2331                 msleep(NVME_QUIRK_DELAY_AMOUNT);
2332         return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0,
2333                                (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
2334 }
2335 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2336
2337 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2338 {
2339         unsigned dev_page_min;
2340         u32 timeout;
2341         int ret;
2342
2343         ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2344         if (ret) {
2345                 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2346                 return ret;
2347         }
2348         dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2349
2350         if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2351                 dev_err(ctrl->device,
2352                         "Minimum device page size %u too large for host (%u)\n",
2353                         1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2354                 return -ENODEV;
2355         }
2356
2357         if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2358                 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2359         else
2360                 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2361
2362         if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
2363                 ctrl->ctrl_config |= NVME_CC_CRIME;
2364
2365         ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2366         ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2367         ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2368         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2369         if (ret)
2370                 return ret;
2371
2372         /* Flush write to device (required if transport is PCI) */
2373         ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
2374         if (ret)
2375                 return ret;
2376
2377         /* CAP value may change after initial CC write */
2378         ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2379         if (ret)
2380                 return ret;
2381
2382         timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2383         if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2384                 u32 crto, ready_timeout;
2385
2386                 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2387                 if (ret) {
2388                         dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2389                                 ret);
2390                         return ret;
2391                 }
2392
2393                 /*
2394                  * CRTO should always be greater than or equal to CAP.TO, but some
2395                  * devices are known to get this wrong. Use the larger of the
2396                  * two values.
2397                  */
2398                 if (ctrl->ctrl_config & NVME_CC_CRIME)
2399                         ready_timeout = NVME_CRTO_CRIMT(crto);
2400                 else
2401                         ready_timeout = NVME_CRTO_CRWMT(crto);
2402
2403                 if (ready_timeout < timeout)
2404                         dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
2405                                       crto, ctrl->cap);
2406                 else
2407                         timeout = ready_timeout;
2408         }
2409
2410         ctrl->ctrl_config |= NVME_CC_ENABLE;
2411         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2412         if (ret)
2413                 return ret;
2414         return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY,
2415                                (timeout + 1) / 2, "initialisation");
2416 }
2417 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2418
2419 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2420 {
2421         __le64 ts;
2422         int ret;
2423
2424         if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2425                 return 0;
2426
2427         ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2428         ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2429                         NULL);
2430         if (ret)
2431                 dev_warn_once(ctrl->device,
2432                         "could not set timestamp (%d)\n", ret);
2433         return ret;
2434 }
2435
2436 static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
2437 {
2438         struct nvme_feat_host_behavior *host;
2439         u8 acre = 0, lbafee = 0;
2440         int ret;
2441
2442         /* Don't bother enabling the feature if retry delay is not reported */
2443         if (ctrl->crdt[0])
2444                 acre = NVME_ENABLE_ACRE;
2445         if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
2446                 lbafee = NVME_ENABLE_LBAFEE;
2447
2448         if (!acre && !lbafee)
2449                 return 0;
2450
2451         host = kzalloc(sizeof(*host), GFP_KERNEL);
2452         if (!host)
2453                 return 0;
2454
2455         host->acre = acre;
2456         host->lbafee = lbafee;
2457         ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2458                                 host, sizeof(*host), NULL);
2459         kfree(host);
2460         return ret;
2461 }
2462
2463 /*
2464  * The function checks whether the given total (exlat + enlat) latency of
2465  * a power state allows the latter to be used as an APST transition target.
2466  * It does so by comparing the latency to the primary and secondary latency
2467  * tolerances defined by module params. If there's a match, the corresponding
2468  * timeout value is returned and the matching tolerance index (1 or 2) is
2469  * reported.
2470  */
2471 static bool nvme_apst_get_transition_time(u64 total_latency,
2472                 u64 *transition_time, unsigned *last_index)
2473 {
2474         if (total_latency <= apst_primary_latency_tol_us) {
2475                 if (*last_index == 1)
2476                         return false;
2477                 *last_index = 1;
2478                 *transition_time = apst_primary_timeout_ms;
2479                 return true;
2480         }
2481         if (apst_secondary_timeout_ms &&
2482                 total_latency <= apst_secondary_latency_tol_us) {
2483                 if (*last_index <= 2)
2484                         return false;
2485                 *last_index = 2;
2486                 *transition_time = apst_secondary_timeout_ms;
2487                 return true;
2488         }
2489         return false;
2490 }
2491
2492 /*
2493  * APST (Autonomous Power State Transition) lets us program a table of power
2494  * state transitions that the controller will perform automatically.
2495  *
2496  * Depending on module params, one of the two supported techniques will be used:
2497  *
2498  * - If the parameters provide explicit timeouts and tolerances, they will be
2499  *   used to build a table with up to 2 non-operational states to transition to.
2500  *   The default parameter values were selected based on the values used by
2501  *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
2502  *   regeneration of the APST table in the event of switching between external
2503  *   and battery power, the timeouts and tolerances reflect a compromise
2504  *   between values used by Microsoft for AC and battery scenarios.
2505  * - If not, we'll configure the table with a simple heuristic: we are willing
2506  *   to spend at most 2% of the time transitioning between power states.
2507  *   Therefore, when running in any given state, we will enter the next
2508  *   lower-power non-operational state after waiting 50 * (enlat + exlat)
2509  *   microseconds, as long as that state's exit latency is under the requested
2510  *   maximum latency.
2511  *
2512  * We will not autonomously enter any non-operational state for which the total
2513  * latency exceeds ps_max_latency_us.
2514  *
2515  * Users can set ps_max_latency_us to zero to turn off APST.
2516  */
2517 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2518 {
2519         struct nvme_feat_auto_pst *table;
2520         unsigned apste = 0;
2521         u64 max_lat_us = 0;
2522         __le64 target = 0;
2523         int max_ps = -1;
2524         int state;
2525         int ret;
2526         unsigned last_lt_index = UINT_MAX;
2527
2528         /*
2529          * If APST isn't supported or if we haven't been initialized yet,
2530          * then don't do anything.
2531          */
2532         if (!ctrl->apsta)
2533                 return 0;
2534
2535         if (ctrl->npss > 31) {
2536                 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2537                 return 0;
2538         }
2539
2540         table = kzalloc(sizeof(*table), GFP_KERNEL);
2541         if (!table)
2542                 return 0;
2543
2544         if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2545                 /* Turn off APST. */
2546                 dev_dbg(ctrl->device, "APST disabled\n");
2547                 goto done;
2548         }
2549
2550         /*
2551          * Walk through all states from lowest- to highest-power.
2552          * According to the spec, lower-numbered states use more power.  NPSS,
2553          * despite the name, is the index of the lowest-power state, not the
2554          * number of states.
2555          */
2556         for (state = (int)ctrl->npss; state >= 0; state--) {
2557                 u64 total_latency_us, exit_latency_us, transition_ms;
2558
2559                 if (target)
2560                         table->entries[state] = target;
2561
2562                 /*
2563                  * Don't allow transitions to the deepest state if it's quirked
2564                  * off.
2565                  */
2566                 if (state == ctrl->npss &&
2567                     (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2568                         continue;
2569
2570                 /*
2571                  * Is this state a useful non-operational state for higher-power
2572                  * states to autonomously transition to?
2573                  */
2574                 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2575                         continue;
2576
2577                 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2578                 if (exit_latency_us > ctrl->ps_max_latency_us)
2579                         continue;
2580
2581                 total_latency_us = exit_latency_us +
2582                         le32_to_cpu(ctrl->psd[state].entry_lat);
2583
2584                 /*
2585                  * This state is good. It can be used as the APST idle target
2586                  * for higher power states.
2587                  */
2588                 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
2589                         if (!nvme_apst_get_transition_time(total_latency_us,
2590                                         &transition_ms, &last_lt_index))
2591                                 continue;
2592                 } else {
2593                         transition_ms = total_latency_us + 19;
2594                         do_div(transition_ms, 20);
2595                         if (transition_ms > (1 << 24) - 1)
2596                                 transition_ms = (1 << 24) - 1;
2597                 }
2598
2599                 target = cpu_to_le64((state << 3) | (transition_ms << 8));
2600                 if (max_ps == -1)
2601                         max_ps = state;
2602                 if (total_latency_us > max_lat_us)
2603                         max_lat_us = total_latency_us;
2604         }
2605
2606         if (max_ps == -1)
2607                 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2608         else
2609                 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2610                         max_ps, max_lat_us, (int)sizeof(*table), table);
2611         apste = 1;
2612
2613 done:
2614         ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2615                                 table, sizeof(*table), NULL);
2616         if (ret)
2617                 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2618         kfree(table);
2619         return ret;
2620 }
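/*
 * Worked example of the 2% heuristic above: a non-operational state with
 * enlat + exlat == 10000us gets an idle timeout of (10000 + 19) / 20 ==
 * 500ms before the controller may transition into it.  Each 64-bit APST
 * table entry packs the target power state into bits 07:03 and that idle
 * time, in milliseconds, into bits 31:08, which is exactly the
 * (state << 3) | (transition_ms << 8) value built above.
 */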
2621
2622 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2623 {
2624         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2625         u64 latency;
2626
2627         switch (val) {
2628         case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2629         case PM_QOS_LATENCY_ANY:
2630                 latency = U64_MAX;
2631                 break;
2632
2633         default:
2634                 latency = val;
2635         }
2636
2637         if (ctrl->ps_max_latency_us != latency) {
2638                 ctrl->ps_max_latency_us = latency;
2639                 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
2640                         nvme_configure_apst(ctrl);
2641         }
2642 }
2643
2644 struct nvme_core_quirk_entry {
2645         /*
2646          * NVMe model and firmware strings are padded with spaces.  For
2647          * simplicity, strings in the quirk table are padded with NULLs
2648          * instead.
2649          */
2650         u16 vid;
2651         const char *mn;
2652         const char *fr;
2653         unsigned long quirks;
2654 };
2655
2656 static const struct nvme_core_quirk_entry core_quirks[] = {
2657         {
2658                 /*
2659                  * This Toshiba device seems to die when using any APST state.  See:
2660                  * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2661                  */
2662                 .vid = 0x1179,
2663                 .mn = "THNSF5256GPUK TOSHIBA",
2664                 .quirks = NVME_QUIRK_NO_APST,
2665         },
2666         {
2667                 /*
2668                  * This LiteON CL1-3D*-Q11 firmware version has a race
2669                  * condition associated with actions related to suspend to idle.
2670                  * LiteON has resolved the problem in future firmware.
2671                  */
2672                 .vid = 0x14a4,
2673                 .fr = "22301111",
2674                 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2675         },
2676         {
2677                 /*
2678                  * This Kioxia CD6-V Series / HPE PE8030 device times out and
2679                  * aborts I/O during any load, but more easily reproducible
2680                  * with discards (fstrim).
2681                  *
2682                  * The device is left in a state where it is also not possible
2683                  * to use "nvme set-feature" to disable APST, but booting with
2684                  * nvme_core.default_ps_max_latency=0 works.
2685                  */
2686                 .vid = 0x1e0f,
2687                 .mn = "KCD6XVUL6T40",
2688                 .quirks = NVME_QUIRK_NO_APST,
2689         },
2690         {
2691                 /*
2692                  * The external Samsung X5 SSD fails initialization without a
2693                  * delay before checking if it is ready and has a whole set of
2694                  * other problems.  To make this even more interesting, it
2695                  * shares the PCI ID with internal Samsung 970 Evo Plus that
2696                  * does not need or want these quirks.
2697                  */
2698                 .vid = 0x144d,
2699                 .mn = "Samsung Portable SSD X5",
2700                 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
2701                           NVME_QUIRK_NO_DEEPEST_PS |
2702                           NVME_QUIRK_IGNORE_DEV_SUBNQN,
2703         }
2704 };
2705
2706 /* match is null-terminated but idstr is space-padded. */
2707 static bool string_matches(const char *idstr, const char *match, size_t len)
2708 {
2709         size_t matchlen;
2710
2711         if (!match)
2712                 return true;
2713
2714         matchlen = strlen(match);
2715         WARN_ON_ONCE(matchlen > len);
2716
2717         if (memcmp(idstr, match, matchlen))
2718                 return false;
2719
2720         for (; matchlen < len; matchlen++)
2721                 if (idstr[matchlen] != ' ')
2722                         return false;
2723
2724         return true;
2725 }
2726
2727 static bool quirk_matches(const struct nvme_id_ctrl *id,
2728                           const struct nvme_core_quirk_entry *q)
2729 {
2730         return q->vid == le16_to_cpu(id->vid) &&
2731                 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2732                 string_matches(id->fr, q->fr, sizeof(id->fr));
2733 }
2734
2735 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2736                 struct nvme_id_ctrl *id)
2737 {
2738         size_t nqnlen;
2739         int off;
2740
2741         if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2742                 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2743                 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2744                         strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2745                         return;
2746                 }
2747
2748                 if (ctrl->vs >= NVME_VS(1, 2, 1))
2749                         dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2750         }
2751
2752         /*
2753          * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
2754          * Base Specification 2.0.  It is slightly different from the format
2755          * specified there due to historic reasons, and we can't change it now.
2756          */
2757         off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2758                         "nqn.2014.08.org.nvmexpress:%04x%04x",
2759                         le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2760         memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2761         off += sizeof(id->sn);
2762         memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2763         off += sizeof(id->mn);
2764         memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2765 }
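/*
 * Example of the fallback NQN built above (the vendor IDs are arbitrary): a
 * controller with VID 0x8086 and SSVID 0x8086 gets
 * "nqn.2014.08.org.nvmexpress:80868086" followed by its 20-byte serial
 * number and 40-byte model string, copied verbatim including their space
 * padding.
 */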
2766
2767 static void nvme_release_subsystem(struct device *dev)
2768 {
2769         struct nvme_subsystem *subsys =
2770                 container_of(dev, struct nvme_subsystem, dev);
2771
2772         if (subsys->instance >= 0)
2773                 ida_free(&nvme_instance_ida, subsys->instance);
2774         kfree(subsys);
2775 }
2776
2777 static void nvme_destroy_subsystem(struct kref *ref)
2778 {
2779         struct nvme_subsystem *subsys =
2780                         container_of(ref, struct nvme_subsystem, ref);
2781
2782         mutex_lock(&nvme_subsystems_lock);
2783         list_del(&subsys->entry);
2784         mutex_unlock(&nvme_subsystems_lock);
2785
2786         ida_destroy(&subsys->ns_ida);
2787         device_del(&subsys->dev);
2788         put_device(&subsys->dev);
2789 }
2790
2791 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2792 {
2793         kref_put(&subsys->ref, nvme_destroy_subsystem);
2794 }
2795
2796 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2797 {
2798         struct nvme_subsystem *subsys;
2799
2800         lockdep_assert_held(&nvme_subsystems_lock);
2801
2802         /*
2803          * Fail matches for discovery subsystems. This results
2804          * in each discovery controller being bound to a unique subsystem.
2805          * This avoids issues with validating controller values
2806          * that can only be true when there is a single unique subsystem.
2807          * There may be multiple and completely independent entities
2808          * that provide discovery controllers.
2809          */
2810         if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2811                 return NULL;
2812
2813         list_for_each_entry(subsys, &nvme_subsystems, entry) {
2814                 if (strcmp(subsys->subnqn, subsysnqn))
2815                         continue;
2816                 if (!kref_get_unless_zero(&subsys->ref))
2817                         continue;
2818                 return subsys;
2819         }
2820
2821         return NULL;
2822 }
2823
2824 static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
2825 {
2826         return ctrl->opts && ctrl->opts->discovery_nqn;
2827 }
2828
2829 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2830                 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2831 {
2832         struct nvme_ctrl *tmp;
2833
2834         lockdep_assert_held(&nvme_subsystems_lock);
2835
2836         list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2837                 if (nvme_state_terminal(tmp))
2838                         continue;
2839
2840                 if (tmp->cntlid == ctrl->cntlid) {
2841                         dev_err(ctrl->device,
2842                                 "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
2843                                 ctrl->cntlid, dev_name(tmp->device),
2844                                 subsys->subnqn);
2845                         return false;
2846                 }
2847
2848                 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2849                     nvme_discovery_ctrl(ctrl))
2850                         continue;
2851
2852                 dev_err(ctrl->device,
2853                         "Subsystem does not support multiple controllers\n");
2854                 return false;
2855         }
2856
2857         return true;
2858 }
2859
2860 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2861 {
2862         struct nvme_subsystem *subsys, *found;
2863         int ret;
2864
2865         subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2866         if (!subsys)
2867                 return -ENOMEM;
2868
2869         subsys->instance = -1;
2870         mutex_init(&subsys->lock);
2871         kref_init(&subsys->ref);
2872         INIT_LIST_HEAD(&subsys->ctrls);
2873         INIT_LIST_HEAD(&subsys->nsheads);
2874         nvme_init_subnqn(subsys, ctrl, id);
2875         memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2876         memcpy(subsys->model, id->mn, sizeof(subsys->model));
2877         subsys->vendor_id = le16_to_cpu(id->vid);
2878         subsys->cmic = id->cmic;
2879
2880         /* Versions prior to 1.4 don't necessarily report a valid type */
2881         if (id->cntrltype == NVME_CTRL_DISC ||
2882             !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
2883                 subsys->subtype = NVME_NQN_DISC;
2884         else
2885                 subsys->subtype = NVME_NQN_NVME;
2886
2887         if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
2888                 dev_err(ctrl->device,
2889                         "Subsystem %s is not a discovery controller",
2890                         subsys->subnqn);
2891                 kfree(subsys);
2892                 return -EINVAL;
2893         }
2894         subsys->awupf = le16_to_cpu(id->awupf);
2895         nvme_mpath_default_iopolicy(subsys);
2896
2897         subsys->dev.class = &nvme_subsys_class;
2898         subsys->dev.release = nvme_release_subsystem;
2899         subsys->dev.groups = nvme_subsys_attrs_groups;
2900         dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2901         device_initialize(&subsys->dev);
2902
2903         mutex_lock(&nvme_subsystems_lock);
2904         found = __nvme_find_get_subsystem(subsys->subnqn);
2905         if (found) {
2906                 put_device(&subsys->dev);
2907                 subsys = found;
2908
2909                 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2910                         ret = -EINVAL;
2911                         goto out_put_subsystem;
2912                 }
2913         } else {
2914                 ret = device_add(&subsys->dev);
2915                 if (ret) {
2916                         dev_err(ctrl->device,
2917                                 "failed to register subsystem device.\n");
2918                         put_device(&subsys->dev);
2919                         goto out_unlock;
2920                 }
2921                 ida_init(&subsys->ns_ida);
2922                 list_add_tail(&subsys->entry, &nvme_subsystems);
2923         }
2924
2925         ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2926                                 dev_name(ctrl->device));
2927         if (ret) {
2928                 dev_err(ctrl->device,
2929                         "failed to create sysfs link from subsystem.\n");
2930                 goto out_put_subsystem;
2931         }
2932
2933         if (!found)
2934                 subsys->instance = ctrl->instance;
2935         ctrl->subsys = subsys;
2936         list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2937         mutex_unlock(&nvme_subsystems_lock);
2938         return 0;
2939
2940 out_put_subsystem:
2941         nvme_put_subsystem(subsys);
2942 out_unlock:
2943         mutex_unlock(&nvme_subsystems_lock);
2944         return ret;
2945 }
2946
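     /*
      * Issue a Get Log Page admin command.  The transfer length is converted
      * to a dword count split across NUMDL/NUMDU, and the byte offset is
      * split across LPOL/LPOU.
      */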
2947 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
2948                 void *log, size_t size, u64 offset)
2949 {
2950         struct nvme_command c = { };
2951         u32 dwlen = nvme_bytes_to_numd(size);
2952
2953         c.get_log_page.opcode = nvme_admin_get_log_page;
2954         c.get_log_page.nsid = cpu_to_le32(nsid);
2955         c.get_log_page.lid = log_page;
2956         c.get_log_page.lsp = lsp;
2957         c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2958         c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2959         c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2960         c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2961         c.get_log_page.csi = csi;
2962
2963         return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2964 }
2965
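     /*
      * Return the Commands Supported and Effects log for the given command
      * set identifier, reading it from the controller and caching it in
      * ctrl->cels on first use.
      */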
2966 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
2967                                 struct nvme_effects_log **log)
2968 {
2969         struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
2970         int ret;
2971
2972         if (cel)
2973                 goto out;
2974
2975         cel = kzalloc(sizeof(*cel), GFP_KERNEL);
2976         if (!cel)
2977                 return -ENOMEM;
2978
2979         ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
2980                         cel, sizeof(*cel), 0);
2981         if (ret) {
2982                 kfree(cel);
2983                 return ret;
2984         }
2985
2986         xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
2987 out:
2988         *log = cel;
2989         return 0;
2990 }
2991
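     /*
      * Convert a power-of-two value expressed in units of the minimum memory
      * page size (CAP.MPSMIN), such as MDTS or WZSL, into 512-byte sectors.
      * Returns UINT_MAX if the shift would overflow.
      */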
2992 static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
2993 {
2994         u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
2995
2996         if (check_shl_overflow(1U, units + page_shift - 9, &val))
2997                 return UINT_MAX;
2998         return val;
2999 }
3000
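     /*
      * Initialize limits that are not derived from MDTS: the Write Zeroes
      * size and the Dataset Management limits (DMRL/DMRSL), read from the
      * NVM command set specific Identify Controller data where available.
      */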
3001 static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
3002 {
3003         struct nvme_command c = { };
3004         struct nvme_id_ctrl_nvm *id;
3005         int ret;
3006
3007         /*
3008          * Even though the NVMe spec explicitly states that MDTS is not
3009          * applicable to the write-zeroes command, we are cautious and limit
3010          * the size to the controller's max_hw_sectors value, which is based
3011          * on the MDTS field and possibly other limiting factors.
3012          */
3013         if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
3014             !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
3015                 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
3016         else
3017                 ctrl->max_zeroes_sectors = 0;
3018
3019         if (ctrl->subsys->subtype != NVME_NQN_NVME ||
3020             nvme_ctrl_limited_cns(ctrl) ||
3021             test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
3022                 return 0;
3023
3024         id = kzalloc(sizeof(*id), GFP_KERNEL);
3025         if (!id)
3026                 return -ENOMEM;
3027
3028         c.identify.opcode = nvme_admin_identify;
3029         c.identify.cns = NVME_ID_CNS_CS_CTRL;
3030         c.identify.csi = NVME_CSI_NVM;
3031
3032         ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
3033         if (ret)
3034                 goto free_data;
3035
3036         ctrl->dmrl = id->dmrl;
3037         ctrl->dmrsl = le32_to_cpu(id->dmrsl);
3038         if (id->wzsl)
3039                 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
3040
3041 free_data:
3042         if (ret > 0)
3043                 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
3044         kfree(id);
3045         return ret;
3046 }
3047
3048 static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
3049 {
3050         struct nvme_effects_log *log = ctrl->effects;
3051
3052         log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
3053                                                 NVME_CMD_EFFECTS_NCC |
3054                                                 NVME_CMD_EFFECTS_CSE_MASK);
3055         log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
3056                                                 NVME_CMD_EFFECTS_CSE_MASK);
3057
3058         /*
3059          * The spec says the result of a security receive command depends on
3060          * the previous security send command. As such, many vendors log this
3061          * command as one to be submitted only when no other commands to the same
3062          * namespace are outstanding. The intention is to tell the host to
3063          * prevent mixing security send and receive.
3064          *
3065          * This driver can only enforce such exclusive access against IO
3066          * queues, though. We are not readily able to enforce such a rule for
3067          * two commands to the admin queue, which is the only queue that
3068          * matters for this command.
3069          *
3070          * Rather than blindly freezing the IO queues for this effect that
3071          * doesn't even apply to IO, mask it off.
3072          */
3073         log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK);
3074
3075         log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3076         log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3077         log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3078 }
3079
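     /*
      * Set up ctrl->effects: read the command effects log if the controller
      * reports one, otherwise allocate an empty log, then apply the effects
      * the driver knows about regardless of what the controller reports.
      */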
3080 static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3081 {
3082         int ret = 0;
3083
3084         if (ctrl->effects)
3085                 return 0;
3086
3087         if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
3088                 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3089                 if (ret < 0)
3090                         return ret;
3091         }
3092
3093         if (!ctrl->effects) {
3094                 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
3095                 if (!ctrl->effects)
3096                         return -ENOMEM;
3097                 xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
3098         }
3099
3100         nvme_init_known_nvm_effects(ctrl);
3101         return 0;
3102 }
3103
3104 static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3105 {
3106         /*
3107          * In fabrics we need to verify that the cntlid matches the one
3108          * established by the admin connect.
3109          */
3110         if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3111                 dev_err(ctrl->device,
3112                         "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n",
3113                         ctrl->cntlid, le16_to_cpu(id->cntlid));
3114                 return -EINVAL;
3115         }
3116
3117         if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3118                 dev_err(ctrl->device,
3119                         "keep-alive support is mandatory for fabrics\n");
3120                 return -EINVAL;
3121         }
3122
3123         if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) {
3124                 dev_err(ctrl->device,
3125                         "I/O queue command capsule supported size %d < 4\n",
3126                         ctrl->ioccsz);
3127                 return -EINVAL;
3128         }
3129
3130         if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) {
3131                 dev_err(ctrl->device,
3132                         "I/O queue response capsule supported size %d < 1\n",
3133                         ctrl->iorcsz);
3134                 return -EINVAL;
3135         }
3136
3137         if (!ctrl->maxcmd) {
3138                 dev_err(ctrl->device, "Maximum outstanding commands is 0\n");
3139                 return -EINVAL;
3140         }
3141
3142         return 0;
3143 }
3144
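     /*
      * Issue Identify Controller and cache the fields the driver cares
      * about: quirks, subsystem and effects setup on first identification,
      * transfer size limits, timeouts, APST and fabrics parameters.
      */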
3145 static int nvme_init_identify(struct nvme_ctrl *ctrl)
3146 {
3147         struct queue_limits lim;
3148         struct nvme_id_ctrl *id;
3149         u32 max_hw_sectors;
3150         bool prev_apst_enabled;
3151         int ret;
3152
3153         ret = nvme_identify_ctrl(ctrl, &id);
3154         if (ret) {
3155                 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3156                 return -EIO;
3157         }
3158
3159         if (!(ctrl->ops->flags & NVME_F_FABRICS))
3160                 ctrl->cntlid = le16_to_cpu(id->cntlid);
3161
3162         if (!ctrl->identified) {
3163                 unsigned int i;
3164
3165                 /*
3166                  * Check for quirks.  Quirk can depend on firmware version,
3167                  * so, in principle, the set of quirks present can change
3168                  * across a reset.  As a possible future enhancement, we
3169                  * could re-scan for quirks every time we reinitialize
3170                  * the device, but we'd have to make sure that the driver
3171                  * behaves intelligently if the quirks change.
3172                  */
3173                 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
3174                         if (quirk_matches(id, &core_quirks[i]))
3175                                 ctrl->quirks |= core_quirks[i].quirks;
3176                 }
3177
3178                 ret = nvme_init_subsystem(ctrl, id);
3179                 if (ret)
3180                         goto out_free;
3181
3182                 ret = nvme_init_effects(ctrl, id);
3183                 if (ret)
3184                         goto out_free;
3185         }
3186         memcpy(ctrl->subsys->firmware_rev, id->fr,
3187                sizeof(ctrl->subsys->firmware_rev));
3188
3189         if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
3190                 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3191                 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
3192         }
3193
3194         ctrl->crdt[0] = le16_to_cpu(id->crdt1);
3195         ctrl->crdt[1] = le16_to_cpu(id->crdt2);
3196         ctrl->crdt[2] = le16_to_cpu(id->crdt3);
3197
3198         ctrl->oacs = le16_to_cpu(id->oacs);
3199         ctrl->oncs = le16_to_cpu(id->oncs);
3200         ctrl->mtfa = le16_to_cpu(id->mtfa);
3201         ctrl->oaes = le32_to_cpu(id->oaes);
3202         ctrl->wctemp = le16_to_cpu(id->wctemp);
3203         ctrl->cctemp = le16_to_cpu(id->cctemp);
3204
3205         atomic_set(&ctrl->abort_limit, id->acl + 1);
3206         ctrl->vwc = id->vwc;
3207         if (id->mdts)
3208                 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
3209         else
3210                 max_hw_sectors = UINT_MAX;
3211         ctrl->max_hw_sectors =
3212                 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
3213
3214         lim = queue_limits_start_update(ctrl->admin_q);
3215         nvme_set_ctrl_limits(ctrl, &lim);
3216         ret = queue_limits_commit_update(ctrl->admin_q, &lim);
3217         if (ret)
3218                 goto out_free;
3219
3220         ctrl->sgls = le32_to_cpu(id->sgls);
3221         ctrl->kas = le16_to_cpu(id->kas);
3222         ctrl->max_namespaces = le32_to_cpu(id->mnan);
3223         ctrl->ctratt = le32_to_cpu(id->ctratt);
3224
3225         ctrl->cntrltype = id->cntrltype;
3226         ctrl->dctype = id->dctype;
3227
3228         if (id->rtd3e) {
3229                 /* us -> s */
3230                 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
3231
3232                 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
3233                                                  shutdown_timeout, 60);
3234
3235                 if (ctrl->shutdown_timeout != shutdown_timeout)
3236                         dev_info(ctrl->device,
3237                                  "D3 entry latency set to %u seconds\n",
3238                                  ctrl->shutdown_timeout);
3239         } else
3240                 ctrl->shutdown_timeout = shutdown_timeout;
3241
3242         ctrl->npss = id->npss;
3243         ctrl->apsta = id->apsta;
3244         prev_apst_enabled = ctrl->apst_enabled;
3245         if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3246                 if (force_apst && id->apsta) {
3247                         dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3248                         ctrl->apst_enabled = true;
3249                 } else {
3250                         ctrl->apst_enabled = false;
3251                 }
3252         } else {
3253                 ctrl->apst_enabled = id->apsta;
3254         }
3255         memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3256
3257         if (ctrl->ops->flags & NVME_F_FABRICS) {
3258                 ctrl->icdoff = le16_to_cpu(id->icdoff);
3259                 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3260                 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3261                 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
3262
3263                 ret = nvme_check_ctrl_fabric_info(ctrl, id);
3264                 if (ret)
3265                         goto out_free;
3266         } else {
3267                 ctrl->hmpre = le32_to_cpu(id->hmpre);
3268                 ctrl->hmmin = le32_to_cpu(id->hmmin);
3269                 ctrl->hmminds = le32_to_cpu(id->hmminds);
3270                 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3271         }
3272
3273         ret = nvme_mpath_init_identify(ctrl, id);
3274         if (ret < 0)
3275                 goto out_free;
3276
3277         if (ctrl->apst_enabled && !prev_apst_enabled)
3278                 dev_pm_qos_expose_latency_tolerance(ctrl->device);
3279         else if (!ctrl->apst_enabled && prev_apst_enabled)
3280                 dev_pm_qos_hide_latency_tolerance(ctrl->device);
3281
3282 out_free:
3283         kfree(id);
3284         return ret;
3285 }
3286
3287 /*
3288  * Initialize the cached copies of the Identify data and various controller
3289  * registers in our nvme_ctrl structure.  This should be called as soon as
3290  * the admin queue is fully up and running.
3291  */
3292 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
3293 {
3294         int ret;
3295
3296         ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3297         if (ret) {
3298                 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3299                 return ret;
3300         }
3301
3302         ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3303
3304         if (ctrl->vs >= NVME_VS(1, 1, 0))
3305                 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3306
3307         ret = nvme_init_identify(ctrl);
3308         if (ret)
3309                 return ret;
3310
3311         ret = nvme_configure_apst(ctrl);
3312         if (ret < 0)
3313                 return ret;
3314
3315         ret = nvme_configure_timestamp(ctrl);
3316         if (ret < 0)
3317                 return ret;
3318
3319         ret = nvme_configure_host_options(ctrl);
3320         if (ret < 0)
3321                 return ret;
3322
3323         nvme_configure_opal(ctrl, was_suspended);
3324
3325         if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3326                 /*
3327                  * Do not return errors unless we are in a controller reset;
3328                  * the controller works perfectly fine without hwmon.
3329                  */
3330                 ret = nvme_hwmon_init(ctrl);
3331                 if (ret == -EINTR)
3332                         return ret;
3333         }
3334
3335         clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
3336         ctrl->identified = true;
3337
3338         nvme_start_keep_alive(ctrl);
3339
3340         return 0;
3341 }
3342 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
3343
3344 static int nvme_dev_open(struct inode *inode, struct file *file)
3345 {
3346         struct nvme_ctrl *ctrl =
3347                 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3348
3349         switch (nvme_ctrl_state(ctrl)) {
3350         case NVME_CTRL_LIVE:
3351                 break;
3352         default:
3353                 return -EWOULDBLOCK;
3354         }
3355
3356         nvme_get_ctrl(ctrl);
3357         if (!try_module_get(ctrl->ops->module)) {
3358                 nvme_put_ctrl(ctrl);
3359                 return -EINVAL;
3360         }
3361
3362         file->private_data = ctrl;
3363         return 0;
3364 }
3365
3366 static int nvme_dev_release(struct inode *inode, struct file *file)
3367 {
3368         struct nvme_ctrl *ctrl =
3369                 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3370
3371         module_put(ctrl->ops->module);
3372         nvme_put_ctrl(ctrl);
3373         return 0;
3374 }
3375
3376 static const struct file_operations nvme_dev_fops = {
3377         .owner          = THIS_MODULE,
3378         .open           = nvme_dev_open,
3379         .release        = nvme_dev_release,
3380         .unlocked_ioctl = nvme_dev_ioctl,
3381         .compat_ioctl   = compat_ptr_ioctl,
3382         .uring_cmd      = nvme_dev_uring_cmd,
3383 };
3384
3385 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
3386                 unsigned nsid)
3387 {
3388         struct nvme_ns_head *h;
3389
3390         lockdep_assert_held(&ctrl->subsys->lock);
3391
3392         list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
3393                 /*
3394                  * Private namespaces can share NSIDs under some conditions.
3395                  * In that case we can't use the same ns_head for namespaces
3396                  * with the same NSID.
3397                  */
3398                 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
3399                         continue;
3400                 if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
3401                         return h;
3402         }
3403
3404         return NULL;
3405 }
3406
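     /*
      * Reject namespace identifiers (UUID, NGUID or EUI-64) that collide
      * with those of an existing namespace head in the subsystem.
      */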
3407 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
3408                 struct nvme_ns_ids *ids)
3409 {
3410         bool has_uuid = !uuid_is_null(&ids->uuid);
3411         bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
3412         bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
3413         struct nvme_ns_head *h;
3414
3415         lockdep_assert_held(&subsys->lock);
3416
3417         list_for_each_entry(h, &subsys->nsheads, entry) {
3418                 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
3419                         return -EINVAL;
3420                 if (has_nguid &&
3421                     memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
3422                         return -EINVAL;
3423                 if (has_eui64 &&
3424                     memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
3425                         return -EINVAL;
3426         }
3427
3428         return 0;
3429 }
3430
3431 static void nvme_cdev_rel(struct device *dev)
3432 {
3433         ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3434 }
3435
3436 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
3437 {
3438         cdev_device_del(cdev, cdev_device);
3439         put_device(cdev_device);
3440 }
3441
3442 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
3443                 const struct file_operations *fops, struct module *owner)
3444 {
3445         int minor, ret;
3446
3447         minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
3448         if (minor < 0)
3449                 return minor;
3450         cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3451         cdev_device->class = &nvme_ns_chr_class;
3452         cdev_device->release = nvme_cdev_rel;
3453         device_initialize(cdev_device);
3454         cdev_init(cdev, fops);
3455         cdev->owner = owner;
3456         ret = cdev_device_add(cdev, cdev_device);
3457         if (ret)
3458                 put_device(cdev_device);
3459
3460         return ret;
3461 }
3462
3463 static int nvme_ns_chr_open(struct inode *inode, struct file *file)
3464 {
3465         return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3466 }
3467
3468 static int nvme_ns_chr_release(struct inode *inode, struct file *file)
3469 {
3470         nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3471         return 0;
3472 }
3473
3474 static const struct file_operations nvme_ns_chr_fops = {
3475         .owner          = THIS_MODULE,
3476         .open           = nvme_ns_chr_open,
3477         .release        = nvme_ns_chr_release,
3478         .unlocked_ioctl = nvme_ns_chr_ioctl,
3479         .compat_ioctl   = compat_ptr_ioctl,
3480         .uring_cmd      = nvme_ns_chr_uring_cmd,
3481         .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
3482 };
3483
3484 static int nvme_add_ns_cdev(struct nvme_ns *ns)
3485 {
3486         int ret;
3487
3488         ns->cdev_device.parent = ns->ctrl->device;
3489         ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3490                            ns->ctrl->instance, ns->head->instance);
3491         if (ret)
3492                 return ret;
3493
3494         return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3495                              ns->ctrl->ops->module);
3496 }
3497
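     /*
      * Allocate and initialize a new namespace head: assign an instance
      * number, record the namespace identifiers, look up the per-CSI command
      * effects log and set up the multipath disk where applicable.
      */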
3498 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3499                 struct nvme_ns_info *info)
3500 {
3501         struct nvme_ns_head *head;
3502         size_t size = sizeof(*head);
3503         int ret = -ENOMEM;
3504
3505 #ifdef CONFIG_NVME_MULTIPATH
3506         size += num_possible_nodes() * sizeof(struct nvme_ns *);
3507 #endif
3508
3509         head = kzalloc(size, GFP_KERNEL);
3510         if (!head)
3511                 goto out;
3512         ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
3513         if (ret < 0)
3514                 goto out_free_head;
3515         head->instance = ret;
3516         INIT_LIST_HEAD(&head->list);
3517         ret = init_srcu_struct(&head->srcu);
3518         if (ret)
3519                 goto out_ida_remove;
3520         head->subsys = ctrl->subsys;
3521         head->ns_id = info->nsid;
3522         head->ids = info->ids;
3523         head->shared = info->is_shared;
3524         ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
3525         ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
3526         kref_init(&head->ref);
3527
3528         if (head->ids.csi) {
3529                 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3530                 if (ret)
3531                         goto out_cleanup_srcu;
3532         } else
3533                 head->effects = ctrl->effects;
3534
3535         ret = nvme_mpath_alloc_disk(ctrl, head);
3536         if (ret)
3537                 goto out_cleanup_srcu;
3538
3539         list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3540
3541         kref_get(&ctrl->subsys->ref);
3542
3543         return head;
3544 out_cleanup_srcu:
3545         cleanup_srcu_struct(&head->srcu);
3546 out_ida_remove:
3547         ida_free(&ctrl->subsys->ns_ida, head->instance);
3548 out_free_head:
3549         kfree(head);
3550 out:
3551         if (ret > 0)
3552                 ret = blk_status_to_errno(nvme_error_status(ret));
3553         return ERR_PTR(ret);
3554 }
3555
3556 static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
3557                 struct nvme_ns_ids *ids)
3558 {
3559         struct nvme_subsystem *s;
3560         int ret = 0;
3561
3562         /*
3563          * Note that this check is racy as we try to avoid holding the global
3564          * lock over the whole ns_head creation.  But it is only intended as
3565          * a sanity check anyway.
3566          */
3567         mutex_lock(&nvme_subsystems_lock);
3568         list_for_each_entry(s, &nvme_subsystems, entry) {
3569                 if (s == this)
3570                         continue;
3571                 mutex_lock(&s->lock);
3572                 ret = nvme_subsys_check_duplicate_ids(s, ids);
3573                 mutex_unlock(&s->lock);
3574                 if (ret)
3575                         break;
3576         }
3577         mutex_unlock(&nvme_subsystems_lock);
3578
3579         return ret;
3580 }
3581
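     /*
      * Attach a namespace to its ns_head: check for duplicate identifiers
      * across and within the subsystem, then reuse an existing matching head
      * or allocate a new one.
      */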
3582 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
3583 {
3584         struct nvme_ctrl *ctrl = ns->ctrl;
3585         struct nvme_ns_head *head = NULL;
3586         int ret;
3587
3588         ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
3589         if (ret) {
3590                 /*
3591                  * We've found two different namespaces on two different
3592                  * subsystems that report the same ID.  This is pretty nasty
3593                  * for anything that actually requires unique device
3594                  * identification.  In the kernel we need this for multipathing,
3595                  * and in user space the /dev/disk/by-id/ links rely on it.
3596                  *
3597                  * If the device also claims to be multi-path capable, back off
3598                  * here now and refuse to probe the second device as this is a
3599                  * recipe for data corruption.  If not, this is probably a
3600                  * cheap consumer device on the PCIe bus, so let the user
3601                  * proceed and use the shiny toy, but warn that with a changed
3602                  * probing order (which due to our async probing could just be
3603                  * one device taking longer to start up) the other device could
3604                  * show up at any time.
3605                  */
3606                 nvme_print_device_info(ctrl);
3607                 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
3608                     ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
3609                      info->is_shared)) {
3610                         dev_err(ctrl->device,
3611                                 "ignoring nsid %d because of duplicate IDs\n",
3612                                 info->nsid);
3613                         return ret;
3614                 }
3615
3616                 dev_err(ctrl->device,
3617                         "clearing duplicate IDs for nsid %d\n", info->nsid);
3618                 dev_err(ctrl->device,
3619                         "use of /dev/disk/by-id/ may cause data corruption\n");
3620                 memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
3621                 memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
3622                 memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
3623                 ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
3624         }
3625
3626         mutex_lock(&ctrl->subsys->lock);
3627         head = nvme_find_ns_head(ctrl, info->nsid);
3628         if (!head) {
3629                 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
3630                 if (ret) {
3631                         dev_err(ctrl->device,
3632                                 "duplicate IDs in subsystem for nsid %d\n",
3633                                 info->nsid);
3634                         goto out_unlock;
3635                 }
3636                 head = nvme_alloc_ns_head(ctrl, info);
3637                 if (IS_ERR(head)) {
3638                         ret = PTR_ERR(head);
3639                         goto out_unlock;
3640                 }
3641         } else {
3642                 ret = -EINVAL;
3643                 if (!info->is_shared || !head->shared) {
3644                         dev_err(ctrl->device,
3645                                 "Duplicate unshared namespace %d\n",
3646                                 info->nsid);
3647                         goto out_put_ns_head;
3648                 }
3649                 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
3650                         dev_err(ctrl->device,
3651                                 "IDs don't match for shared namespace %d\n",
3652                                         info->nsid);
3653                         goto out_put_ns_head;
3654                 }
3655
3656                 if (!multipath) {
3657                         dev_warn(ctrl->device,
3658                                 "Found shared namespace %d, but multipathing not supported.\n",
3659                                 info->nsid);
3660                         dev_warn_once(ctrl->device,
3661                                 "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
3662                 }
3663         }
3664
3665         list_add_tail_rcu(&ns->siblings, &head->list);
3666         ns->head = head;
3667         mutex_unlock(&ctrl->subsys->lock);
3668         return 0;
3669
3670 out_put_ns_head:
3671         nvme_put_ns_head(head);
3672 out_unlock:
3673         mutex_unlock(&ctrl->subsys->lock);
3674         return ret;
3675 }
3676
3677 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3678 {
3679         struct nvme_ns *ns, *ret = NULL;
3680
3681         down_read(&ctrl->namespaces_rwsem);
3682         list_for_each_entry(ns, &ctrl->namespaces, list) {
3683                 if (ns->head->ns_id == nsid) {
3684                         if (!nvme_get_ns(ns))
3685                                 continue;
3686                         ret = ns;
3687                         break;
3688                 }
3689                 if (ns->head->ns_id > nsid)
3690                         break;
3691         }
3692         up_read(&ctrl->namespaces_rwsem);
3693         return ret;
3694 }
3695 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
3696
3697 /*
3698  * Add the namespace to the controller list while keeping the list ordered.
3699  */
3700 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
3701 {
3702         struct nvme_ns *tmp;
3703
3704         list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
3705                 if (tmp->head->ns_id < ns->head->ns_id) {
3706                         list_add(&ns->list, &tmp->list);
3707                         return;
3708                 }
3709         }
3710         list_add(&ns->list, &ns->ctrl->namespaces);
3711 }
3712
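     /*
      * Allocate a new namespace: set up the gendisk and request queue,
      * attach the namespace to its ns_head and the controller list, and
      * register the block device (plus a character device for namespaces
      * that are not handled by the multipath device node).
      */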
3713 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
3714 {
3715         struct nvme_ns *ns;
3716         struct gendisk *disk;
3717         int node = ctrl->numa_node;
3718
3719         ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3720         if (!ns)
3721                 return;
3722
3723         disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns);
3724         if (IS_ERR(disk))
3725                 goto out_free_ns;
3726         disk->fops = &nvme_bdev_ops;
3727         disk->private_data = ns;
3728
3729         ns->disk = disk;
3730         ns->queue = disk->queue;
3731
3732         if (ctrl->opts && ctrl->opts->data_digest)
3733                 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
3734
3735         blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3736         if (ctrl->ops->supports_pci_p2pdma &&
3737             ctrl->ops->supports_pci_p2pdma(ctrl))
3738                 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3739
3740         ns->ctrl = ctrl;
3741         kref_init(&ns->kref);
3742
3743         if (nvme_init_ns_head(ns, info))
3744                 goto out_cleanup_disk;
3745
3746          * If multipathing is enabled, the device names for all disks, not
3747          * just those that represent shared namespaces, need to be based on the
3748          * just those that represent shared namespaces needs to be based on the
3749          * subsystem instance.  Using the controller instance for private
3750          * namespaces could lead to naming collisions between shared and private
3751          * namespaces if they don't use a common numbering scheme.
3752          *
3753          * If multipathing is not enabled, disk names must use the controller
3754          * instance as shared namespaces will show up as multiple block
3755          * devices.
3756          */
3757         if (nvme_ns_head_multipath(ns->head)) {
3758                 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
3759                         ctrl->instance, ns->head->instance);
3760                 disk->flags |= GENHD_FL_HIDDEN;
3761         } else if (multipath) {
3762                 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
3763                         ns->head->instance);
3764         } else {
3765                 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
3766                         ns->head->instance);
3767         }
3768
3769         if (nvme_update_ns_info(ns, info))
3770                 goto out_unlink_ns;
3771
3772         down_write(&ctrl->namespaces_rwsem);
3773         /*
3774          * Ensure that no namespaces are added to the ctrl list after the queues
3775          * are frozen, thereby avoiding a deadlock between scan and reset.
3776          */
3777         if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
3778                 up_write(&ctrl->namespaces_rwsem);
3779                 goto out_unlink_ns;
3780         }
3781         nvme_ns_add_to_ctrl_list(ns);
3782         up_write(&ctrl->namespaces_rwsem);
3783         nvme_get_ctrl(ctrl);
3784
3785         if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
3786                 goto out_cleanup_ns_from_list;
3787
3788         if (!nvme_ns_head_multipath(ns->head))
3789                 nvme_add_ns_cdev(ns);
3790
3791         nvme_mpath_add_disk(ns, info->anagrpid);
3792         nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3793
3794         /*
3795          * Set ns->disk->device->driver_data to ns so we can access
3796          * ns->head->passthru_err_log_enabled in
3797          * nvme_io_passthru_err_log_enabled_[store | show]().
3798          */
3799         dev_set_drvdata(disk_to_dev(ns->disk), ns);
3800
3801         return;
3802
3803  out_cleanup_ns_from_list:
3804         nvme_put_ctrl(ctrl);
3805         down_write(&ctrl->namespaces_rwsem);
3806         list_del_init(&ns->list);
3807         up_write(&ctrl->namespaces_rwsem);
3808  out_unlink_ns:
3809         mutex_lock(&ctrl->subsys->lock);
3810         list_del_rcu(&ns->siblings);
3811         if (list_empty(&ns->head->list))
3812                 list_del_init(&ns->head->entry);
3813         mutex_unlock(&ctrl->subsys->lock);
3814         nvme_put_ns_head(ns->head);
3815  out_cleanup_disk:
3816         put_disk(disk);
3817  out_free_ns:
3818         kfree(ns);
3819 }
3820
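     /*
      * Tear down a namespace: mark it as going away, drop it from the
      * multipath current path and from the ns_head and controller lists,
      * then remove its block and character devices.
      */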
3821 static void nvme_ns_remove(struct nvme_ns *ns)
3822 {
3823         bool last_path = false;
3824
3825         if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3826                 return;
3827
3828         clear_bit(NVME_NS_READY, &ns->flags);
3829         set_capacity(ns->disk, 0);
3830         nvme_fault_inject_fini(&ns->fault_inject);
3831
3832         /*
3833          * Ensure that !NVME_NS_READY is seen by other threads to prevent
3834          * this ns from going back into current_path.
3835          */
3836         synchronize_srcu(&ns->head->srcu);
3837
3838         /* wait for concurrent submissions */
3839         if (nvme_mpath_clear_current_path(ns))
3840                 synchronize_srcu(&ns->head->srcu);
3841
3842         mutex_lock(&ns->ctrl->subsys->lock);
3843         list_del_rcu(&ns->siblings);
3844         if (list_empty(&ns->head->list)) {
3845                 list_del_init(&ns->head->entry);
3846                 last_path = true;
3847         }
3848         mutex_unlock(&ns->ctrl->subsys->lock);
3849
3850         /* guarantee not available in head->list */
3851         synchronize_srcu(&ns->head->srcu);
3852
3853         if (!nvme_ns_head_multipath(ns->head))
3854                 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
3855         del_gendisk(ns->disk);
3856
3857         down_write(&ns->ctrl->namespaces_rwsem);
3858         list_del_init(&ns->list);
3859         up_write(&ns->ctrl->namespaces_rwsem);
3860
3861         if (last_path)
3862                 nvme_mpath_shutdown_disk(ns->head);
3863         nvme_put_ns(ns);
3864 }
3865
3866 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3867 {
3868         struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3869
3870         if (ns) {
3871                 nvme_ns_remove(ns);
3872                 nvme_put_ns(ns);
3873         }
3874 }
3875
3876 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
3877 {
3878         int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
3879
3880         if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
3881                 dev_err(ns->ctrl->device,
3882                         "identifiers changed for nsid %d\n", ns->head->ns_id);
3883                 goto out;
3884         }
3885
3886         ret = nvme_update_ns_info(ns, info);
3887 out:
3888         /*
3889          * Only remove the namespace if we got a fatal error back from the
3890          * device, otherwise ignore the error and just move on.
3891          *
3892          * TODO: we should probably schedule a delayed retry here.
3893          */
3894         if (ret > 0 && (ret & NVME_SC_DNR))
3895                 nvme_ns_remove(ns);
3896 }
3897
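     /*
      * Examine a single NSID reported by the controller: fetch its
      * identifiers, then either revalidate an existing namespace or allocate
      * a new one.  Namespaces that are not ready are skipped until an AEN
      * triggers a rescan.
      */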
3898 static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3899 {
3900         struct nvme_ns_info info = { .nsid = nsid };
3901         struct nvme_ns *ns;
3902         int ret;
3903
3904         if (nvme_identify_ns_descs(ctrl, &info))
3905                 return;
3906
3907         if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
3908                 dev_warn(ctrl->device,
3909                         "command set not reported for nsid: %d\n", nsid);
3910                 return;
3911         }
3912
3913         /*
3914          * If available, try to use the Command Set Independent Identify
3915          * Namespace data structure to find all the generic information that is
3916          * needed to set up a namespace.  If not, fall back to the legacy version.
3917          */
3918         if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
3919             (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
3920                 ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
3921         else
3922                 ret = nvme_ns_info_from_identify(ctrl, &info);
3923
3924         if (info.is_removed)
3925                 nvme_ns_remove_by_nsid(ctrl, nsid);
3926
3927         /*
3928          * Ignore the namespace if it is not ready. We will get an AEN once it
3929          * becomes ready and restart the scan.
3930          */
3931         if (ret || !info.is_ready)
3932                 return;
3933
3934         ns = nvme_find_get_ns(ctrl, nsid);
3935         if (ns) {
3936                 nvme_validate_ns(ns, &info);
3937                 nvme_put_ns(ns);
3938         } else {
3939                 nvme_alloc_ns(ctrl, &info);
3940         }
3941 }
3942
3943 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3944                                         unsigned nsid)
3945 {
3946         struct nvme_ns *ns, *next;
3947         LIST_HEAD(rm_list);
3948
3949         down_write(&ctrl->namespaces_rwsem);
3950         list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3951                 if (ns->head->ns_id > nsid)
3952                         list_move_tail(&ns->list, &rm_list);
3953         }
3954         up_write(&ctrl->namespaces_rwsem);
3955
3956         list_for_each_entry_safe(ns, next, &rm_list, list)
3957                 nvme_ns_remove(ns);
3958
3959 }
3960
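     /*
      * Scan namespaces using the Active Namespace ID list, removing any
      * NSIDs between reported entries that have disappeared as well as
      * everything beyond the last reported NSID.
      */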
3961 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
3962 {
3963         const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
3964         __le32 *ns_list;
3965         u32 prev = 0;
3966         int ret = 0, i;
3967
3968         ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3969         if (!ns_list)
3970                 return -ENOMEM;
3971
3972         for (;;) {
3973                 struct nvme_command cmd = {
3974                         .identify.opcode        = nvme_admin_identify,
3975                         .identify.cns           = NVME_ID_CNS_NS_ACTIVE_LIST,
3976                         .identify.nsid          = cpu_to_le32(prev),
3977                 };
3978
3979                 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
3980                                             NVME_IDENTIFY_DATA_SIZE);
3981                 if (ret) {
3982                         dev_warn(ctrl->device,
3983                                 "Identify NS List failed (status=0x%x)\n", ret);
3984                         goto free;
3985                 }
3986
3987                 for (i = 0; i < nr_entries; i++) {
3988                         u32 nsid = le32_to_cpu(ns_list[i]);
3989
3990                         if (!nsid)      /* end of the list? */
3991                                 goto out;
3992                         nvme_scan_ns(ctrl, nsid);
3993                         while (++prev < nsid)
3994                                 nvme_ns_remove_by_nsid(ctrl, prev);
3995                 }
3996         }
3997  out:
3998         nvme_remove_invalid_namespaces(ctrl, prev);
3999  free:
4000         kfree(ns_list);
4001         return ret;
4002 }
4003
4004 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4005 {
4006         struct nvme_id_ctrl *id;
4007         u32 nn, i;
4008
4009         if (nvme_identify_ctrl(ctrl, &id))
4010                 return;
4011         nn = le32_to_cpu(id->nn);
4012         kfree(id);
4013
4014         for (i = 1; i <= nn; i++)
4015                 nvme_scan_ns(ctrl, i);
4016
4017         nvme_remove_invalid_namespaces(ctrl, nn);
4018 }
4019
4020 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4021 {
4022         size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
4023         __le32 *log;
4024         int error;
4025
4026         log = kzalloc(log_size, GFP_KERNEL);
4027         if (!log)
4028                 return;
4029
4030         /*
4031          * We need to read the log to clear the AEN, but we don't want to rely
4032          * on it for the changed namespace information as userspace could have
4033          * raced with us in reading the log page, which could cause us to miss
4034          * updates.
4035          */
4036         error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
4037                         NVME_CSI_NVM, log, log_size, 0);
4038         if (error)
4039                 dev_warn(ctrl->device,
4040                         "reading changed ns log failed: %d\n", error);
4041
4042         kfree(log);
4043 }
4044
4045 static void nvme_scan_work(struct work_struct *work)
4046 {
4047         struct nvme_ctrl *ctrl =
4048                 container_of(work, struct nvme_ctrl, scan_work);
4049         int ret;
4050
4051         /* No tagset on a live ctrl means IO queues could not be created */
4052         if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
4053                 return;
4054
4055         /*
4056          * Identify controller limits can change at controller reset due to a
4057          * new firmware download; even though this is not common, we cannot
4058          * ignore such a scenario. The controller's non-MDTS limits are reported
4059          * in units of logical blocks, which depend on the format of the attached
4060          * namespace. Hence re-read the limits at the time of ns allocation.
4061          */
4062         ret = nvme_init_non_mdts_limits(ctrl);
4063         if (ret < 0) {
4064                 dev_warn(ctrl->device,
4065                         "reading non-mdts-limits failed: %d\n", ret);
4066                 return;
4067         }
4068
4069         if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4070                 dev_info(ctrl->device, "rescanning namespaces.\n");
4071                 nvme_clear_changed_ns_log(ctrl);
4072         }
4073
4074         mutex_lock(&ctrl->scan_lock);
4075         if (nvme_ctrl_limited_cns(ctrl)) {
4076                 nvme_scan_ns_sequential(ctrl);
4077         } else {
4078                 /*
4079                  * Fall back to sequential scan if DNR is set to handle broken
4080                  * devices which should support Identify NS List (as per the VS
4081                  * they report) but don't actually support it.
4082                  */
4083                 ret = nvme_scan_ns_list(ctrl);
4084                 if (ret > 0 && ret & NVME_SC_DNR)
4085                         nvme_scan_ns_sequential(ctrl);
4086         }
4087         mutex_unlock(&ctrl->scan_lock);
4088 }
4089
4090 /*
4091  * This function iterates the namespace list unlocked to allow recovery from
4092  * controller failure. It is up to the caller to ensure the namespace list is
4093  * not modified by scan work while this function is executing.
4094  */
4095 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
4096 {
4097         struct nvme_ns *ns, *next;
4098         LIST_HEAD(ns_list);
4099
4100         /*
4101          * Make sure to requeue I/O to all namespaces as these requests
4102          * might result from the scan itself and must complete
4103          * for the scan_work to make progress.
4104          */
4105         nvme_mpath_clear_ctrl_paths(ctrl);
4106
4107         /*
4108          * Unquiesce io queues so any pending IO won't hang, especially
4109          * those submitted from scan work
4110          */
4111         nvme_unquiesce_io_queues(ctrl);
4112
4113         /* prevent racing with ns scanning */
4114         flush_work(&ctrl->scan_work);
4115
4116         /*
4117          * The dead state indicates the controller was not gracefully
4118          * disconnected. In that case, we won't be able to flush any data while
4119          * removing the namespaces' disks; fail all the queues now to avoid
4120          * potentially having to clean up the failed sync later.
4121          */
4122         if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
4123                 nvme_mark_namespaces_dead(ctrl);
4124
4125         /* this is a no-op when called from the controller reset handler */
4126         nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
4127
4128         down_write(&ctrl->namespaces_rwsem);
4129         list_splice_init(&ctrl->namespaces, &ns_list);
4130         up_write(&ctrl->namespaces_rwsem);
4131
4132         list_for_each_entry_safe(ns, next, &ns_list, list)
4133                 nvme_ns_remove(ns);
4134 }
4135 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4136
4137 static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env)
4138 {
4139         const struct nvme_ctrl *ctrl =
4140                 container_of(dev, struct nvme_ctrl, ctrl_device);
4141         struct nvmf_ctrl_options *opts = ctrl->opts;
4142         int ret;
4143
4144         ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4145         if (ret)
4146                 return ret;
4147
4148         if (opts) {
4149                 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4150                 if (ret)
4151                         return ret;
4152
4153                 ret = add_uevent_var(env, "NVME_TRSVCID=%s",
4154                                 opts->trsvcid ?: "none");
4155                 if (ret)
4156                         return ret;
4157
4158                 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
4159                                 opts->host_traddr ?: "none");
4160                 if (ret)
4161                         return ret;
4162
4163                 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
4164                                 opts->host_iface ?: "none");
4165         }
4166         return ret;
4167 }
4168
4169 static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
4170 {
4171         char *envp[2] = { envdata, NULL };
4172
4173         kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4174 }
4175
4176 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
4177 {
4178         char *envp[2] = { NULL, NULL };
4179         u32 aen_result = ctrl->aen_result;
4180
4181         ctrl->aen_result = 0;
4182         if (!aen_result)
4183                 return;
4184
4185         envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
4186         if (!envp[0])
4187                 return;
4188         kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4189         kfree(envp[0]);
4190 }
4191
4192 static void nvme_async_event_work(struct work_struct *work)
4193 {
4194         struct nvme_ctrl *ctrl =
4195                 container_of(work, struct nvme_ctrl, async_event_work);
4196
4197         nvme_aen_uevent(ctrl);
4198
4199         /*
4200          * The transport drivers must guarantee AER submission here is safe by
4201          * flushing ctrl async_event_work after changing the controller state
4202          * from LIVE and before freeing the admin queue.
4203          */
4204         if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
4205                 ctrl->ops->submit_async_event(ctrl);
4206 }
4207
4208 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
4209 {
4210
4211         u32 csts;
4212
4213         if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4214                 return false;
4215
4216         if (csts == ~0)
4217                 return false;
4218
4219         return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4220 }
4221
4222 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
4223 {
4224         struct nvme_fw_slot_info_log *log;
4225         u8 next_fw_slot, cur_fw_slot;
4226
4227         log = kmalloc(sizeof(*log), GFP_KERNEL);
4228         if (!log)
4229                 return;
4230
4231         if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
4232                          log, sizeof(*log), 0)) {
4233                 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4234                 goto out_free_log;
4235         }
4236
4237         cur_fw_slot = log->afi & 0x7;
4238         next_fw_slot = (log->afi & 0x70) >> 4;
4239         if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) {
4240                 dev_info(ctrl->device,
4241                          "Firmware is activated after next Controller Level Reset\n");
4242                 goto out_free_log;
4243         }
4244
4245         memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
4246                 sizeof(ctrl->subsys->firmware_rev));
4247
4248 out_free_log:
4249         kfree(log);
4250 }
4251
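     /*
      * Handle a firmware activation notice: quiesce I/O and poll CSTS.PP
      * until the activation completes (or times out and schedules a reset),
      * then resume I/O and re-read the firmware slot log to clear the AER.
      */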
4252 static void nvme_fw_act_work(struct work_struct *work)
4253 {
4254         struct nvme_ctrl *ctrl = container_of(work,
4255                                 struct nvme_ctrl, fw_act_work);
4256         unsigned long fw_act_timeout;
4257
4258         nvme_auth_stop(ctrl);
4259
4260         if (ctrl->mtfa)
4261                 fw_act_timeout = jiffies +
4262                                 msecs_to_jiffies(ctrl->mtfa * 100);
4263         else
4264                 fw_act_timeout = jiffies +
4265                                 msecs_to_jiffies(admin_timeout * 1000);
4266
4267         nvme_quiesce_io_queues(ctrl);
4268         while (nvme_ctrl_pp_status(ctrl)) {
4269                 if (time_after(jiffies, fw_act_timeout)) {
4270                         dev_warn(ctrl->device,
4271                                 "Fw activation timeout, reset controller\n");
4272                         nvme_try_sched_reset(ctrl);
4273                         return;
4274                 }
4275                 msleep(100);
4276         }
4277
4278         if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4279                 return;
4280
4281         nvme_unquiesce_io_queues(ctrl);
4282         /* read FW slot information to clear the AER */
4283         nvme_get_fw_slot_info(ctrl);
4284
4285         queue_work(nvme_wq, &ctrl->async_event_work);
4286 }
4287
4288 static u32 nvme_aer_type(u32 result)
4289 {
4290         return result & 0x7;
4291 }
4292
4293 static u32 nvme_aer_subtype(u32 result)
4294 {
4295         return (result & 0xff00) >> 8;
4296 }
4297
4298 static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
4299 {
4300         u32 aer_notice_type = nvme_aer_subtype(result);
4301         bool requeue = true;
4302
4303         switch (aer_notice_type) {
4304         case NVME_AER_NOTICE_NS_CHANGED:
4305                 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4306                 nvme_queue_scan(ctrl);
4307                 break;
4308         case NVME_AER_NOTICE_FW_ACT_STARTING:
4309                 /*
4310                  * We are (ab)using the RESETTING state to prevent subsequent
4311                  * recovery actions from interfering with the controller's
4312                  * firmware activation.
4313                  */
4314                 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
4315                         requeue = false;
4316                         queue_work(nvme_wq, &ctrl->fw_act_work);
4317                 }
4318                 break;
4319 #ifdef CONFIG_NVME_MULTIPATH
4320         case NVME_AER_NOTICE_ANA:
4321                 if (!ctrl->ana_log_buf)
4322                         break;
4323                 queue_work(nvme_wq, &ctrl->ana_work);
4324                 break;
4325 #endif
4326         case NVME_AER_NOTICE_DISC_CHANGED:
4327                 ctrl->aen_result = result;
4328                 break;
4329         default:
4330                 dev_warn(ctrl->device, "async event result %08x\n", result);
4331         }
4332         return requeue;
4333 }
4334
4335 static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
4336 {
4337         dev_warn(ctrl->device, "resetting controller due to AER\n");
4338         nvme_reset_ctrl(ctrl);
4339 }
4340
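     /*
      * Completion handler for Asynchronous Event Requests: dispatch the
      * result by event type and, unless the event is handled elsewhere,
      * queue async_event_work to report it and submit a new AER.
      */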
4341 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4342                 volatile union nvme_result *res)
4343 {
4344         u32 result = le32_to_cpu(res->u32);
4345         u32 aer_type = nvme_aer_type(result);
4346         u32 aer_subtype = nvme_aer_subtype(result);
4347         bool requeue = true;
4348
4349         if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4350                 return;
4351
4352         trace_nvme_async_event(ctrl, result);
4353         switch (aer_type) {
4354         case NVME_AER_NOTICE:
4355                 requeue = nvme_handle_aen_notice(ctrl, result);
4356                 break;
4357         case NVME_AER_ERROR:
4358                 /*
4359                  * For a persistent internal error, don't run async_event_work
4360                  * to submit a new AER. The controller reset will do it.
4361                  */
4362                 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
4363                         nvme_handle_aer_persistent_error(ctrl);
4364                         return;
4365                 }
4366                 fallthrough;
4367         case NVME_AER_SMART:
4368         case NVME_AER_CSS:
4369         case NVME_AER_VS:
4370                 ctrl->aen_result = result;
4371                 break;
4372         default:
4373                 break;
4374         }
4375
4376         if (requeue)
4377                 queue_work(nvme_wq, &ctrl->async_event_work);
4378 }
4379 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4380
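/*
 * Minimal usage sketch for a transport driver allocating its admin tag set
 * and admin/fabrics queues (the my_* names below are hypothetical, not part
 * of this file):
 *
 *	ret = nvme_alloc_admin_tag_set(&my_ctrl->ctrl, &my_ctrl->admin_tag_set,
 *				       &my_admin_mq_ops,
 *				       sizeof(struct my_request));
 *	if (ret)
 *		return ret;
 *	...
 *	nvme_remove_admin_tag_set(&my_ctrl->ctrl);	(on teardown)
 */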
4381 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
4382                 const struct blk_mq_ops *ops, unsigned int cmd_size)
4383 {
4384         struct queue_limits lim = {};
4385         int ret;
4386
4387         memset(set, 0, sizeof(*set));
4388         set->ops = ops;
4389         set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
4390         if (ctrl->ops->flags & NVME_F_FABRICS)
4391                 /* Reserved for fabric connect and keep alive */
4392                 set->reserved_tags = 2;
4393         set->numa_node = ctrl->numa_node;
4394         set->flags = BLK_MQ_F_NO_SCHED;
4395         if (ctrl->ops->flags & NVME_F_BLOCKING)
4396                 set->flags |= BLK_MQ_F_BLOCKING;
4397         set->cmd_size = cmd_size;
4398         set->driver_data = ctrl;
4399         set->nr_hw_queues = 1;
4400         set->timeout = NVME_ADMIN_TIMEOUT;
4401         ret = blk_mq_alloc_tag_set(set);
4402         if (ret)
4403                 return ret;
4404
4405         ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
4406         if (IS_ERR(ctrl->admin_q)) {
4407                 ret = PTR_ERR(ctrl->admin_q);
4408                 goto out_free_tagset;
4409         }
4410
4411         if (ctrl->ops->flags & NVME_F_FABRICS) {
4412                 ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL);
4413                 if (IS_ERR(ctrl->fabrics_q)) {
4414                         ret = PTR_ERR(ctrl->fabrics_q);
4415                         goto out_cleanup_admin_q;
4416                 }
4417         }
4418
4419         ctrl->admin_tagset = set;
4420         return 0;
4421
4422 out_cleanup_admin_q:
4423         blk_mq_destroy_queue(ctrl->admin_q);
4424         blk_put_queue(ctrl->admin_q);
4425 out_free_tagset:
4426         blk_mq_free_tag_set(set);
4427         ctrl->admin_q = NULL;
4428         ctrl->fabrics_q = NULL;
4429         return ret;
4430 }
4431 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
4432
4433 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
4434 {
4435         blk_mq_destroy_queue(ctrl->admin_q);
4436         blk_put_queue(ctrl->admin_q);
4437         if (ctrl->ops->flags & NVME_F_FABRICS) {
4438                 blk_mq_destroy_queue(ctrl->fabrics_q);
4439                 blk_put_queue(ctrl->fabrics_q);
4440         }
4441         blk_mq_free_tag_set(ctrl->admin_tagset);
4442 }
4443 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
4444
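/*
 * Companion to the admin variant above; nr_maps selects how many blk-mq
 * queue maps (default/read/poll) the transport provides.  A rough sketch
 * with hypothetical my_* names:
 *
 *	ret = nvme_alloc_io_tag_set(&my_ctrl->ctrl, &my_ctrl->tag_set,
 *				    &my_io_mq_ops, 2,
 *				    sizeof(struct my_request));
 *	if (ret)
 *		return ret;
 *	...
 *	nvme_remove_io_tag_set(&my_ctrl->ctrl);
 */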
4445 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
4446                 const struct blk_mq_ops *ops, unsigned int nr_maps,
4447                 unsigned int cmd_size)
4448 {
4449         int ret;
4450
4451         memset(set, 0, sizeof(*set));
4452         set->ops = ops;
4453         set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
4454         /*
4455          * Some Apple controllers require tags to be unique across admin and
4456          * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
4457          */
4458         if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
4459                 set->reserved_tags = NVME_AQ_DEPTH;
4460         else if (ctrl->ops->flags & NVME_F_FABRICS)
4461                 /* Reserved for fabric connect */
4462                 set->reserved_tags = 1;
4463         set->numa_node = ctrl->numa_node;
4464         set->flags = BLK_MQ_F_SHOULD_MERGE;
4465         if (ctrl->ops->flags & NVME_F_BLOCKING)
4466                 set->flags |= BLK_MQ_F_BLOCKING;
4467         set->cmd_size = cmd_size;
4468         set->driver_data = ctrl;
4469         set->nr_hw_queues = ctrl->queue_count - 1;
4470         set->timeout = NVME_IO_TIMEOUT;
4471         set->nr_maps = nr_maps;
4472         ret = blk_mq_alloc_tag_set(set);
4473         if (ret)
4474                 return ret;
4475
4476         if (ctrl->ops->flags & NVME_F_FABRICS) {
4477                 ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL);
4478                 if (IS_ERR(ctrl->connect_q)) {
4479                         ret = PTR_ERR(ctrl->connect_q);
4480                         goto out_free_tag_set;
4481                 }
4482                 blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE,
4483                                    ctrl->connect_q);
4484         }
4485
4486         ctrl->tagset = set;
4487         return 0;
4488
4489 out_free_tag_set:
4490         blk_mq_free_tag_set(set);
4491         ctrl->connect_q = NULL;
4492         return ret;
4493 }
4494 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
4495
4496 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
4497 {
4498         if (ctrl->ops->flags & NVME_F_FABRICS) {
4499                 blk_mq_destroy_queue(ctrl->connect_q);
4500                 blk_put_queue(ctrl->connect_q);
4501         }
4502         blk_mq_free_tag_set(ctrl->tagset);
4503 }
4504 EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
4505
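/*
 * nvme_stop_ctrl() and nvme_start_ctrl() bracket transport specific teardown
 * and re-establishment of the queues.  A controller reset handler typically
 * looks roughly like this sketch (my_teardown/my_setup are hypothetical
 * transport callbacks):
 *
 *	nvme_stop_ctrl(ctrl);
 *	my_teardown(ctrl);
 *	my_setup(ctrl);
 *	nvme_start_ctrl(ctrl);
 */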
4506 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4507 {
4508         nvme_mpath_stop(ctrl);
4509         nvme_auth_stop(ctrl);
4510         nvme_stop_keep_alive(ctrl);
4511         nvme_stop_failfast_work(ctrl);
4512         flush_work(&ctrl->async_event_work);
4513         cancel_work_sync(&ctrl->fw_act_work);
4514         if (ctrl->ops->stop_ctrl)
4515                 ctrl->ops->stop_ctrl(ctrl);
4516 }
4517 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
4518
4519 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4520 {
4521         nvme_enable_aen(ctrl);
4522
4523         /*
4524          * Persistent discovery controllers need to send an indication to
4525          * userspace to re-read the discovery log page and learn about changes
4526          * that were missed.  We identify persistent discovery controllers by
4527          * checking that they have started once before, i.e. they are reconnecting.
4528          */
4529         if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
4530             nvme_discovery_ctrl(ctrl))
4531                 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
4532
4533         if (ctrl->queue_count > 1) {
4534                 nvme_queue_scan(ctrl);
4535                 nvme_unquiesce_io_queues(ctrl);
4536                 nvme_mpath_update(ctrl);
4537         }
4538
4539         nvme_change_uevent(ctrl, "NVME_EVENT=connected");
4540         set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
4541 }
4542 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4543
4544 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
4545 {
4546         nvme_hwmon_exit(ctrl);
4547         nvme_fault_inject_fini(&ctrl->fault_inject);
4548         dev_pm_qos_hide_latency_tolerance(ctrl->device);
4549         cdev_device_del(&ctrl->cdev, ctrl->device);
4550         nvme_put_ctrl(ctrl);
4551 }
4552 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4553
4554 static void nvme_free_cels(struct nvme_ctrl *ctrl)
4555 {
4556         struct nvme_effects_log *cel;
4557         unsigned long i;
4558
4559         xa_for_each(&ctrl->cels, i, cel) {
4560                 xa_erase(&ctrl->cels, i);
4561                 kfree(cel);
4562         }
4563
4564         xa_destroy(&ctrl->cels);
4565 }
4566
4567 static void nvme_free_ctrl(struct device *dev)
4568 {
4569         struct nvme_ctrl *ctrl =
4570                 container_of(dev, struct nvme_ctrl, ctrl_device);
4571         struct nvme_subsystem *subsys = ctrl->subsys;
4572
4573         if (!subsys || ctrl->instance != subsys->instance)
4574                 ida_free(&nvme_instance_ida, ctrl->instance);
4575         key_put(ctrl->tls_key);
4576         nvme_free_cels(ctrl);
4577         nvme_mpath_uninit(ctrl);
4578         nvme_auth_stop(ctrl);
4579         nvme_auth_free(ctrl);
4580         __free_page(ctrl->discard_page);
4581         free_opal_dev(ctrl->opal_dev);
4582
4583         if (subsys) {
4584                 mutex_lock(&nvme_subsystems_lock);
4585                 list_del(&ctrl->subsys_entry);
4586                 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4587                 mutex_unlock(&nvme_subsystems_lock);
4588         }
4589
4590         ctrl->ops->free_ctrl(ctrl);
4591
4592         if (subsys)
4593                 nvme_put_subsystem(subsys);
4594 }
4595
4596 /*
4597  * Initialize an NVMe controller structure.  This needs to be called during
4598  * the earliest initialization so that we have the initialized structure
4599  * around during probing.
4600  */
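/*
 * Rough lifecycle sketch as seen from a transport driver (error handling
 * omitted; my_ctrl and my_ctrl_ops are hypothetical):
 *
 *	ret = nvme_init_ctrl(&my_ctrl->ctrl, dev, &my_ctrl_ops, quirks);
 *	...			(transport sets up queues, scans namespaces)
 *	nvme_uninit_ctrl(&my_ctrl->ctrl);
 *	nvme_put_ctrl(&my_ctrl->ctrl);	(drops the transport's reference)
 */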
4601 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
4602                 const struct nvme_ctrl_ops *ops, unsigned long quirks)
4603 {
4604         int ret;
4605
4606         WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
4607         ctrl->passthru_err_log_enabled = false;
4608         clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
4609         spin_lock_init(&ctrl->lock);
4610         mutex_init(&ctrl->scan_lock);
4611         INIT_LIST_HEAD(&ctrl->namespaces);
4612         xa_init(&ctrl->cels);
4613         init_rwsem(&ctrl->namespaces_rwsem);
4614         ctrl->dev = dev;
4615         ctrl->ops = ops;
4616         ctrl->quirks = quirks;
4617         ctrl->numa_node = NUMA_NO_NODE;
4618         INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4619         INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4620         INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4621         INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4622         init_waitqueue_head(&ctrl->state_wq);
4623
4624         INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4625         INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
4626         memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4627         ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4628         ctrl->ka_last_check_time = jiffies;
4629
4630         BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
4631                         PAGE_SIZE);
4632         ctrl->discard_page = alloc_page(GFP_KERNEL);
4633         if (!ctrl->discard_page) {
4634                 ret = -ENOMEM;
4635                 goto out;
4636         }
4637
4638         ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
4639         if (ret < 0)
4640                 goto out;
4641         ctrl->instance = ret;
4642
4643         device_initialize(&ctrl->ctrl_device);
4644         ctrl->device = &ctrl->ctrl_device;
4645         ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
4646                         ctrl->instance);
4647         ctrl->device->class = &nvme_class;
4648         ctrl->device->parent = ctrl->dev;
4649         if (ops->dev_attr_groups)
4650                 ctrl->device->groups = ops->dev_attr_groups;
4651         else
4652                 ctrl->device->groups = nvme_dev_attr_groups;
4653         ctrl->device->release = nvme_free_ctrl;
4654         dev_set_drvdata(ctrl->device, ctrl);
4655         ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4656         if (ret)
4657                 goto out_release_instance;
4658
4659         nvme_get_ctrl(ctrl);
4660         cdev_init(&ctrl->cdev, &nvme_dev_fops);
4661         ctrl->cdev.owner = ops->module;
4662         ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4663         if (ret)
4664                 goto out_free_name;
4665
4666         /*
4667          * Initialize latency tolerance controls.  The sysfs files won't
4668          * be visible to userspace unless the device actually supports APST.
4669          */
4670         ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4671         dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4672                 min(default_ps_max_latency_us, (unsigned long)S32_MAX));
4673
4674         nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4675         nvme_mpath_init_ctrl(ctrl);
4676         ret = nvme_auth_init_ctrl(ctrl);
4677         if (ret)
4678                 goto out_free_cdev;
4679
4680         return 0;
4681 out_free_cdev:
4682         nvme_fault_inject_fini(&ctrl->fault_inject);
4683         dev_pm_qos_hide_latency_tolerance(ctrl->device);
4684         cdev_device_del(&ctrl->cdev, ctrl->device);
4685 out_free_name:
4686         nvme_put_ctrl(ctrl);
4687         kfree_const(ctrl->device->kobj.name);
4688 out_release_instance:
4689         ida_free(&nvme_instance_ida, ctrl->instance);
4690 out:
4691         if (ctrl->discard_page)
4692                 __free_page(ctrl->discard_page);
4693         return ret;
4694 }
4695 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
4696
4697 /* let I/O to all namespaces fail in preparation for surprise removal */
4698 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
4699 {
4700         struct nvme_ns *ns;
4701
4702         down_read(&ctrl->namespaces_rwsem);
4703         list_for_each_entry(ns, &ctrl->namespaces, list)
4704                 blk_mark_disk_dead(ns->disk);
4705         up_read(&ctrl->namespaces_rwsem);
4706 }
4707 EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
4708
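/*
 * The freeze helpers below are used around controller resets.  The usual
 * order, roughly as PCIe-style transports apply it, is:
 *
 *	nvme_start_freeze(ctrl);	(new requests block on queue entry)
 *	nvme_quiesce_io_queues(ctrl);
 *	...				(tear down and re-create the queues)
 *	nvme_unquiesce_io_queues(ctrl);
 *	nvme_wait_freeze(ctrl);		(or nvme_wait_freeze_timeout())
 *	nvme_unfreeze(ctrl);
 */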
4709 void nvme_unfreeze(struct nvme_ctrl *ctrl)
4710 {
4711         struct nvme_ns *ns;
4712
4713         down_read(&ctrl->namespaces_rwsem);
4714         list_for_each_entry(ns, &ctrl->namespaces, list)
4715                 blk_mq_unfreeze_queue(ns->queue);
4716         up_read(&ctrl->namespaces_rwsem);
4717         clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
4718 }
4719 EXPORT_SYMBOL_GPL(nvme_unfreeze);
4720
4721 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
4722 {
4723         struct nvme_ns *ns;
4724
4725         down_read(&ctrl->namespaces_rwsem);
4726         list_for_each_entry(ns, &ctrl->namespaces, list) {
4727                 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4728                 if (timeout <= 0)
4729                         break;
4730         }
4731         up_read(&ctrl->namespaces_rwsem);
4732         return timeout;
4733 }
4734 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
4735
4736 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
4737 {
4738         struct nvme_ns *ns;
4739
4740         down_read(&ctrl->namespaces_rwsem);
4741         list_for_each_entry(ns, &ctrl->namespaces, list)
4742                 blk_mq_freeze_queue_wait(ns->queue);
4743         up_read(&ctrl->namespaces_rwsem);
4744 }
4745 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
4746
4747 void nvme_start_freeze(struct nvme_ctrl *ctrl)
4748 {
4749         struct nvme_ns *ns;
4750
4751         set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
4752         down_read(&ctrl->namespaces_rwsem);
4753         list_for_each_entry(ns, &ctrl->namespaces, list)
4754                 blk_freeze_queue_start(ns->queue);
4755         up_read(&ctrl->namespaces_rwsem);
4756 }
4757 EXPORT_SYMBOL_GPL(nvme_start_freeze);
4758
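/*
 * The NVME_CTRL_STOPPED flag keeps quiesce/unquiesce balanced: only the
 * first nvme_quiesce_io_queues() call quiesces the tagset, while later
 * callers just wait for the quiescing to complete.
 */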
4759 void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl)
4760 {
4761         if (!ctrl->tagset)
4762                 return;
4763         if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
4764                 blk_mq_quiesce_tagset(ctrl->tagset);
4765         else
4766                 blk_mq_wait_quiesce_done(ctrl->tagset);
4767 }
4768 EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues);
4769
4770 void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl)
4771 {
4772         if (!ctrl->tagset)
4773                 return;
4774         if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
4775                 blk_mq_unquiesce_tagset(ctrl->tagset);
4776 }
4777 EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues);
4778
4779 void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
4780 {
4781         if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4782                 blk_mq_quiesce_queue(ctrl->admin_q);
4783         else
4784                 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
4785 }
4786 EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue);
4787
4788 void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
4789 {
4790         if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
4791                 blk_mq_unquiesce_queue(ctrl->admin_q);
4792 }
4793 EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
4794
4795 void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
4796 {
4797         struct nvme_ns *ns;
4798
4799         down_read(&ctrl->namespaces_rwsem);
4800         list_for_each_entry(ns, &ctrl->namespaces, list)
4801                 blk_sync_queue(ns->queue);
4802         up_read(&ctrl->namespaces_rwsem);
4803 }
4804 EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
4805
4806 void nvme_sync_queues(struct nvme_ctrl *ctrl)
4807 {
4808         nvme_sync_io_queues(ctrl);
4809         if (ctrl->admin_q)
4810                 blk_sync_queue(ctrl->admin_q);
4811 }
4812 EXPORT_SYMBOL_GPL(nvme_sync_queues);
4813
4814 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
4815 {
4816         if (file->f_op != &nvme_dev_fops)
4817                 return NULL;
4818         return file->private_data;
4819 }
4820 EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
4821
4822 /*
4823  * Check we didn't inadvertently grow the command structure sizes:
4824  */
4825 static inline void _nvme_check_size(void)
4826 {
4827         BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
4828         BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
4829         BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
4830         BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
4831         BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
4832         BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
4833         BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
4834         BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
4835         BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
4836         BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
4837         BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
4838         BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
4839         BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
4840         BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
4841                         NVME_IDENTIFY_DATA_SIZE);
4842         BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
4843         BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
4844         BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
4845         BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
4846         BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
4847         BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
4848         BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
4849         BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
4850         BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
4851 }
4852
4853
4854 static int __init nvme_core_init(void)
4855 {
4856         int result = -ENOMEM;
4857
4858         _nvme_check_size();
4859
4860         nvme_wq = alloc_workqueue("nvme-wq",
4861                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4862         if (!nvme_wq)
4863                 goto out;
4864
4865         nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4866                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4867         if (!nvme_reset_wq)
4868                 goto destroy_wq;
4869
4870         nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4871                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4872         if (!nvme_delete_wq)
4873                 goto destroy_reset_wq;
4874
4875         result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
4876                         NVME_MINORS, "nvme");
4877         if (result < 0)
4878                 goto destroy_delete_wq;
4879
4880         result = class_register(&nvme_class);
4881         if (result)
4882                 goto unregister_chrdev;
4883
4884         result = class_register(&nvme_subsys_class);
4885         if (result)
4886                 goto destroy_class;
4887
4888         result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
4889                                      "nvme-generic");
4890         if (result < 0)
4891                 goto destroy_subsys_class;
4892
4893         result = class_register(&nvme_ns_chr_class);
4894         if (result)
4895                 goto unregister_generic_ns;
4896
4897         result = nvme_init_auth();
4898         if (result)
4899                 goto destroy_ns_chr;
4900         return 0;
4901
4902 destroy_ns_chr:
4903         class_unregister(&nvme_ns_chr_class);
4904 unregister_generic_ns:
4905         unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
4906 destroy_subsys_class:
4907         class_unregister(&nvme_subsys_class);
4908 destroy_class:
4909         class_unregister(&nvme_class);
4910 unregister_chrdev:
4911         unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4912 destroy_delete_wq:
4913         destroy_workqueue(nvme_delete_wq);
4914 destroy_reset_wq:
4915         destroy_workqueue(nvme_reset_wq);
4916 destroy_wq:
4917         destroy_workqueue(nvme_wq);
4918 out:
4919         return result;
4920 }
4921
4922 static void __exit nvme_core_exit(void)
4923 {
4924         nvme_exit_auth();
4925         class_unregister(&nvme_ns_chr_class);
4926         class_unregister(&nvme_subsys_class);
4927         class_unregister(&nvme_class);
4928         unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
4929         unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
4930         destroy_workqueue(nvme_delete_wq);
4931         destroy_workqueue(nvme_reset_wq);
4932         destroy_workqueue(nvme_wq);
4933         ida_destroy(&nvme_ns_chr_minor_ida);
4934         ida_destroy(&nvme_instance_ida);
4935 }
4936
4937 MODULE_LICENSE("GPL");
4938 MODULE_VERSION("1.0");
4939 MODULE_DESCRIPTION("NVMe host core framework");
4940 module_init(nvme_core_init);
4941 module_exit(nvme_core_exit);