block/blk-mq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Block multiqueue core code
4  *
5  * Copyright (C) 2013-2014 Jens Axboe
6  * Copyright (C) 2013-2014 Christoph Hellwig
7  */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/kmemleak.h>
14 #include <linux/mm.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/workqueue.h>
18 #include <linux/smp.h>
19 #include <linux/llist.h>
20 #include <linux/list_sort.h>
21 #include <linux/cpu.h>
22 #include <linux/cache.h>
23 #include <linux/sched/sysctl.h>
24 #include <linux/sched/topology.h>
25 #include <linux/sched/signal.h>
26 #include <linux/delay.h>
27 #include <linux/crash_dump.h>
28 #include <linux/prefetch.h>
29
30 #include <trace/events/block.h>
31
32 #include <linux/blk-mq.h>
33 #include "blk.h"
34 #include "blk-mq.h"
35 #include "blk-mq-debugfs.h"
36 #include "blk-mq-tag.h"
37 #include "blk-pm.h"
38 #include "blk-stat.h"
39 #include "blk-mq-sched.h"
40 #include "blk-rq-qos.h"
41
42 static void blk_mq_poll_stats_start(struct request_queue *q);
43 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
44
45 static int blk_mq_poll_stats_bkt(const struct request *rq)
46 {
47         int ddir, bytes, bucket;
48
49         ddir = rq_data_dir(rq);
50         bytes = blk_rq_bytes(rq);
51
52         bucket = ddir + 2*(ilog2(bytes) - 9);
53
54         if (bucket < 0)
55                 return -1;
56         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
57                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
58
59         return bucket;
60 }
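
/*
 * Worked example (illustrative, not part of the original file): buckets
 * alternate read/write per size class, starting at 512 bytes. For a
 * 4096-byte READ, ilog2(4096) = 12, so bucket = 0 + 2 * (12 - 9) = 6;
 * the corresponding WRITE lands in bucket 7. Oversized requests fall into
 * the last read/write pair, and undersized ones (< 512 bytes) return -1.
 */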
61
62 /*
63  * Check if any of the ctxs, the dispatch list, or the elevator has
64  * pending work in this hardware queue.
65  */
66 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
67 {
68         return !list_empty_careful(&hctx->dispatch) ||
69                 sbitmap_any_bit_set(&hctx->ctx_map) ||
70                         blk_mq_sched_has_work(hctx);
71 }
72
73 /*
74  * Mark this ctx as having pending work in this hardware queue
75  */
76 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
77                                      struct blk_mq_ctx *ctx)
78 {
79         const int bit = ctx->index_hw[hctx->type];
80
81         if (!sbitmap_test_bit(&hctx->ctx_map, bit))
82                 sbitmap_set_bit(&hctx->ctx_map, bit);
83 }
84
85 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
86                                       struct blk_mq_ctx *ctx)
87 {
88         const int bit = ctx->index_hw[hctx->type];
89
90         sbitmap_clear_bit(&hctx->ctx_map, bit);
91 }
92
93 struct mq_inflight {
94         struct hd_struct *part;
95         unsigned int *inflight;
96 };
97
98 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
99                                   struct request *rq, void *priv,
100                                   bool reserved)
101 {
102         struct mq_inflight *mi = priv;
103
104         /*
105          * index[0] counts the specific partition that was asked for.
106          */
107         if (rq->part == mi->part)
108                 mi->inflight[0]++;
109
110         return true;
111 }
112
113 unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
114 {
115         unsigned inflight[2];
116         struct mq_inflight mi = { .part = part, .inflight = inflight, };
117
118         inflight[0] = inflight[1] = 0;
119         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
120
121         return inflight[0];
122 }
123
124 static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
125                                      struct request *rq, void *priv,
126                                      bool reserved)
127 {
128         struct mq_inflight *mi = priv;
129
130         if (rq->part == mi->part)
131                 mi->inflight[rq_data_dir(rq)]++;
132
133         return true;
134 }
135
136 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
137                          unsigned int inflight[2])
138 {
139         struct mq_inflight mi = { .part = part, .inflight = inflight, };
140
141         inflight[0] = inflight[1] = 0;
142         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
143 }
144
145 void blk_freeze_queue_start(struct request_queue *q)
146 {
147         int freeze_depth;
148
149         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
150         if (freeze_depth == 1) {
151                 percpu_ref_kill(&q->q_usage_counter);
152                 if (queue_is_mq(q))
153                         blk_mq_run_hw_queues(q, false);
154         }
155 }
156 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
157
158 void blk_mq_freeze_queue_wait(struct request_queue *q)
159 {
160         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
161 }
162 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
163
164 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
165                                      unsigned long timeout)
166 {
167         return wait_event_timeout(q->mq_freeze_wq,
168                                         percpu_ref_is_zero(&q->q_usage_counter),
169                                         timeout);
170 }
171 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
172
173 /*
174  * Guarantee no request is in use, so we can change any data structure of
175  * the queue afterward.
176  */
177 void blk_freeze_queue(struct request_queue *q)
178 {
179         /*
180          * In the !blk_mq case we are only calling this to kill the
181          * q_usage_counter, otherwise this increases the freeze depth
182          * and waits for it to return to zero.  For this reason there is
183          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
184          * exported to drivers as the only user for unfreeze is blk_mq.
185          */
186         blk_freeze_queue_start(q);
187         blk_mq_freeze_queue_wait(q);
188 }
189
190 void blk_mq_freeze_queue(struct request_queue *q)
191 {
192         /*
193          * ...just an alias to keep freeze and unfreeze actions balanced
194          * in the blk_mq_* namespace
195          */
196         blk_freeze_queue(q);
197 }
198 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
199
200 void blk_mq_unfreeze_queue(struct request_queue *q)
201 {
202         int freeze_depth;
203
204         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
205         WARN_ON_ONCE(freeze_depth < 0);
206         if (!freeze_depth) {
207                 percpu_ref_resurrect(&q->q_usage_counter);
208                 wake_up_all(&q->mq_freeze_wq);
209         }
210 }
211 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
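
/*
 * Usage sketch (illustrative, not from the original file): freezing waits
 * until q_usage_counter drops to zero, so no request is in use afterwards;
 * freezes nest because mq_freeze_depth is a counter. update_queue_params()
 * is a hypothetical driver helper.
 *
 *	blk_mq_freeze_queue(q);		(kill the counter, wait for idle)
 *	update_queue_params(q);		(safe to change queue structures)
 *	blk_mq_unfreeze_queue(q);	(resurrect counter, wake waiters)
 */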
212
213 /*
214  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
215  * mpt3sas driver such that this function can be removed.
216  */
217 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
218 {
219         blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
220 }
221 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
222
223 /**
224  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
225  * @q: request queue.
226  *
227  * Note: this function does not prevent the struct request end_io()
228  * callback from being invoked. Once this function returns, no dispatch
229  * can happen until the queue is unquiesced via
230  * blk_mq_unquiesce_queue().
231  */
232 void blk_mq_quiesce_queue(struct request_queue *q)
233 {
234         struct blk_mq_hw_ctx *hctx;
235         unsigned int i;
236         bool rcu = false;
237
238         blk_mq_quiesce_queue_nowait(q);
239
240         queue_for_each_hw_ctx(q, hctx, i) {
241                 if (hctx->flags & BLK_MQ_F_BLOCKING)
242                         synchronize_srcu(hctx->srcu);
243                 else
244                         rcu = true;
245         }
246         if (rcu)
247                 synchronize_rcu();
248 }
249 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
250
251 /*
252  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
253  * @q: request queue.
254  *
255  * This function restores the queue to the state it was in before
256  * blk_mq_quiesce_queue() was called.
257  */
258 void blk_mq_unquiesce_queue(struct request_queue *q)
259 {
260         blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
261
262         /* dispatch requests which are inserted during quiescing */
263         blk_mq_run_hw_queues(q, true);
264 }
265 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
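
/*
 * Usage sketch (illustrative): unlike freezing, quiescing only guarantees
 * that no dispatch is in progress once blk_mq_quiesce_queue() returns;
 * requests may still be allocated and inserted meanwhile. A hypothetical
 * reconfigure_hw() helper could be protected like this:
 *
 *	blk_mq_quiesce_queue(q);	(waits for ongoing dispatches)
 *	reconfigure_hw(q);
 *	blk_mq_unquiesce_queue(q);	(reruns queues to flush insertions)
 */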
266
267 void blk_mq_wake_waiters(struct request_queue *q)
268 {
269         struct blk_mq_hw_ctx *hctx;
270         unsigned int i;
271
272         queue_for_each_hw_ctx(q, hctx, i)
273                 if (blk_mq_hw_queue_mapped(hctx))
274                         blk_mq_tag_wakeup_all(hctx->tags, true);
275 }
276
277 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
278 {
279         return blk_mq_has_free_tags(hctx->tags);
280 }
281 EXPORT_SYMBOL(blk_mq_can_queue);
282
283 /*
284  * Only need start/end time stamping if we have stats enabled, or using
285  * an IO scheduler.
286  */
287 static inline bool blk_mq_need_time_stamp(struct request *rq)
288 {
289         return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
290 }
291
292 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
293                 unsigned int tag, unsigned int op)
294 {
295         struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
296         struct request *rq = tags->static_rqs[tag];
297         req_flags_t rq_flags = 0;
298
299         if (data->flags & BLK_MQ_REQ_INTERNAL) {
300                 rq->tag = -1;
301                 rq->internal_tag = tag;
302         } else {
303                 if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
304                         rq_flags = RQF_MQ_INFLIGHT;
305                         atomic_inc(&data->hctx->nr_active);
306                 }
307                 rq->tag = tag;
308                 rq->internal_tag = -1;
309                 data->hctx->tags->rqs[rq->tag] = rq;
310         }
311
312         /* csd/requeue_work/fifo_time are initialized before use */
313         rq->q = data->q;
314         rq->mq_ctx = data->ctx;
315         rq->mq_hctx = data->hctx;
316         rq->rq_flags = rq_flags;
317         rq->cmd_flags = op;
318         if (data->flags & BLK_MQ_REQ_PREEMPT)
319                 rq->rq_flags |= RQF_PREEMPT;
320         if (blk_queue_io_stat(data->q))
321                 rq->rq_flags |= RQF_IO_STAT;
322         INIT_LIST_HEAD(&rq->queuelist);
323         INIT_HLIST_NODE(&rq->hash);
324         RB_CLEAR_NODE(&rq->rb_node);
325         rq->rq_disk = NULL;
326         rq->part = NULL;
327         if (blk_mq_need_time_stamp(rq))
328                 rq->start_time_ns = ktime_get_ns();
329         else
330                 rq->start_time_ns = 0;
331         rq->io_start_time_ns = 0;
332         rq->nr_phys_segments = 0;
333 #if defined(CONFIG_BLK_DEV_INTEGRITY)
334         rq->nr_integrity_segments = 0;
335 #endif
336         /* tag was already set */
337         rq->extra_len = 0;
338         WRITE_ONCE(rq->deadline, 0);
339
340         rq->timeout = 0;
341
342         rq->end_io = NULL;
343         rq->end_io_data = NULL;
344
345         data->ctx->rq_dispatched[op_is_sync(op)]++;
346         refcount_set(&rq->ref, 1);
347         return rq;
348 }
349
350 static struct request *blk_mq_get_request(struct request_queue *q,
351                                           struct bio *bio,
352                                           struct blk_mq_alloc_data *data)
353 {
354         struct elevator_queue *e = q->elevator;
355         struct request *rq;
356         unsigned int tag;
357         bool put_ctx_on_error = false;
358
359         blk_queue_enter_live(q);
360         data->q = q;
361         if (likely(!data->ctx)) {
362                 data->ctx = blk_mq_get_ctx(q);
363                 put_ctx_on_error = true;
364         }
365         if (likely(!data->hctx))
366                 data->hctx = blk_mq_map_queue(q, data->cmd_flags,
367                                                 data->ctx);
368         if (data->cmd_flags & REQ_NOWAIT)
369                 data->flags |= BLK_MQ_REQ_NOWAIT;
370
371         if (e) {
372                 data->flags |= BLK_MQ_REQ_INTERNAL;
373
374                 /*
375                  * Flush requests are special and go directly to the
376                  * dispatch list. Don't include reserved tags in the
377                  * limiting, as it isn't useful.
378                  */
379                 if (!op_is_flush(data->cmd_flags) &&
380                     e->type->ops.limit_depth &&
381                     !(data->flags & BLK_MQ_REQ_RESERVED))
382                         e->type->ops.limit_depth(data->cmd_flags, data);
383         } else {
384                 blk_mq_tag_busy(data->hctx);
385         }
386
387         tag = blk_mq_get_tag(data);
388         if (tag == BLK_MQ_TAG_FAIL) {
389                 if (put_ctx_on_error) {
390                         blk_mq_put_ctx(data->ctx);
391                         data->ctx = NULL;
392                 }
393                 blk_queue_exit(q);
394                 return NULL;
395         }
396
397         rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
398         if (!op_is_flush(data->cmd_flags)) {
399                 rq->elv.icq = NULL;
400                 if (e && e->type->ops.prepare_request) {
401                         if (e->type->icq_cache)
402                                 blk_mq_sched_assign_ioc(rq);
403
404                         e->type->ops.prepare_request(rq, bio);
405                         rq->rq_flags |= RQF_ELVPRIV;
406                 }
407         }
408         data->hctx->queued++;
409         return rq;
410 }
411
412 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
413                 blk_mq_req_flags_t flags)
414 {
415         struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
416         struct request *rq;
417         int ret;
418
419         ret = blk_queue_enter(q, flags);
420         if (ret)
421                 return ERR_PTR(ret);
422
423         rq = blk_mq_get_request(q, NULL, &alloc_data);
424         blk_queue_exit(q);
425
426         if (!rq)
427                 return ERR_PTR(-EWOULDBLOCK);
428
429         blk_mq_put_ctx(alloc_data.ctx);
430
431         rq->__data_len = 0;
432         rq->__sector = (sector_t) -1;
433         rq->bio = rq->biotail = NULL;
434         return rq;
435 }
436 EXPORT_SYMBOL(blk_mq_alloc_request);
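
/*
 * Caller sketch (illustrative, not from the original file): allocating a
 * passthrough request without blocking, matching the error handling above;
 * issue_internal_command() is a hypothetical driver helper.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);	(e.g. -EWOULDBLOCK if no tag is free)
 *	issue_internal_command(rq);
 */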
437
438 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
439         unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
440 {
441         struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
442         struct request *rq;
443         unsigned int cpu;
444         int ret;
445
446         /*
447          * If the tag allocator sleeps we could get an allocation for a
448          * different hardware context.  No need to complicate the low level
449          * allocator for the rare use case of a command tied to
450          * a specific queue.
451          */
452         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
453                 return ERR_PTR(-EINVAL);
454
455         if (hctx_idx >= q->nr_hw_queues)
456                 return ERR_PTR(-EIO);
457
458         ret = blk_queue_enter(q, flags);
459         if (ret)
460                 return ERR_PTR(ret);
461
462         /*
463          * Check if the hardware context is actually mapped to anything.
464          * If not, tell the caller that it should skip this queue.
465          */
466         alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
467         if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
468                 blk_queue_exit(q);
469                 return ERR_PTR(-EXDEV);
470         }
471         cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
472         alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
473
474         rq = blk_mq_get_request(q, NULL, &alloc_data);
475         blk_queue_exit(q);
476
477         if (!rq)
478                 return ERR_PTR(-EWOULDBLOCK);
479
480         return rq;
481 }
482 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
483
484 static void __blk_mq_free_request(struct request *rq)
485 {
486         struct request_queue *q = rq->q;
487         struct blk_mq_ctx *ctx = rq->mq_ctx;
488         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
489         const int sched_tag = rq->internal_tag;
490
491         blk_pm_mark_last_busy(rq);
492         rq->mq_hctx = NULL;
493         if (rq->tag != -1)
494                 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
495         if (sched_tag != -1)
496                 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
497         blk_mq_sched_restart(hctx);
498         blk_queue_exit(q);
499 }
500
501 void blk_mq_free_request(struct request *rq)
502 {
503         struct request_queue *q = rq->q;
504         struct elevator_queue *e = q->elevator;
505         struct blk_mq_ctx *ctx = rq->mq_ctx;
506         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
507
508         if (rq->rq_flags & RQF_ELVPRIV) {
509                 if (e && e->type->ops.finish_request)
510                         e->type->ops.finish_request(rq);
511                 if (rq->elv.icq) {
512                         put_io_context(rq->elv.icq->ioc);
513                         rq->elv.icq = NULL;
514                 }
515         }
516
517         ctx->rq_completed[rq_is_sync(rq)]++;
518         if (rq->rq_flags & RQF_MQ_INFLIGHT)
519                 atomic_dec(&hctx->nr_active);
520
521         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
522                 laptop_io_completion(q->backing_dev_info);
523
524         rq_qos_done(q, rq);
525
526         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
527         if (refcount_dec_and_test(&rq->ref))
528                 __blk_mq_free_request(rq);
529 }
530 EXPORT_SYMBOL_GPL(blk_mq_free_request);
531
532 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
533 {
534         u64 now = 0;
535
536         if (blk_mq_need_time_stamp(rq))
537                 now = ktime_get_ns();
538
539         if (rq->rq_flags & RQF_STATS) {
540                 blk_mq_poll_stats_start(rq->q);
541                 blk_stat_add(rq, now);
542         }
543
544         if (rq->internal_tag != -1)
545                 blk_mq_sched_completed_request(rq, now);
546
547         blk_account_io_done(rq, now);
548
549         if (rq->end_io) {
550                 rq_qos_done(rq->q, rq);
551                 rq->end_io(rq, error);
552         } else {
553                 blk_mq_free_request(rq);
554         }
555 }
556 EXPORT_SYMBOL(__blk_mq_end_request);
557
558 void blk_mq_end_request(struct request *rq, blk_status_t error)
559 {
560         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
561                 BUG();
562         __blk_mq_end_request(rq, error);
563 }
564 EXPORT_SYMBOL(blk_mq_end_request);
565
566 static void __blk_mq_complete_request_remote(void *data)
567 {
568         struct request *rq = data;
569         struct request_queue *q = rq->q;
570
571         q->mq_ops->complete(rq);
572 }
573
574 static void __blk_mq_complete_request(struct request *rq)
575 {
576         struct blk_mq_ctx *ctx = rq->mq_ctx;
577         struct request_queue *q = rq->q;
578         bool shared = false;
579         int cpu;
580
581         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
582         /*
583          * For most single-queue controllers there is only one irq vector
584          * for handling I/O completions, and its affinity is set to all
585          * possible CPUs. On most architectures this means the irq is
586          * handled on one specific CPU.
587          *
588          * So in the single-queue case, complete the I/O request in softirq
589          * context to avoid degrading I/O performance through irqsoff latency.
590          */
591         if (q->nr_hw_queues == 1) {
592                 __blk_complete_request(rq);
593                 return;
594         }
595
596         /*
597          * For a polled request, always complete locally; it's pointless
598          * to redirect the completion.
599          */
600         if ((rq->cmd_flags & REQ_HIPRI) ||
601             !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
602                 q->mq_ops->complete(rq);
603                 return;
604         }
605
606         cpu = get_cpu();
607         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
608                 shared = cpus_share_cache(cpu, ctx->cpu);
609
610         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
611                 rq->csd.func = __blk_mq_complete_request_remote;
612                 rq->csd.info = rq;
613                 rq->csd.flags = 0;
614                 smp_call_function_single_async(ctx->cpu, &rq->csd);
615         } else {
616                 q->mq_ops->complete(rq);
617         }
618         put_cpu();
619 }
620
621 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
622         __releases(hctx->srcu)
623 {
624         if (!(hctx->flags & BLK_MQ_F_BLOCKING))
625                 rcu_read_unlock();
626         else
627                 srcu_read_unlock(hctx->srcu, srcu_idx);
628 }
629
630 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
631         __acquires(hctx->srcu)
632 {
633         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
634                 /* shut up gcc false positive */
635                 *srcu_idx = 0;
636                 rcu_read_lock();
637         } else
638                 *srcu_idx = srcu_read_lock(hctx->srcu);
639 }
640
641 /**
642  * blk_mq_complete_request - end I/O on a request
643  * @rq:         the request being processed
644  *
645  * Description:
646  *      Ends all I/O on a request. It does not handle partial completions.
647  *      The actual completion happens out-of-order, through an IPI handler.
648  **/
649 bool blk_mq_complete_request(struct request *rq)
650 {
651         if (unlikely(blk_should_fake_timeout(rq->q)))
652                 return false;
653         __blk_mq_complete_request(rq);
654         return true;
655 }
656 EXPORT_SYMBOL(blk_mq_complete_request);
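
/*
 * Driver-side sketch (illustrative, not from the original file): hardware
 * interrupt handlers typically call blk_mq_complete_request(), and the
 * request is actually ended in the driver's ->complete() callback, which
 * blk-mq may invoke via IPI on the submitting CPU. my_tag_to_rq() and
 * my_read_tag() are hypothetical driver helpers.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct request *rq = my_tag_to_rq(data, my_read_tag(data));
 *
 *		blk_mq_complete_request(rq);	(may bounce to another CPU)
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void my_complete(struct request *rq)	(.complete in mq_ops)
 *	{
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *	}
 */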
657
658 void blk_mq_complete_request_sync(struct request *rq)
659 {
660         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
661         rq->q->mq_ops->complete(rq);
662 }
663 EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
664
665 int blk_mq_request_started(struct request *rq)
666 {
667         return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
668 }
669 EXPORT_SYMBOL_GPL(blk_mq_request_started);
670
671 void blk_mq_start_request(struct request *rq)
672 {
673         struct request_queue *q = rq->q;
674
675         blk_mq_sched_started_request(rq);
676
677         trace_block_rq_issue(q, rq);
678
679         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
680                 rq->io_start_time_ns = ktime_get_ns();
681 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
682                 rq->throtl_size = blk_rq_sectors(rq);
683 #endif
684                 rq->rq_flags |= RQF_STATS;
685                 rq_qos_issue(q, rq);
686         }
687
688         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
689
690         blk_add_timer(rq);
691         WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
692
693         if (q->dma_drain_size && blk_rq_bytes(rq)) {
694                 /*
695                  * Make sure space for the drain appears.  We know we can do
696                  * this because max_hw_segments has been adjusted to be one
697                  * fewer than the device can handle.
698                  */
699                 rq->nr_phys_segments++;
700         }
701 }
702 EXPORT_SYMBOL(blk_mq_start_request);
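
/*
 * Driver-side sketch (illustrative): ->queue_rq() must call
 * blk_mq_start_request() before handing the request to hardware, so the
 * timeout timer above is armed. submit_to_hw() is a hypothetical helper.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (submit_to_hw(hctx->driver_data, rq))
 *			return BLK_STS_RESOURCE;	(will be requeued)
 *		return BLK_STS_OK;
 *	}
 */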
703
704 static void __blk_mq_requeue_request(struct request *rq)
705 {
706         struct request_queue *q = rq->q;
707
708         blk_mq_put_driver_tag(rq);
709
710         trace_block_rq_requeue(q, rq);
711         rq_qos_requeue(q, rq);
712
713         if (blk_mq_request_started(rq)) {
714                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
715                 rq->rq_flags &= ~RQF_TIMED_OUT;
716                 if (q->dma_drain_size && blk_rq_bytes(rq))
717                         rq->nr_phys_segments--;
718         }
719 }
720
721 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
722 {
723         __blk_mq_requeue_request(rq);
724
725         /* this request will be re-inserted to io scheduler queue */
726         blk_mq_sched_requeue_request(rq);
727
728         BUG_ON(!list_empty(&rq->queuelist));
729         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
730 }
731 EXPORT_SYMBOL(blk_mq_requeue_request);
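
/*
 * Usage sketch (illustrative): a driver that already started a request but
 * cannot make progress can hand it back to blk-mq; passing true for
 * kick_requeue_list schedules the requeue work immediately.
 *
 *	blk_mq_requeue_request(rq, true);
 */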
732
733 static void blk_mq_requeue_work(struct work_struct *work)
734 {
735         struct request_queue *q =
736                 container_of(work, struct request_queue, requeue_work.work);
737         LIST_HEAD(rq_list);
738         struct request *rq, *next;
739
740         spin_lock_irq(&q->requeue_lock);
741         list_splice_init(&q->requeue_list, &rq_list);
742         spin_unlock_irq(&q->requeue_lock);
743
744         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
745                 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
746                         continue;
747
748                 rq->rq_flags &= ~RQF_SOFTBARRIER;
749                 list_del_init(&rq->queuelist);
750                 /*
751                  * If RQF_DONTPREP is set, rq contains driver-specific
752                  * data, so insert it into the hctx dispatch list to
753                  * avoid any merge.
754                  */
755                 if (rq->rq_flags & RQF_DONTPREP)
756                         blk_mq_request_bypass_insert(rq, false);
757                 else
758                         blk_mq_sched_insert_request(rq, true, false, false);
759         }
760
761         while (!list_empty(&rq_list)) {
762                 rq = list_entry(rq_list.next, struct request, queuelist);
763                 list_del_init(&rq->queuelist);
764                 blk_mq_sched_insert_request(rq, false, false, false);
765         }
766
767         blk_mq_run_hw_queues(q, false);
768 }
769
770 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
771                                 bool kick_requeue_list)
772 {
773         struct request_queue *q = rq->q;
774         unsigned long flags;
775
776         /*
777          * We abuse this flag that is otherwise used by the I/O scheduler to
778          * request head insertion from the workqueue.
779          */
780         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
781
782         spin_lock_irqsave(&q->requeue_lock, flags);
783         if (at_head) {
784                 rq->rq_flags |= RQF_SOFTBARRIER;
785                 list_add(&rq->queuelist, &q->requeue_list);
786         } else {
787                 list_add_tail(&rq->queuelist, &q->requeue_list);
788         }
789         spin_unlock_irqrestore(&q->requeue_lock, flags);
790
791         if (kick_requeue_list)
792                 blk_mq_kick_requeue_list(q);
793 }
794
795 void blk_mq_kick_requeue_list(struct request_queue *q)
796 {
797         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
798 }
799 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
800
801 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
802                                     unsigned long msecs)
803 {
804         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
805                                     msecs_to_jiffies(msecs));
806 }
807 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
808
809 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
810 {
811         if (tag < tags->nr_tags) {
812                 prefetch(tags->rqs[tag]);
813                 return tags->rqs[tag];
814         }
815
816         return NULL;
817 }
818 EXPORT_SYMBOL(blk_mq_tag_to_rq);
819
820 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
821                                void *priv, bool reserved)
822 {
823         /*
824          * If we find a request that is inflight and the queue matches,
825          * we know the queue is busy. Return false to stop the iteration.
826          */
827         if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
828                 bool *busy = priv;
829
830                 *busy = true;
831                 return false;
832         }
833
834         return true;
835 }
836
837 bool blk_mq_queue_inflight(struct request_queue *q)
838 {
839         bool busy = false;
840
841         blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
842         return busy;
843 }
844 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
845
846 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
847 {
848         req->rq_flags |= RQF_TIMED_OUT;
849         if (req->q->mq_ops->timeout) {
850                 enum blk_eh_timer_return ret;
851
852                 ret = req->q->mq_ops->timeout(req, reserved);
853                 if (ret == BLK_EH_DONE)
854                         return;
855                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
856         }
857
858         blk_add_timer(req);
859 }
860
861 static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
862 {
863         unsigned long deadline;
864
865         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
866                 return false;
867         if (rq->rq_flags & RQF_TIMED_OUT)
868                 return false;
869
870         deadline = READ_ONCE(rq->deadline);
871         if (time_after_eq(jiffies, deadline))
872                 return true;
873
874         if (*next == 0)
875                 *next = deadline;
876         else if (time_after(*next, deadline))
877                 *next = deadline;
878         return false;
879 }
880
881 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
882                 struct request *rq, void *priv, bool reserved)
883 {
884         unsigned long *next = priv;
885
886         /*
887          * Just do a quick check whether the request has expired before
888          * locking it, so we're not unnecessarily synchronizing across CPUs.
889          */
890         if (!blk_mq_req_expired(rq, next))
891                 return true;
892
893         /*
894          * We have reason to believe the request may be expired. Take a
895          * reference on the request to lock this request lifetime into its
896          * currently allocated context to prevent it from being reallocated in
897          * the event the completion by-passes this timeout handler.
898          *
899          * If the reference was already released, then the driver beat the
900          * timeout handler to posting a natural completion.
901          */
902         if (!refcount_inc_not_zero(&rq->ref))
903                 return true;
904
905         /*
906          * The request is now locked and cannot be reallocated underneath the
907          * timeout handler's processing. Re-verify this exact request is truly
908          * expired; if it is not expired, then the request was completed and
909          * reallocated as a new request.
910          */
911         if (blk_mq_req_expired(rq, next))
912                 blk_mq_rq_timed_out(rq, reserved);
913         if (refcount_dec_and_test(&rq->ref))
914                 __blk_mq_free_request(rq);
915
916         return true;
917 }
918
919 static void blk_mq_timeout_work(struct work_struct *work)
920 {
921         struct request_queue *q =
922                 container_of(work, struct request_queue, timeout_work);
923         unsigned long next = 0;
924         struct blk_mq_hw_ctx *hctx;
925         int i;
926
927         /* A deadlock might occur if a request is stuck requiring a
928          * timeout at the same time a queue freeze is waiting for
929          * completion, since the timeout code would not be able to
930          * acquire the queue reference here.
931          *
932          * That's why we don't use blk_queue_enter here; instead, we use
933          * percpu_ref_tryget directly, because we need to be able to
934          * obtain a reference even in the short window between the queue
935          * starting to freeze, by dropping the first reference in
936          * blk_freeze_queue_start, and the moment the last request is
937          * consumed, marked by the instant q_usage_counter reaches
938          * zero.
939          */
940         if (!percpu_ref_tryget(&q->q_usage_counter))
941                 return;
942
943         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
944
945         if (next != 0) {
946                 mod_timer(&q->timeout, next);
947         } else {
948                 /*
949                  * Request timeouts are handled as a forward rolling timer. If
950                  * we end up here it means that no requests are pending and
951                  * also that no request has been pending for a while. Mark
952                  * each hctx as idle.
953                  */
954                 queue_for_each_hw_ctx(q, hctx, i) {
955                         /* the hctx may be unmapped, so check it here */
956                         if (blk_mq_hw_queue_mapped(hctx))
957                                 blk_mq_tag_idle(hctx);
958                 }
959         }
960         blk_queue_exit(q);
961 }
962
963 struct flush_busy_ctx_data {
964         struct blk_mq_hw_ctx *hctx;
965         struct list_head *list;
966 };
967
968 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
969 {
970         struct flush_busy_ctx_data *flush_data = data;
971         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
972         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
973         enum hctx_type type = hctx->type;
974
975         spin_lock(&ctx->lock);
976         list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
977         sbitmap_clear_bit(sb, bitnr);
978         spin_unlock(&ctx->lock);
979         return true;
980 }
981
982 /*
983  * Process software queues that have been marked busy, splicing them
984  * to the for-dispatch list.
985  */
986 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
987 {
988         struct flush_busy_ctx_data data = {
989                 .hctx = hctx,
990                 .list = list,
991         };
992
993         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
994 }
995 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
996
997 struct dispatch_rq_data {
998         struct blk_mq_hw_ctx *hctx;
999         struct request *rq;
1000 };
1001
1002 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1003                 void *data)
1004 {
1005         struct dispatch_rq_data *dispatch_data = data;
1006         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1007         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1008         enum hctx_type type = hctx->type;
1009
1010         spin_lock(&ctx->lock);
1011         if (!list_empty(&ctx->rq_lists[type])) {
1012                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1013                 list_del_init(&dispatch_data->rq->queuelist);
1014                 if (list_empty(&ctx->rq_lists[type]))
1015                         sbitmap_clear_bit(sb, bitnr);
1016         }
1017         spin_unlock(&ctx->lock);
1018
1019         return !dispatch_data->rq;
1020 }
1021
1022 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1023                                         struct blk_mq_ctx *start)
1024 {
1025         unsigned off = start ? start->index_hw[hctx->type] : 0;
1026         struct dispatch_rq_data data = {
1027                 .hctx = hctx,
1028                 .rq   = NULL,
1029         };
1030
1031         __sbitmap_for_each_set(&hctx->ctx_map, off,
1032                                dispatch_rq_from_ctx, &data);
1033
1034         return data.rq;
1035 }
1036
1037 static inline unsigned int queued_to_index(unsigned int queued)
1038 {
1039         if (!queued)
1040                 return 0;
1041
1042         return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1043 }
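
/*
 * Worked example (illustrative, not part of the original file):
 * queued_to_index() bins a dispatch batch size into the hctx->dispatched[]
 * histogram: 0 -> 0, 1 -> 1, 2-3 -> 2, 4-7 -> 3, and so on, capped at
 * BLK_MQ_MAX_DISPATCH_ORDER - 1. For instance, queued = 6 gives
 * min(..., ilog2(6) + 1) = 3.
 */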
1044
1045 bool blk_mq_get_driver_tag(struct request *rq)
1046 {
1047         struct blk_mq_alloc_data data = {
1048                 .q = rq->q,
1049                 .hctx = rq->mq_hctx,
1050                 .flags = BLK_MQ_REQ_NOWAIT,
1051                 .cmd_flags = rq->cmd_flags,
1052         };
1053         bool shared;
1054
1055         if (rq->tag != -1)
1056                 goto done;
1057
1058         if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
1059                 data.flags |= BLK_MQ_REQ_RESERVED;
1060
1061         shared = blk_mq_tag_busy(data.hctx);
1062         rq->tag = blk_mq_get_tag(&data);
1063         if (rq->tag >= 0) {
1064                 if (shared) {
1065                         rq->rq_flags |= RQF_MQ_INFLIGHT;
1066                         atomic_inc(&data.hctx->nr_active);
1067                 }
1068                 data.hctx->tags->rqs[rq->tag] = rq;
1069         }
1070
1071 done:
1072         return rq->tag != -1;
1073 }
1074
1075 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1076                                 int flags, void *key)
1077 {
1078         struct blk_mq_hw_ctx *hctx;
1079
1080         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1081
1082         spin_lock(&hctx->dispatch_wait_lock);
1083         if (!list_empty(&wait->entry)) {
1084                 struct sbitmap_queue *sbq;
1085
1086                 list_del_init(&wait->entry);
1087                 sbq = &hctx->tags->bitmap_tags;
1088                 atomic_dec(&sbq->ws_active);
1089         }
1090         spin_unlock(&hctx->dispatch_wait_lock);
1091
1092         blk_mq_run_hw_queue(hctx, true);
1093         return 1;
1094 }
1095
1096 /*
1097  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1098  * the tag wakeups. For non-shared tags, we can simply mark us needing a
1099  * restart. For both cases, take care to check the condition again after
1100  * marking us as waiting.
1101  */
1102 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1103                                  struct request *rq)
1104 {
1105         struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1106         struct wait_queue_head *wq;
1107         wait_queue_entry_t *wait;
1108         bool ret;
1109
1110         if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
1111                 blk_mq_sched_mark_restart_hctx(hctx);
1112
1113                 /*
1114                  * It's possible that a tag was freed in the window between the
1115                  * allocation failure and adding the hardware queue to the wait
1116                  * queue.
1117                  *
1118                  * Don't clear RESTART here, someone else could have set it.
1119                  * At most this will cost an extra queue run.
1120                  */
1121                 return blk_mq_get_driver_tag(rq);
1122         }
1123
1124         wait = &hctx->dispatch_wait;
1125         if (!list_empty_careful(&wait->entry))
1126                 return false;
1127
1128         wq = &bt_wait_ptr(sbq, hctx)->wait;
1129
1130         spin_lock_irq(&wq->lock);
1131         spin_lock(&hctx->dispatch_wait_lock);
1132         if (!list_empty(&wait->entry)) {
1133                 spin_unlock(&hctx->dispatch_wait_lock);
1134                 spin_unlock_irq(&wq->lock);
1135                 return false;
1136         }
1137
1138         atomic_inc(&sbq->ws_active);
1139         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1140         __add_wait_queue(wq, wait);
1141
1142         /*
1143          * It's possible that a tag was freed in the window between the
1144          * allocation failure and adding the hardware queue to the wait
1145          * queue.
1146          */
1147         ret = blk_mq_get_driver_tag(rq);
1148         if (!ret) {
1149                 spin_unlock(&hctx->dispatch_wait_lock);
1150                 spin_unlock_irq(&wq->lock);
1151                 return false;
1152         }
1153
1154         /*
1155          * We got a tag, remove ourselves from the wait queue to ensure
1156          * someone else gets the wakeup.
1157          */
1158         list_del_init(&wait->entry);
1159         atomic_dec(&sbq->ws_active);
1160         spin_unlock(&hctx->dispatch_wait_lock);
1161         spin_unlock_irq(&wq->lock);
1162
1163         return true;
1164 }
1165
1166 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1167 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1168 /*
1169  * Update dispatch_busy with an Exponentially Weighted Moving Average (EWMA):
1170  * - EWMA is a simple way to compute a running average
1171  * - weights of 7/8 and 1/8 are applied so the value decays exponentially
1172  * - a factor of 4 is used to avoid results that round down to 0; the exact
1173  *   factor doesn't matter much because the EWMA decays exponentially
1174  */
1175 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1176 {
1177         unsigned int ewma;
1178
1179         if (hctx->queue->elevator)
1180                 return;
1181
1182         ewma = hctx->dispatch_busy;
1183
1184         if (!ewma && !busy)
1185                 return;
1186
1187         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1188         if (busy)
1189                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1190         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1191
1192         hctx->dispatch_busy = ewma;
1193 }
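
/*
 * Worked example (illustrative): with WEIGHT = 8 and FACTOR = 4, a busy
 * sample updates ewma to (7 * ewma + 16) / 8 in integer math. Starting
 * from 0, consecutive busy samples yield 2, 3, 4, ... and converge toward
 * the fixed point 16; each idle sample decays the value to ewma * 7 / 8.
 */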
1194
1195 #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
1196
1197 /*
1198  * Returns true if we did some work AND can potentially do more.
1199  */
1200 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1201                              bool got_budget)
1202 {
1203         struct blk_mq_hw_ctx *hctx;
1204         struct request *rq, *nxt;
1205         bool no_tag = false;
1206         int errors, queued;
1207         blk_status_t ret = BLK_STS_OK;
1208
1209         if (list_empty(list))
1210                 return false;
1211
1212         WARN_ON(!list_is_singular(list) && got_budget);
1213
1214         /*
1215          * Now process all the entries, sending them to the driver.
1216          */
1217         errors = queued = 0;
1218         do {
1219                 struct blk_mq_queue_data bd;
1220
1221                 rq = list_first_entry(list, struct request, queuelist);
1222
1223                 hctx = rq->mq_hctx;
1224                 if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
1225                         break;
1226
1227                 if (!blk_mq_get_driver_tag(rq)) {
1228                         /*
1229                          * The initial allocation attempt failed, so we need to
1230                          * rerun the hardware queue when a tag is freed. The
1231                          * waitqueue takes care of that. If the queue is run
1232                          * before we add this entry back on the dispatch list,
1233                          * we'll re-run it below.
1234                          */
1235                         if (!blk_mq_mark_tag_wait(hctx, rq)) {
1236                                 blk_mq_put_dispatch_budget(hctx);
1237                                 /*
1238                                  * For non-shared tags, the RESTART check
1239                                  * will suffice.
1240                                  */
1241                                 if (hctx->flags & BLK_MQ_F_TAG_SHARED)
1242                                         no_tag = true;
1243                                 break;
1244                         }
1245                 }
1246
1247                 list_del_init(&rq->queuelist);
1248
1249                 bd.rq = rq;
1250
1251                 /*
1252                  * Flag last if we have no more requests, or if we have more
1253                  * but can't assign a driver tag to it.
1254                  */
1255                 if (list_empty(list))
1256                         bd.last = true;
1257                 else {
1258                         nxt = list_first_entry(list, struct request, queuelist);
1259                         bd.last = !blk_mq_get_driver_tag(nxt);
1260                 }
1261
1262                 ret = q->mq_ops->queue_rq(hctx, &bd);
1263                 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
1264                         /*
1265                          * If an I/O scheduler has been configured and we got a
1266                          * driver tag for the next request already, free it
1267                          * again.
1268                          */
1269                         if (!list_empty(list)) {
1270                                 nxt = list_first_entry(list, struct request, queuelist);
1271                                 blk_mq_put_driver_tag(nxt);
1272                         }
1273                         list_add(&rq->queuelist, list);
1274                         __blk_mq_requeue_request(rq);
1275                         break;
1276                 }
1277
1278                 if (unlikely(ret != BLK_STS_OK)) {
1279                         errors++;
1280                         blk_mq_end_request(rq, BLK_STS_IOERR);
1281                         continue;
1282                 }
1283
1284                 queued++;
1285         } while (!list_empty(list));
1286
1287         hctx->dispatched[queued_to_index(queued)]++;
1288
1289         /*
1290          * Any items that need requeuing? Stuff them into hctx->dispatch;
1291          * that is where we will continue on the next queue run.
1292          */
1293         if (!list_empty(list)) {
1294                 bool needs_restart;
1295
1296                 /*
1297                  * If we didn't flush the entire list, we could have told
1298                  * the driver there was more coming, but that turned out to
1299                  * be a lie.
1300                  */
1301                 if (q->mq_ops->commit_rqs)
1302                         q->mq_ops->commit_rqs(hctx);
1303
1304                 spin_lock(&hctx->lock);
1305                 list_splice_init(list, &hctx->dispatch);
1306                 spin_unlock(&hctx->lock);
1307
1308                 /*
1309                  * If SCHED_RESTART was set by the caller of this function and
1310                  * it is no longer set that means that it was cleared by another
1311                  * thread and hence that a queue rerun is needed.
1312                  *
1313                  * If 'no_tag' is set, that means that we failed getting
1314                  * a driver tag with an I/O scheduler attached. If our dispatch
1315                  * waitqueue is no longer active, ensure that we run the queue
1316                  * AFTER adding our entries back to the list.
1317                  *
1318                  * If no I/O scheduler has been configured it is possible that
1319                  * the hardware queue got stopped and restarted before requests
1320                  * were pushed back onto the dispatch list. Rerun the queue to
1321                  * avoid starvation. Notes:
1322                  * - blk_mq_run_hw_queue() checks whether or not a queue has
1323                  *   been stopped before rerunning a queue.
1324                  * - Some but not all block drivers stop a queue before
1325                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1326                  *   and dm-rq.
1327                  *
1328                  * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
1329                  * bit is set, run queue after a delay to avoid IO stalls
1330                  * that could otherwise occur if the queue is idle.
1331                  */
1332                 needs_restart = blk_mq_sched_needs_restart(hctx);
1333                 if (!needs_restart ||
1334                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1335                         blk_mq_run_hw_queue(hctx, true);
1336                 else if (needs_restart && (ret == BLK_STS_RESOURCE))
1337                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1338
1339                 blk_mq_update_dispatch_busy(hctx, true);
1340                 return false;
1341         } else
1342                 blk_mq_update_dispatch_busy(hctx, false);
1343
1344         /*
1345          * If the host/device is unable to accept more work, inform the
1346          * caller of that.
1347          */
1348         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1349                 return false;
1350
1351         return (queued + errors) != 0;
1352 }
1353
1354 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1355 {
1356         int srcu_idx;
1357
1358         /*
1359          * We should be running this queue from one of the CPUs that
1360          * are mapped to it.
1361          *
1362          * There are at least two related races now between setting
1363          * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
1364          * __blk_mq_run_hw_queue():
1365          *
1366          * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
1367          *   but later it becomes online, in which case this warning is
1368          *   harmless
1369          *
1370          * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
1371          *   but later it becomes offline; then the warning can't be
1372          *   triggered, and we rely on the blk-mq timeout handler to
1373          *   handle requests dispatched to this hctx
1374          */
1375         if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1376                 cpu_online(hctx->next_cpu)) {
1377                 printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1378                         raw_smp_processor_id(),
1379                         cpumask_empty(hctx->cpumask) ? "inactive": "active");
1380                 dump_stack();
1381         }
1382
1383         /*
1384          * We can't run the queue inline with ints disabled. Ensure that
1385          * we catch bad users of this early.
1386          */
1387         WARN_ON_ONCE(in_interrupt());
1388
1389         might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1390
1391         hctx_lock(hctx, &srcu_idx);
1392         blk_mq_sched_dispatch_requests(hctx);
1393         hctx_unlock(hctx, srcu_idx);
1394 }
1395
1396 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1397 {
1398         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1399
1400         if (cpu >= nr_cpu_ids)
1401                 cpu = cpumask_first(hctx->cpumask);
1402         return cpu;
1403 }
1404
1405 /*
1406  * It'd be great if the workqueue API had a way to pass
1407  * in a mask and had some smarts for more clever placement.
1408  * For now we just round-robin here, switching for every
1409  * BLK_MQ_CPU_WORK_BATCH queued items.
1410  */
1411 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1412 {
1413         bool tried = false;
1414         int next_cpu = hctx->next_cpu;
1415
1416         if (hctx->queue->nr_hw_queues == 1)
1417                 return WORK_CPU_UNBOUND;
1418
1419         if (--hctx->next_cpu_batch <= 0) {
1420 select_cpu:
1421                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1422                                 cpu_online_mask);
1423                 if (next_cpu >= nr_cpu_ids)
1424                         next_cpu = blk_mq_first_mapped_cpu(hctx);
1425                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1426         }
1427
1428         /*
1429          * Do an unbound schedule if we can't find an online CPU for this
1430          * hctx; this should only happen while handling the CPU DEAD path.
1431          */
1432         if (!cpu_online(next_cpu)) {
1433                 if (!tried) {
1434                         tried = true;
1435                         goto select_cpu;
1436                 }
1437
1438                 /*
1439                  * Make sure to re-select CPU next time once after CPUs
1440                  * in hctx->cpumask become online again.
1441                  */
1442                 hctx->next_cpu = next_cpu;
1443                 hctx->next_cpu_batch = 1;
1444                 return WORK_CPU_UNBOUND;
1445         }
1446
1447         hctx->next_cpu = next_cpu;
1448         return next_cpu;
1449 }
1450
1451 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1452                                         unsigned long msecs)
1453 {
1454         if (unlikely(blk_mq_hctx_stopped(hctx)))
1455                 return;
1456
1457         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1458                 int cpu = get_cpu();
1459                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1460                         __blk_mq_run_hw_queue(hctx);
1461                         put_cpu();
1462                         return;
1463                 }
1464
1465                 put_cpu();
1466         }
1467
1468         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1469                                     msecs_to_jiffies(msecs));
1470 }
1471
1472 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1473 {
1474         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1475 }
1476 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1477
1478 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1479 {
1480         int srcu_idx;
1481         bool need_run;
1482
1483         /*
1484          * When the queue is quiesced, we may be switching the I/O
1485          * scheduler or updating nr_hw_queues, and we can't run the queue
1486          * any more; even __blk_mq_hctx_has_pending() can't be called safely.
1487          *
1488          * The queue will be rerun in blk_mq_unquiesce_queue() if it is
1489          * quiesced.
1490          */
1491         hctx_lock(hctx, &srcu_idx);
1492         need_run = !blk_queue_quiesced(hctx->queue) &&
1493                 blk_mq_hctx_has_pending(hctx);
1494         hctx_unlock(hctx, srcu_idx);
1495
1496         if (need_run) {
1497                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1498                 return true;
1499         }
1500
1501         return false;
1502 }
1503 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1504
1505 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1506 {
1507         struct blk_mq_hw_ctx *hctx;
1508         int i;
1509
1510         queue_for_each_hw_ctx(q, hctx, i) {
1511                 if (blk_mq_hctx_stopped(hctx))
1512                         continue;
1513
1514                 blk_mq_run_hw_queue(hctx, async);
1515         }
1516 }
1517 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1518
1519 /**
1520  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1521  * @q: request queue.
1522  *
1523  * The caller is responsible for serializing this function against
1524  * blk_mq_{start,stop}_hw_queue().
1525  */
1526 bool blk_mq_queue_stopped(struct request_queue *q)
1527 {
1528         struct blk_mq_hw_ctx *hctx;
1529         int i;
1530
1531         queue_for_each_hw_ctx(q, hctx, i)
1532                 if (blk_mq_hctx_stopped(hctx))
1533                         return true;
1534
1535         return false;
1536 }
1537 EXPORT_SYMBOL(blk_mq_queue_stopped);
1538
1539 /*
1540  * This function is often used by drivers to pause .queue_rq() when there
1541  * aren't enough resources or some condition isn't satisfied, in which
1542  * case BLK_STS_RESOURCE is usually returned.
1543  *
1544  * We do not guarantee that dispatch can be drained or blocked
1545  * after blk_mq_stop_hw_queue() returns. Please use
1546  * blk_mq_quiesce_queue() for that requirement.
1547  */
1548 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1549 {
1550         cancel_delayed_work(&hctx->run_work);
1551
1552         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1553 }
1554 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
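
/*
 * Usage sketch (illustrative): pausing dispatch from ->queue_rq() until the
 * device has room again; my_hw_has_room() is a hypothetical helper.
 *
 *	if (!my_hw_has_room(hctx->driver_data)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_RESOURCE;
 *	}
 *
 * and later, e.g. from a completion path:
 *
 *	blk_mq_start_stopped_hw_queues(q, true);
 */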
1555
1556 /*
1557  * This function is often used by drivers to pause .queue_rq() when there
1558  * aren't enough resources or some condition isn't satisfied, in which
1559  * case BLK_STS_RESOURCE is usually returned.
1560  *
1561  * We do not guarantee that dispatch can be drained or blocked
1562  * after blk_mq_stop_hw_queues() returns. Please use
1563  * blk_mq_quiesce_queue() for that requirement.
1564  */
1565 void blk_mq_stop_hw_queues(struct request_queue *q)
1566 {
1567         struct blk_mq_hw_ctx *hctx;
1568         int i;
1569
1570         queue_for_each_hw_ctx(q, hctx, i)
1571                 blk_mq_stop_hw_queue(hctx);
1572 }
1573 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1574
1575 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1576 {
1577         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1578
1579         blk_mq_run_hw_queue(hctx, false);
1580 }
1581 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1582
1583 void blk_mq_start_hw_queues(struct request_queue *q)
1584 {
1585         struct blk_mq_hw_ctx *hctx;
1586         int i;
1587
1588         queue_for_each_hw_ctx(q, hctx, i)
1589                 blk_mq_start_hw_queue(hctx);
1590 }
1591 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1592
1593 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1594 {
1595         if (!blk_mq_hctx_stopped(hctx))
1596                 return;
1597
1598         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1599         blk_mq_run_hw_queue(hctx, async);
1600 }
1601 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1602
1603 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1604 {
1605         struct blk_mq_hw_ctx *hctx;
1606         int i;
1607
1608         queue_for_each_hw_ctx(q, hctx, i)
1609                 blk_mq_start_stopped_hw_queue(hctx, async);
1610 }
1611 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1612
1613 static void blk_mq_run_work_fn(struct work_struct *work)
1614 {
1615         struct blk_mq_hw_ctx *hctx;
1616
1617         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1618
1619         /*
1620          * If we are stopped, don't run the queue.
1621          */
1622         if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
1623                 return;
1624
1625         __blk_mq_run_hw_queue(hctx);
1626 }
1627
1628 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1629                                             struct request *rq,
1630                                             bool at_head)
1631 {
1632         struct blk_mq_ctx *ctx = rq->mq_ctx;
1633         enum hctx_type type = hctx->type;
1634
1635         lockdep_assert_held(&ctx->lock);
1636
1637         trace_block_rq_insert(hctx->queue, rq);
1638
1639         if (at_head)
1640                 list_add(&rq->queuelist, &ctx->rq_lists[type]);
1641         else
1642                 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1643 }
1644
1645 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1646                              bool at_head)
1647 {
1648         struct blk_mq_ctx *ctx = rq->mq_ctx;
1649
1650         lockdep_assert_held(&ctx->lock);
1651
1652         __blk_mq_insert_req_list(hctx, rq, at_head);
1653         blk_mq_hctx_mark_pending(hctx, ctx);
1654 }
1655
1656 /*
1657  * Use with care, and only when the caller knows that any IO scheduler
1658  * on the target device should be bypassed.
1659  */
1660 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1661 {
1662         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1663
1664         spin_lock(&hctx->lock);
1665         list_add_tail(&rq->queuelist, &hctx->dispatch);
1666         spin_unlock(&hctx->lock);
1667
1668         if (run_queue)
1669                 blk_mq_run_hw_queue(hctx, false);
1670 }
1671
1672 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1673                             struct list_head *list)
1674
1675 {
1676         struct request *rq;
1677         enum hctx_type type = hctx->type;
1678
1679         /*
1680          * Preemption doesn't flush the plug list, so it's possible that
1681          * ctx->cpu has gone offline by now.
1682          */
1683         list_for_each_entry(rq, list, queuelist) {
1684                 BUG_ON(rq->mq_ctx != ctx);
1685                 trace_block_rq_insert(hctx->queue, rq);
1686         }
1687
1688         spin_lock(&ctx->lock);
1689         list_splice_tail_init(list, &ctx->rq_lists[type]);
1690         blk_mq_hctx_mark_pending(hctx, ctx);
1691         spin_unlock(&ctx->lock);
1692 }
1693
1694 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
1695 {
1696         struct request *rqa = container_of(a, struct request, queuelist);
1697         struct request *rqb = container_of(b, struct request, queuelist);
1698
1699         if (rqa->mq_ctx < rqb->mq_ctx)
1700                 return -1;
1701         else if (rqa->mq_ctx > rqb->mq_ctx)
1702                 return 1;
1703         else if (rqa->mq_hctx < rqb->mq_hctx)
1704                 return -1;
1705         else if (rqa->mq_hctx > rqb->mq_hctx)
1706                 return 1;
1707
1708         return blk_rq_pos(rqa) > blk_rq_pos(rqb);
1709 }
1710
1711 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1712 {
1713         struct blk_mq_hw_ctx *this_hctx;
1714         struct blk_mq_ctx *this_ctx;
1715         struct request_queue *this_q;
1716         struct request *rq;
1717         LIST_HEAD(list);
1718         LIST_HEAD(rq_list);
1719         unsigned int depth;
1720
1721         list_splice_init(&plug->mq_list, &list);
1722
1723         if (plug->rq_count > 2 && plug->multiple_queues)
1724                 list_sort(NULL, &list, plug_rq_cmp);
1725
1726         plug->rq_count = 0;
1727
1728         this_q = NULL;
1729         this_hctx = NULL;
1730         this_ctx = NULL;
1731         depth = 0;
1732
1733         while (!list_empty(&list)) {
1734                 rq = list_entry_rq(list.next);
1735                 list_del_init(&rq->queuelist);
1736                 BUG_ON(!rq->q);
1737                 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
1738                         if (this_hctx) {
1739                                 trace_block_unplug(this_q, depth, !from_schedule);
1740                                 blk_mq_sched_insert_requests(this_hctx, this_ctx,
1741                                                                 &rq_list,
1742                                                                 from_schedule);
1743                         }
1744
1745                         this_q = rq->q;
1746                         this_ctx = rq->mq_ctx;
1747                         this_hctx = rq->mq_hctx;
1748                         depth = 0;
1749                 }
1750
1751                 depth++;
1752                 list_add_tail(&rq->queuelist, &rq_list);
1753         }
1754
1755         /*
1756          * If 'this_hctx' is set, we know we have entries to complete
1757          * on 'rq_list'. Do those.
1758          */
1759         if (this_hctx) {
1760                 trace_block_unplug(this_q, depth, !from_schedule);
1761                 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
1762                                                 from_schedule);
1763         }
1764 }
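/*
 * For context, a sketch of how a submitter drives the plugging machinery
 * above (core API usage, not code from this file; "bio1"/"bio2" are
 * placeholders):
 *
 *      struct blk_plug plug;
 *
 *      blk_start_plug(&plug);
 *      submit_bio(bio1);
 *      submit_bio(bio2);
 *      blk_finish_plug(&plug); // ends up in blk_mq_flush_plug_list()
 */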
1765
1766 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1767 {
1768         blk_init_request_from_bio(rq, bio);
1769
1770         blk_account_io_start(rq, true);
1771 }
1772
1773 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1774                                             struct request *rq,
1775                                             blk_qc_t *cookie, bool last)
1776 {
1777         struct request_queue *q = rq->q;
1778         struct blk_mq_queue_data bd = {
1779                 .rq = rq,
1780                 .last = last,
1781         };
1782         blk_qc_t new_cookie;
1783         blk_status_t ret;
1784
1785         new_cookie = request_to_qc_t(hctx, rq);
1786
1787         /*
1788          * If the queue accepts the request, we are done. On a real error,
1789          * the caller may kill it. Any other (busy) status means we add the
1790          * request back to our list, as we previously would have done.
1791          */
1792         ret = q->mq_ops->queue_rq(hctx, &bd);
1793         switch (ret) {
1794         case BLK_STS_OK:
1795                 blk_mq_update_dispatch_busy(hctx, false);
1796                 *cookie = new_cookie;
1797                 break;
1798         case BLK_STS_RESOURCE:
1799         case BLK_STS_DEV_RESOURCE:
1800                 blk_mq_update_dispatch_busy(hctx, true);
1801                 __blk_mq_requeue_request(rq);
1802                 break;
1803         default:
1804                 blk_mq_update_dispatch_busy(hctx, false);
1805                 *cookie = BLK_QC_T_NONE;
1806                 break;
1807         }
1808
1809         return ret;
1810 }
1811
1812 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1813                                                 struct request *rq,
1814                                                 blk_qc_t *cookie,
1815                                                 bool bypass_insert, bool last)
1816 {
1817         struct request_queue *q = rq->q;
1818         bool run_queue = true;
1819
1820         /*
1821          * An RCU or SRCU read lock is needed before checking the quiesced flag.
1822          *
1823          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
1824          * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller,
1825          * so that the driver won't try to dispatch again.
1826          */
1827         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1828                 run_queue = false;
1829                 bypass_insert = false;
1830                 goto insert;
1831         }
1832
1833         if (q->elevator && !bypass_insert)
1834                 goto insert;
1835
1836         if (!blk_mq_get_dispatch_budget(hctx))
1837                 goto insert;
1838
1839         if (!blk_mq_get_driver_tag(rq)) {
1840                 blk_mq_put_dispatch_budget(hctx);
1841                 goto insert;
1842         }
1843
1844         return __blk_mq_issue_directly(hctx, rq, cookie, last);
1845 insert:
1846         if (bypass_insert)
1847                 return BLK_STS_RESOURCE;
1848
1849         blk_mq_request_bypass_insert(rq, run_queue);
1850         return BLK_STS_OK;
1851 }
1852
1853 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1854                 struct request *rq, blk_qc_t *cookie)
1855 {
1856         blk_status_t ret;
1857         int srcu_idx;
1858
1859         might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1860
1861         hctx_lock(hctx, &srcu_idx);
1862
1863         ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
1864         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1865                 blk_mq_request_bypass_insert(rq, true);
1866         else if (ret != BLK_STS_OK)
1867                 blk_mq_end_request(rq, ret);
1868
1869         hctx_unlock(hctx, srcu_idx);
1870 }
1871
1872 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
1873 {
1874         blk_status_t ret;
1875         int srcu_idx;
1876         blk_qc_t unused_cookie;
1877         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1878
1879         hctx_lock(hctx, &srcu_idx);
1880         ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
1881         hctx_unlock(hctx, srcu_idx);
1882
1883         return ret;
1884 }
1885
1886 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1887                 struct list_head *list)
1888 {
1889         while (!list_empty(list)) {
1890                 blk_status_t ret;
1891                 struct request *rq = list_first_entry(list, struct request,
1892                                 queuelist);
1893
1894                 list_del_init(&rq->queuelist);
1895                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
1896                 if (ret != BLK_STS_OK) {
1897                         if (ret == BLK_STS_RESOURCE ||
1898                                         ret == BLK_STS_DEV_RESOURCE) {
1899                                 blk_mq_request_bypass_insert(rq,
1900                                                         list_empty(list));
1901                                 break;
1902                         }
1903                         blk_mq_end_request(rq, ret);
1904                 }
1905         }
1906
1907         /*
1908          * If we didn't flush the entire list, we could have told
1909          * the driver there was more coming, but that turned out to
1910          * be a lie.
1911          */
1912         if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
1913                 hctx->queue->mq_ops->commit_rqs(hctx);
1914 }
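/*
 * Illustrative sketch (hypothetical driver): a ->commit_rqs() hook for a
 * driver that defers its doorbell until bd->last, which is what the
 * fallback above relies on. "my_dev" and "my_ring_doorbell" are made up.
 *
 *      static void my_commit_rqs(struct blk_mq_hw_ctx *hctx)
 *      {
 *              struct my_dev *dev = hctx->queue->queuedata;
 *
 *              // Kick the hardware for requests that were queued while
 *              // bd->last was false but never followed by a last == true.
 *              my_ring_doorbell(dev);
 *      }
 */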
1915
1916 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1917 {
1918         list_add_tail(&rq->queuelist, &plug->mq_list);
1919         plug->rq_count++;
1920         if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
1921                 struct request *tmp;
1922
1923                 tmp = list_first_entry(&plug->mq_list, struct request,
1924                                                 queuelist);
1925                 if (tmp->q != rq->q)
1926                         plug->multiple_queues = true;
1927         }
1928 }
1929
1930 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1931 {
1932         const int is_sync = op_is_sync(bio->bi_opf);
1933         const int is_flush_fua = op_is_flush(bio->bi_opf);
1934         struct blk_mq_alloc_data data = { .flags = 0};
1935         struct request *rq;
1936         struct blk_plug *plug;
1937         struct request *same_queue_rq = NULL;
1938         blk_qc_t cookie;
1939
1940         blk_queue_bounce(q, &bio);
1941
1942         blk_queue_split(q, &bio);
1943
1944         if (!bio_integrity_prep(bio))
1945                 return BLK_QC_T_NONE;
1946
1947         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1948             blk_attempt_plug_merge(q, bio, &same_queue_rq))
1949                 return BLK_QC_T_NONE;
1950
1951         if (blk_mq_sched_bio_merge(q, bio))
1952                 return BLK_QC_T_NONE;
1953
1954         rq_qos_throttle(q, bio);
1955
1956         data.cmd_flags = bio->bi_opf;
1957         rq = blk_mq_get_request(q, bio, &data);
1958         if (unlikely(!rq)) {
1959                 rq_qos_cleanup(q, bio);
1960                 if (bio->bi_opf & REQ_NOWAIT)
1961                         bio_wouldblock_error(bio);
1962                 return BLK_QC_T_NONE;
1963         }
1964
1965         trace_block_getrq(q, bio, bio->bi_opf);
1966
1967         rq_qos_track(q, rq, bio);
1968
1969         cookie = request_to_qc_t(data.hctx, rq);
1970
1971         plug = current->plug;
1972         if (unlikely(is_flush_fua)) {
1973                 blk_mq_put_ctx(data.ctx);
1974                 blk_mq_bio_to_request(rq, bio);
1975
1976                 /* bypass scheduler for flush rq */
1977                 blk_insert_flush(rq);
1978                 blk_mq_run_hw_queue(data.hctx, true);
1979         } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
1980                 /*
1981                  * Use plugging if we have a ->commit_rqs() hook as well, as
1982                  * we know the driver uses bd->last in a smart fashion.
1983                  */
1984                 unsigned int request_count = plug->rq_count;
1985                 struct request *last = NULL;
1986
1987                 blk_mq_put_ctx(data.ctx);
1988                 blk_mq_bio_to_request(rq, bio);
1989
1990                 if (!request_count)
1991                         trace_block_plug(q);
1992                 else
1993                         last = list_entry_rq(plug->mq_list.prev);
1994
1995                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1996                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1997                         blk_flush_plug_list(plug, false);
1998                         trace_block_plug(q);
1999                 }
2000
2001                 blk_add_rq_to_plug(plug, rq);
2002         } else if (plug && !blk_queue_nomerges(q)) {
2003                 blk_mq_bio_to_request(rq, bio);
2004
2005                 /*
2006                  * We do limited plugging. If the bio can be merged, do that.
2007                  * Otherwise the existing request in the plug list will be
2008                  * issued, so the plug list will hold one request at most.
2009                  * The plug list might get flushed before this; if that happens,
2010                  * the plug list is empty and same_queue_rq is invalid.
2011                  */
2012                 if (list_empty(&plug->mq_list))
2013                         same_queue_rq = NULL;
2014                 if (same_queue_rq) {
2015                         list_del_init(&same_queue_rq->queuelist);
2016                         plug->rq_count--;
2017                 }
2018                 blk_add_rq_to_plug(plug, rq);
2019                 trace_block_plug(q);
2020
2021                 blk_mq_put_ctx(data.ctx);
2022
2023                 if (same_queue_rq) {
2024                         data.hctx = same_queue_rq->mq_hctx;
2025                         trace_block_unplug(q, 1, true);
2026                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2027                                         &cookie);
2028                 }
2029         } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
2030                         !data.hctx->dispatch_busy)) {
2031                 blk_mq_put_ctx(data.ctx);
2032                 blk_mq_bio_to_request(rq, bio);
2033                 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
2034         } else {
2035                 blk_mq_put_ctx(data.ctx);
2036                 blk_mq_bio_to_request(rq, bio);
2037                 blk_mq_sched_insert_request(rq, false, true, true);
2038         }
2039
2040         return cookie;
2041 }
2042
2043 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2044                      unsigned int hctx_idx)
2045 {
2046         struct page *page;
2047
2048         if (tags->rqs && set->ops->exit_request) {
2049                 int i;
2050
2051                 for (i = 0; i < tags->nr_tags; i++) {
2052                         struct request *rq = tags->static_rqs[i];
2053
2054                         if (!rq)
2055                                 continue;
2056                         set->ops->exit_request(set, rq, hctx_idx);
2057                         tags->static_rqs[i] = NULL;
2058                 }
2059         }
2060
2061         while (!list_empty(&tags->page_list)) {
2062                 page = list_first_entry(&tags->page_list, struct page, lru);
2063                 list_del_init(&page->lru);
2064                 /*
2065                  * Remove kmemleak object previously allocated in
2066                  * blk_mq_alloc_rqs().
2067                  */
2068                 kmemleak_free(page_address(page));
2069                 __free_pages(page, page->private);
2070         }
2071 }
2072
2073 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
2074 {
2075         kfree(tags->rqs);
2076         tags->rqs = NULL;
2077         kfree(tags->static_rqs);
2078         tags->static_rqs = NULL;
2079
2080         blk_mq_free_tags(tags);
2081 }
2082
2083 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2084                                         unsigned int hctx_idx,
2085                                         unsigned int nr_tags,
2086                                         unsigned int reserved_tags)
2087 {
2088         struct blk_mq_tags *tags;
2089         int node;
2090
2091         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2092         if (node == NUMA_NO_NODE)
2093                 node = set->numa_node;
2094
2095         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
2096                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
2097         if (!tags)
2098                 return NULL;
2099
2100         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2101                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2102                                  node);
2103         if (!tags->rqs) {
2104                 blk_mq_free_tags(tags);
2105                 return NULL;
2106         }
2107
2108         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2109                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2110                                         node);
2111         if (!tags->static_rqs) {
2112                 kfree(tags->rqs);
2113                 blk_mq_free_tags(tags);
2114                 return NULL;
2115         }
2116
2117         return tags;
2118 }
2119
2120 static size_t order_to_size(unsigned int order)
2121 {
2122         return (size_t)PAGE_SIZE << order;
2123 }
2124
2125 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2126                                unsigned int hctx_idx, int node)
2127 {
2128         int ret;
2129
2130         if (set->ops->init_request) {
2131                 ret = set->ops->init_request(set, rq, hctx_idx, node);
2132                 if (ret)
2133                         return ret;
2134         }
2135
2136         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
2137         return 0;
2138 }
2139
2140 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2141                      unsigned int hctx_idx, unsigned int depth)
2142 {
2143         unsigned int i, j, entries_per_page, max_order = 4;
2144         size_t rq_size, left;
2145         int node;
2146
2147         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2148         if (node == NUMA_NO_NODE)
2149                 node = set->numa_node;
2150
2151         INIT_LIST_HEAD(&tags->page_list);
2152
2153         /*
2154          * rq_size is the size of the request plus driver payload, rounded
2155          * to the cacheline size
2156          */
2157         rq_size = round_up(sizeof(struct request) + set->cmd_size,
2158                                 cache_line_size());
2159         left = rq_size * depth;
2160
2161         for (i = 0; i < depth; ) {
2162                 int this_order = max_order;
2163                 struct page *page;
2164                 int to_do;
2165                 void *p;
2166
2167                 while (this_order && left < order_to_size(this_order - 1))
2168                         this_order--;
2169
2170                 do {
2171                         page = alloc_pages_node(node,
2172                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2173                                 this_order);
2174                         if (page)
2175                                 break;
2176                         if (!this_order--)
2177                                 break;
2178                         if (order_to_size(this_order) < rq_size)
2179                                 break;
2180                 } while (1);
2181
2182                 if (!page)
2183                         goto fail;
2184
2185                 page->private = this_order;
2186                 list_add_tail(&page->lru, &tags->page_list);
2187
2188                 p = page_address(page);
2189                 /*
2190                  * Allow kmemleak to scan these pages, as they contain
2191                  * pointers to additional allocations made via ops->init_request().
2192                  */
2193                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2194                 entries_per_page = order_to_size(this_order) / rq_size;
2195                 to_do = min(entries_per_page, depth - i);
2196                 left -= to_do * rq_size;
2197                 for (j = 0; j < to_do; j++) {
2198                         struct request *rq = p;
2199
2200                         tags->static_rqs[i] = rq;
2201                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2202                                 tags->static_rqs[i] = NULL;
2203                                 goto fail;
2204                         }
2205
2206                         p += rq_size;
2207                         i++;
2208                 }
2209         }
2210         return 0;
2211
2212 fail:
2213         blk_mq_free_rqs(set, tags, hctx_idx);
2214         return -ENOMEM;
2215 }
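/*
 * Worked example with illustrative numbers: with PAGE_SIZE == 4096, the
 * starting order of 4 covers 16 pages == 65536 bytes per chunk. If the
 * rounded rq_size came to 512 bytes, entries_per_page would be
 * 65536 / 512 == 128 requests per chunk, so a depth of 256 would take
 * two such chunks (smaller orders are tried as 'left' shrinks or when
 * higher-order allocations fail).
 */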
2216
2217 /*
2218  * 'cpu' is going away. Splice any existing rq_list entries from this
2219  * software queue to the hw queue dispatch list, and ensure that it
2220  * gets run.
2221  */
2222 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2223 {
2224         struct blk_mq_hw_ctx *hctx;
2225         struct blk_mq_ctx *ctx;
2226         LIST_HEAD(tmp);
2227         enum hctx_type type;
2228
2229         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2230         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2231         type = hctx->type;
2232
2233         spin_lock(&ctx->lock);
2234         if (!list_empty(&ctx->rq_lists[type])) {
2235                 list_splice_init(&ctx->rq_lists[type], &tmp);
2236                 blk_mq_hctx_clear_pending(hctx, ctx);
2237         }
2238         spin_unlock(&ctx->lock);
2239
2240         if (list_empty(&tmp))
2241                 return 0;
2242
2243         spin_lock(&hctx->lock);
2244         list_splice_tail_init(&tmp, &hctx->dispatch);
2245         spin_unlock(&hctx->lock);
2246
2247         blk_mq_run_hw_queue(hctx, true);
2248         return 0;
2249 }
2250
2251 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2252 {
2253         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2254                                             &hctx->cpuhp_dead);
2255 }
2256
2257 /* hctx->ctxs will be freed in queue's release handler */
2258 static void blk_mq_exit_hctx(struct request_queue *q,
2259                 struct blk_mq_tag_set *set,
2260                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2261 {
2262         if (blk_mq_hw_queue_mapped(hctx))
2263                 blk_mq_tag_idle(hctx);
2264
2265         if (set->ops->exit_request)
2266                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2267
2268         if (set->ops->exit_hctx)
2269                 set->ops->exit_hctx(hctx, hctx_idx);
2270
2271         blk_mq_remove_cpuhp(hctx);
2272
2273         spin_lock(&q->unused_hctx_lock);
2274         list_add(&hctx->hctx_list, &q->unused_hctx_list);
2275         spin_unlock(&q->unused_hctx_lock);
2276 }
2277
2278 static void blk_mq_exit_hw_queues(struct request_queue *q,
2279                 struct blk_mq_tag_set *set, int nr_queue)
2280 {
2281         struct blk_mq_hw_ctx *hctx;
2282         unsigned int i;
2283
2284         queue_for_each_hw_ctx(q, hctx, i) {
2285                 if (i == nr_queue)
2286                         break;
2287                 blk_mq_debugfs_unregister_hctx(hctx);
2288                 blk_mq_exit_hctx(q, set, hctx, i);
2289         }
2290 }
2291
2292 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2293 {
2294         int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2295
2296         BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2297                            __alignof__(struct blk_mq_hw_ctx)) !=
2298                      sizeof(struct blk_mq_hw_ctx));
2299
2300         if (tag_set->flags & BLK_MQ_F_BLOCKING)
2301                 hw_ctx_size += sizeof(struct srcu_struct);
2302
2303         return hw_ctx_size;
2304 }
2305
2306 static int blk_mq_init_hctx(struct request_queue *q,
2307                 struct blk_mq_tag_set *set,
2308                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2309 {
2310         hctx->queue_num = hctx_idx;
2311
2312         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2313
2314         hctx->tags = set->tags[hctx_idx];
2315
2316         if (set->ops->init_hctx &&
2317             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2318                 goto unregister_cpu_notifier;
2319
2320         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
2321                                 hctx->numa_node))
2322                 goto exit_hctx;
2323         return 0;
2324
2325  exit_hctx:
2326         if (set->ops->exit_hctx)
2327                 set->ops->exit_hctx(hctx, hctx_idx);
2328  unregister_cpu_notifier:
2329         blk_mq_remove_cpuhp(hctx);
2330         return -1;
2331 }
2332
2333 static struct blk_mq_hw_ctx *
2334 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
2335                 int node)
2336 {
2337         struct blk_mq_hw_ctx *hctx;
2338         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
2339
2340         hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
2341         if (!hctx)
2342                 goto fail_alloc_hctx;
2343
2344         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
2345                 goto free_hctx;
2346
2347         atomic_set(&hctx->nr_active, 0);
2348         if (node == NUMA_NO_NODE)
2349                 node = set->numa_node;
2350         hctx->numa_node = node;
2351
2352         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2353         spin_lock_init(&hctx->lock);
2354         INIT_LIST_HEAD(&hctx->dispatch);
2355         hctx->queue = q;
2356         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2357
2358         INIT_LIST_HEAD(&hctx->hctx_list);
2359
2360         /*
2361          * Allocate space for all possible cpus to avoid allocation at
2362          * runtime
2363          */
2364         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2365                         gfp, node);
2366         if (!hctx->ctxs)
2367                 goto free_cpumask;
2368
2369         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2370                                 gfp, node))
2371                 goto free_ctxs;
2372         hctx->nr_ctx = 0;
2373
2374         spin_lock_init(&hctx->dispatch_wait_lock);
2375         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2376         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2377
2378         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
2379                         gfp);
2380         if (!hctx->fq)
2381                 goto free_bitmap;
2382
2383         if (hctx->flags & BLK_MQ_F_BLOCKING)
2384                 init_srcu_struct(hctx->srcu);
2385         blk_mq_hctx_kobj_init(hctx);
2386
2387         return hctx;
2388
2389  free_bitmap:
2390         sbitmap_free(&hctx->ctx_map);
2391  free_ctxs:
2392         kfree(hctx->ctxs);
2393  free_cpumask:
2394         free_cpumask_var(hctx->cpumask);
2395  free_hctx:
2396         kfree(hctx);
2397  fail_alloc_hctx:
2398         return NULL;
2399 }
2400
2401 static void blk_mq_init_cpu_queues(struct request_queue *q,
2402                                    unsigned int nr_hw_queues)
2403 {
2404         struct blk_mq_tag_set *set = q->tag_set;
2405         unsigned int i, j;
2406
2407         for_each_possible_cpu(i) {
2408                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2409                 struct blk_mq_hw_ctx *hctx;
2410                 int k;
2411
2412                 __ctx->cpu = i;
2413                 spin_lock_init(&__ctx->lock);
2414                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2415                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2416
2417                 __ctx->queue = q;
2418
2419                 /*
2420                  * Set the local node, but only if we have more than one hw
2421                  * queue. If not, we remain on the home node of the device.
2422                  */
2423                 for (j = 0; j < set->nr_maps; j++) {
2424                         hctx = blk_mq_map_queue_type(q, j, i);
2425                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2426                                 hctx->numa_node = local_memory_node(cpu_to_node(i));
2427                 }
2428         }
2429 }
2430
2431 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2432 {
2433         int ret = 0;
2434
2435         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2436                                         set->queue_depth, set->reserved_tags);
2437         if (!set->tags[hctx_idx])
2438                 return false;
2439
2440         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2441                                 set->queue_depth);
2442         if (!ret)
2443                 return true;
2444
2445         blk_mq_free_rq_map(set->tags[hctx_idx]);
2446         set->tags[hctx_idx] = NULL;
2447         return false;
2448 }
2449
2450 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2451                                          unsigned int hctx_idx)
2452 {
2453         if (set->tags && set->tags[hctx_idx]) {
2454                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2455                 blk_mq_free_rq_map(set->tags[hctx_idx]);
2456                 set->tags[hctx_idx] = NULL;
2457         }
2458 }
2459
2460 static void blk_mq_map_swqueue(struct request_queue *q)
2461 {
2462         unsigned int i, j, hctx_idx;
2463         struct blk_mq_hw_ctx *hctx;
2464         struct blk_mq_ctx *ctx;
2465         struct blk_mq_tag_set *set = q->tag_set;
2466
2467         /*
2468          * Avoid others reading an incomplete hctx->cpumask through sysfs
2469          */
2470         mutex_lock(&q->sysfs_lock);
2471
2472         queue_for_each_hw_ctx(q, hctx, i) {
2473                 cpumask_clear(hctx->cpumask);
2474                 hctx->nr_ctx = 0;
2475                 hctx->dispatch_from = NULL;
2476         }
2477
2478         /*
2479          * Map software to hardware queues.
2480          *
2481          * If the cpu isn't present, the cpu is mapped to the first hctx.
2482          */
2483         for_each_possible_cpu(i) {
2484                 hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
2485                 /* unmapped hw queue can be remapped after CPU topo changed */
2486                 if (!set->tags[hctx_idx] &&
2487                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2488                         /*
2489                          * If tags initialization fails for some hctx,
2490                          * that hctx won't be brought online.  In this
2491                          * case, remap the current ctx to hctx[0], which
2492                          * is guaranteed to always have tags allocated.
2493                          */
2494                         set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
2495                 }
2496
2497                 ctx = per_cpu_ptr(q->queue_ctx, i);
2498                 for (j = 0; j < set->nr_maps; j++) {
2499                         if (!set->map[j].nr_queues) {
2500                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2501                                                 HCTX_TYPE_DEFAULT, i);
2502                                 continue;
2503                         }
2504
2505                         hctx = blk_mq_map_queue_type(q, j, i);
2506                         ctx->hctxs[j] = hctx;
2507                         /*
2508                          * If the CPU is already set in the mask, then we've
2509                          * mapped this one already. This can happen if
2510                          * devices share queues across queue maps.
2511                          */
2512                         if (cpumask_test_cpu(i, hctx->cpumask))
2513                                 continue;
2514
2515                         cpumask_set_cpu(i, hctx->cpumask);
2516                         hctx->type = j;
2517                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
2518                         hctx->ctxs[hctx->nr_ctx++] = ctx;
2519
2520                         /*
2521                          * If the nr_ctx type overflows, we have exceeded the
2522                          * amount of sw queues we can support.
2523                          */
2524                         BUG_ON(!hctx->nr_ctx);
2525                 }
2526
2527                 for (; j < HCTX_MAX_TYPES; j++)
2528                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
2529                                         HCTX_TYPE_DEFAULT, i);
2530         }
2531
2532         mutex_unlock(&q->sysfs_lock);
2533
2534         queue_for_each_hw_ctx(q, hctx, i) {
2535                 /*
2536                  * If no software queues are mapped to this hardware queue,
2537                  * disable it and free the request entries.
2538                  */
2539                 if (!hctx->nr_ctx) {
2540                         /* Never unmap queue 0.  We need it as a
2541                          * fallback in case allocation for a new
2542                          * remap fails.
2543                          */
2544                         if (i && set->tags[i])
2545                                 blk_mq_free_map_and_requests(set, i);
2546
2547                         hctx->tags = NULL;
2548                         continue;
2549                 }
2550
2551                 hctx->tags = set->tags[i];
2552                 WARN_ON(!hctx->tags);
2553
2554                 /*
2555                  * Set the map size to the number of mapped software queues.
2556                  * This is more accurate and more efficient than looping
2557                  * over all possibly mapped software queues.
2558                  */
2559                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2560
2561                 /*
2562                  * Initialize batch roundrobin counts
2563                  */
2564                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
2565                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2566         }
2567 }
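/*
 * Illustrative example: on an 8-CPU machine with nr_hw_queues == 2 and a
 * typical spread, CPUs 0-3 would map to hctx0 and CPUs 4-7 to hctx1, so
 * each hctx ends up with nr_ctx == 4 and each ctx records its slot in
 * ctx->index_hw[]. The actual split depends on the queue map produced by
 * ->map_queues and on the system topology.
 */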
2568
2569 /*
2570  * Caller needs to ensure that we're either frozen/quiesced, or that
2571  * the queue isn't live yet.
2572  */
2573 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2574 {
2575         struct blk_mq_hw_ctx *hctx;
2576         int i;
2577
2578         queue_for_each_hw_ctx(q, hctx, i) {
2579                 if (shared)
2580                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2581                 else
2582                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2583         }
2584 }
2585
2586 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2587                                         bool shared)
2588 {
2589         struct request_queue *q;
2590
2591         lockdep_assert_held(&set->tag_list_lock);
2592
2593         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2594                 blk_mq_freeze_queue(q);
2595                 queue_set_hctx_shared(q, shared);
2596                 blk_mq_unfreeze_queue(q);
2597         }
2598 }
2599
2600 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2601 {
2602         struct blk_mq_tag_set *set = q->tag_set;
2603
2604         mutex_lock(&set->tag_list_lock);
2605         list_del_rcu(&q->tag_set_list);
2606         if (list_is_singular(&set->tag_list)) {
2607                 /* just transitioned to unshared */
2608                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2609                 /* update existing queue */
2610                 blk_mq_update_tag_set_depth(set, false);
2611         }
2612         mutex_unlock(&set->tag_list_lock);
2613         INIT_LIST_HEAD(&q->tag_set_list);
2614 }
2615
2616 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2617                                      struct request_queue *q)
2618 {
2619         mutex_lock(&set->tag_list_lock);
2620
2621         /*
2622          * Check to see if we're transitioning to shared (from 1 to 2 queues).
2623          */
2624         if (!list_empty(&set->tag_list) &&
2625             !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2626                 set->flags |= BLK_MQ_F_TAG_SHARED;
2627                 /* update existing queue */
2628                 blk_mq_update_tag_set_depth(set, true);
2629         }
2630         if (set->flags & BLK_MQ_F_TAG_SHARED)
2631                 queue_set_hctx_shared(q, true);
2632         list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2633
2634         mutex_unlock(&set->tag_list_lock);
2635 }
2636
2637 /* All allocations will be freed in release handler of q->mq_kobj */
2638 static int blk_mq_alloc_ctxs(struct request_queue *q)
2639 {
2640         struct blk_mq_ctxs *ctxs;
2641         int cpu;
2642
2643         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
2644         if (!ctxs)
2645                 return -ENOMEM;
2646
2647         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2648         if (!ctxs->queue_ctx)
2649                 goto fail;
2650
2651         for_each_possible_cpu(cpu) {
2652                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
2653                 ctx->ctxs = ctxs;
2654         }
2655
2656         q->mq_kobj = &ctxs->kobj;
2657         q->queue_ctx = ctxs->queue_ctx;
2658
2659         return 0;
2660  fail:
2661         kfree(ctxs);
2662         return -ENOMEM;
2663 }
2664
2665 /*
2666  * This is the actual release handler for mq, but we do it from the
2667  * request queue's release handler to avoid use-after-free (and a
2668  * headache): q->mq_kobj shouldn't have been introduced, but we can't
2669  * group the ctx/hctx kobjects without it.
2670  */
2671 void blk_mq_release(struct request_queue *q)
2672 {
2673         struct blk_mq_hw_ctx *hctx, *next;
2674         int i;
2675
2676         cancel_delayed_work_sync(&q->requeue_work);
2677
2678         queue_for_each_hw_ctx(q, hctx, i)
2679                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
2680
2681         /* all hctx are in .unused_hctx_list now */
2682         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
2683                 list_del_init(&hctx->hctx_list);
2684                 kobject_put(&hctx->kobj);
2685         }
2686
2687         kfree(q->queue_hw_ctx);
2688
2689         /*
2690          * release .mq_kobj and the sw queues' kobjects now because
2691          * both share their lifetime with the request queue.
2692          */
2693         blk_mq_sysfs_deinit(q);
2694 }
2695
2696 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2697 {
2698         struct request_queue *uninit_q, *q;
2699
2700         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2701         if (!uninit_q)
2702                 return ERR_PTR(-ENOMEM);
2703
2704         q = blk_mq_init_allocated_queue(set, uninit_q);
2705         if (IS_ERR(q))
2706                 blk_cleanup_queue(uninit_q);
2707
2708         return q;
2709 }
2710 EXPORT_SYMBOL(blk_mq_init_queue);
2711
2712 /*
2713  * Helper for setting up a queue with mq ops, given a queue depth and
2714  * the passed-in mq ops flags.
2715  */
2716 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
2717                                            const struct blk_mq_ops *ops,
2718                                            unsigned int queue_depth,
2719                                            unsigned int set_flags)
2720 {
2721         struct request_queue *q;
2722         int ret;
2723
2724         memset(set, 0, sizeof(*set));
2725         set->ops = ops;
2726         set->nr_hw_queues = 1;
2727         set->nr_maps = 1;
2728         set->queue_depth = queue_depth;
2729         set->numa_node = NUMA_NO_NODE;
2730         set->flags = set_flags;
2731
2732         ret = blk_mq_alloc_tag_set(set);
2733         if (ret)
2734                 return ERR_PTR(ret);
2735
2736         q = blk_mq_init_queue(set);
2737         if (IS_ERR(q)) {
2738                 blk_mq_free_tag_set(set);
2739                 return q;
2740         }
2741
2742         return q;
2743 }
2744 EXPORT_SYMBOL(blk_mq_init_sq_queue);
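/*
 * Illustrative usage (hypothetical driver): creating a simple
 * single-queue device with the helper above. "my_mq_ops" is made up,
 * and the tag set must stay alive as long as the queue does.
 *
 *      static struct blk_mq_tag_set my_set;
 *
 *      q = blk_mq_init_sq_queue(&my_set, &my_mq_ops, 64,
 *                               BLK_MQ_F_SHOULD_MERGE);
 *      if (IS_ERR(q))
 *              return PTR_ERR(q);
 */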
2745
2746 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
2747                 struct blk_mq_tag_set *set, struct request_queue *q,
2748                 int hctx_idx, int node)
2749 {
2750         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
2751
2752         /* reuse dead hctx first */
2753         spin_lock(&q->unused_hctx_lock);
2754         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
2755                 if (tmp->numa_node == node) {
2756                         hctx = tmp;
2757                         break;
2758                 }
2759         }
2760         if (hctx)
2761                 list_del_init(&hctx->hctx_list);
2762         spin_unlock(&q->unused_hctx_lock);
2763
2764         if (!hctx)
2765                 hctx = blk_mq_alloc_hctx(q, set, node);
2766         if (!hctx)
2767                 goto fail;
2768
2769         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
2770                 goto free_hctx;
2771
2772         return hctx;
2773
2774  free_hctx:
2775         kobject_put(&hctx->kobj);
2776  fail:
2777         return NULL;
2778 }
2779
2780 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2781                                                 struct request_queue *q)
2782 {
2783         int i, j, end;
2784         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2785
2786         /* protect against switching io scheduler  */
2787         mutex_lock(&q->sysfs_lock);
2788         for (i = 0; i < set->nr_hw_queues; i++) {
2789                 int node;
2790                 struct blk_mq_hw_ctx *hctx;
2791
2792                 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
2793                 /*
2794                  * If the hw queue has been mapped to another numa node,
2795                  * we need to realloc the hctx. If allocation fails, fall
2796                  * back to using the previous one.
2797                  */
2798                 if (hctxs[i] && (hctxs[i]->numa_node == node))
2799                         continue;
2800
2801                 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
2802                 if (hctx) {
2803                         if (hctxs[i])
2804                                 blk_mq_exit_hctx(q, set, hctxs[i], i);
2805                         hctxs[i] = hctx;
2806                 } else {
2807                         if (hctxs[i])
2808                                 pr_warn("Allocate new hctx on node %d fails,\
2809                                 pr_warn("Allocating new hctx on node %d failed, "
2810                                                 "falling back to previous one on node %d\n",
2811                                                 node, hctxs[i]->numa_node);
2812                                 break;
2813                 }
2814         }
2815         /*
2816          * Increasing nr_hw_queues failed. Free the newly allocated
2817          * hctxs and keep the previous q->nr_hw_queues.
2818          */
2819         if (i != set->nr_hw_queues) {
2820                 j = q->nr_hw_queues;
2821                 end = i;
2822         } else {
2823                 j = i;
2824                 end = q->nr_hw_queues;
2825                 q->nr_hw_queues = set->nr_hw_queues;
2826         }
2827
2828         for (; j < end; j++) {
2829                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2830
2831                 if (hctx) {
2832                         if (hctx->tags)
2833                                 blk_mq_free_map_and_requests(set, j);
2834                         blk_mq_exit_hctx(q, set, hctx, j);
2835                         hctxs[j] = NULL;
2836                 }
2837         }
2838         mutex_unlock(&q->sysfs_lock);
2839 }
2840
2841 /*
2842  * Maximum number of hardware queues we support. For single sets, we'll never
2843  * have more than the CPUs (software queues). For multiple sets, the tag_set
2844  * user may have set ->nr_hw_queues larger.
2845  */
2846 static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
2847 {
2848         if (set->nr_maps == 1)
2849                 return nr_cpu_ids;
2850
2851         return max(set->nr_hw_queues, nr_cpu_ids);
2852 }
2853
2854 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2855                                                   struct request_queue *q)
2856 {
2857         /* mark the queue as mq asap */
2858         q->mq_ops = set->ops;
2859
2860         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2861                                              blk_mq_poll_stats_bkt,
2862                                              BLK_MQ_POLL_STATS_BKTS, q);
2863         if (!q->poll_cb)
2864                 goto err_exit;
2865
2866         if (blk_mq_alloc_ctxs(q))
2867                 goto err_exit;
2868
2869         /* init q->mq_kobj and sw queues' kobjects */
2870         blk_mq_sysfs_init(q);
2871
2872         q->nr_queues = nr_hw_queues(set);
2873         q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
2874                                                 GFP_KERNEL, set->numa_node);
2875         if (!q->queue_hw_ctx)
2876                 goto err_sys_init;
2877
2878         INIT_LIST_HEAD(&q->unused_hctx_list);
2879         spin_lock_init(&q->unused_hctx_lock);
2880
2881         blk_mq_realloc_hw_ctxs(set, q);
2882         if (!q->nr_hw_queues)
2883                 goto err_hctxs;
2884
2885         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2886         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2887
2888         q->tag_set = set;
2889
2890         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2891         if (set->nr_maps > HCTX_TYPE_POLL &&
2892             set->map[HCTX_TYPE_POLL].nr_queues)
2893                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
2894
2895         q->sg_reserved_size = INT_MAX;
2896
2897         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2898         INIT_LIST_HEAD(&q->requeue_list);
2899         spin_lock_init(&q->requeue_lock);
2900
2901         blk_queue_make_request(q, blk_mq_make_request);
2902
2903         /*
2904          * Do this after blk_queue_make_request() overrides it...
2905          */
2906         q->nr_requests = set->queue_depth;
2907
2908         /*
2909          * Default to classic polling
2910          */
2911         q->poll_nsec = BLK_MQ_POLL_CLASSIC;
2912
2913         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2914         blk_mq_add_queue_tag_set(set, q);
2915         blk_mq_map_swqueue(q);
2916
2917         if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2918                 int ret;
2919
2920                 ret = elevator_init_mq(q);
2921                 if (ret)
2922                         return ERR_PTR(ret);
2923         }
2924
2925         return q;
2926
2927 err_hctxs:
2928         kfree(q->queue_hw_ctx);
2929 err_sys_init:
2930         blk_mq_sysfs_deinit(q);
2931 err_exit:
2932         q->mq_ops = NULL;
2933         return ERR_PTR(-ENOMEM);
2934 }
2935 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2936
2937 /* tags can _not_ be used after returning from blk_mq_exit_queue */
2938 void blk_mq_exit_queue(struct request_queue *q)
2939 {
2940         struct blk_mq_tag_set   *set = q->tag_set;
2941
2942         blk_mq_del_queue_tag_set(q);
2943         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2944 }
2945
2946 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2947 {
2948         int i;
2949
2950         for (i = 0; i < set->nr_hw_queues; i++)
2951                 if (!__blk_mq_alloc_rq_map(set, i))
2952                         goto out_unwind;
2953
2954         return 0;
2955
2956 out_unwind:
2957         while (--i >= 0)
2958                 blk_mq_free_rq_map(set->tags[i]);
2959
2960         return -ENOMEM;
2961 }
2962
2963 /*
2964  * Allocate the request maps associated with this tag_set. Note that this
2965  * may reduce the depth asked for, if memory is tight. set->queue_depth
2966  * will be updated to reflect the allocated depth.
2967  */
2968 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2969 {
2970         unsigned int depth;
2971         int err;
2972
2973         depth = set->queue_depth;
2974         do {
2975                 err = __blk_mq_alloc_rq_maps(set);
2976                 if (!err)
2977                         break;
2978
2979                 set->queue_depth >>= 1;
2980                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2981                         err = -ENOMEM;
2982                         break;
2983                 }
2984         } while (set->queue_depth);
2985
2986         if (!set->queue_depth || err) {
2987                 pr_err("blk-mq: failed to allocate request map\n");
2988                 return -ENOMEM;
2989         }
2990
2991         if (depth != set->queue_depth)
2992                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2993                                                 depth, set->queue_depth);
2994
2995         return 0;
2996 }
2997
2998 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2999 {
3000         if (set->ops->map_queues && !is_kdump_kernel()) {
3001                 int i;
3002
3003                 /*
3004                  * A transport's .map_queues is usually implemented in
3005                  * the following way:
3006                  *
3007                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
3008                  *      mask = get_cpu_mask(queue)
3009                  *      for_each_cpu(cpu, mask)
3010                  *              set->map[x].mq_map[cpu] = queue;
3011                  * }
3012                  *
3013                  * When we need to remap, the table has to be cleared to
3014                  * kill stale mappings, since a CPU may end up not mapped
3015                  * to any hw queue.
3016                  */
3017                 for (i = 0; i < set->nr_maps; i++)
3018                         blk_mq_clear_mq_map(&set->map[i]);
3019
3020                 return set->ops->map_queues(set);
3021         } else {
3022                 BUG_ON(set->nr_maps > 1);
3023                 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3024         }
3025 }
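/*
 * Illustrative sketch: a single-map PCI driver would typically derive the
 * table from IRQ affinity rather than open-coding the loop sketched in
 * the comment above, e.g. (assuming the blk_mq_pci_map_queues() helper
 * and a hypothetical "my_dev" that carries the pci_dev):
 *
 *      static int my_map_queues(struct blk_mq_tag_set *set)
 *      {
 *              struct my_dev *dev = set->driver_data;
 *
 *              return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
 *                                           dev->pdev, 0);
 *      }
 */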
3026
3027 /*
3028  * Alloc a tag set to be associated with one or more request queues.
3029  * May fail with -EINVAL for various error conditions. May adjust the
3030  * requested depth down, if it's too large. In that case, the adjusted
3031  * value will be stored in set->queue_depth.
3032  */
3033 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
3034 {
3035         int i, ret;
3036
3037         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
3038
3039         if (!set->nr_hw_queues)
3040                 return -EINVAL;
3041         if (!set->queue_depth)
3042                 return -EINVAL;
3043         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3044                 return -EINVAL;
3045
3046         if (!set->ops->queue_rq)
3047                 return -EINVAL;
3048
3049         if (!set->ops->get_budget ^ !set->ops->put_budget)
3050                 return -EINVAL;
3051
3052         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3053                 pr_info("blk-mq: reduced tag depth to %u\n",
3054                         BLK_MQ_MAX_DEPTH);
3055                 set->queue_depth = BLK_MQ_MAX_DEPTH;
3056         }
3057
3058         if (!set->nr_maps)
3059                 set->nr_maps = 1;
3060         else if (set->nr_maps > HCTX_MAX_TYPES)
3061                 return -EINVAL;
3062
3063         /*
3064          * If a crashdump is active, then we are potentially in a very
3065          * memory constrained environment. Limit us to 1 queue and
3066          * 64 tags to prevent using too much memory.
3067          */
3068         if (is_kdump_kernel()) {
3069                 set->nr_hw_queues = 1;
3070                 set->nr_maps = 1;
3071                 set->queue_depth = min(64U, set->queue_depth);
3072         }
3073         /*
3074          * There is no use for more h/w queues than cpus if we just have
3075          * a single map
3076          */
3077         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
3078                 set->nr_hw_queues = nr_cpu_ids;
3079
3080         set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
3081                                  GFP_KERNEL, set->numa_node);
3082         if (!set->tags)
3083                 return -ENOMEM;
3084
3085         ret = -ENOMEM;
3086         for (i = 0; i < set->nr_maps; i++) {
3087                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
3088                                                   sizeof(set->map[i].mq_map[0]),
3089                                                   GFP_KERNEL, set->numa_node);
3090                 if (!set->map[i].mq_map)
3091                         goto out_free_mq_map;
3092                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
3093         }
3094
3095         ret = blk_mq_update_queue_map(set);
3096         if (ret)
3097                 goto out_free_mq_map;
3098
3099         ret = blk_mq_alloc_rq_maps(set);
3100         if (ret)
3101                 goto out_free_mq_map;
3102
3103         mutex_init(&set->tag_list_lock);
3104         INIT_LIST_HEAD(&set->tag_list);
3105
3106         return 0;
3107
3108 out_free_mq_map:
3109         for (i = 0; i < set->nr_maps; i++) {
3110                 kfree(set->map[i].mq_map);
3111                 set->map[i].mq_map = NULL;
3112         }
3113         kfree(set->tags);
3114         set->tags = NULL;
3115         return ret;
3116 }
3117 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
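/*
 * Typical usage from a driver probe path, as a hedged sketch (the "my_*"
 * names and the chosen numbers are illustrative, not part of this file):
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 4;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	if (blk_mq_alloc_tag_set(set))
 *		goto out;
 *	q = blk_mq_init_queue(set);
 *
 * cmd_size is extra per-request space handed to the driver with each
 * request; blk_mq_init_queue() may return an ERR_PTR() and must be
 * checked with IS_ERR() before use.
 */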
3118
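/*
 * Counterpart to blk_mq_alloc_tag_set(). All request queues created on
 * top of this set must already be torn down (blk_cleanup_queue()) before
 * this is called, since they reference set->tags and the cpu mappings.
 */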
3119 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3120 {
3121         int i, j;
3122
3123         for (i = 0; i < nr_hw_queues(set); i++)
3124                 blk_mq_free_map_and_requests(set, i);
3125
3126         for (j = 0; j < set->nr_maps; j++) {
3127                 kfree(set->map[j].mq_map);
3128                 set->map[j].mq_map = NULL;
3129         }
3130
3131         kfree(set->tags);
3132         set->tags = NULL;
3133 }
3134 EXPORT_SYMBOL(blk_mq_free_tag_set);
3135
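/*
 * Resize the depth of an existing queue; for blk-mq devices this is what
 * a write to /sys/block/<dev>/queue/nr_requests ends up invoking.
 */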
3136 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
3137 {
3138         struct blk_mq_tag_set *set = q->tag_set;
3139         struct blk_mq_hw_ctx *hctx;
3140         int i, ret;
3141
3142         if (!set)
3143                 return -EINVAL;
3144
3145         if (q->nr_requests == nr)
3146                 return 0;
3147
3148         blk_mq_freeze_queue(q);
3149         blk_mq_quiesce_queue(q);
3150
3151         ret = 0;
3152         queue_for_each_hw_ctx(q, hctx, i) {
3153                 if (!hctx->tags)
3154                         continue;
3155                 /*
3156                  * With an MQ scheduler, update just the scheduler queue depth;
3157                  * otherwise resize the hw queue tags, similar to the old code.
3158                  */
3159                 if (!hctx->sched_tags) {
3160                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3161                                                         false);
3162                 } else {
3163                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
3164                                                         nr, true);
3165                 }
3166                 if (ret)
3167                         break;
3168                 if (q->elevator && q->elevator->type->ops.depth_updated)
3169                         q->elevator->type->ops.depth_updated(hctx);
3170         }
3171
3172         if (!ret)
3173                 q->nr_requests = nr;
3174
3175         blk_mq_unquiesce_queue(q);
3176         blk_mq_unfreeze_queue(q);
3177
3178         return ret;
3179 }
3180
3181 /*
3182  * request_queue and elevator_type pair.
3183  * It is just used by __blk_mq_update_nr_hw_queues to cache
3184  * the elevator_type associated with a request_queue.
3185  */
3186 struct blk_mq_qe_pair {
3187         struct list_head node;
3188         struct request_queue *q;
3189         struct elevator_type *type;
3190 };
3191
3192 /*
3193  * Cache the elevator_type in qe pair list and switch the
3194  * io scheduler to 'none'
3195  */
3196 static bool blk_mq_elv_switch_none(struct list_head *head,
3197                 struct request_queue *q)
3198 {
3199         struct blk_mq_qe_pair *qe;
3200
3201         if (!q->elevator)
3202                 return true;
3203
3204         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
3205         if (!qe)
3206                 return false;
3207
3208         INIT_LIST_HEAD(&qe->node);
3209         qe->q = q;
3210         qe->type = q->elevator->type;
3211         list_add(&qe->node, head);
3212
3213         mutex_lock(&q->sysfs_lock);
3214         /*
3215          * After elevator_switch_mq, the previous elevator_queue will be
3216          * released by elevator_release. The reference to the io scheduler
3217          * module taken by elevator_get will also be put. So take an extra
3218          * reference to the io scheduler module here to prevent it from
3219          * being removed.
3220          */
3221         __module_get(qe->type->elevator_owner);
3222         elevator_switch_mq(q, NULL);
3223         mutex_unlock(&q->sysfs_lock);
3224
3225         return true;
3226 }
3227
3228 static void blk_mq_elv_switch_back(struct list_head *head,
3229                 struct request_queue *q)
3230 {
3231         struct blk_mq_qe_pair *qe;
3232         struct elevator_type *t = NULL;
3233
3234         list_for_each_entry(qe, head, node)
3235                 if (qe->q == q) {
3236                         t = qe->type;
3237                         break;
3238                 }
3239
3240         if (!t)
3241                 return;
3242
3243         list_del(&qe->node);
3244         kfree(qe);
3245
3246         mutex_lock(&q->sysfs_lock);
3247         elevator_switch_mq(q, t);
3248         mutex_unlock(&q->sysfs_lock);
3249 }
3250
3251 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
3252                                                         int nr_hw_queues)
3253 {
3254         struct request_queue *q;
3255         LIST_HEAD(head);
3256         int prev_nr_hw_queues;
3257
3258         lockdep_assert_held(&set->tag_list_lock);
3259
3260         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
3261                 nr_hw_queues = nr_cpu_ids;
3262         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
3263                 return;
3264
3265         list_for_each_entry(q, &set->tag_list, tag_set_list)
3266                 blk_mq_freeze_queue(q);
3267         /*
3268          * Sync with blk_mq_queue_tag_busy_iter.
3269          */
3270         synchronize_rcu();
3271         /*
3272          * Switch IO scheduler to 'none', cleaning up the data associated
3273          * with the previous scheduler. We will switch back once we are done
3274          * updating the new sw-to-hw queue mappings.
3275          */
3276         list_for_each_entry(q, &set->tag_list, tag_set_list)
3277                 if (!blk_mq_elv_switch_none(&head, q))
3278                         goto switch_back;
3279
3280         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3281                 blk_mq_debugfs_unregister_hctxs(q);
3282                 blk_mq_sysfs_unregister(q);
3283         }
3284
3285         prev_nr_hw_queues = set->nr_hw_queues;
3286         set->nr_hw_queues = nr_hw_queues;
3287         blk_mq_update_queue_map(set);
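        /*
         * If reallocating the hw contexts for some queue fails below, fall
         * back to the previous nr_hw_queues with the default 1:1 mapping
         * and retry the whole list from the top.
         */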
3288 fallback:
3289         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3290                 blk_mq_realloc_hw_ctxs(set, q);
3291                 if (q->nr_hw_queues != set->nr_hw_queues) {
3292                         pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
3293                                         nr_hw_queues, prev_nr_hw_queues);
3294                         set->nr_hw_queues = prev_nr_hw_queues;
3295                         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3296                         goto fallback;
3297                 }
3298                 blk_mq_map_swqueue(q);
3299         }
3300
3301         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3302                 blk_mq_sysfs_register(q);
3303                 blk_mq_debugfs_register_hctxs(q);
3304         }
3305
3306 switch_back:
3307         list_for_each_entry(q, &set->tag_list, tag_set_list)
3308                 blk_mq_elv_switch_back(&head, q);
3309
3310         list_for_each_entry(q, &set->tag_list, tag_set_list)
3311                 blk_mq_unfreeze_queue(q);
3312 }
3313
3314 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
3315 {
3316         mutex_lock(&set->tag_list_lock);
3317         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3318         mutex_unlock(&set->tag_list_lock);
3319 }
3320 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
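/*
 * A driver typically calls the above after its hardware resources change,
 * e.g. when a controller reset yields a different IRQ vector count. A
 * hedged sketch (the "dev" layout and "max_queues" are assumptions, not
 * part of this file):
 *
 *	nr = pci_alloc_irq_vectors(dev->pdev, 1, max_queues,
 *				   PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
 *	if (nr > 0)
 *		blk_mq_update_nr_hw_queues(&dev->tag_set, nr);
 */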
3321
3322 /* Enable polling stats and return whether they were already enabled. */
3323 static bool blk_poll_stats_enable(struct request_queue *q)
3324 {
3325         if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3326             blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
3327                 return true;
3328         blk_stat_add_callback(q, q->poll_cb);
3329         return false;
3330 }
3331
3332 static void blk_mq_poll_stats_start(struct request_queue *q)
3333 {
3334         /*
3335          * We don't arm the callback if polling stats are not enabled or the
3336          * callback is already active.
3337          */
3338         if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3339             blk_stat_is_active(q->poll_cb))
3340                 return;
3341
3342         blk_stat_activate_msecs(q->poll_cb, 100);
3343 }
3344
3345 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3346 {
3347         struct request_queue *q = cb->data;
3348         int bucket;
3349
3350         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3351                 if (cb->stat[bucket].nr_samples)
3352                         q->poll_stat[bucket] = cb->stat[bucket];
3353         }
3354 }
3355
3356 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3357                                        struct blk_mq_hw_ctx *hctx,
3358                                        struct request *rq)
3359 {
3360         unsigned long ret = 0;
3361         int bucket;
3362
3363         /*
3364          * If stats collection isn't on, don't sleep but turn it on for
3365          * future users
3366          */
3367         if (!blk_poll_stats_enable(q))
3368                 return 0;
3369
3370         /*
3371          * As an optimistic guess, use half of the mean service time
3372          * for this type of request. We can (and should) make this smarter.
3373          * For instance, if the completion latencies are tight, we can
3374          * get closer than just half the mean. This is especially
3375          * important on devices where the completion latencies are longer
3376          * than ~10 usec. We do use the stats for the relevant IO size,
3377          * if available, which does lead to better estimates.
3378          */
3379         bucket = blk_mq_poll_stats_bkt(rq);
3380         if (bucket < 0)
3381                 return ret;
3382
3383         if (q->poll_stat[bucket].nr_samples)
3384                 ret = (q->poll_stat[bucket].mean + 1) / 2;
3385
3386         return ret;
3387 }
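/*
 * Worked example for the guess above: with a recorded mean of 20000 ns in
 * the matching bucket, blk_mq_poll_nsecs() returns (20000 + 1) / 2 =
 * 10000 ns, so the hybrid poller sleeps roughly 10 usec before it starts
 * spinning (the +1 rounds odd means up under integer division).
 */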
3388
3389 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3390                                      struct blk_mq_hw_ctx *hctx,
3391                                      struct request *rq)
3392 {
3393         struct hrtimer_sleeper hs;
3394         enum hrtimer_mode mode;
3395         unsigned int nsecs;
3396         ktime_t kt;
3397
3398         if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
3399                 return false;
3400
3401         /*
3402          * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
3403          *
3404          *  0:  use half of prev avg
3405          * >0:  use this specific value
3406          */
3407         if (q->poll_nsec > 0)
3408                 nsecs = q->poll_nsec;
3409         else
3410                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
3411
3412         if (!nsecs)
3413                 return false;
3414
3415         rq->rq_flags |= RQF_MQ_POLL_SLEPT;
3416
3417         /*
3418          * kt is the pre-sleep target: either the user-set poll_nsec or
3419          * 'avg_completion_time / 2' from the stats gathered above.
3420          */
3421         kt = nsecs;
3422
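        /*
         * Arm relative on the first pass; starting the timer converts the
         * stored expiry to absolute time, so re-arms in the loop below use
         * HRTIMER_MODE_ABS and keep the original deadline.
         */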
3423         mode = HRTIMER_MODE_REL;
3424         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
3425         hrtimer_set_expires(&hs.timer, kt);
3426
3427         hrtimer_init_sleeper(&hs, current);
3428         do {
3429                 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
3430                         break;
3431                 set_current_state(TASK_UNINTERRUPTIBLE);
3432                 hrtimer_start_expires(&hs.timer, mode);
3433                 if (hs.task)
3434                         io_schedule();
3435                 hrtimer_cancel(&hs.timer);
3436                 mode = HRTIMER_MODE_ABS;
3437         } while (hs.task && !signal_pending(current));
3438
3439         __set_current_state(TASK_RUNNING);
3440         destroy_hrtimer_on_stack(&hs.timer);
3441         return true;
3442 }
3443
3444 static bool blk_mq_poll_hybrid(struct request_queue *q,
3445                                struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
3446 {
3447         struct request *rq;
3448
3449         if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
3450                 return false;
3451
3452         if (!blk_qc_t_is_internal(cookie))
3453                 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3454         else {
3455                 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3456                 /*
3457                  * With scheduling, if the request has completed, we'll
3458                  * get a NULL return here, as we clear the sched tag when
3459                  * that happens. The request still remains valid, as always,
3460                  * so we should be safe with just the NULL check.
3461                  */
3462                 if (!rq)
3463                         return false;
3464         }
3465
3466         return blk_mq_poll_hybrid_sleep(q, hctx, rq);
3467 }
3468
3469 /**
3470  * blk_poll - poll for IO completions
3471  * @q:  the queue
3472  * @cookie: cookie passed back at IO submission time
3473  * @spin: whether to spin for completions
3474  *
3475  * Description:
3476  *    Poll for completions on the passed in queue. Returns number of
3477  *    completed entries found. If @spin is true, then blk_poll will continue
3478  *    looping until at least one completion is found, unless the task is
3479  *    otherwise marked running (or we need to reschedule).
3480  */
3481 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
3482 {
3483         struct blk_mq_hw_ctx *hctx;
3484         long state;
3485
3486         if (!blk_qc_t_valid(cookie) ||
3487             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3488                 return 0;
3489
3490         if (current->plug)
3491                 blk_flush_plug_list(current->plug, false);
3492
3493         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3494
3495         /*
3496          * If we sleep, have the caller restart the poll loop to reset
3497          * the state. Like for the other success return cases, the
3498          * caller is responsible for checking if the IO completed. If
3499          * the IO isn't complete, we'll get called again and will go
3500          * straight to the busy poll loop.
3501          */
3502         if (blk_mq_poll_hybrid(q, hctx, cookie))
3503                 return 1;
3504
3505         hctx->poll_considered++;
3506
3507         state = current->state;
3508         do {
3509                 int ret;
3510
3511                 hctx->poll_invoked++;
3512
3513                 ret = q->mq_ops->poll(hctx);
3514                 if (ret > 0) {
3515                         hctx->poll_success++;
3516                         __set_current_state(TASK_RUNNING);
3517                         return ret;
3518                 }
3519
3520                 if (signal_pending_state(state, current))
3521                         __set_current_state(TASK_RUNNING);
3522
3523                 if (current->state == TASK_RUNNING)
3524                         return 1;
3525                 if (ret < 0 || !spin)
3526                         break;
3527                 cpu_relax();
3528         } while (!need_resched());
3529
3530         __set_current_state(TASK_RUNNING);
3531         return 0;
3532 }
3533 EXPORT_SYMBOL_GPL(blk_poll);
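/*
 * A typical polled-I/O caller loop, modeled on the O_DIRECT path (hedged
 * sketch; "done" stands for whatever completion condition the caller
 * tracks, e.g. a flag set from its bi_end_io handler):
 *
 *	qc = submit_bio(&bio);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		if (!blk_poll(q, qc, true))
 *			io_schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */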
3534
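/*
 * Return the CPU of the software queue this request was allocated on;
 * drivers can use it for completion steering or per-CPU accounting.
 */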
3535 unsigned int blk_mq_rq_cpu(struct request *rq)
3536 {
3537         return rq->mq_ctx->cpu;
3538 }
3539 EXPORT_SYMBOL(blk_mq_rq_cpu);
3540
3541 static int __init blk_mq_init(void)
3542 {
3543         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3544                                 blk_mq_hctx_notify_dead);
3545         return 0;
3546 }
3547 subsys_initcall(blk_mq_init);