1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28
29 #include <trace/events/block.h>
30
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-debugfs.h"
35 #include "blk-mq-tag.h"
36 #include "blk-pm.h"
37 #include "blk-stat.h"
38 #include "blk-mq-sched.h"
39 #include "blk-rq-qos.h"
40
41 static void blk_mq_poll_stats_start(struct request_queue *q);
42 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
43
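/*
 * Map a request to a poll stats bucket. Buckets come in read/write pairs per
 * power-of-two size class, starting at 512 bytes; requests smaller than that
 * return -1, and oversized requests are clamped to the last pair.
 */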
44 static int blk_mq_poll_stats_bkt(const struct request *rq)
45 {
46         int ddir, bytes, bucket;
47
48         ddir = rq_data_dir(rq);
49         bytes = blk_rq_bytes(rq);
50
51         bucket = ddir + 2*(ilog2(bytes) - 9);
52
53         if (bucket < 0)
54                 return -1;
55         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
56                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
57
58         return bucket;
59 }
60
61 /*
62  * Check if any of the ctx, dispatch list or elevator
63  * have pending work in this hardware queue.
64  */
65 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
66 {
67         return !list_empty_careful(&hctx->dispatch) ||
68                 sbitmap_any_bit_set(&hctx->ctx_map) ||
69                         blk_mq_sched_has_work(hctx);
70 }
71
72 /*
73  * Mark this ctx as having pending work in this hardware queue
74  */
75 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
76                                      struct blk_mq_ctx *ctx)
77 {
78         const int bit = ctx->index_hw[hctx->type];
79
80         if (!sbitmap_test_bit(&hctx->ctx_map, bit))
81                 sbitmap_set_bit(&hctx->ctx_map, bit);
82 }
83
84 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
85                                       struct blk_mq_ctx *ctx)
86 {
87         const int bit = ctx->index_hw[hctx->type];
88
89         sbitmap_clear_bit(&hctx->ctx_map, bit);
90 }
91
92 struct mq_inflight {
93         struct hd_struct *part;
94         unsigned int *inflight;
95 };
96
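/*
 * blk_mq_queue_tag_busy_iter() callback: count requests that belong to the
 * partition in mi->part. Returning true keeps the iteration going.
 */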
97 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
98                                   struct request *rq, void *priv,
99                                   bool reserved)
100 {
101         struct mq_inflight *mi = priv;
102
103         /*
104          * index[0] counts the specific partition that was asked for.
105          */
106         if (rq->part == mi->part)
107                 mi->inflight[0]++;
108
109         return true;
110 }
111
112 unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
113 {
114         unsigned inflight[2];
115         struct mq_inflight mi = { .part = part, .inflight = inflight, };
116
117         inflight[0] = inflight[1] = 0;
118         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119
120         return inflight[0];
121 }
122
123 static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
124                                      struct request *rq, void *priv,
125                                      bool reserved)
126 {
127         struct mq_inflight *mi = priv;
128
129         if (rq->part == mi->part)
130                 mi->inflight[rq_data_dir(rq)]++;
131
132         return true;
133 }
134
135 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
136                          unsigned int inflight[2])
137 {
138         struct mq_inflight mi = { .part = part, .inflight = inflight, };
139
140         inflight[0] = inflight[1] = 0;
141         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
142 }
143
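/*
 * Start freezing a queue: the first caller to raise mq_freeze_depth kills
 * q_usage_counter so no new requests can enter the queue, and kicks the
 * hardware queues so already-submitted requests get dispatched and drained.
 */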
144 void blk_freeze_queue_start(struct request_queue *q)
145 {
146         int freeze_depth;
147
148         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
149         if (freeze_depth == 1) {
150                 percpu_ref_kill(&q->q_usage_counter);
151                 if (queue_is_mq(q))
152                         blk_mq_run_hw_queues(q, false);
153         }
154 }
155 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
156
157 void blk_mq_freeze_queue_wait(struct request_queue *q)
158 {
159         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
160 }
161 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
162
163 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
164                                      unsigned long timeout)
165 {
166         return wait_event_timeout(q->mq_freeze_wq,
167                                         percpu_ref_is_zero(&q->q_usage_counter),
168                                         timeout);
169 }
170 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
171
172 /*
173  * Guarantee no request is in use, so we can change any data structure of
174  * the queue afterward.
175  */
176 void blk_freeze_queue(struct request_queue *q)
177 {
178         /*
179          * In the !blk_mq case we are only calling this to kill the
180          * q_usage_counter, otherwise this increases the freeze depth
181          * and waits for it to return to zero.  For this reason there is
182          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
183          * exported to drivers as the only user for unfreeze is blk_mq.
184          */
185         blk_freeze_queue_start(q);
186         blk_mq_freeze_queue_wait(q);
187 }
188
189 void blk_mq_freeze_queue(struct request_queue *q)
190 {
191         /*
192          * ...just an alias to keep freeze and unfreeze actions balanced
193          * in the blk_mq_* namespace
194          */
195         blk_freeze_queue(q);
196 }
197 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
198
199 void blk_mq_unfreeze_queue(struct request_queue *q)
200 {
201         int freeze_depth;
202
203         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
204         WARN_ON_ONCE(freeze_depth < 0);
205         if (!freeze_depth) {
206                 percpu_ref_resurrect(&q->q_usage_counter);
207                 wake_up_all(&q->mq_freeze_wq);
208         }
209 }
210 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
211
212 /*
213  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
214  * mpt3sas driver such that this function can be removed.
215  */
216 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
217 {
218         blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
219 }
220 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
221
222 /**
223  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
224  * @q: request queue.
225  *
226  * Note: this function does not prevent the struct request end_io()
227  * callback from being invoked. Once this function returns, we make
228  * sure no dispatch can happen until the queue is unquiesced via
229  * blk_mq_unquiesce_queue().
230  */
231 void blk_mq_quiesce_queue(struct request_queue *q)
232 {
233         struct blk_mq_hw_ctx *hctx;
234         unsigned int i;
235         bool rcu = false;
236
237         blk_mq_quiesce_queue_nowait(q);
238
239         queue_for_each_hw_ctx(q, hctx, i) {
240                 if (hctx->flags & BLK_MQ_F_BLOCKING)
241                         synchronize_srcu(hctx->srcu);
242                 else
243                         rcu = true;
244         }
245         if (rcu)
246                 synchronize_rcu();
247 }
248 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
249
250 /*
251  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
252  * @q: request queue.
253  *
254  * This function restores the queue to the state it was in before
255  * blk_mq_quiesce_queue() was called.
256  */
257 void blk_mq_unquiesce_queue(struct request_queue *q)
258 {
259         blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
260
261         /* dispatch requests which are inserted during quiescing */
262         blk_mq_run_hw_queues(q, true);
263 }
264 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
265
266 void blk_mq_wake_waiters(struct request_queue *q)
267 {
268         struct blk_mq_hw_ctx *hctx;
269         unsigned int i;
270
271         queue_for_each_hw_ctx(q, hctx, i)
272                 if (blk_mq_hw_queue_mapped(hctx))
273                         blk_mq_tag_wakeup_all(hctx->tags, true);
274 }
275
276 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
277 {
278         return blk_mq_has_free_tags(hctx->tags);
279 }
280 EXPORT_SYMBOL(blk_mq_can_queue);
281
282 /*
283  * We only need start/end time stamping if stats are enabled, or if we are
284  * using an IO scheduler.
285  */
286 static inline bool blk_mq_need_time_stamp(struct request *rq)
287 {
288         return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
289 }
290
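/*
 * Initialize the request backing the given tag. With an I/O scheduler
 * (BLK_MQ_REQ_INTERNAL) the tag is a scheduler tag and the driver tag is
 * assigned later at dispatch time; otherwise it is the driver tag itself and
 * shared-tag accounting is updated here.
 */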
291 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
292                 unsigned int tag, unsigned int op)
293 {
294         struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
295         struct request *rq = tags->static_rqs[tag];
296         req_flags_t rq_flags = 0;
297
298         if (data->flags & BLK_MQ_REQ_INTERNAL) {
299                 rq->tag = -1;
300                 rq->internal_tag = tag;
301         } else {
302                 if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
303                         rq_flags = RQF_MQ_INFLIGHT;
304                         atomic_inc(&data->hctx->nr_active);
305                 }
306                 rq->tag = tag;
307                 rq->internal_tag = -1;
308                 data->hctx->tags->rqs[rq->tag] = rq;
309         }
310
311         /* csd/requeue_work/fifo_time is initialized before use */
312         rq->q = data->q;
313         rq->mq_ctx = data->ctx;
314         rq->mq_hctx = data->hctx;
315         rq->rq_flags = rq_flags;
316         rq->cmd_flags = op;
317         if (data->flags & BLK_MQ_REQ_PREEMPT)
318                 rq->rq_flags |= RQF_PREEMPT;
319         if (blk_queue_io_stat(data->q))
320                 rq->rq_flags |= RQF_IO_STAT;
321         INIT_LIST_HEAD(&rq->queuelist);
322         INIT_HLIST_NODE(&rq->hash);
323         RB_CLEAR_NODE(&rq->rb_node);
324         rq->rq_disk = NULL;
325         rq->part = NULL;
326         if (blk_mq_need_time_stamp(rq))
327                 rq->start_time_ns = ktime_get_ns();
328         else
329                 rq->start_time_ns = 0;
330         rq->io_start_time_ns = 0;
331         rq->nr_phys_segments = 0;
332 #if defined(CONFIG_BLK_DEV_INTEGRITY)
333         rq->nr_integrity_segments = 0;
334 #endif
335         /* tag was already set */
336         rq->extra_len = 0;
337         WRITE_ONCE(rq->deadline, 0);
338
339         rq->timeout = 0;
340
341         rq->end_io = NULL;
342         rq->end_io_data = NULL;
343
344         data->ctx->rq_dispatched[op_is_sync(op)]++;
345         refcount_set(&rq->ref, 1);
346         return rq;
347 }
348
349 static struct request *blk_mq_get_request(struct request_queue *q,
350                                           struct bio *bio,
351                                           struct blk_mq_alloc_data *data)
352 {
353         struct elevator_queue *e = q->elevator;
354         struct request *rq;
355         unsigned int tag;
356         bool put_ctx_on_error = false;
357
358         blk_queue_enter_live(q);
359         data->q = q;
360         if (likely(!data->ctx)) {
361                 data->ctx = blk_mq_get_ctx(q);
362                 put_ctx_on_error = true;
363         }
364         if (likely(!data->hctx))
365                 data->hctx = blk_mq_map_queue(q, data->cmd_flags,
366                                                 data->ctx);
367         if (data->cmd_flags & REQ_NOWAIT)
368                 data->flags |= BLK_MQ_REQ_NOWAIT;
369
370         if (e) {
371                 data->flags |= BLK_MQ_REQ_INTERNAL;
372
373                 /*
374                  * Flush requests are special and go directly to the
375                  * dispatch list. Don't include reserved tags in the
376                  * limiting, as it isn't useful.
377                  */
378                 if (!op_is_flush(data->cmd_flags) &&
379                     e->type->ops.limit_depth &&
380                     !(data->flags & BLK_MQ_REQ_RESERVED))
381                         e->type->ops.limit_depth(data->cmd_flags, data);
382         } else {
383                 blk_mq_tag_busy(data->hctx);
384         }
385
386         tag = blk_mq_get_tag(data);
387         if (tag == BLK_MQ_TAG_FAIL) {
388                 if (put_ctx_on_error) {
389                         blk_mq_put_ctx(data->ctx);
390                         data->ctx = NULL;
391                 }
392                 blk_queue_exit(q);
393                 return NULL;
394         }
395
396         rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
397         if (!op_is_flush(data->cmd_flags)) {
398                 rq->elv.icq = NULL;
399                 if (e && e->type->ops.prepare_request) {
400                         if (e->type->icq_cache)
401                                 blk_mq_sched_assign_ioc(rq);
402
403                         e->type->ops.prepare_request(rq, bio);
404                         rq->rq_flags |= RQF_ELVPRIV;
405                 }
406         }
407         data->hctx->queued++;
408         return rq;
409 }
410
411 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
412                 blk_mq_req_flags_t flags)
413 {
414         struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
415         struct request *rq;
416         int ret;
417
418         ret = blk_queue_enter(q, flags);
419         if (ret)
420                 return ERR_PTR(ret);
421
422         rq = blk_mq_get_request(q, NULL, &alloc_data);
423         blk_queue_exit(q);
424
425         if (!rq)
426                 return ERR_PTR(-EWOULDBLOCK);
427
428         blk_mq_put_ctx(alloc_data.ctx);
429
430         rq->__data_len = 0;
431         rq->__sector = (sector_t) -1;
432         rq->bio = rq->biotail = NULL;
433         return rq;
434 }
435 EXPORT_SYMBOL(blk_mq_alloc_request);
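/*
 * A rough usage sketch, not taken from this file (the exact setup depends on
 * the caller): a driver issuing a passthrough command might do
 *
 *    rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *    if (IS_ERR(rq))
 *            return PTR_ERR(rq);
 *    // ...fill in command-specific fields...
 *    blk_execute_rq(q, NULL, rq, 0);
 *    blk_mq_free_request(rq);
 */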
436
437 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
438         unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
439 {
440         struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
441         struct request *rq;
442         unsigned int cpu;
443         int ret;
444
445         /*
446          * If the tag allocator sleeps we could get an allocation for a
447          * different hardware context.  No need to complicate the low level
448          * allocator for the rare use case of a command tied to a specific
449          * queue.
450          */
451         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
452                 return ERR_PTR(-EINVAL);
453
454         if (hctx_idx >= q->nr_hw_queues)
455                 return ERR_PTR(-EIO);
456
457         ret = blk_queue_enter(q, flags);
458         if (ret)
459                 return ERR_PTR(ret);
460
461         /*
462          * Check if the hardware context is actually mapped to anything.
463          * If not, tell the caller that it should skip this queue.
464          */
465         alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
466         if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
467                 blk_queue_exit(q);
468                 return ERR_PTR(-EXDEV);
469         }
470         cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
471         alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
472
473         rq = blk_mq_get_request(q, NULL, &alloc_data);
474         blk_queue_exit(q);
475
476         if (!rq)
477                 return ERR_PTR(-EWOULDBLOCK);
478
479         return rq;
480 }
481 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
482
483 static void __blk_mq_free_request(struct request *rq)
484 {
485         struct request_queue *q = rq->q;
486         struct blk_mq_ctx *ctx = rq->mq_ctx;
487         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
488         const int sched_tag = rq->internal_tag;
489
490         blk_pm_mark_last_busy(rq);
491         rq->mq_hctx = NULL;
492         if (rq->tag != -1)
493                 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
494         if (sched_tag != -1)
495                 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
496         blk_mq_sched_restart(hctx);
497         blk_queue_exit(q);
498 }
499
500 void blk_mq_free_request(struct request *rq)
501 {
502         struct request_queue *q = rq->q;
503         struct elevator_queue *e = q->elevator;
504         struct blk_mq_ctx *ctx = rq->mq_ctx;
505         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
506
507         if (rq->rq_flags & RQF_ELVPRIV) {
508                 if (e && e->type->ops.finish_request)
509                         e->type->ops.finish_request(rq);
510                 if (rq->elv.icq) {
511                         put_io_context(rq->elv.icq->ioc);
512                         rq->elv.icq = NULL;
513                 }
514         }
515
516         ctx->rq_completed[rq_is_sync(rq)]++;
517         if (rq->rq_flags & RQF_MQ_INFLIGHT)
518                 atomic_dec(&hctx->nr_active);
519
520         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
521                 laptop_io_completion(q->backing_dev_info);
522
523         rq_qos_done(q, rq);
524
525         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
526         if (refcount_dec_and_test(&rq->ref))
527                 __blk_mq_free_request(rq);
528 }
529 EXPORT_SYMBOL_GPL(blk_mq_free_request);
530
531 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
532 {
533         u64 now = 0;
534
535         if (blk_mq_need_time_stamp(rq))
536                 now = ktime_get_ns();
537
538         if (rq->rq_flags & RQF_STATS) {
539                 blk_mq_poll_stats_start(rq->q);
540                 blk_stat_add(rq, now);
541         }
542
543         if (rq->internal_tag != -1)
544                 blk_mq_sched_completed_request(rq, now);
545
546         blk_account_io_done(rq, now);
547
548         if (rq->end_io) {
549                 rq_qos_done(rq->q, rq);
550                 rq->end_io(rq, error);
551         } else {
552                 blk_mq_free_request(rq);
553         }
554 }
555 EXPORT_SYMBOL(__blk_mq_end_request);
556
557 void blk_mq_end_request(struct request *rq, blk_status_t error)
558 {
559         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
560                 BUG();
561         __blk_mq_end_request(rq, error);
562 }
563 EXPORT_SYMBOL(blk_mq_end_request);
564
565 static void __blk_mq_complete_request_remote(void *data)
566 {
567         struct request *rq = data;
568         struct request_queue *q = rq->q;
569
570         q->mq_ops->complete(rq);
571 }
572
573 static void __blk_mq_complete_request(struct request *rq)
574 {
575         struct blk_mq_ctx *ctx = rq->mq_ctx;
576         struct request_queue *q = rq->q;
577         bool shared = false;
578         int cpu;
579
580         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
581         /*
582          * For most single queue controllers there is only one irq vector
583          * for handling IO completion, and its affinity is set to all
584          * possible CPUs. On most architectures this means the irq is
585          * handled on one specific CPU.
586          *
587          * So complete the IO request in softirq context in the single queue
588          * case to avoid degrading IO performance through irqs-off latency.
589          */
590         if (q->nr_hw_queues == 1) {
591                 __blk_complete_request(rq);
592                 return;
593         }
594
595         /*
596          * For a polled request, always complete locally; it's pointless
597          * to redirect the completion.
598          */
599         if ((rq->cmd_flags & REQ_HIPRI) ||
600             !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
601                 q->mq_ops->complete(rq);
602                 return;
603         }
604
605         cpu = get_cpu();
606         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
607                 shared = cpus_share_cache(cpu, ctx->cpu);
608
609         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
610                 rq->csd.func = __blk_mq_complete_request_remote;
611                 rq->csd.info = rq;
612                 rq->csd.flags = 0;
613                 smp_call_function_single_async(ctx->cpu, &rq->csd);
614         } else {
615                 q->mq_ops->complete(rq);
616         }
617         put_cpu();
618 }
619
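/*
 * hctx_lock()/hctx_unlock() protect the dispatch path: hctxs flagged
 * BLK_MQ_F_BLOCKING (the driver may sleep in ->queue_rq()) use SRCU, all
 * others use plain RCU.
 */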
620 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
621         __releases(hctx->srcu)
622 {
623         if (!(hctx->flags & BLK_MQ_F_BLOCKING))
624                 rcu_read_unlock();
625         else
626                 srcu_read_unlock(hctx->srcu, srcu_idx);
627 }
628
629 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
630         __acquires(hctx->srcu)
631 {
632         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
633                 /* shut up gcc false positive */
634                 *srcu_idx = 0;
635                 rcu_read_lock();
636         } else
637                 *srcu_idx = srcu_read_lock(hctx->srcu);
638 }
639
640 /**
641  * blk_mq_complete_request - end I/O on a request
642  * @rq:         the request being processed
643  *
644  * Description:
645  *      Ends all I/O on a request. It does not handle partial completions.
646  *      The actual completion happens out-of-order, through an IPI handler.
647  **/
648 bool blk_mq_complete_request(struct request *rq)
649 {
650         if (unlikely(blk_should_fake_timeout(rq->q)))
651                 return false;
652         __blk_mq_complete_request(rq);
653         return true;
654 }
655 EXPORT_SYMBOL(blk_mq_complete_request);
656
657 void blk_mq_complete_request_sync(struct request *rq)
658 {
659         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
660         rq->q->mq_ops->complete(rq);
661 }
662 EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
663
664 int blk_mq_request_started(struct request *rq)
665 {
666         return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
667 }
668 EXPORT_SYMBOL_GPL(blk_mq_request_started);
669
670 void blk_mq_start_request(struct request *rq)
671 {
672         struct request_queue *q = rq->q;
673
674         blk_mq_sched_started_request(rq);
675
676         trace_block_rq_issue(q, rq);
677
678         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
679                 rq->io_start_time_ns = ktime_get_ns();
680 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
681                 rq->throtl_size = blk_rq_sectors(rq);
682 #endif
683                 rq->rq_flags |= RQF_STATS;
684                 rq_qos_issue(q, rq);
685         }
686
687         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
688
689         blk_add_timer(rq);
690         WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
691
692         if (q->dma_drain_size && blk_rq_bytes(rq)) {
693                 /*
694                  * Make sure space for the drain appears.  We know we can do
695                  * this because max_hw_segments has been adjusted to be one
696                  * fewer than the device can handle.
697                  */
698                 rq->nr_phys_segments++;
699         }
700 }
701 EXPORT_SYMBOL(blk_mq_start_request);
702
703 static void __blk_mq_requeue_request(struct request *rq)
704 {
705         struct request_queue *q = rq->q;
706
707         blk_mq_put_driver_tag(rq);
708
709         trace_block_rq_requeue(q, rq);
710         rq_qos_requeue(q, rq);
711
712         if (blk_mq_request_started(rq)) {
713                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
714                 rq->rq_flags &= ~RQF_TIMED_OUT;
715                 if (q->dma_drain_size && blk_rq_bytes(rq))
716                         rq->nr_phys_segments--;
717         }
718 }
719
720 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
721 {
722         __blk_mq_requeue_request(rq);
723
724         /* this request will be re-inserted to io scheduler queue */
725         blk_mq_sched_requeue_request(rq);
726
727         BUG_ON(!list_empty(&rq->queuelist));
728         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
729 }
730 EXPORT_SYMBOL(blk_mq_requeue_request);
731
732 static void blk_mq_requeue_work(struct work_struct *work)
733 {
734         struct request_queue *q =
735                 container_of(work, struct request_queue, requeue_work.work);
736         LIST_HEAD(rq_list);
737         struct request *rq, *next;
738
739         spin_lock_irq(&q->requeue_lock);
740         list_splice_init(&q->requeue_list, &rq_list);
741         spin_unlock_irq(&q->requeue_lock);
742
743         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
744                 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
745                         continue;
746
747                 rq->rq_flags &= ~RQF_SOFTBARRIER;
748                 list_del_init(&rq->queuelist);
749                 /*
750                  * If RQF_DONTPREP is set, rq already contains driver specific
751                  * data, so insert it into the hctx dispatch list to avoid any
752                  * merging.
753                  */
754                 if (rq->rq_flags & RQF_DONTPREP)
755                         blk_mq_request_bypass_insert(rq, false);
756                 else
757                         blk_mq_sched_insert_request(rq, true, false, false);
758         }
759
760         while (!list_empty(&rq_list)) {
761                 rq = list_entry(rq_list.next, struct request, queuelist);
762                 list_del_init(&rq->queuelist);
763                 blk_mq_sched_insert_request(rq, false, false, false);
764         }
765
766         blk_mq_run_hw_queues(q, false);
767 }
768
769 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
770                                 bool kick_requeue_list)
771 {
772         struct request_queue *q = rq->q;
773         unsigned long flags;
774
775         /*
776          * We abuse this flag that is otherwise used by the I/O scheduler to
777          * request head insertion from the workqueue.
778          */
779         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
780
781         spin_lock_irqsave(&q->requeue_lock, flags);
782         if (at_head) {
783                 rq->rq_flags |= RQF_SOFTBARRIER;
784                 list_add(&rq->queuelist, &q->requeue_list);
785         } else {
786                 list_add_tail(&rq->queuelist, &q->requeue_list);
787         }
788         spin_unlock_irqrestore(&q->requeue_lock, flags);
789
790         if (kick_requeue_list)
791                 blk_mq_kick_requeue_list(q);
792 }
793
794 void blk_mq_kick_requeue_list(struct request_queue *q)
795 {
796         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
797 }
798 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
799
800 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
801                                     unsigned long msecs)
802 {
803         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
804                                     msecs_to_jiffies(msecs));
805 }
806 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
807
808 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
809 {
810         if (tag < tags->nr_tags) {
811                 prefetch(tags->rqs[tag]);
812                 return tags->rqs[tag];
813         }
814
815         return NULL;
816 }
817 EXPORT_SYMBOL(blk_mq_tag_to_rq);
818
819 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
820                                void *priv, bool reserved)
821 {
822         /*
823          * If we find a request that is inflight and the queue matches,
824          * we know the queue is busy. Return false to stop the iteration.
825          */
826         if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
827                 bool *busy = priv;
828
829                 *busy = true;
830                 return false;
831         }
832
833         return true;
834 }
835
836 bool blk_mq_queue_inflight(struct request_queue *q)
837 {
838         bool busy = false;
839
840         blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
841         return busy;
842 }
843 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
844
845 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
846 {
847         req->rq_flags |= RQF_TIMED_OUT;
848         if (req->q->mq_ops->timeout) {
849                 enum blk_eh_timer_return ret;
850
851                 ret = req->q->mq_ops->timeout(req, reserved);
852                 if (ret == BLK_EH_DONE)
853                         return;
854                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
855         }
856
857         blk_add_timer(req);
858 }
859
860 static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
861 {
862         unsigned long deadline;
863
864         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
865                 return false;
866         if (rq->rq_flags & RQF_TIMED_OUT)
867                 return false;
868
869         deadline = READ_ONCE(rq->deadline);
870         if (time_after_eq(jiffies, deadline))
871                 return true;
872
873         if (*next == 0)
874                 *next = deadline;
875         else if (time_after(*next, deadline))
876                 *next = deadline;
877         return false;
878 }
879
880 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
881                 struct request *rq, void *priv, bool reserved)
882 {
883         unsigned long *next = priv;
884
885         /*
886          * Just do a quick check if it is expired before locking the request,
887          * so we're not unnecessarily synchronizing across CPUs.
888          */
889         if (!blk_mq_req_expired(rq, next))
890                 return true;
891
892         /*
893          * We have reason to believe the request may be expired. Take a
894          * reference on the request to lock this request lifetime into its
895          * currently allocated context to prevent it from being reallocated in
896          * the event the completion by-passes this timeout handler.
897          *
898          * If the reference was already released, then the driver beat the
899          * timeout handler to posting a natural completion.
900          */
901         if (!refcount_inc_not_zero(&rq->ref))
902                 return true;
903
904         /*
905          * The request is now locked and cannot be reallocated underneath the
906          * timeout handler's processing. Re-verify this exact request is truly
907          * expired; if it is not expired, then the request was completed and
908          * reallocated as a new request.
909          */
910         if (blk_mq_req_expired(rq, next))
911                 blk_mq_rq_timed_out(rq, reserved);
912         if (refcount_dec_and_test(&rq->ref))
913                 __blk_mq_free_request(rq);
914
915         return true;
916 }
917
918 static void blk_mq_timeout_work(struct work_struct *work)
919 {
920         struct request_queue *q =
921                 container_of(work, struct request_queue, timeout_work);
922         unsigned long next = 0;
923         struct blk_mq_hw_ctx *hctx;
924         int i;
925
926         /* A deadlock might occur if a request is stuck requiring a
927          * timeout at the same time a queue freeze is waiting for
928          * completion, since the timeout code would not be able to
929          * acquire the queue reference here.
930          *
931          * That's why we don't use blk_queue_enter here; instead, we use
932          * percpu_ref_tryget directly, because we need to be able to
933          * obtain a reference even in the short window between the queue
934          * starting to freeze, by dropping the first reference in
935          * blk_freeze_queue_start, and the moment the last request is
936          * consumed, marked by the instant q_usage_counter reaches
937          * zero.
938          */
939         if (!percpu_ref_tryget(&q->q_usage_counter))
940                 return;
941
942         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
943
944         if (next != 0) {
945                 mod_timer(&q->timeout, next);
946         } else {
947                 /*
948                  * Request timeouts are handled as a forward rolling timer. If
949                  * we end up here it means that no requests are pending and
950                  * also that no request has been pending for a while. Mark
951                  * each hctx as idle.
952                  */
953                 queue_for_each_hw_ctx(q, hctx, i) {
954                         /* the hctx may be unmapped, so check it here */
955                         if (blk_mq_hw_queue_mapped(hctx))
956                                 blk_mq_tag_idle(hctx);
957                 }
958         }
959         blk_queue_exit(q);
960 }
961
962 struct flush_busy_ctx_data {
963         struct blk_mq_hw_ctx *hctx;
964         struct list_head *list;
965 };
966
967 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
968 {
969         struct flush_busy_ctx_data *flush_data = data;
970         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
971         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
972         enum hctx_type type = hctx->type;
973
974         spin_lock(&ctx->lock);
975         list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
976         sbitmap_clear_bit(sb, bitnr);
977         spin_unlock(&ctx->lock);
978         return true;
979 }
980
981 /*
982  * Process software queues that have been marked busy, splicing them
983  * to the for-dispatch list.
984  */
985 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
986 {
987         struct flush_busy_ctx_data data = {
988                 .hctx = hctx,
989                 .list = list,
990         };
991
992         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
993 }
994 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
995
996 struct dispatch_rq_data {
997         struct blk_mq_hw_ctx *hctx;
998         struct request *rq;
999 };
1000
1001 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1002                 void *data)
1003 {
1004         struct dispatch_rq_data *dispatch_data = data;
1005         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1006         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1007         enum hctx_type type = hctx->type;
1008
1009         spin_lock(&ctx->lock);
1010         if (!list_empty(&ctx->rq_lists[type])) {
1011                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1012                 list_del_init(&dispatch_data->rq->queuelist);
1013                 if (list_empty(&ctx->rq_lists[type]))
1014                         sbitmap_clear_bit(sb, bitnr);
1015         }
1016         spin_unlock(&ctx->lock);
1017
1018         return !dispatch_data->rq;
1019 }
1020
1021 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1022                                         struct blk_mq_ctx *start)
1023 {
1024         unsigned off = start ? start->index_hw[hctx->type] : 0;
1025         struct dispatch_rq_data data = {
1026                 .hctx = hctx,
1027                 .rq   = NULL,
1028         };
1029
1030         __sbitmap_for_each_set(&hctx->ctx_map, off,
1031                                dispatch_rq_from_ctx, &data);
1032
1033         return data.rq;
1034 }
1035
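/*
 * Convert the number of requests dispatched in one run into a log2 bucket
 * index for the hctx->dispatched[] counters.
 */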
1036 static inline unsigned int queued_to_index(unsigned int queued)
1037 {
1038         if (!queued)
1039                 return 0;
1040
1041         return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1042 }
1043
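/*
 * Try to assign a driver tag to a request that so far only holds a scheduler
 * tag. On success the tag is also accounted against shared tags if needed.
 * Returns true if rq->tag is now valid.
 */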
1044 bool blk_mq_get_driver_tag(struct request *rq)
1045 {
1046         struct blk_mq_alloc_data data = {
1047                 .q = rq->q,
1048                 .hctx = rq->mq_hctx,
1049                 .flags = BLK_MQ_REQ_NOWAIT,
1050                 .cmd_flags = rq->cmd_flags,
1051         };
1052         bool shared;
1053
1054         if (rq->tag != -1)
1055                 goto done;
1056
1057         if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
1058                 data.flags |= BLK_MQ_REQ_RESERVED;
1059
1060         shared = blk_mq_tag_busy(data.hctx);
1061         rq->tag = blk_mq_get_tag(&data);
1062         if (rq->tag >= 0) {
1063                 if (shared) {
1064                         rq->rq_flags |= RQF_MQ_INFLIGHT;
1065                         atomic_inc(&data.hctx->nr_active);
1066                 }
1067                 data.hctx->tags->rqs[rq->tag] = rq;
1068         }
1069
1070 done:
1071         return rq->tag != -1;
1072 }
1073
1074 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1075                                 int flags, void *key)
1076 {
1077         struct blk_mq_hw_ctx *hctx;
1078
1079         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1080
1081         spin_lock(&hctx->dispatch_wait_lock);
1082         if (!list_empty(&wait->entry)) {
1083                 struct sbitmap_queue *sbq;
1084
1085                 list_del_init(&wait->entry);
1086                 sbq = &hctx->tags->bitmap_tags;
1087                 atomic_dec(&sbq->ws_active);
1088         }
1089         spin_unlock(&hctx->dispatch_wait_lock);
1090
1091         blk_mq_run_hw_queue(hctx, true);
1092         return 1;
1093 }
1094
1095 /*
1096  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1097  * the tag wakeups. For non-shared tags, we can simply mark ourselves as
1098  * needing a restart. In both cases, take care to check the condition
1099  * again after marking us as waiting.
1100  */
1101 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1102                                  struct request *rq)
1103 {
1104         struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1105         struct wait_queue_head *wq;
1106         wait_queue_entry_t *wait;
1107         bool ret;
1108
1109         if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
1110                 blk_mq_sched_mark_restart_hctx(hctx);
1111
1112                 /*
1113                  * It's possible that a tag was freed in the window between the
1114                  * allocation failure and adding the hardware queue to the wait
1115                  * queue.
1116                  *
1117                  * Don't clear RESTART here, someone else could have set it.
1118                  * At most this will cost an extra queue run.
1119                  */
1120                 return blk_mq_get_driver_tag(rq);
1121         }
1122
1123         wait = &hctx->dispatch_wait;
1124         if (!list_empty_careful(&wait->entry))
1125                 return false;
1126
1127         wq = &bt_wait_ptr(sbq, hctx)->wait;
1128
1129         spin_lock_irq(&wq->lock);
1130         spin_lock(&hctx->dispatch_wait_lock);
1131         if (!list_empty(&wait->entry)) {
1132                 spin_unlock(&hctx->dispatch_wait_lock);
1133                 spin_unlock_irq(&wq->lock);
1134                 return false;
1135         }
1136
1137         atomic_inc(&sbq->ws_active);
1138         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1139         __add_wait_queue(wq, wait);
1140
1141         /*
1142          * It's possible that a tag was freed in the window between the
1143          * allocation failure and adding the hardware queue to the wait
1144          * queue.
1145          */
1146         ret = blk_mq_get_driver_tag(rq);
1147         if (!ret) {
1148                 spin_unlock(&hctx->dispatch_wait_lock);
1149                 spin_unlock_irq(&wq->lock);
1150                 return false;
1151         }
1152
1153         /*
1154          * We got a tag, remove ourselves from the wait queue to ensure
1155          * someone else gets the wakeup.
1156          */
1157         list_del_init(&wait->entry);
1158         atomic_dec(&sbq->ws_active);
1159         spin_unlock(&hctx->dispatch_wait_lock);
1160         spin_unlock_irq(&wq->lock);
1161
1162         return true;
1163 }
1164
1165 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1166 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1167 /*
1168  * Update dispatch_busy with an Exponentially Weighted Moving Average (EWMA):
1169  * - EWMA is a simple way to compute a running average value
1170  * - the weights (7/8 and 1/8) are applied so that old samples decay exponentially
1171  * - a factor of 4 is used to avoid the result collapsing to 0; the exact
1172  *   factor doesn't matter much because EWMA decays exponentially
1173  */
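/*
 * For example, with the update below a hctx that is always busy converges to
 * dispatch_busy == 16, the fixed point of e = (7 * e + 16) / 8, while every
 * non-busy update decays the value by a factor of 7/8.
 */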
1174 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1175 {
1176         unsigned int ewma;
1177
1178         if (hctx->queue->elevator)
1179                 return;
1180
1181         ewma = hctx->dispatch_busy;
1182
1183         if (!ewma && !busy)
1184                 return;
1185
1186         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1187         if (busy)
1188                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1189         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1190
1191         hctx->dispatch_busy = ewma;
1192 }
1193
1194 #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
1195
1196 /*
1197  * Returns true if we did some work AND can potentially do more.
1198  */
1199 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1200                              bool got_budget)
1201 {
1202         struct blk_mq_hw_ctx *hctx;
1203         struct request *rq, *nxt;
1204         bool no_tag = false;
1205         int errors, queued;
1206         blk_status_t ret = BLK_STS_OK;
1207
1208         if (list_empty(list))
1209                 return false;
1210
1211         WARN_ON(!list_is_singular(list) && got_budget);
1212
1213         /*
1214          * Now process all the entries, sending them to the driver.
1215          */
1216         errors = queued = 0;
1217         do {
1218                 struct blk_mq_queue_data bd;
1219
1220                 rq = list_first_entry(list, struct request, queuelist);
1221
1222                 hctx = rq->mq_hctx;
1223                 if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
1224                         break;
1225
1226                 if (!blk_mq_get_driver_tag(rq)) {
1227                         /*
1228                          * The initial allocation attempt failed, so we need to
1229                          * rerun the hardware queue when a tag is freed. The
1230                          * waitqueue takes care of that. If the queue is run
1231                          * before we add this entry back on the dispatch list,
1232                          * we'll re-run it below.
1233                          */
1234                         if (!blk_mq_mark_tag_wait(hctx, rq)) {
1235                                 blk_mq_put_dispatch_budget(hctx);
1236                                 /*
1237                                  * For non-shared tags, the RESTART check
1238                                  * will suffice.
1239                                  */
1240                                 if (hctx->flags & BLK_MQ_F_TAG_SHARED)
1241                                         no_tag = true;
1242                                 break;
1243                         }
1244                 }
1245
1246                 list_del_init(&rq->queuelist);
1247
1248                 bd.rq = rq;
1249
1250                 /*
1251                  * Flag last if we have no more requests, or if we have more
1252                  * but can't assign a driver tag to it.
1253                  */
1254                 if (list_empty(list))
1255                         bd.last = true;
1256                 else {
1257                         nxt = list_first_entry(list, struct request, queuelist);
1258                         bd.last = !blk_mq_get_driver_tag(nxt);
1259                 }
1260
1261                 ret = q->mq_ops->queue_rq(hctx, &bd);
1262                 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
1263                         /*
1264                          * If an I/O scheduler has been configured and we got a
1265                          * driver tag for the next request already, free it
1266                          * again.
1267                          */
1268                         if (!list_empty(list)) {
1269                                 nxt = list_first_entry(list, struct request, queuelist);
1270                                 blk_mq_put_driver_tag(nxt);
1271                         }
1272                         list_add(&rq->queuelist, list);
1273                         __blk_mq_requeue_request(rq);
1274                         break;
1275                 }
1276
1277                 if (unlikely(ret != BLK_STS_OK)) {
1278                         errors++;
1279                         blk_mq_end_request(rq, BLK_STS_IOERR);
1280                         continue;
1281                 }
1282
1283                 queued++;
1284         } while (!list_empty(list));
1285
1286         hctx->dispatched[queued_to_index(queued)]++;
1287
1288         /*
1289          * Any items that need requeuing? Stuff them into hctx->dispatch;
1290          * that is where we will continue on the next queue run.
1291          */
1292         if (!list_empty(list)) {
1293                 bool needs_restart;
1294
1295                 /*
1296                  * If we didn't flush the entire list, we could have told
1297                  * the driver there was more coming, but that turned out to
1298                  * be a lie.
1299                  */
1300                 if (q->mq_ops->commit_rqs)
1301                         q->mq_ops->commit_rqs(hctx);
1302
1303                 spin_lock(&hctx->lock);
1304                 list_splice_init(list, &hctx->dispatch);
1305                 spin_unlock(&hctx->lock);
1306
1307                 /*
1308                  * If SCHED_RESTART was set by the caller of this function and
1309                  * it is no longer set that means that it was cleared by another
1310                  * thread and hence that a queue rerun is needed.
1311                  *
1312                  * If 'no_tag' is set, that means that we failed to get
1313                  * a driver tag with an I/O scheduler attached. If our dispatch
1314                  * waitqueue is no longer active, ensure that we run the queue
1315                  * AFTER adding our entries back to the list.
1316                  *
1317                  * If no I/O scheduler has been configured it is possible that
1318                  * the hardware queue got stopped and restarted before requests
1319                  * were pushed back onto the dispatch list. Rerun the queue to
1320                  * avoid starvation. Notes:
1321                  * - blk_mq_run_hw_queue() checks whether or not a queue has
1322                  *   been stopped before rerunning a queue.
1323                  * - Some but not all block drivers stop a queue before
1324                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1325                  *   and dm-rq.
1326                  *
1327                  * If the driver returns BLK_STS_RESOURCE and the SCHED_RESTART
1328                  * bit is set, run the queue after a delay to avoid IO stalls
1329                  * that could otherwise occur if the queue is idle.
1330                  */
1331                 needs_restart = blk_mq_sched_needs_restart(hctx);
1332                 if (!needs_restart ||
1333                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1334                         blk_mq_run_hw_queue(hctx, true);
1335                 else if (needs_restart && (ret == BLK_STS_RESOURCE))
1336                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1337
1338                 blk_mq_update_dispatch_busy(hctx, true);
1339                 return false;
1340         } else
1341                 blk_mq_update_dispatch_busy(hctx, false);
1342
1343         /*
1344          * If the host/device is unable to accept more work, inform the
1345          * caller of that.
1346          */
1347         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1348                 return false;
1349
1350         return (queued + errors) != 0;
1351 }
1352
1353 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1354 {
1355         int srcu_idx;
1356
1357         /*
1358          * We should be running this queue from one of the CPUs that
1359          * are mapped to it.
1360          *
1361          * There are at least two related races now between setting
1362          * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
1363          * __blk_mq_run_hw_queue():
1364          *
1365          * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
1366          *   but later it becomes online, then this warning is completely
1367          *   harmless
1368          *
1369          * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
1370          *   but later it becomes offline, then the warning can't be
1371          *   triggered, and we depend on the blk-mq timeout handler to
1372          *   handle requests dispatched to this hctx
1373          */
1374         if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1375                 cpu_online(hctx->next_cpu)) {
1376                 printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1377                         raw_smp_processor_id(),
1378                         cpumask_empty(hctx->cpumask) ? "inactive": "active");
1379                 dump_stack();
1380         }
1381
1382         /*
1383          * We can't run the queue inline with ints disabled. Ensure that
1384          * we catch bad users of this early.
1385          */
1386         WARN_ON_ONCE(in_interrupt());
1387
1388         might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1389
1390         hctx_lock(hctx, &srcu_idx);
1391         blk_mq_sched_dispatch_requests(hctx);
1392         hctx_unlock(hctx, srcu_idx);
1393 }
1394
1395 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1396 {
1397         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1398
1399         if (cpu >= nr_cpu_ids)
1400                 cpu = cpumask_first(hctx->cpumask);
1401         return cpu;
1402 }
1403
1404 /*
1405  * It'd be great if the workqueue API had a way to pass
1406  * in a mask and had some smarts for more clever placement.
1407  * For now we just round-robin here, switching for every
1408  * BLK_MQ_CPU_WORK_BATCH queued items.
1409  */
1410 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1411 {
1412         bool tried = false;
1413         int next_cpu = hctx->next_cpu;
1414
1415         if (hctx->queue->nr_hw_queues == 1)
1416                 return WORK_CPU_UNBOUND;
1417
1418         if (--hctx->next_cpu_batch <= 0) {
1419 select_cpu:
1420                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1421                                 cpu_online_mask);
1422                 if (next_cpu >= nr_cpu_ids)
1423                         next_cpu = blk_mq_first_mapped_cpu(hctx);
1424                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1425         }
1426
1427         /*
1428          * Do an unbound schedule if we can't find an online CPU for this hctx,
1429          * which should only happen while handling the CPU DEAD hotplug event.
1430          */
1431         if (!cpu_online(next_cpu)) {
1432                 if (!tried) {
1433                         tried = true;
1434                         goto select_cpu;
1435                 }
1436
1437                 /*
1438                  * Make sure to re-select the CPU next time once CPUs in
1439                  * hctx->cpumask become online again.
1440                  */
1441                 hctx->next_cpu = next_cpu;
1442                 hctx->next_cpu_batch = 1;
1443                 return WORK_CPU_UNBOUND;
1444         }
1445
1446         hctx->next_cpu = next_cpu;
1447         return next_cpu;
1448 }
1449
1450 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1451                                         unsigned long msecs)
1452 {
1453         if (unlikely(blk_mq_hctx_stopped(hctx)))
1454                 return;
1455
1456         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1457                 int cpu = get_cpu();
1458                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1459                         __blk_mq_run_hw_queue(hctx);
1460                         put_cpu();
1461                         return;
1462                 }
1463
1464                 put_cpu();
1465         }
1466
1467         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1468                                     msecs_to_jiffies(msecs));
1469 }
1470
1471 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1472 {
1473         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1474 }
1475 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1476
1477 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1478 {
1479         int srcu_idx;
1480         bool need_run;
1481
1482         /*
1483          * When the queue is quiesced, we may be switching the io scheduler,
1484          * updating nr_hw_queues, or doing other things, and we can't run the
1485          * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
1486          *
1487          * The queue will be rerun in blk_mq_unquiesce_queue() if it is
1488          * quiesced.
1489          */
1490         hctx_lock(hctx, &srcu_idx);
1491         need_run = !blk_queue_quiesced(hctx->queue) &&
1492                 blk_mq_hctx_has_pending(hctx);
1493         hctx_unlock(hctx, srcu_idx);
1494
1495         if (need_run) {
1496                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1497                 return true;
1498         }
1499
1500         return false;
1501 }
1502 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1503
1504 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1505 {
1506         struct blk_mq_hw_ctx *hctx;
1507         int i;
1508
1509         queue_for_each_hw_ctx(q, hctx, i) {
1510                 if (blk_mq_hctx_stopped(hctx))
1511                         continue;
1512
1513                 blk_mq_run_hw_queue(hctx, async);
1514         }
1515 }
1516 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1517
1518 /**
1519  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1520  * @q: request queue.
1521  *
1522  * The caller is responsible for serializing this function against
1523  * blk_mq_{start,stop}_hw_queue().
1524  */
1525 bool blk_mq_queue_stopped(struct request_queue *q)
1526 {
1527         struct blk_mq_hw_ctx *hctx;
1528         int i;
1529
1530         queue_for_each_hw_ctx(q, hctx, i)
1531                 if (blk_mq_hctx_stopped(hctx))
1532                         return true;
1533
1534         return false;
1535 }
1536 EXPORT_SYMBOL(blk_mq_queue_stopped);
1537
1538 /*
1539  * This function is often used by a driver to pause .queue_rq() when
1540  * there aren't enough resources or some condition isn't satisfied, and
1541  * BLK_STS_RESOURCE is usually returned.
1542  *
1543  * We do not guarantee that dispatch can be drained or blocked
1544  * after blk_mq_stop_hw_queue() returns. Please use
1545  * blk_mq_quiesce_queue() for that requirement.
1546  */
1547 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1548 {
1549         cancel_delayed_work(&hctx->run_work);
1550
1551         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1552 }
1553 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1554
1555 /*
1556  * This function is often used by a driver to pause .queue_rq() when
1557  * there aren't enough resources or some condition isn't satisfied, and
1558  * BLK_STS_RESOURCE is usually returned.
1559  *
1560  * We do not guarantee that dispatch can be drained or blocked
1561  * after blk_mq_stop_hw_queues() returns. Please use
1562  * blk_mq_quiesce_queue() for that requirement.
1563  */
1564 void blk_mq_stop_hw_queues(struct request_queue *q)
1565 {
1566         struct blk_mq_hw_ctx *hctx;
1567         int i;
1568
1569         queue_for_each_hw_ctx(q, hctx, i)
1570                 blk_mq_stop_hw_queue(hctx);
1571 }
1572 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1573
1574 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1575 {
1576         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1577
1578         blk_mq_run_hw_queue(hctx, false);
1579 }
1580 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1581
1582 void blk_mq_start_hw_queues(struct request_queue *q)
1583 {
1584         struct blk_mq_hw_ctx *hctx;
1585         int i;
1586
1587         queue_for_each_hw_ctx(q, hctx, i)
1588                 blk_mq_start_hw_queue(hctx);
1589 }
1590 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1591
1592 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1593 {
1594         if (!blk_mq_hctx_stopped(hctx))
1595                 return;
1596
1597         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1598         blk_mq_run_hw_queue(hctx, async);
1599 }
1600 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1601
1602 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1603 {
1604         struct blk_mq_hw_ctx *hctx;
1605         int i;
1606
1607         queue_for_each_hw_ctx(q, hctx, i)
1608                 blk_mq_start_stopped_hw_queue(hctx, async);
1609 }
1610 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1611
1612 static void blk_mq_run_work_fn(struct work_struct *work)
1613 {
1614         struct blk_mq_hw_ctx *hctx;
1615
1616         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1617
1618         /*
1619          * If we are stopped, don't run the queue.
1620          */
1621         if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
1622                 return;
1623
1624         __blk_mq_run_hw_queue(hctx);
1625 }
1626
1627 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1628                                             struct request *rq,
1629                                             bool at_head)
1630 {
1631         struct blk_mq_ctx *ctx = rq->mq_ctx;
1632         enum hctx_type type = hctx->type;
1633
1634         lockdep_assert_held(&ctx->lock);
1635
1636         trace_block_rq_insert(hctx->queue, rq);
1637
1638         if (at_head)
1639                 list_add(&rq->queuelist, &ctx->rq_lists[type]);
1640         else
1641                 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1642 }
1643
1644 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1645                              bool at_head)
1646 {
1647         struct blk_mq_ctx *ctx = rq->mq_ctx;
1648
1649         lockdep_assert_held(&ctx->lock);
1650
1651         __blk_mq_insert_req_list(hctx, rq, at_head);
1652         blk_mq_hctx_mark_pending(hctx, ctx);
1653 }
1654
1655 /*
1656  * Should only be used carefully, when the caller knows we want to
1657  * bypass a potential IO scheduler on the target device.
1658  */
1659 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1660 {
1661         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1662
1663         spin_lock(&hctx->lock);
1664         list_add_tail(&rq->queuelist, &hctx->dispatch);
1665         spin_unlock(&hctx->lock);
1666
1667         if (run_queue)
1668                 blk_mq_run_hw_queue(hctx, false);
1669 }
1670
1671 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1672                             struct list_head *list)
1673
1674 {
1675         struct request *rq;
1676         enum hctx_type type = hctx->type;
1677
1678         /*
1679          * Preemption doesn't flush the plug list, so it's possible that
1680          * ctx->cpu is offline by now.
1681          */
1682         list_for_each_entry(rq, list, queuelist) {
1683                 BUG_ON(rq->mq_ctx != ctx);
1684                 trace_block_rq_insert(hctx->queue, rq);
1685         }
1686
1687         spin_lock(&ctx->lock);
1688         list_splice_tail_init(list, &ctx->rq_lists[type]);
1689         blk_mq_hctx_mark_pending(hctx, ctx);
1690         spin_unlock(&ctx->lock);
1691 }
1692
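/*
 * Sort plugged requests by software queue, then hardware queue, then sector,
 * so that requests heading to the same queues end up adjacent and can be
 * inserted in a single batch by blk_mq_flush_plug_list() below.
 */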
1693 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
1694 {
1695         struct request *rqa = container_of(a, struct request, queuelist);
1696         struct request *rqb = container_of(b, struct request, queuelist);
1697
1698         if (rqa->mq_ctx < rqb->mq_ctx)
1699                 return -1;
1700         else if (rqa->mq_ctx > rqb->mq_ctx)
1701                 return 1;
1702         else if (rqa->mq_hctx < rqb->mq_hctx)
1703                 return -1;
1704         else if (rqa->mq_hctx > rqb->mq_hctx)
1705                 return 1;
1706
1707         return blk_rq_pos(rqa) > blk_rq_pos(rqb);
1708 }
1709
1710 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1711 {
1712         struct blk_mq_hw_ctx *this_hctx;
1713         struct blk_mq_ctx *this_ctx;
1714         struct request_queue *this_q;
1715         struct request *rq;
1716         LIST_HEAD(list);
1717         LIST_HEAD(rq_list);
1718         unsigned int depth;
1719
1720         list_splice_init(&plug->mq_list, &list);
1721
1722         if (plug->rq_count > 2 && plug->multiple_queues)
1723                 list_sort(NULL, &list, plug_rq_cmp);
1724
1725         plug->rq_count = 0;
1726
1727         this_q = NULL;
1728         this_hctx = NULL;
1729         this_ctx = NULL;
1730         depth = 0;
1731
1732         while (!list_empty(&list)) {
1733                 rq = list_entry_rq(list.next);
1734                 list_del_init(&rq->queuelist);
1735                 BUG_ON(!rq->q);
1736                 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
1737                         if (this_hctx) {
1738                                 trace_block_unplug(this_q, depth, !from_schedule);
1739                                 blk_mq_sched_insert_requests(this_hctx, this_ctx,
1740                                                                 &rq_list,
1741                                                                 from_schedule);
1742                         }
1743
1744                         this_q = rq->q;
1745                         this_ctx = rq->mq_ctx;
1746                         this_hctx = rq->mq_hctx;
1747                         depth = 0;
1748                 }
1749
1750                 depth++;
1751                 list_add_tail(&rq->queuelist, &rq_list);
1752         }
1753
1754         /*
1755          * If 'this_hctx' is set, we know we have entries to complete
1756          * on 'rq_list'. Do those.
1757          */
1758         if (this_hctx) {
1759                 trace_block_unplug(this_q, depth, !from_schedule);
1760                 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
1761                                                 from_schedule);
1762         }
1763 }
1764
1765 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1766 {
1767         blk_init_request_from_bio(rq, bio);
1768
1769         blk_account_io_start(rq, true);
1770 }
1771
1772 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1773                                             struct request *rq,
1774                                             blk_qc_t *cookie, bool last)
1775 {
1776         struct request_queue *q = rq->q;
1777         struct blk_mq_queue_data bd = {
1778                 .rq = rq,
1779                 .last = last,
1780         };
1781         blk_qc_t new_cookie;
1782         blk_status_t ret;
1783
1784         new_cookie = request_to_qc_t(hctx, rq);
1785
1786         /*
1787          * If the driver accepts the request, we are done. On a hard error,
1788          * the caller may kill it. For any other (busy) error, just add it
1789          * back to our list as we previously would have done.
1790          */
1791         ret = q->mq_ops->queue_rq(hctx, &bd);
1792         switch (ret) {
1793         case BLK_STS_OK:
1794                 blk_mq_update_dispatch_busy(hctx, false);
1795                 *cookie = new_cookie;
1796                 break;
1797         case BLK_STS_RESOURCE:
1798         case BLK_STS_DEV_RESOURCE:
1799                 blk_mq_update_dispatch_busy(hctx, true);
1800                 __blk_mq_requeue_request(rq);
1801                 break;
1802         default:
1803                 blk_mq_update_dispatch_busy(hctx, false);
1804                 *cookie = BLK_QC_T_NONE;
1805                 break;
1806         }
1807
1808         return ret;
1809 }
1810
1811 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1812                                                 struct request *rq,
1813                                                 blk_qc_t *cookie,
1814                                                 bool bypass_insert, bool last)
1815 {
1816         struct request_queue *q = rq->q;
1817         bool run_queue = true;
1818
1819         /*
1820          * An RCU or SRCU read lock is needed before checking the quiesced flag.
1821          *
1822          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
1823          * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller,
1824          * so the driver does not try to dispatch again.
1825          */
1826         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1827                 run_queue = false;
1828                 bypass_insert = false;
1829                 goto insert;
1830         }
1831
1832         if (q->elevator && !bypass_insert)
1833                 goto insert;
1834
1835         if (!blk_mq_get_dispatch_budget(hctx))
1836                 goto insert;
1837
1838         if (!blk_mq_get_driver_tag(rq)) {
1839                 blk_mq_put_dispatch_budget(hctx);
1840                 goto insert;
1841         }
1842
1843         return __blk_mq_issue_directly(hctx, rq, cookie, last);
1844 insert:
1845         if (bypass_insert)
1846                 return BLK_STS_RESOURCE;
1847
1848         blk_mq_request_bypass_insert(rq, run_queue);
1849         return BLK_STS_OK;
1850 }
1851
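/*
 * Try to send the request straight to the driver. If the driver reports a
 * resource shortage, fall back to inserting the request into the hctx
 * dispatch list (bypassing the scheduler); any other failure ends the
 * request with that error status.
 */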
1852 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1853                 struct request *rq, blk_qc_t *cookie)
1854 {
1855         blk_status_t ret;
1856         int srcu_idx;
1857
1858         might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1859
1860         hctx_lock(hctx, &srcu_idx);
1861
1862         ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
1863         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1864                 blk_mq_request_bypass_insert(rq, true);
1865         else if (ret != BLK_STS_OK)
1866                 blk_mq_end_request(rq, ret);
1867
1868         hctx_unlock(hctx, srcu_idx);
1869 }
1870
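/*
 * Issue a request directly on behalf of a caller that handles the busy case
 * itself: BLK_STS_RESOURCE is returned to the caller instead of being turned
 * into a dispatch-list insertion here (unless the queue is stopped or
 * quiesced, in which case the request is queued up and BLK_STS_OK returned).
 */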
1871 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
1872 {
1873         blk_status_t ret;
1874         int srcu_idx;
1875         blk_qc_t unused_cookie;
1876         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1877
1878         hctx_lock(hctx, &srcu_idx);
1879         ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
1880         hctx_unlock(hctx, srcu_idx);
1881
1882         return ret;
1883 }
1884
1885 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1886                 struct list_head *list)
1887 {
1888         while (!list_empty(list)) {
1889                 blk_status_t ret;
1890                 struct request *rq = list_first_entry(list, struct request,
1891                                 queuelist);
1892
1893                 list_del_init(&rq->queuelist);
1894                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
1895                 if (ret != BLK_STS_OK) {
1896                         if (ret == BLK_STS_RESOURCE ||
1897                                         ret == BLK_STS_DEV_RESOURCE) {
1898                                 blk_mq_request_bypass_insert(rq,
1899                                                         list_empty(list));
1900                                 break;
1901                         }
1902                         blk_mq_end_request(rq, ret);
1903                 }
1904         }
1905
1906         /*
1907          * If we didn't flush the entire list, we could have told
1908          * the driver there was more coming, but that turned out to
1909          * be a lie.
1910          */
1911         if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
1912                 hctx->queue->mq_ops->commit_rqs(hctx);
1913 }
1914
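/*
 * Add a request to the current plug list and record whether the plug now
 * spans more than one request queue; that information is used when the
 * plug list is flushed.
 */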
1915 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1916 {
1917         list_add_tail(&rq->queuelist, &plug->mq_list);
1918         plug->rq_count++;
1919         if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
1920                 struct request *tmp;
1921
1922                 tmp = list_first_entry(&plug->mq_list, struct request,
1923                                                 queuelist);
1924                 if (tmp->q != rq->q)
1925                         plug->multiple_queues = true;
1926         }
1927 }
1928
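/*
 * Main entry point for I/O submission on a blk-mq queue: split and prepare
 * the bio, try merging it into an existing request, and otherwise allocate a
 * new request which is then sent to the flush machinery, plugged, issued
 * directly to the driver, or handed to the I/O scheduler depending on the
 * queue configuration and the type of I/O.
 */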
1929 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1930 {
1931         const int is_sync = op_is_sync(bio->bi_opf);
1932         const int is_flush_fua = op_is_flush(bio->bi_opf);
1933         struct blk_mq_alloc_data data = { .flags = 0};
1934         struct request *rq;
1935         struct blk_plug *plug;
1936         struct request *same_queue_rq = NULL;
1937         blk_qc_t cookie;
1938
1939         blk_queue_bounce(q, &bio);
1940
1941         blk_queue_split(q, &bio);
1942
1943         if (!bio_integrity_prep(bio))
1944                 return BLK_QC_T_NONE;
1945
1946         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1947             blk_attempt_plug_merge(q, bio, &same_queue_rq))
1948                 return BLK_QC_T_NONE;
1949
1950         if (blk_mq_sched_bio_merge(q, bio))
1951                 return BLK_QC_T_NONE;
1952
1953         rq_qos_throttle(q, bio);
1954
1955         data.cmd_flags = bio->bi_opf;
1956         rq = blk_mq_get_request(q, bio, &data);
1957         if (unlikely(!rq)) {
1958                 rq_qos_cleanup(q, bio);
1959                 if (bio->bi_opf & REQ_NOWAIT)
1960                         bio_wouldblock_error(bio);
1961                 return BLK_QC_T_NONE;
1962         }
1963
1964         trace_block_getrq(q, bio, bio->bi_opf);
1965
1966         rq_qos_track(q, rq, bio);
1967
1968         cookie = request_to_qc_t(data.hctx, rq);
1969
1970         plug = current->plug;
1971         if (unlikely(is_flush_fua)) {
1972                 blk_mq_put_ctx(data.ctx);
1973                 blk_mq_bio_to_request(rq, bio);
1974
1975                 /* bypass scheduler for flush rq */
1976                 blk_insert_flush(rq);
1977                 blk_mq_run_hw_queue(data.hctx, true);
1978         } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
1979                 /*
1980                  * Use plugging if we have a ->commit_rqs() hook as well, as
1981                  * we know the driver uses bd->last in a smart fashion.
1982                  */
1983                 unsigned int request_count = plug->rq_count;
1984                 struct request *last = NULL;
1985
1986                 blk_mq_put_ctx(data.ctx);
1987                 blk_mq_bio_to_request(rq, bio);
1988
1989                 if (!request_count)
1990                         trace_block_plug(q);
1991                 else
1992                         last = list_entry_rq(plug->mq_list.prev);
1993
1994                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1995                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1996                         blk_flush_plug_list(plug, false);
1997                         trace_block_plug(q);
1998                 }
1999
2000                 blk_add_rq_to_plug(plug, rq);
2001         } else if (plug && !blk_queue_nomerges(q)) {
2002                 blk_mq_bio_to_request(rq, bio);
2003
2004                 /*
2005                  * We do limited plugging. If the bio can be merged, do that.
2006                  * Otherwise the existing request in the plug list will be
2007                  * issued, so the plug list will hold at most one request.
2008                  * The plug list might get flushed before this; if that happens,
2009                  * the plug list is empty and same_queue_rq is invalid.
2010                  */
2011                 if (list_empty(&plug->mq_list))
2012                         same_queue_rq = NULL;
2013                 if (same_queue_rq) {
2014                         list_del_init(&same_queue_rq->queuelist);
2015                         plug->rq_count--;
2016                 }
2017                 blk_add_rq_to_plug(plug, rq);
2018                 trace_block_plug(q);
2019
2020                 blk_mq_put_ctx(data.ctx);
2021
2022                 if (same_queue_rq) {
2023                         data.hctx = same_queue_rq->mq_hctx;
2024                         trace_block_unplug(q, 1, true);
2025                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2026                                         &cookie);
2027                 }
2028         } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
2029                         !data.hctx->dispatch_busy)) {
2030                 blk_mq_put_ctx(data.ctx);
2031                 blk_mq_bio_to_request(rq, bio);
2032                 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
2033         } else {
2034                 blk_mq_put_ctx(data.ctx);
2035                 blk_mq_bio_to_request(rq, bio);
2036                 blk_mq_sched_insert_request(rq, false, true, true);
2037         }
2038
2039         return cookie;
2040 }
2041
2042 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2043                      unsigned int hctx_idx)
2044 {
2045         struct page *page;
2046
2047         if (tags->rqs && set->ops->exit_request) {
2048                 int i;
2049
2050                 for (i = 0; i < tags->nr_tags; i++) {
2051                         struct request *rq = tags->static_rqs[i];
2052
2053                         if (!rq)
2054                                 continue;
2055                         set->ops->exit_request(set, rq, hctx_idx);
2056                         tags->static_rqs[i] = NULL;
2057                 }
2058         }
2059
2060         while (!list_empty(&tags->page_list)) {
2061                 page = list_first_entry(&tags->page_list, struct page, lru);
2062                 list_del_init(&page->lru);
2063                 /*
2064                  * Remove the kmemleak object previously allocated in
2065                  * blk_mq_alloc_rqs().
2066                  */
2067                 kmemleak_free(page_address(page));
2068                 __free_pages(page, page->private);
2069         }
2070 }
2071
2072 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
2073 {
2074         kfree(tags->rqs);
2075         tags->rqs = NULL;
2076         kfree(tags->static_rqs);
2077         tags->static_rqs = NULL;
2078
2079         blk_mq_free_tags(tags);
2080 }
2081
2082 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2083                                         unsigned int hctx_idx,
2084                                         unsigned int nr_tags,
2085                                         unsigned int reserved_tags)
2086 {
2087         struct blk_mq_tags *tags;
2088         int node;
2089
2090         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2091         if (node == NUMA_NO_NODE)
2092                 node = set->numa_node;
2093
2094         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
2095                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
2096         if (!tags)
2097                 return NULL;
2098
2099         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2100                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2101                                  node);
2102         if (!tags->rqs) {
2103                 blk_mq_free_tags(tags);
2104                 return NULL;
2105         }
2106
2107         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2108                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2109                                         node);
2110         if (!tags->static_rqs) {
2111                 kfree(tags->rqs);
2112                 blk_mq_free_tags(tags);
2113                 return NULL;
2114         }
2115
2116         return tags;
2117 }
2118
2119 static size_t order_to_size(unsigned int order)
2120 {
2121         return (size_t)PAGE_SIZE << order;
2122 }
2123
2124 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2125                                unsigned int hctx_idx, int node)
2126 {
2127         int ret;
2128
2129         if (set->ops->init_request) {
2130                 ret = set->ops->init_request(set, rq, hctx_idx, node);
2131                 if (ret)
2132                         return ret;
2133         }
2134
2135         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
2136         return 0;
2137 }
2138
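/*
 * Allocate the static requests (including per-request driver payload) for one
 * hardware queue. Requests are packed into page-order allocations, dropping
 * to smaller orders when higher-order pages are not available.
 */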
2139 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2140                      unsigned int hctx_idx, unsigned int depth)
2141 {
2142         unsigned int i, j, entries_per_page, max_order = 4;
2143         size_t rq_size, left;
2144         int node;
2145
2146         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2147         if (node == NUMA_NO_NODE)
2148                 node = set->numa_node;
2149
2150         INIT_LIST_HEAD(&tags->page_list);
2151
2152         /*
2153          * rq_size is the size of the request plus driver payload, rounded
2154          * to the cacheline size
2155          */
2156         rq_size = round_up(sizeof(struct request) + set->cmd_size,
2157                                 cache_line_size());
2158         left = rq_size * depth;
2159
2160         for (i = 0; i < depth; ) {
2161                 int this_order = max_order;
2162                 struct page *page;
2163                 int to_do;
2164                 void *p;
2165
2166                 while (this_order && left < order_to_size(this_order - 1))
2167                         this_order--;
2168
2169                 do {
2170                         page = alloc_pages_node(node,
2171                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2172                                 this_order);
2173                         if (page)
2174                                 break;
2175                         if (!this_order--)
2176                                 break;
2177                         if (order_to_size(this_order) < rq_size)
2178                                 break;
2179                 } while (1);
2180
2181                 if (!page)
2182                         goto fail;
2183
2184                 page->private = this_order;
2185                 list_add_tail(&page->lru, &tags->page_list);
2186
2187                 p = page_address(page);
2188                 /*
2189                  * Allow kmemleak to scan these pages as they contain pointers
2190                  * to additional allocations made via ops->init_request().
2191                  */
2192                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2193                 entries_per_page = order_to_size(this_order) / rq_size;
2194                 to_do = min(entries_per_page, depth - i);
2195                 left -= to_do * rq_size;
2196                 for (j = 0; j < to_do; j++) {
2197                         struct request *rq = p;
2198
2199                         tags->static_rqs[i] = rq;
2200                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2201                                 tags->static_rqs[i] = NULL;
2202                                 goto fail;
2203                         }
2204
2205                         p += rq_size;
2206                         i++;
2207                 }
2208         }
2209         return 0;
2210
2211 fail:
2212         blk_mq_free_rqs(set, tags, hctx_idx);
2213         return -ENOMEM;
2214 }
2215
2216 /*
2217  * 'cpu' is going away. Splice any existing rq_list entries from this
2218  * software queue to the hw queue dispatch list, and ensure that it
2219  * gets run.
2220  */
2221 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2222 {
2223         struct blk_mq_hw_ctx *hctx;
2224         struct blk_mq_ctx *ctx;
2225         LIST_HEAD(tmp);
2226         enum hctx_type type;
2227
2228         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2229         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2230         type = hctx->type;
2231
2232         spin_lock(&ctx->lock);
2233         if (!list_empty(&ctx->rq_lists[type])) {
2234                 list_splice_init(&ctx->rq_lists[type], &tmp);
2235                 blk_mq_hctx_clear_pending(hctx, ctx);
2236         }
2237         spin_unlock(&ctx->lock);
2238
2239         if (list_empty(&tmp))
2240                 return 0;
2241
2242         spin_lock(&hctx->lock);
2243         list_splice_tail_init(&tmp, &hctx->dispatch);
2244         spin_unlock(&hctx->lock);
2245
2246         blk_mq_run_hw_queue(hctx, true);
2247         return 0;
2248 }
2249
2250 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2251 {
2252         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2253                                             &hctx->cpuhp_dead);
2254 }
2255
2256 /* hctx->ctxs will be freed in queue's release handler */
2257 static void blk_mq_exit_hctx(struct request_queue *q,
2258                 struct blk_mq_tag_set *set,
2259                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2260 {
2261         if (blk_mq_hw_queue_mapped(hctx))
2262                 blk_mq_tag_idle(hctx);
2263
2264         if (set->ops->exit_request)
2265                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2266
2267         if (set->ops->exit_hctx)
2268                 set->ops->exit_hctx(hctx, hctx_idx);
2269
2270         if (hctx->flags & BLK_MQ_F_BLOCKING)
2271                 cleanup_srcu_struct(hctx->srcu);
2272
2273         blk_mq_remove_cpuhp(hctx);
2274         blk_free_flush_queue(hctx->fq);
2275         sbitmap_free(&hctx->ctx_map);
2276 }
2277
2278 static void blk_mq_exit_hw_queues(struct request_queue *q,
2279                 struct blk_mq_tag_set *set, int nr_queue)
2280 {
2281         struct blk_mq_hw_ctx *hctx;
2282         unsigned int i;
2283
2284         queue_for_each_hw_ctx(q, hctx, i) {
2285                 if (i == nr_queue)
2286                         break;
2287                 blk_mq_debugfs_unregister_hctx(hctx);
2288                 blk_mq_exit_hctx(q, set, hctx, i);
2289         }
2290 }
2291
2292 static int blk_mq_init_hctx(struct request_queue *q,
2293                 struct blk_mq_tag_set *set,
2294                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2295 {
2296         int node;
2297
2298         node = hctx->numa_node;
2299         if (node == NUMA_NO_NODE)
2300                 node = hctx->numa_node = set->numa_node;
2301
2302         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2303         spin_lock_init(&hctx->lock);
2304         INIT_LIST_HEAD(&hctx->dispatch);
2305         hctx->queue = q;
2306         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2307
2308         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2309
2310         hctx->tags = set->tags[hctx_idx];
2311
2312         /*
2313          * Allocate space for all possible cpus to avoid allocation at
2314          * runtime
2315          */
2316         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2317                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
2318         if (!hctx->ctxs)
2319                 goto unregister_cpu_notifier;
2320
2321         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2322                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
2323                 goto free_ctxs;
2324
2325         hctx->nr_ctx = 0;
2326
2327         spin_lock_init(&hctx->dispatch_wait_lock);
2328         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2329         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2330
2331         if (set->ops->init_hctx &&
2332             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2333                 goto free_bitmap;
2334
2335         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
2336                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
2337         if (!hctx->fq)
2338                 goto exit_hctx;
2339
2340         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
2341                 goto free_fq;
2342
2343         if (hctx->flags & BLK_MQ_F_BLOCKING)
2344                 init_srcu_struct(hctx->srcu);
2345
2346         return 0;
2347
2348  free_fq:
2349         blk_free_flush_queue(hctx->fq);
2350  exit_hctx:
2351         if (set->ops->exit_hctx)
2352                 set->ops->exit_hctx(hctx, hctx_idx);
2353  free_bitmap:
2354         sbitmap_free(&hctx->ctx_map);
2355  free_ctxs:
2356         kfree(hctx->ctxs);
2357  unregister_cpu_notifier:
2358         blk_mq_remove_cpuhp(hctx);
2359         return -1;
2360 }
2361
2362 static void blk_mq_init_cpu_queues(struct request_queue *q,
2363                                    unsigned int nr_hw_queues)
2364 {
2365         struct blk_mq_tag_set *set = q->tag_set;
2366         unsigned int i, j;
2367
2368         for_each_possible_cpu(i) {
2369                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2370                 struct blk_mq_hw_ctx *hctx;
2371                 int k;
2372
2373                 __ctx->cpu = i;
2374                 spin_lock_init(&__ctx->lock);
2375                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2376                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2377
2378                 __ctx->queue = q;
2379
2380                 /*
2381                  * Set the local node, IFF we have more than one hw queue. If
2382                  * not, we remain on the home node of the device.
2383                  */
2384                 for (j = 0; j < set->nr_maps; j++) {
2385                         hctx = blk_mq_map_queue_type(q, j, i);
2386                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2387                                 hctx->numa_node = local_memory_node(cpu_to_node(i));
2388                 }
2389         }
2390 }
2391
2392 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2393 {
2394         int ret = 0;
2395
2396         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2397                                         set->queue_depth, set->reserved_tags);
2398         if (!set->tags[hctx_idx])
2399                 return false;
2400
2401         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2402                                 set->queue_depth);
2403         if (!ret)
2404                 return true;
2405
2406         blk_mq_free_rq_map(set->tags[hctx_idx]);
2407         set->tags[hctx_idx] = NULL;
2408         return false;
2409 }
2410
2411 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2412                                          unsigned int hctx_idx)
2413 {
2414         if (set->tags && set->tags[hctx_idx]) {
2415                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2416                 blk_mq_free_rq_map(set->tags[hctx_idx]);
2417                 set->tags[hctx_idx] = NULL;
2418         }
2419 }
2420
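/*
 * (Re)build the mapping from software queues (one per possible CPU) to
 * hardware queues, updating each hctx's cpumask, ctx table and tags, and
 * freeing tag maps for hardware queues that end up with no software queues.
 */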
2421 static void blk_mq_map_swqueue(struct request_queue *q)
2422 {
2423         unsigned int i, j, hctx_idx;
2424         struct blk_mq_hw_ctx *hctx;
2425         struct blk_mq_ctx *ctx;
2426         struct blk_mq_tag_set *set = q->tag_set;
2427
2428         /*
2429          * Avoid others reading an incomplete hctx->cpumask through sysfs.
2430          */
2431         mutex_lock(&q->sysfs_lock);
2432
2433         queue_for_each_hw_ctx(q, hctx, i) {
2434                 cpumask_clear(hctx->cpumask);
2435                 hctx->nr_ctx = 0;
2436                 hctx->dispatch_from = NULL;
2437         }
2438
2439         /*
2440          * Map software to hardware queues.
2441          *
2442          * If the cpu isn't present, the cpu is mapped to the first hctx.
2443          */
2444         for_each_possible_cpu(i) {
2445                 hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
2446                 /* unmapped hw queue can be remapped after CPU topo changed */
2447                 if (!set->tags[hctx_idx] &&
2448                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2449                         /*
2450                          * If tags initialization fails for some hctx,
2451                          * that hctx won't be brought online.  In this
2452                          * case, remap the current ctx to hctx[0], which
2453                          * is guaranteed to always have tags allocated.
2454                          */
2455                         set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
2456                 }
2457
2458                 ctx = per_cpu_ptr(q->queue_ctx, i);
2459                 for (j = 0; j < set->nr_maps; j++) {
2460                         if (!set->map[j].nr_queues) {
2461                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2462                                                 HCTX_TYPE_DEFAULT, i);
2463                                 continue;
2464                         }
2465
2466                         hctx = blk_mq_map_queue_type(q, j, i);
2467                         ctx->hctxs[j] = hctx;
2468                         /*
2469                          * If the CPU is already set in the mask, then we've
2470                          * mapped this one already. This can happen if
2471                          * devices share queues across queue maps.
2472                          */
2473                         if (cpumask_test_cpu(i, hctx->cpumask))
2474                                 continue;
2475
2476                         cpumask_set_cpu(i, hctx->cpumask);
2477                         hctx->type = j;
2478                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
2479                         hctx->ctxs[hctx->nr_ctx++] = ctx;
2480
2481                         /*
2482                         /*
2483                          * If the nr_ctx type overflows, we have exceeded the
2484                          * number of sw queues we can support.
2485                          */
2486                 }
2487
2488                 for (; j < HCTX_MAX_TYPES; j++)
2489                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
2490                                         HCTX_TYPE_DEFAULT, i);
2491         }
2492
2493         mutex_unlock(&q->sysfs_lock);
2494
2495         queue_for_each_hw_ctx(q, hctx, i) {
2496                 /*
2497                  * If no software queues are mapped to this hardware queue,
2498                  * disable it and free the request entries.
2499                  */
2500                 if (!hctx->nr_ctx) {
2501                         /* Never unmap queue 0.  We need it as a
2502                          * fallback in case a new remap fails to
2503                          * allocate tags.
2504                          */
2505                         if (i && set->tags[i])
2506                                 blk_mq_free_map_and_requests(set, i);
2507
2508                         hctx->tags = NULL;
2509                         continue;
2510                 }
2511
2512                 hctx->tags = set->tags[i];
2513                 WARN_ON(!hctx->tags);
2514
2515                 /*
2516                  * Set the map size to the number of mapped software queues.
2517                  * This is more accurate and more efficient than looping
2518                  * over all possibly mapped software queues.
2519                  */
2520                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2521
2522                 /*
2523                  * Initialize batch roundrobin counts
2524                  */
2525                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
2526                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2527         }
2528 }
2529
2530 /*
2531  * Caller needs to ensure that we're either frozen/quiesced, or that
2532  * the queue isn't live yet.
2533  */
2534 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2535 {
2536         struct blk_mq_hw_ctx *hctx;
2537         int i;
2538
2539         queue_for_each_hw_ctx(q, hctx, i) {
2540                 if (shared)
2541                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2542                 else
2543                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2544         }
2545 }
2546
2547 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2548                                         bool shared)
2549 {
2550         struct request_queue *q;
2551
2552         lockdep_assert_held(&set->tag_list_lock);
2553
2554         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2555                 blk_mq_freeze_queue(q);
2556                 queue_set_hctx_shared(q, shared);
2557                 blk_mq_unfreeze_queue(q);
2558         }
2559 }
2560
2561 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2562 {
2563         struct blk_mq_tag_set *set = q->tag_set;
2564
2565         mutex_lock(&set->tag_list_lock);
2566         list_del_rcu(&q->tag_set_list);
2567         if (list_is_singular(&set->tag_list)) {
2568                 /* just transitioned to unshared */
2569                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2570                 /* update existing queue */
2571                 blk_mq_update_tag_set_depth(set, false);
2572         }
2573         mutex_unlock(&set->tag_list_lock);
2574         INIT_LIST_HEAD(&q->tag_set_list);
2575 }
2576
2577 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2578                                      struct request_queue *q)
2579 {
2580         mutex_lock(&set->tag_list_lock);
2581
2582         /*
2583          * Check to see if we're transitioning to shared (from 1 to 2 queues).
2584          */
2585         if (!list_empty(&set->tag_list) &&
2586             !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2587                 set->flags |= BLK_MQ_F_TAG_SHARED;
2588                 /* update existing queue */
2589                 blk_mq_update_tag_set_depth(set, true);
2590         }
2591         if (set->flags & BLK_MQ_F_TAG_SHARED)
2592                 queue_set_hctx_shared(q, true);
2593         list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2594
2595         mutex_unlock(&set->tag_list_lock);
2596 }
2597
2598 /* All allocations will be freed in release handler of q->mq_kobj */
2599 static int blk_mq_alloc_ctxs(struct request_queue *q)
2600 {
2601         struct blk_mq_ctxs *ctxs;
2602         int cpu;
2603
2604         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
2605         if (!ctxs)
2606                 return -ENOMEM;
2607
2608         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2609         if (!ctxs->queue_ctx)
2610                 goto fail;
2611
2612         for_each_possible_cpu(cpu) {
2613                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
2614                 ctx->ctxs = ctxs;
2615         }
2616
2617         q->mq_kobj = &ctxs->kobj;
2618         q->queue_ctx = ctxs->queue_ctx;
2619
2620         return 0;
2621  fail:
2622         kfree(ctxs);
2623         return -ENOMEM;
2624 }
2625
2626 /*
2627  * This is the actual release handler for mq, but we call it from the
2628  * request queue's release handler to avoid use-after-free and other
2629  * headaches: q->mq_kobj shouldn't have been introduced, but we can't
2630  * group the ctx/kctx kobjects without it.
2631  */
2632 void blk_mq_release(struct request_queue *q)
2633 {
2634         struct blk_mq_hw_ctx *hctx;
2635         unsigned int i;
2636
2637         /* hctx kobj stays in hctx */
2638         queue_for_each_hw_ctx(q, hctx, i) {
2639                 if (!hctx)
2640                         continue;
2641                 kobject_put(&hctx->kobj);
2642         }
2643
2644         kfree(q->queue_hw_ctx);
2645
2646         /*
2647          * Release .mq_kobj and the sw queues' kobjects now because
2648          * both share their lifetime with the request queue.
2649          */
2650         blk_mq_sysfs_deinit(q);
2651 }
2652
2653 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2654 {
2655         struct request_queue *uninit_q, *q;
2656
2657         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2658         if (!uninit_q)
2659                 return ERR_PTR(-ENOMEM);
2660
2661         q = blk_mq_init_allocated_queue(set, uninit_q);
2662         if (IS_ERR(q))
2663                 blk_cleanup_queue(uninit_q);
2664
2665         return q;
2666 }
2667 EXPORT_SYMBOL(blk_mq_init_queue);
2668
2669 /*
2670  * Helper for setting up a queue with mq ops, given queue depth, and
2671  * the passed in mq ops flags.
2672  */
2673 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
2674                                            const struct blk_mq_ops *ops,
2675                                            unsigned int queue_depth,
2676                                            unsigned int set_flags)
2677 {
2678         struct request_queue *q;
2679         int ret;
2680
2681         memset(set, 0, sizeof(*set));
2682         set->ops = ops;
2683         set->nr_hw_queues = 1;
2684         set->nr_maps = 1;
2685         set->queue_depth = queue_depth;
2686         set->numa_node = NUMA_NO_NODE;
2687         set->flags = set_flags;
2688
2689         ret = blk_mq_alloc_tag_set(set);
2690         if (ret)
2691                 return ERR_PTR(ret);
2692
2693         q = blk_mq_init_queue(set);
2694         if (IS_ERR(q)) {
2695                 blk_mq_free_tag_set(set);
2696                 return q;
2697         }
2698
2699         return q;
2700 }
2701 EXPORT_SYMBOL(blk_mq_init_sq_queue);
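
/*
 * A minimal usage sketch (the 'my_*' names are hypothetical): a simple
 * single-queue driver can embed a tag set in its device structure and let
 * this helper fill it in and create the queue in one call:
 *
 *	dev->queue = blk_mq_init_sq_queue(&dev->tag_set, &my_mq_ops, 128,
 *					  BLK_MQ_F_SHOULD_MERGE);
 *	if (IS_ERR(dev->queue))
 *		return PTR_ERR(dev->queue);
 *
 * On failure the tag set has already been freed; on success the driver is
 * expected to call blk_cleanup_queue() and blk_mq_free_tag_set() on teardown.
 */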
2702
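/*
 * The per-hctx srcu_struct is only needed for BLK_MQ_F_BLOCKING drivers, so
 * space for it is appended to the allocation only in that case. The
 * BUILD_BUG_ON below verifies that 'srcu' really is the last member of
 * struct blk_mq_hw_ctx, which this trick relies on.
 */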
2703 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2704 {
2705         int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2706
2707         BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2708                            __alignof__(struct blk_mq_hw_ctx)) !=
2709                      sizeof(struct blk_mq_hw_ctx));
2710
2711         if (tag_set->flags & BLK_MQ_F_BLOCKING)
2712                 hw_ctx_size += sizeof(struct srcu_struct);
2713
2714         return hw_ctx_size;
2715 }
2716
2717 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
2718                 struct blk_mq_tag_set *set, struct request_queue *q,
2719                 int hctx_idx, int node)
2720 {
2721         struct blk_mq_hw_ctx *hctx;
2722
2723         hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
2724                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2725                         node);
2726         if (!hctx)
2727                 return NULL;
2728
2729         if (!zalloc_cpumask_var_node(&hctx->cpumask,
2730                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2731                                 node)) {
2732                 kfree(hctx);
2733                 return NULL;
2734         }
2735
2736         atomic_set(&hctx->nr_active, 0);
2737         hctx->numa_node = node;
2738         hctx->queue_num = hctx_idx;
2739
2740         if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
2741                 free_cpumask_var(hctx->cpumask);
2742                 kfree(hctx);
2743                 return NULL;
2744         }
2745         blk_mq_hctx_kobj_init(hctx);
2746
2747         return hctx;
2748 }
2749
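/*
 * (Re)allocate the hardware contexts so that each hw queue is allocated on
 * the NUMA node it maps to. Existing hctxs already on the right node are
 * kept, and any hctxs beyond the new nr_hw_queues are torn down.
 */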
2750 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2751                                                 struct request_queue *q)
2752 {
2753         int i, j, end;
2754         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2755
2756         /* protect against switching io scheduler  */
2757         mutex_lock(&q->sysfs_lock);
2758         for (i = 0; i < set->nr_hw_queues; i++) {
2759                 int node;
2760                 struct blk_mq_hw_ctx *hctx;
2761
2762                 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
2763                 /*
2764                  * If the hw queue has been mapped to another numa node,
2765                  * we need to realloc the hctx. If allocation fails, fall
2766                  * back to using the previous one.
2767                  */
2768                 if (hctxs[i] && (hctxs[i]->numa_node == node))
2769                         continue;
2770
2771                 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
2772                 if (hctx) {
2773                         if (hctxs[i]) {
2774                                 blk_mq_exit_hctx(q, set, hctxs[i], i);
2775                                 kobject_put(&hctxs[i]->kobj);
2776                         }
2777                         hctxs[i] = hctx;
2778                 } else {
2779                         if (hctxs[i])
2780                                 pr_warn("Allocating new hctx on node %d failed,"
2781                                         " falling back to previous one on node %d\n",
2782                                         node, hctxs[i]->numa_node);
2783                         else
2784                                 break;
2785                 }
2786         }
2787         /*
2788          * If increasing nr_hw_queues failed, free the newly allocated hctxs
2789          * and keep the previous q->nr_hw_queues.
2790          */
2791         if (i != set->nr_hw_queues) {
2792                 j = q->nr_hw_queues;
2793                 end = i;
2794         } else {
2795                 j = i;
2796                 end = q->nr_hw_queues;
2797                 q->nr_hw_queues = set->nr_hw_queues;
2798         }
2799
2800         for (; j < end; j++) {
2801                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2802
2803                 if (hctx) {
2804                         if (hctx->tags)
2805                                 blk_mq_free_map_and_requests(set, j);
2806                         blk_mq_exit_hctx(q, set, hctx, j);
2807                         kobject_put(&hctx->kobj);
2808                         hctxs[j] = NULL;
2809
2810                 }
2811         }
2812         mutex_unlock(&q->sysfs_lock);
2813 }
2814
2815 /*
2816  * Maximum number of hardware queues we support. For single sets, we'll never
2817  * have more than the CPUs (software queues). For multiple sets, the tag_set
2818  * user may have set ->nr_hw_queues larger.
2819  */
2820 static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
2821 {
2822         if (set->nr_maps == 1)
2823                 return nr_cpu_ids;
2824
2825         return max(set->nr_hw_queues, nr_cpu_ids);
2826 }
2827
2828 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2829                                                   struct request_queue *q)
2830 {
2831         /* mark the queue as mq asap */
2832         q->mq_ops = set->ops;
2833
2834         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2835                                              blk_mq_poll_stats_bkt,
2836                                              BLK_MQ_POLL_STATS_BKTS, q);
2837         if (!q->poll_cb)
2838                 goto err_exit;
2839
2840         if (blk_mq_alloc_ctxs(q))
2841                 goto err_exit;
2842
2843         /* init q->mq_kobj and sw queues' kobjects */
2844         blk_mq_sysfs_init(q);
2845
2846         q->nr_queues = nr_hw_queues(set);
2847         q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
2848                                                 GFP_KERNEL, set->numa_node);
2849         if (!q->queue_hw_ctx)
2850                 goto err_sys_init;
2851
2852         blk_mq_realloc_hw_ctxs(set, q);
2853         if (!q->nr_hw_queues)
2854                 goto err_hctxs;
2855
2856         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2857         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2858
2859         q->tag_set = set;
2860
2861         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2862         if (set->nr_maps > HCTX_TYPE_POLL &&
2863             set->map[HCTX_TYPE_POLL].nr_queues)
2864                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
2865
2866         q->sg_reserved_size = INT_MAX;
2867
2868         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2869         INIT_LIST_HEAD(&q->requeue_list);
2870         spin_lock_init(&q->requeue_lock);
2871
2872         blk_queue_make_request(q, blk_mq_make_request);
2873
2874         /*
2875          * Do this after blk_queue_make_request() overrides it...
2876          */
2877         q->nr_requests = set->queue_depth;
2878
2879         /*
2880          * Default to classic polling
2881          */
2882         q->poll_nsec = BLK_MQ_POLL_CLASSIC;
2883
2884         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2885         blk_mq_add_queue_tag_set(set, q);
2886         blk_mq_map_swqueue(q);
2887
2888         if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2889                 int ret;
2890
2891                 ret = elevator_init_mq(q);
2892                 if (ret)
2893                         return ERR_PTR(ret);
2894         }
2895
2896         return q;
2897
2898 err_hctxs:
2899         kfree(q->queue_hw_ctx);
2900 err_sys_init:
2901         blk_mq_sysfs_deinit(q);
2902 err_exit:
2903         q->mq_ops = NULL;
2904         return ERR_PTR(-ENOMEM);
2905 }
2906 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2907
2908 void blk_mq_free_queue(struct request_queue *q)
2909 {
2910         struct blk_mq_tag_set   *set = q->tag_set;
2911
2912         blk_mq_del_queue_tag_set(q);
2913         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2914 }
2915
2916 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2917 {
2918         int i;
2919
2920         for (i = 0; i < set->nr_hw_queues; i++)
2921                 if (!__blk_mq_alloc_rq_map(set, i))
2922                         goto out_unwind;
2923
2924         return 0;
2925
2926 out_unwind:
2927         while (--i >= 0)
2928                 blk_mq_free_rq_map(set->tags[i]);
2929
2930         return -ENOMEM;
2931 }
2932
2933 /*
2934  * Allocate the request maps associated with this tag_set. Note that this
2935  * may reduce the depth asked for, if memory is tight. set->queue_depth
2936  * will be updated to reflect the allocated depth.
2937  */
2938 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2939 {
2940         unsigned int depth;
2941         int err;
2942
2943         depth = set->queue_depth;
2944         do {
2945                 err = __blk_mq_alloc_rq_maps(set);
2946                 if (!err)
2947                         break;
2948
2949                 set->queue_depth >>= 1;
2950                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2951                         err = -ENOMEM;
2952                         break;
2953                 }
2954         } while (set->queue_depth);
2955
2956         if (!set->queue_depth || err) {
2957                 pr_err("blk-mq: failed to allocate request map\n");
2958                 return -ENOMEM;
2959         }
2960
2961         if (depth != set->queue_depth)
2962                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2963                                                 depth, set->queue_depth);
2964
2965         return 0;
2966 }
2967
2968 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2969 {
2970         if (set->ops->map_queues && !is_kdump_kernel()) {
2971                 int i;
2972
2973                 /*
2974                  * transport .map_queues is usually done in the following
2975                  * way:
2976                  *
2977                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
2978                  *      mask = get_cpu_mask(queue)
2979                  *      for_each_cpu(cpu, mask)
2980                  *              set->map[x].mq_map[cpu] = queue;
2981                  * }
2982                  *
2983                  * When we need to remap, the table has to be cleared first
2984                  * to kill stale mappings, since after the remap a CPU may no
2985                  * longer be mapped to any hw queue.
2986                  */
2987                 for (i = 0; i < set->nr_maps; i++)
2988                         blk_mq_clear_mq_map(&set->map[i]);
2989
2990                 return set->ops->map_queues(set);
2991         } else {
2992                 BUG_ON(set->nr_maps > 1);
2993                 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2994         }
2995 }
2996
2997 /*
2998  * Alloc a tag set to be associated with one or more request queues.
2999  * May fail with EINVAL for various error conditions. May adjust the
3000  * requested depth down, if it's too large. In that case, the set
3001  * value will be stored in set->queue_depth.
3002  */
3003 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
3004 {
3005         int i, ret;
3006
3007         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
3008
3009         if (!set->nr_hw_queues)
3010                 return -EINVAL;
3011         if (!set->queue_depth)
3012                 return -EINVAL;
3013         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3014                 return -EINVAL;
3015
3016         if (!set->ops->queue_rq)
3017                 return -EINVAL;
3018
3019         if (!set->ops->get_budget ^ !set->ops->put_budget)
3020                 return -EINVAL;
3021
3022         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3023                 pr_info("blk-mq: reduced tag depth to %u\n",
3024                         BLK_MQ_MAX_DEPTH);
3025                 set->queue_depth = BLK_MQ_MAX_DEPTH;
3026         }
3027
3028         if (!set->nr_maps)
3029                 set->nr_maps = 1;
3030         else if (set->nr_maps > HCTX_MAX_TYPES)
3031                 return -EINVAL;
3032
3033         /*
3034          * If a crashdump is active, then we are potentially in a very
3035          * memory constrained environment. Limit us to 1 queue and
3036          * 64 tags to prevent using too much memory.
3037          */
3038         if (is_kdump_kernel()) {
3039                 set->nr_hw_queues = 1;
3040                 set->nr_maps = 1;
3041                 set->queue_depth = min(64U, set->queue_depth);
3042         }
3043         /*
3044          * There is no use for more h/w queues than cpus if we just have
3045          * a single map.
3046          */
3047         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
3048                 set->nr_hw_queues = nr_cpu_ids;
3049
3050         set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
3051                                  GFP_KERNEL, set->numa_node);
3052         if (!set->tags)
3053                 return -ENOMEM;
3054
3055         ret = -ENOMEM;
3056         for (i = 0; i < set->nr_maps; i++) {
3057                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
3058                                                   sizeof(set->map[i].mq_map[0]),
3059                                                   GFP_KERNEL, set->numa_node);
3060                 if (!set->map[i].mq_map)
3061                         goto out_free_mq_map;
3062                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
3063         }
3064
3065         ret = blk_mq_update_queue_map(set);
3066         if (ret)
3067                 goto out_free_mq_map;
3068
3069         ret = blk_mq_alloc_rq_maps(set);
3070         if (ret)
3071                 goto out_free_mq_map;
3072
3073         mutex_init(&set->tag_list_lock);
3074         INIT_LIST_HEAD(&set->tag_list);
3075
3076         return 0;
3077
3078 out_free_mq_map:
3079         for (i = 0; i < set->nr_maps; i++) {
3080                 kfree(set->map[i].mq_map);
3081                 set->map[i].mq_map = NULL;
3082         }
3083         kfree(set->tags);
3084         set->tags = NULL;
3085         return ret;
3086 }
3087 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
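
/*
 * A rough sketch of how a driver typically fills in a tag set before calling
 * blk_mq_alloc_tag_set() ('my_mq_ops' and 'struct my_cmd' are illustrative
 * names, not part of this API):
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(set);
 *
 * The matching teardown is blk_mq_free_tag_set() below.
 */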
3088
3089 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3090 {
3091         int i, j;
3092
3093         for (i = 0; i < nr_hw_queues(set); i++)
3094                 blk_mq_free_map_and_requests(set, i);
3095
3096         for (j = 0; j < set->nr_maps; j++) {
3097                 kfree(set->map[j].mq_map);
3098                 set->map[j].mq_map = NULL;
3099         }
3100
3101         kfree(set->tags);
3102         set->tags = NULL;
3103 }
3104 EXPORT_SYMBOL(blk_mq_free_tag_set);
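
/*
 * Note (editorial): request queues created from this set are expected to be
 * torn down first, e.g. blk_cleanup_queue(q) before blk_mq_free_tag_set(set),
 * since the hardware contexts and requests of those queues reference the
 * set's tags.
 */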
3105
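/*
 * Typically reached from the queue's sysfs "nr_requests" attribute
 * (queue_requests_store).  The function freezes and quiesces the queue
 * itself, so no requests are in flight while the tag depths are resized.
 */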
3106 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
3107 {
3108         struct blk_mq_tag_set *set = q->tag_set;
3109         struct blk_mq_hw_ctx *hctx;
3110         int i, ret;
3111
3112         if (!set)
3113                 return -EINVAL;
3114
3115         if (q->nr_requests == nr)
3116                 return 0;
3117
3118         blk_mq_freeze_queue(q);
3119         blk_mq_quiesce_queue(q);
3120
3121         ret = 0;
3122         queue_for_each_hw_ctx(q, hctx, i) {
3123                 if (!hctx->tags)
3124                         continue;
3125                 /*
3126                  * If an MQ scheduler is attached, resize only its tags;
3127                  * otherwise resize the hardware tags, as the legacy path did.
3128                  */
3129                 if (!hctx->sched_tags) {
3130                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
3131                                                         false);
3132                 } else {
3133                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
3134                                                         nr, true);
3135                 }
3136                 if (ret)
3137                         break;
3138         }
3139
3140         if (!ret)
3141                 q->nr_requests = nr;
3142
3143         blk_mq_unquiesce_queue(q);
3144         blk_mq_unfreeze_queue(q);
3145
3146         return ret;
3147 }
3148
3149 /*
3150  * request_queue and elevator_type pair.
3151  * Used only by __blk_mq_update_nr_hw_queues to cache
3152  * the elevator_type associated with a request_queue.
3153  */
3154 struct blk_mq_qe_pair {
3155         struct list_head node;
3156         struct request_queue *q;
3157         struct elevator_type *type;
3158 };
3159
3160 /*
3161  * Cache the elevator_type on the qe pair list and switch the
3162  * I/O scheduler to 'none'.
3163  */
3164 static bool blk_mq_elv_switch_none(struct list_head *head,
3165                 struct request_queue *q)
3166 {
3167         struct blk_mq_qe_pair *qe;
3168
3169         if (!q->elevator)
3170                 return true;
3171
3172         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
3173         if (!qe)
3174                 return false;
3175
3176         INIT_LIST_HEAD(&qe->node);
3177         qe->q = q;
3178         qe->type = q->elevator->type;
3179         list_add(&qe->node, head);
3180
3181         mutex_lock(&q->sysfs_lock);
3182         /*
3183          * After elevator_switch_mq, the previous elevator_queue will be
3184          * released by elevator_release, and the reference on the I/O
3185          * scheduler module taken by elevator_get will be put as well.
3186          * Take an extra reference on the module here to prevent it from
3187          * being unloaded in the meantime.
3188          */
3189         __module_get(qe->type->elevator_owner);
3190         elevator_switch_mq(q, NULL);
3191         mutex_unlock(&q->sysfs_lock);
3192
3193         return true;
3194 }
3195
3196 static void blk_mq_elv_switch_back(struct list_head *head,
3197                 struct request_queue *q)
3198 {
3199         struct blk_mq_qe_pair *qe;
3200         struct elevator_type *t = NULL;
3201
3202         list_for_each_entry(qe, head, node)
3203                 if (qe->q == q) {
3204                         t = qe->type;
3205                         break;
3206                 }
3207
3208         if (!t)
3209                 return;
3210
3211         list_del(&qe->node);
3212         kfree(qe);
3213
3214         mutex_lock(&q->sysfs_lock);
3215         elevator_switch_mq(q, t);
3216         mutex_unlock(&q->sysfs_lock);
3217 }
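
/*
 * The two helpers above are only meant to be used as a pair around a hardware
 * queue count update, roughly (sketch of the sequence used below):
 *
 *	blk_mq_freeze_queue(q);
 *	blk_mq_elv_switch_none(&head, q);
 *	... reallocate hctxs and remap software queues ...
 *	blk_mq_elv_switch_back(&head, q);
 *	blk_mq_unfreeze_queue(q);
 */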
3218
3219 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
3220                                                         int nr_hw_queues)
3221 {
3222         struct request_queue *q;
3223         LIST_HEAD(head);
3224         int prev_nr_hw_queues;
3225
3226         lockdep_assert_held(&set->tag_list_lock);
3227
3228         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
3229                 nr_hw_queues = nr_cpu_ids;
3230         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
3231                 return;
3232
3233         list_for_each_entry(q, &set->tag_list, tag_set_list)
3234                 blk_mq_freeze_queue(q);
3235         /*
3236          * Sync with blk_mq_queue_tag_busy_iter.
3237          */
3238         synchronize_rcu();
3239         /*
3240          * Switch IO scheduler to 'none', cleaning up the data associated
3241          * with the previous scheduler. We will switch back once we are done
3242          * updating the new sw to hw queue mappings.
3243          */
3244         list_for_each_entry(q, &set->tag_list, tag_set_list)
3245                 if (!blk_mq_elv_switch_none(&head, q))
3246                         goto switch_back;
3247
3248         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3249                 blk_mq_debugfs_unregister_hctxs(q);
3250                 blk_mq_sysfs_unregister(q);
3251         }
3252
3253         prev_nr_hw_queues = set->nr_hw_queues;
3254         set->nr_hw_queues = nr_hw_queues;
3255         blk_mq_update_queue_map(set);
3256 fallback:
3257         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3258                 blk_mq_realloc_hw_ctxs(set, q);
3259                 if (q->nr_hw_queues != set->nr_hw_queues) {
3260                         pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
3261                                         nr_hw_queues, prev_nr_hw_queues);
3262                         set->nr_hw_queues = prev_nr_hw_queues;
3263                         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3264                         goto fallback;
3265                 }
3266                 blk_mq_map_swqueue(q);
3267         }
3268
3269         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3270                 blk_mq_sysfs_register(q);
3271                 blk_mq_debugfs_register_hctxs(q);
3272         }
3273
3274 switch_back:
3275         list_for_each_entry(q, &set->tag_list, tag_set_list)
3276                 blk_mq_elv_switch_back(&head, q);
3277
3278         list_for_each_entry(q, &set->tag_list, tag_set_list)
3279                 blk_mq_unfreeze_queue(q);
3280 }
3281
3282 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
3283 {
3284         mutex_lock(&set->tag_list_lock);
3285         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3286         mutex_unlock(&set->tag_list_lock);
3287 }
3288 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
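
/*
 * Illustrative caller (editor's sketch): a driver that renegotiates its
 * interrupt vectors, e.g. after a controller reset, would re-map its queues
 * with something like the following; "dev" and "new_nr_queues" are made-up
 * placeholders.
 *
 *	blk_mq_update_nr_hw_queues(&dev->tag_set, new_nr_queues);
 */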
3289
3290 /* Enable polling stats and return whether they were already enabled. */
3291 static bool blk_poll_stats_enable(struct request_queue *q)
3292 {
3293         if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3294             blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
3295                 return true;
3296         blk_stat_add_callback(q, q->poll_cb);
3297         return false;
3298 }
3299
3300 static void blk_mq_poll_stats_start(struct request_queue *q)
3301 {
3302         /*
3303          * We don't arm the callback if polling stats are not enabled or the
3304          * callback is already active.
3305          */
3306         if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3307             blk_stat_is_active(q->poll_cb))
3308                 return;
3309
3310         blk_stat_activate_msecs(q->poll_cb, 100);
3311 }
3312
3313 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3314 {
3315         struct request_queue *q = cb->data;
3316         int bucket;
3317
3318         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3319                 if (cb->stat[bucket].nr_samples)
3320                         q->poll_stat[bucket] = cb->stat[bucket];
3321         }
3322 }
3323
3324 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3325                                        struct blk_mq_hw_ctx *hctx,
3326                                        struct request *rq)
3327 {
3328         unsigned long ret = 0;
3329         int bucket;
3330
3331         /*
3332          * If stats collection isn't on, don't sleep but turn it on for
3333          * future users
3334          */
3335         if (!blk_poll_stats_enable(q))
3336                 return 0;
3337
3338         /*
3339          * As an optimistic guess, use half of the mean service time
3340          * for this type of request. We can (and should) make this smarter.
3341          * For instance, if the completion latencies are tight, we can
3342          * get closer than just half the mean. This is especially
3343          * important on devices where the completion latencies are longer
3344          * than ~10 usec. We do use the stats for the relevant IO size,
3345          * if available, which does lead to better estimates.
3346          */
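	/*
	 * Worked example: with a tracked mean completion time of 20000ns for
	 * this bucket, the returned sleep target is (20000 + 1) / 2 = 10000ns.
	 */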
3347         bucket = blk_mq_poll_stats_bkt(rq);
3348         if (bucket < 0)
3349                 return ret;
3350
3351         if (q->poll_stat[bucket].nr_samples)
3352                 ret = (q->poll_stat[bucket].mean + 1) / 2;
3353
3354         return ret;
3355 }
3356
3357 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3358                                      struct blk_mq_hw_ctx *hctx,
3359                                      struct request *rq)
3360 {
3361         struct hrtimer_sleeper hs;
3362         enum hrtimer_mode mode;
3363         unsigned int nsecs;
3364         ktime_t kt;
3365
3366         if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
3367                 return false;
3368
3369         /*
3370          * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
3371          *
3372          *  0:  use half of prev avg
3373          * >0:  use this specific value
3374          */
3375         if (q->poll_nsec > 0)
3376                 nsecs = q->poll_nsec;
3377         else
3378                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
3379
3380         if (!nsecs)
3381                 return false;
3382
3383         rq->rq_flags |= RQF_MQ_POLL_SLEPT;
3384
3385         /*
3386          * 'nsecs' is either the fixed sysfs value or, via the stats
3387          * tracking above, 'avg_completion_time / 2' as the pre-sleep target.
3388          */
3389         kt = nsecs;
3390
3391         mode = HRTIMER_MODE_REL;
3392         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
3393         hrtimer_set_expires(&hs.timer, kt);
3394
3395         hrtimer_init_sleeper(&hs, current);
3396         do {
3397                 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
3398                         break;
3399                 set_current_state(TASK_UNINTERRUPTIBLE);
3400                 hrtimer_start_expires(&hs.timer, mode);
3401                 if (hs.task)
3402                         io_schedule();
3403                 hrtimer_cancel(&hs.timer);
3404                 mode = HRTIMER_MODE_ABS;
3405         } while (hs.task && !signal_pending(current));
3406
3407         __set_current_state(TASK_RUNNING);
3408         destroy_hrtimer_on_stack(&hs.timer);
3409         return true;
3410 }
3411
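/*
 * The cookie passed back at submission time packs the hardware queue index
 * into its upper bits and the tag into its lower bits, with a separate flag
 * marking scheduler ("internal") tags; that is what is decoded below.
 */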
3412 static bool blk_mq_poll_hybrid(struct request_queue *q,
3413                                struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
3414 {
3415         struct request *rq;
3416
3417         if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
3418                 return false;
3419
3420         if (!blk_qc_t_is_internal(cookie))
3421                 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3422         else {
3423                 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3424                 /*
3425                  * With scheduling, if the request has completed, we'll
3426                  * get a NULL return here, as we clear the sched tag when
3427                  * that happens. The request still remains valid, like always,
3428                  * so we should be safe with just the NULL check.
3429                  */
3430                 if (!rq)
3431                         return false;
3432         }
3433
3434         return blk_mq_poll_hybrid_sleep(q, hctx, rq);
3435 }
3436
3437 /**
3438  * blk_poll - poll for IO completions
3439  * @q:  the queue
3440  * @cookie: cookie passed back at IO submission time
3441  * @spin: whether to spin for completions
3442  *
3443  * Description:
3444  *    Poll for completions on the passed in queue. Returns number of
3445  *    completed entries found. If @spin is true, then blk_poll will continue
3446  *    looping until at least one completion is found, unless the task is
3447  *    otherwise marked running (or we need to reschedule).
3448  */
3449 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
3450 {
3451         struct blk_mq_hw_ctx *hctx;
3452         long state;
3453
3454         if (!blk_qc_t_valid(cookie) ||
3455             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3456                 return 0;
3457
3458         if (current->plug)
3459                 blk_flush_plug_list(current->plug, false);
3460
3461         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3462
3463         /*
3464          * If we sleep, have the caller restart the poll loop to reset
3465          * the state. Like for the other success return cases, the
3466          * caller is responsible for checking if the IO completed. If
3467          * the IO isn't complete, we'll get called again and will go
3468          * straight to the busy poll loop.
3469          */
3470         if (blk_mq_poll_hybrid(q, hctx, cookie))
3471                 return 1;
3472
3473         hctx->poll_considered++;
3474
3475         state = current->state;
3476         do {
3477                 int ret;
3478
3479                 hctx->poll_invoked++;
3480
3481                 ret = q->mq_ops->poll(hctx);
3482                 if (ret > 0) {
3483                         hctx->poll_success++;
3484                         __set_current_state(TASK_RUNNING);
3485                         return ret;
3486                 }
3487
3488                 if (signal_pending_state(state, current))
3489                         __set_current_state(TASK_RUNNING);
3490
3491                 if (current->state == TASK_RUNNING)
3492                         return 1;
3493                 if (ret < 0 || !spin)
3494                         break;
3495                 cpu_relax();
3496         } while (!need_resched());
3497
3498         __set_current_state(TASK_RUNNING);
3499         return 0;
3500 }
3501 EXPORT_SYMBOL_GPL(blk_poll);
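
/*
 * Illustrative polling loop (editor's sketch, modelled on the O_DIRECT path):
 * the submitter keeps the blk_qc_t cookie returned by submit_bio() and polls
 * until its own completion state says the bio is done.  "my_bio_done" is a
 * made-up helper, not a real API.
 *
 *	blk_qc_t cookie = submit_bio(bio);
 *
 *	while (!my_bio_done(bio)) {
 *		if (!blk_poll(bdev_get_queue(bdev), cookie, true))
 *			io_schedule();
 *	}
 */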
3502
3503 unsigned int blk_mq_rq_cpu(struct request *rq)
3504 {
3505         return rq->mq_ctx->cpu;
3506 }
3507 EXPORT_SYMBOL(blk_mq_rq_cpu);
3508
3509 static int __init blk_mq_init(void)
3510 {
3511         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3512                                 blk_mq_hctx_notify_dead);
3513         return 0;
3514 }
3515 subsys_initcall(blk_mq_init);