/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

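/*
 * Map a completed request to a poll-stats bucket: two buckets per size
 * class (one per data direction), with size classes doubling from 512
 * bytes.  Requests smaller than 512 bytes are not accounted (-1);
 * oversized ones collapse into the last bucket for their direction.
 */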
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
        int ddir, bytes, bucket;

        ddir = rq_data_dir(rq);
        bytes = blk_rq_bytes(rq);

        bucket = ddir + 2*(ilog2(bytes) - 9);

        if (bucket < 0)
                return -1;
        else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
                return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

        return bucket;
}

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return !list_empty_careful(&hctx->dispatch) ||
                sbitmap_any_bit_set(&hctx->ctx_map) ||
                        blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
                sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

struct mq_inflight {
        struct hd_struct *part;
        unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
                                  struct request *rq, void *priv,
                                  bool reserved)
{
        struct mq_inflight *mi = priv;

        if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
                /*
                 * index[0] counts the specific partition that was asked
                 * for. index[1] counts the ones that are active on the
                 * whole device, so increment that if mi->part is indeed
                 * a partition, and not a whole device.
                 */
                if (rq->part == mi->part)
                        mi->inflight[0]++;
                if (mi->part->partno)
                        mi->inflight[1]++;
        }
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
                      unsigned int inflight[2])
{
        struct mq_inflight mi = { .part = part, .inflight = inflight, };

        inflight[0] = inflight[1] = 0;
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

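/*
 * Start freezing @q: the first freezer kills q_usage_counter so that new
 * blk_queue_enter() callers block, then kicks the hw queues so requests
 * already sitting in the software queues get dispatched and drained.
 */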
void blk_freeze_queue_start(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                if (q->mq_ops)
                        blk_mq_run_hw_queues(q, false);
        }
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout)
{
        return wait_event_timeout(q->mq_freeze_wq,
                                        percpu_ref_is_zero(&q->q_usage_counter),
                                        timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero.  For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
        if (!q->mq_ops)
                blk_drain_queue(q);
        blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

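/*
 * Drop one freeze reference.  When the last freezer is gone, revive
 * q_usage_counter and wake everyone blocked in blk_queue_enter().
 */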
void blk_mq_unfreeze_queue(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_set(QUEUE_FLAG_QUIESCED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback function from being invoked. Once this function returns, we
 * make sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
        bool rcu = false;

        blk_mq_quiesce_queue_nowait(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
                        synchronize_srcu(hctx->srcu);
                else
                        rcu = true;
        }
        if (rcu)
                synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* dispatch requests which are inserted during quiescing */
        blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

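/*
 * Initialize a request taken from the static tag map.  With an I/O
 * scheduler attached (BLK_MQ_REQ_INTERNAL) the request only carries a
 * scheduler tag here; the driver tag is assigned later, at dispatch
 * time, by blk_mq_get_driver_tag().
 */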
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
                unsigned int tag, unsigned int op)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct request *rq = tags->static_rqs[tag];
        req_flags_t rq_flags = 0;

        if (data->flags & BLK_MQ_REQ_INTERNAL) {
                rq->tag = -1;
                rq->internal_tag = tag;
        } else {
                if (blk_mq_tag_busy(data->hctx)) {
                        rq_flags = RQF_MQ_INFLIGHT;
                        atomic_inc(&data->hctx->nr_active);
                }
                rq->tag = tag;
                rq->internal_tag = -1;
                data->hctx->tags->rqs[rq->tag] = rq;
        }

        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = data->q;
        rq->mq_ctx = data->ctx;
        rq->rq_flags = rq_flags;
        rq->cpu = -1;
        rq->cmd_flags = op;
        if (data->flags & BLK_MQ_REQ_PREEMPT)
                rq->rq_flags |= RQF_PREEMPT;
        if (blk_queue_io_stat(data->q))
                rq->rq_flags |= RQF_IO_STAT;
        INIT_LIST_HEAD(&rq->queuelist);
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->extra_len = 0;
        rq->__deadline = 0;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
#endif

        data->ctx->rq_dispatched[op_is_sync(op)]++;
        return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
                struct bio *bio, unsigned int op,
                struct blk_mq_alloc_data *data)
{
        struct elevator_queue *e = q->elevator;
        struct request *rq;
        unsigned int tag;
        bool put_ctx_on_error = false;

        blk_queue_enter_live(q);
        data->q = q;
        if (likely(!data->ctx)) {
                data->ctx = blk_mq_get_ctx(q);
                put_ctx_on_error = true;
        }
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
        if (op & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;

        if (e) {
                data->flags |= BLK_MQ_REQ_INTERNAL;

                /*
                 * Flush requests are special and go directly to the
                 * dispatch list.
                 */
                if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
                        e->type->ops.mq.limit_depth(op, data);
        }

        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_TAG_FAIL) {
                if (put_ctx_on_error) {
                        blk_mq_put_ctx(data->ctx);
                        data->ctx = NULL;
                }
                blk_queue_exit(q);
                return NULL;
        }

        rq = blk_mq_rq_ctx_init(data, tag, op);
        if (!op_is_flush(op)) {
                rq->elv.icq = NULL;
                if (e && e->type->ops.mq.prepare_request) {
                        if (e->type->icq_cache && rq_ioc(bio))
                                blk_mq_sched_assign_ioc(rq, bio);

                        e->type->ops.mq.prepare_request(rq, bio);
                        rq->rq_flags |= RQF_ELVPRIV;
                }
        }
        data->hctx->queued++;
        return rq;
}

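/*
 * Allocate a request outside of bio submission, e.g. for driver-internal
 * commands.  Illustrative usage sketch (not from this file):
 *
 *      rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *      if (IS_ERR(rq))
 *              return PTR_ERR(rq);
 *      ...
 *      blk_mq_free_request(rq);
 */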
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                blk_mq_req_flags_t flags)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        int ret;

        ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);

        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        blk_mq_put_ctx(alloc_data.ctx);

        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        unsigned int cpu;
        int ret;

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context.  No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not tell the caller that it should skip this queue.
         */
        alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
        if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
                blk_queue_exit(q);
                return ERR_PTR(-EXDEV);
        }
        cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

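/*
 * Release a request back to the tag map: tear down elevator private
 * data, drop accounting and wbt state, free the driver and scheduler
 * tags, and finally drop the queue reference taken at allocation time.
 */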
void blk_mq_free_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        const int sched_tag = rq->internal_tag;

        if (rq->rq_flags & RQF_ELVPRIV) {
                if (e && e->type->ops.mq.finish_request)
                        e->type->ops.mq.finish_request(rq);
                if (rq->elv.icq) {
                        put_io_context(rq->elv.icq->ioc);
                        rq->elv.icq = NULL;
                }
        }

        ctx->rq_completed[rq_is_sync(rq)]++;
        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);

        if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
                laptop_io_completion(q->backing_dev_info);

        wbt_done(q->rq_wb, &rq->issue_stat);

        if (blk_rq_rl(rq))
                blk_put_rl(blk_rq_rl(rq));

        blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
        if (rq->tag != -1)
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                wbt_done(rq->q->rq_wb, &rq->issue_stat);
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

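/*
 * Run the softirq completion handler, preferably on the CPU that
 * submitted the request: if QUEUE_FLAG_SAME_COMP is set and the
 * completing CPU does not share a cache with the submitting one, punt
 * to that CPU via an IPI; otherwise complete locally.
 */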
static void __blk_mq_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
        blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);

        if (rq->internal_tag != -1)
                blk_mq_sched_completed_request(rq);
        if (rq->rq_flags & RQF_STATS) {
                blk_mq_poll_stats_start(rq->q);
                blk_stat_add(rq);
        }

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
        __releases(hctx->srcu)
{
        if (!(hctx->flags & BLK_MQ_F_BLOCKING))
                rcu_read_unlock();
        else
                srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
        __acquires(hctx->srcu)
{
        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                /* shut up gcc false positive */
                *srcu_idx = 0;
                rcu_read_lock();
        } else
                *srcu_idx = srcu_read_lock(hctx->srcu);
}

static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
{
        unsigned long flags;

        /*
         * blk_mq_rq_aborted_gstate() is used from the completion path and
         * can thus be called from irq context.  u64_stats_fetch in the
         * middle of update on the same CPU leads to lockup.  Disable irq
         * while updating.
         */
        local_irq_save(flags);
        u64_stats_update_begin(&rq->aborted_gstate_sync);
        rq->aborted_gstate = gstate;
        u64_stats_update_end(&rq->aborted_gstate_sync);
        local_irq_restore(flags);
}

static u64 blk_mq_rq_aborted_gstate(struct request *rq)
{
        unsigned int start;
        u64 aborted_gstate;

        do {
                start = u64_stats_fetch_begin(&rq->aborted_gstate_sync);
                aborted_gstate = rq->aborted_gstate;
        } while (u64_stats_fetch_retry(&rq->aborted_gstate_sync, start));

        return aborted_gstate;
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:         the request being processed
 *
 * Description:
 *      Ends all I/O on a request. It does not handle partial completions.
 *      The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
        int srcu_idx;

        if (unlikely(blk_should_fake_timeout(q)))
                return;

        /*
         * If @rq->aborted_gstate equals the current instance, timeout is
         * claiming @rq and we lost.  This is synchronized through
         * hctx_lock().  See blk_mq_timeout_work() for details.
         *
         * Completion path never blocks and we can directly use RCU here
         * instead of hctx_lock() which can be either RCU or SRCU.
         * However, that would complicate paths which want to synchronize
         * against us.  Let's stay in sync with the issue path so that
         * hctx_lock() covers both issue and completion paths.
         */
        hctx_lock(hctx, &srcu_idx);
        if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
                __blk_mq_complete_request(rq);
        hctx_unlock(hctx, srcu_idx);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

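/*
 * Mark @rq as started: move it to MQ_RQ_IN_FLIGHT and arm the request
 * timeout.  Called by the driver once it starts processing the request.
 */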
void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_sched_started_request(rq);

        trace_block_rq_issue(q, rq);

        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
                rq->rq_flags |= RQF_STATS;
                wbt_issue(q->rq_wb, &rq->issue_stat);
        }

        WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

        /*
         * Mark @rq in-flight which also advances the generation number,
         * and register for timeout.  Protect with a seqcount to allow the
         * timeout path to read both @rq->gstate and @rq->deadline
         * coherently.
         *
         * This is the only place where a request is marked in-flight.  If
         * the timeout path reads an in-flight @rq->gstate, the
         * @rq->deadline it reads together under @rq->gstate_seq is
         * guaranteed to be the matching one.
         */
        preempt_disable();
        write_seqcount_begin(&rq->gstate_seq);

        blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
        blk_add_timer(rq);

        write_seqcount_end(&rq->gstate_seq);
        preempt_enable();

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears.  We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }
}
EXPORT_SYMBOL(blk_mq_start_request);

/*
 * When we reach here because the queue is busy, it's safe to change the
 * state to IDLE without checking @rq->aborted_gstate because we should
 * still be holding the RCU read lock and thus protected against timeout.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_put_driver_tag(rq);

        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);

        if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
                blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
        __blk_mq_requeue_request(rq);

        /* this request will be re-inserted to io scheduler queue */
        blk_mq_sched_requeue_request(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;

        spin_lock_irq(&q->requeue_lock);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irq(&q->requeue_lock);

        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
                if (!(rq->rq_flags & RQF_SOFTBARRIER))
                        continue;

                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, true, false, false);
        }

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, false, false, false);
        }

        blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertion from the workqueue.
         */
        BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);
        if (at_head) {
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
{
        kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
                                    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag < tags->nr_tags) {
                prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
        }

        return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
        unsigned long next;
        unsigned int next_set;
        unsigned int nr_expired;
};

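/*
 * A request has been claimed by the timeout path; call into the driver's
 * ->timeout() handler, if any, and act on its verdict: complete the
 * request, re-arm the timer, or leave the request to the driver.
 */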
static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
        const struct blk_mq_ops *ops = req->q->mq_ops;
        enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

        req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;

        if (ops->timeout)
                ret = ops->timeout(req, reserved);

        switch (ret) {
        case BLK_EH_HANDLED:
                __blk_mq_complete_request(req);
                break;
        case BLK_EH_RESET_TIMER:
                /*
                 * As nothing prevents completions from happening while
                 * ->aborted_gstate is set, this may lead to ignored
                 * completions and further spurious timeouts.
                 */
                blk_mq_rq_update_aborted_gstate(req, 0);
                blk_add_timer(req);
                break;
        case BLK_EH_NOT_HANDLED:
                break;
        default:
                printk(KERN_ERR "block: bad eh return: %d\n", ret);
                break;
        }
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
{
        struct blk_mq_timeout_data *data = priv;
        unsigned long gstate, deadline;
        int start;

        might_sleep();

        if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED)
                return;

        /* read coherent snapshots of @rq->state_gen and @rq->deadline */
        while (true) {
                start = read_seqcount_begin(&rq->gstate_seq);
                gstate = READ_ONCE(rq->gstate);
                deadline = blk_rq_deadline(rq);
                if (!read_seqcount_retry(&rq->gstate_seq, start))
                        break;
                cond_resched();
        }

        /* if in-flight && overdue, mark for abortion */
        if ((gstate & MQ_RQ_STATE_MASK) == MQ_RQ_IN_FLIGHT &&
            time_after_eq(jiffies, deadline)) {
                blk_mq_rq_update_aborted_gstate(rq, gstate);
                data->nr_expired++;
                hctx->nr_expired++;
        } else if (!data->next_set || time_after(data->next, deadline)) {
                data->next = deadline;
                data->next_set = 1;
        }
}

static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
{
        /*
         * We marked @rq->aborted_gstate and waited for RCU.  If there were
         * completions that we lost to, they would have finished and
         * updated @rq->gstate by now; otherwise, the completion path is
         * now guaranteed to see @rq->aborted_gstate and yield.  If
         * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
         */
        if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
            READ_ONCE(rq->gstate) == rq->aborted_gstate)
                blk_mq_rq_timed_out(rq, reserved);
}

static void blk_mq_timeout_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, timeout_work);
        struct blk_mq_timeout_data data = {
                .next           = 0,
                .next_set       = 0,
                .nr_expired     = 0,
        };
        struct blk_mq_hw_ctx *hctx;
        int i;

        /* A deadlock might occur if a request is stuck requiring a
         * timeout at the same time a queue freeze is waiting
         * completion, since the timeout code would not be able to
         * acquire the queue reference here.
         *
         * That's why we don't use blk_queue_enter here; instead, we use
         * percpu_ref_tryget directly, because we need to be able to
         * obtain a reference even in the short window between the queue
         * starting to freeze, by dropping the first reference in
         * blk_freeze_queue_start, and the moment the last request is
         * consumed, marked by the instant q_usage_counter reaches
         * zero.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        /* scan for the expired ones and set their ->aborted_gstate */
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

        if (data.nr_expired) {
                bool has_rcu = false;

                /*
                 * Wait till everyone sees ->aborted_gstate.  The
                 * sequential waits for SRCUs aren't ideal.  If this ever
                 * becomes a problem, we can add per-hw_ctx rcu_head and
                 * wait in parallel.
                 */
                queue_for_each_hw_ctx(q, hctx, i) {
                        if (!hctx->nr_expired)
                                continue;

                        if (!(hctx->flags & BLK_MQ_F_BLOCKING))
                                has_rcu = true;
                        else
                                synchronize_srcu(hctx->srcu);

                        hctx->nr_expired = 0;
                }
                if (has_rcu)
                        synchronize_rcu();

                /* terminate the ones we won */
                blk_mq_queue_tag_busy_iter(q, blk_mq_terminate_expired, NULL);
        }

        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
                /*
                 * Request timeouts are handled as a forward rolling timer. If
                 * we end up here it means that no requests are pending and
                 * also that no request has been pending for a while. Mark
                 * each hctx as idle.
                 */
                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
                                blk_mq_tag_idle(hctx);
                }
        }
        blk_queue_exit(q);
}

struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_busy_ctx_data *flush_data = data;
        struct blk_mq_hw_ctx *hctx = flush_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

        sbitmap_clear_bit(sb, bitnr);
        spin_lock(&ctx->lock);
        list_splice_tail_init(&ctx->rq_list, flush_data->list);
        spin_unlock(&ctx->lock);
        return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct flush_busy_ctx_data data = {
                .hctx = hctx,
                .list = list,
        };

        sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
                void *data)
{
        struct dispatch_rq_data *dispatch_data = data;
        struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

        spin_lock(&ctx->lock);
        if (unlikely(!list_empty(&ctx->rq_list))) {
                dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
                list_del_init(&dispatch_data->rq->queuelist);
                if (list_empty(&ctx->rq_list))
                        sbitmap_clear_bit(sb, bitnr);
        }
        spin_unlock(&ctx->lock);

        return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start)
{
        unsigned off = start ? start->index_hw : 0;
        struct dispatch_rq_data data = {
                .hctx = hctx,
                .rq   = NULL,
        };

        __sbitmap_for_each_set(&hctx->ctx_map, off,
                               dispatch_rq_from_ctx, &data);

        return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
        if (!queued)
                return 0;

        return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

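/*
 * Late driver-tag allocation for a request that so far only holds a
 * scheduler tag.  Returns true if @rq now owns a driver tag; on success
 * the tag map slot is pointed at @rq so blk_mq_tag_to_rq() works.
 */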
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
                           bool wait)
{
        struct blk_mq_alloc_data data = {
                .q = rq->q,
                .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
                .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
        };

        might_sleep_if(wait);

        if (rq->tag != -1)
                goto done;

        if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
                data.flags |= BLK_MQ_REQ_RESERVED;

        rq->tag = blk_mq_get_tag(&data);
        if (rq->tag >= 0) {
                if (blk_mq_tag_busy(data.hctx)) {
                        rq->rq_flags |= RQF_MQ_INFLIGHT;
                        atomic_inc(&data.hctx->nr_active);
                }
                data.hctx->tags->rqs[rq->tag] = rq;
        }

done:
        if (hctx)
                *hctx = data.hctx;
        return rq->tag != -1;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
                                int flags, void *key)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

        list_del_init(&wait->entry);
        blk_mq_run_hw_queue(hctx, true);
        return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
                                 struct request *rq)
{
        struct blk_mq_hw_ctx *this_hctx = *hctx;
        struct sbq_wait_state *ws;
        wait_queue_entry_t *wait;
        bool ret;

        if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
                if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
                        set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);

                /*
                 * It's possible that a tag was freed in the window between the
                 * allocation failure and adding the hardware queue to the wait
                 * queue.
                 *
                 * Don't clear RESTART here, someone else could have set it.
                 * At most this will cost an extra queue run.
                 */
                return blk_mq_get_driver_tag(rq, hctx, false);
        }

        wait = &this_hctx->dispatch_wait;
        if (!list_empty_careful(&wait->entry))
                return false;

        spin_lock(&this_hctx->lock);
        if (!list_empty(&wait->entry)) {
                spin_unlock(&this_hctx->lock);
                return false;
        }

        ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
        add_wait_queue(&ws->wait, wait);

        /*
         * It's possible that a tag was freed in the window between the
         * allocation failure and adding the hardware queue to the wait
         * queue.
         */
        ret = blk_mq_get_driver_tag(rq, hctx, false);
        if (!ret) {
                spin_unlock(&this_hctx->lock);
                return false;
        }

        /*
         * We got a tag, remove ourselves from the wait queue to ensure
         * someone else gets the wakeup.
         */
        spin_lock_irq(&ws->wait.lock);
        list_del_init(&wait->entry);
        spin_unlock_irq(&ws->wait.lock);
        spin_unlock(&this_hctx->lock);

        return true;
}

#define BLK_MQ_RESOURCE_DELAY   3               /* ms units */

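/*
 * Dispatch the requests on @list to the driver, one blk_mq_queue_data at
 * a time.  Returns true if anything was actually handled (queued or
 * errored); on BLK_STS_RESOURCE / BLK_STS_DEV_RESOURCE the remainder is
 * parked on hctx->dispatch for a later queue run.
 */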
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                             bool got_budget)
{
        struct blk_mq_hw_ctx *hctx;
        struct request *rq, *nxt;
        bool no_tag = false;
        int errors, queued;
        blk_status_t ret = BLK_STS_OK;

        if (list_empty(list))
                return false;

        WARN_ON(!list_is_singular(list) && got_budget);

        /*
         * Now process all the entries, sending them to the driver.
         */
        errors = queued = 0;
        do {
                struct blk_mq_queue_data bd;

                rq = list_first_entry(list, struct request, queuelist);
                if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
                        /*
                         * The initial allocation attempt failed, so we need to
                         * rerun the hardware queue when a tag is freed. The
                         * waitqueue takes care of that. If the queue is run
                         * before we add this entry back on the dispatch list,
                         * we'll re-run it below.
                         */
                        if (!blk_mq_mark_tag_wait(&hctx, rq)) {
                                if (got_budget)
                                        blk_mq_put_dispatch_budget(hctx);
                                /*
                                 * For non-shared tags, the RESTART check
                                 * will suffice.
                                 */
                                if (hctx->flags & BLK_MQ_F_TAG_SHARED)
                                        no_tag = true;
                                break;
                        }
                }

                if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
                        blk_mq_put_driver_tag(rq);
                        break;
                }

                list_del_init(&rq->queuelist);

                bd.rq = rq;

                /*
                 * Flag last if we have no more requests, or if we have more
                 * but can't assign a driver tag to it.
                 */
                if (list_empty(list))
                        bd.last = true;
                else {
                        nxt = list_first_entry(list, struct request, queuelist);
                        bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
                }

                ret = q->mq_ops->queue_rq(hctx, &bd);
                if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
                        /*
                         * If an I/O scheduler has been configured and we got a
                         * driver tag for the next request already, free it
                         * again.
                         */
                        if (!list_empty(list)) {
                                nxt = list_first_entry(list, struct request, queuelist);
                                blk_mq_put_driver_tag(nxt);
                        }
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
                }

                if (unlikely(ret != BLK_STS_OK)) {
                        errors++;
                        blk_mq_end_request(rq, BLK_STS_IOERR);
                        continue;
                }

                queued++;
        } while (!list_empty(list));

        hctx->dispatched[queued_to_index(queued)]++;

        /*
         * Any items that need requeuing? Stuff them into hctx->dispatch,
         * that is where we will continue on next queue run.
         */
        if (!list_empty(list)) {
                bool needs_restart;

                spin_lock(&hctx->lock);
                list_splice_init(list, &hctx->dispatch);
                spin_unlock(&hctx->lock);

                /*
                 * If SCHED_RESTART was set by the caller of this function and
                 * it is no longer set that means that it was cleared by another
                 * thread and hence that a queue rerun is needed.
                 *
                 * If 'no_tag' is set, that means that we failed getting
                 * a driver tag with an I/O scheduler attached. If our dispatch
                 * waitqueue is no longer active, ensure that we run the queue
                 * AFTER adding our entries back to the list.
                 *
                 * If no I/O scheduler has been configured it is possible that
                 * the hardware queue got stopped and restarted before requests
                 * were pushed back onto the dispatch list. Rerun the queue to
                 * avoid starvation. Notes:
                 * - blk_mq_run_hw_queue() checks whether or not a queue has
                 *   been stopped before rerunning a queue.
                 * - Some but not all block drivers stop a queue before
                 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
                 *   and dm-rq.
                 *
                 * If the driver returns BLK_STS_RESOURCE and the SCHED_RESTART
                 * bit is set, run the queue after a delay to avoid IO stalls
                 * that could otherwise occur if the queue is idle.
                 */
                needs_restart = blk_mq_sched_needs_restart(hctx);
                if (!needs_restart ||
                    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
                        blk_mq_run_hw_queue(hctx, true);
                else if (needs_restart && (ret == BLK_STS_RESOURCE))
                        blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
        }

        return (queued + errors) != 0;
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        int srcu_idx;

        /*
         * We should be running this queue from one of the CPUs that
         * are mapped to it.
         *
         * There are at least two related races now between setting
         * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
         * __blk_mq_run_hw_queue():
         *
         * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
         *   but later it becomes online, then this warning is harmless
         *
         * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
         *   but later it becomes offline, then the warning can't be
         *   triggered, and we depend on the blk-mq timeout handler to
         *   handle dispatched requests to this hctx
         */
        if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
                cpu_online(hctx->next_cpu)) {
                printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
                        raw_smp_processor_id(),
                        cpumask_empty(hctx->cpumask) ? "inactive" : "active");
                dump_stack();
        }

        /*
         * We can't run the queue inline with ints disabled. Ensure that
         * we catch bad users of this early.
         */
        WARN_ON_ONCE(in_interrupt());

        might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

        hctx_lock(hctx, &srcu_idx);
        blk_mq_sched_dispatch_requests(hctx);
        hctx_unlock(hctx, srcu_idx);
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
        bool tried = false;

        if (hctx->queue->nr_hw_queues == 1)
                return WORK_CPU_UNBOUND;

        if (--hctx->next_cpu_batch <= 0) {
                int next_cpu;
select_cpu:
                next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
                                cpu_online_mask);
                if (next_cpu >= nr_cpu_ids)
                        next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

                /*
                 * No online CPU was found, so make sure hctx->next_cpu is
                 * set correctly so as not to break the workqueue.
                 */
                if (next_cpu >= nr_cpu_ids)
                        hctx->next_cpu = cpumask_first(hctx->cpumask);
                else
                        hctx->next_cpu = next_cpu;
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }

        /*
         * Do unbound schedule if we can't find an online CPU for this hctx,
         * which should only happen in the path of handling CPU DEAD.
         */
        if (!cpu_online(hctx->next_cpu)) {
                if (!tried) {
                        tried = true;
                        goto select_cpu;
                }

                /*
                 * Make sure to re-select CPU next time once after CPUs
                 * in hctx->cpumask become online again.
                 */
                hctx->next_cpu_batch = 1;
                return WORK_CPU_UNBOUND;
        }
        return hctx->next_cpu;
}

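/*
 * Run the hardware queue, either inline on the current CPU (when it is
 * mapped to @hctx, the queue is non-blocking, and @async is false) or
 * asynchronously via kblockd after @msecs milliseconds.
 */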
1399 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1400                                         unsigned long msecs)
1401 {
1402         if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1403                 return;
1404
1405         if (unlikely(blk_mq_hctx_stopped(hctx)))
1406                 return;
1407
1408         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1409                 int cpu = get_cpu();
1410                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1411                         __blk_mq_run_hw_queue(hctx);
1412                         put_cpu();
1413                         return;
1414                 }
1415
1416                 put_cpu();
1417         }
1418
1419         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1420                                     msecs_to_jiffies(msecs));
1421 }
1422
1423 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1424 {
1425         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1426 }
1427 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1428
1429 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1430 {
1431         int srcu_idx;
1432         bool need_run;
1433
1434         /*
1435          * When queue is quiesced, we may be switching io scheduler, or
1436          * updating nr_hw_queues, or other things, and we can't run queue
1437          * any more, even __blk_mq_hctx_has_pending() can't be called safely.
1438          *
1439          * And queue will be rerun in blk_mq_unquiesce_queue() if it is
1440          * quiesced.
1441          */
1442         hctx_lock(hctx, &srcu_idx);
1443         need_run = !blk_queue_quiesced(hctx->queue) &&
1444                 blk_mq_hctx_has_pending(hctx);
1445         hctx_unlock(hctx, srcu_idx);
1446
1447         if (need_run) {
1448                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1449                 return true;
1450         }
1451
1452         return false;
1453 }
1454 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1455
1456 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1457 {
1458         struct blk_mq_hw_ctx *hctx;
1459         int i;
1460
1461         queue_for_each_hw_ctx(q, hctx, i) {
1462                 if (blk_mq_hctx_stopped(hctx))
1463                         continue;
1464
1465                 blk_mq_run_hw_queue(hctx, async);
1466         }
1467 }
1468 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1469
1470 /**
1471  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1472  * @q: request queue.
1473  *
1474  * The caller is responsible for serializing this function against
1475  * blk_mq_{start,stop}_hw_queue().
1476  */
1477 bool blk_mq_queue_stopped(struct request_queue *q)
1478 {
1479         struct blk_mq_hw_ctx *hctx;
1480         int i;
1481
1482         queue_for_each_hw_ctx(q, hctx, i)
1483                 if (blk_mq_hctx_stopped(hctx))
1484                         return true;
1485
1486         return false;
1487 }
1488 EXPORT_SYMBOL(blk_mq_queue_stopped);
1489
1490 /*
1491  * This function is often used for pausing .queue_rq() by driver when
1492  * there isn't enough resource or some conditions aren't satisfied, and
1493  * BLK_STS_RESOURCE is usually returned.
1494  *
1495  * We do not guarantee that dispatch can be drained or blocked
1496  * after blk_mq_stop_hw_queue() returns. Please use
1497  * blk_mq_quiesce_queue() for that requirement.
1498  */
1499 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1500 {
1501         cancel_delayed_work(&hctx->run_work);
1502
1503         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1504 }
1505 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1506
1507 /*
1508  * This function is often used for pausing .queue_rq() by driver when
1509  * there isn't enough resource or some conditions aren't satisfied, and
1510  * BLK_STS_RESOURCE is usually returned.
1511  *
1512  * We do not guarantee that dispatch can be drained or blocked
1513  * after blk_mq_stop_hw_queues() returns. Please use
1514  * blk_mq_quiesce_queue() for that requirement.
1515  */
1516 void blk_mq_stop_hw_queues(struct request_queue *q)
1517 {
1518         struct blk_mq_hw_ctx *hctx;
1519         int i;
1520
1521         queue_for_each_hw_ctx(q, hctx, i)
1522                 blk_mq_stop_hw_queue(hctx);
1523 }
1524 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1525
1526 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1527 {
1528         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1529
1530         blk_mq_run_hw_queue(hctx, false);
1531 }
1532 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1533
1534 void blk_mq_start_hw_queues(struct request_queue *q)
1535 {
1536         struct blk_mq_hw_ctx *hctx;
1537         int i;
1538
1539         queue_for_each_hw_ctx(q, hctx, i)
1540                 blk_mq_start_hw_queue(hctx);
1541 }
1542 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1543
1544 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1545 {
1546         if (!blk_mq_hctx_stopped(hctx))
1547                 return;
1548
1549         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1550         blk_mq_run_hw_queue(hctx, async);
1551 }
1552 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1553
1554 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1555 {
1556         struct blk_mq_hw_ctx *hctx;
1557         int i;
1558
1559         queue_for_each_hw_ctx(q, hctx, i)
1560                 blk_mq_start_stopped_hw_queue(hctx, async);
1561 }
1562 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1563
1564 static void blk_mq_run_work_fn(struct work_struct *work)
1565 {
1566         struct blk_mq_hw_ctx *hctx;
1567
1568         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1569
1570         /*
1571          * If we are stopped, don't run the queue. The exception is if
1572          * BLK_MQ_S_START_ON_RUN is set: in that case, we auto-clear
1573          * the STOPPED bit and run the queue.
1574          */
1575         if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1576                 if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1577                         return;
1578
1579                 clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1580                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1581         }
1582
1583         __blk_mq_run_hw_queue(hctx);
1584 }
1585
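/*
 * Unlike blk_mq_delay_run_hw_queue(), this also stops the queue while
 * the delay is pending: BLK_MQ_S_START_ON_RUN makes the delayed work
 * clear BLK_MQ_S_STOPPED again when it eventually runs (see
 * blk_mq_run_work_fn() above).
 */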
1587 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1588 {
1589         if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1590                 return;
1591
1592         /*
1593          * Stop the hw queue, then modify currently delayed work.
1594          * This should prevent us from running the queue prematurely.
1595          * Mark the queue as auto-clearing STOPPED when it runs.
1596          */
1597         blk_mq_stop_hw_queue(hctx);
1598         set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1599         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1600                                         &hctx->run_work,
1601                                         msecs_to_jiffies(msecs));
1602 }
1603 EXPORT_SYMBOL(blk_mq_delay_queue);
1604
1605 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1606                                             struct request *rq,
1607                                             bool at_head)
1608 {
1609         struct blk_mq_ctx *ctx = rq->mq_ctx;
1610
1611         lockdep_assert_held(&ctx->lock);
1612
1613         trace_block_rq_insert(hctx->queue, rq);
1614
1615         if (at_head)
1616                 list_add(&rq->queuelist, &ctx->rq_list);
1617         else
1618                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1619 }
1620
1621 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1622                              bool at_head)
1623 {
1624         struct blk_mq_ctx *ctx = rq->mq_ctx;
1625
1626         lockdep_assert_held(&ctx->lock);
1627
1628         __blk_mq_insert_req_list(hctx, rq, at_head);
1629         blk_mq_hctx_mark_pending(hctx, ctx);
1630 }
1631
1632 /*
1633  * Should only be used carefully, when the caller knows we want to
1634  * bypass a potential IO scheduler on the target device.
1635  */
1636 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1637 {
1638         struct blk_mq_ctx *ctx = rq->mq_ctx;
1639         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1640
1641         spin_lock(&hctx->lock);
1642         list_add_tail(&rq->queuelist, &hctx->dispatch);
1643         spin_unlock(&hctx->lock);
1644
1645         if (run_queue)
1646                 blk_mq_run_hw_queue(hctx, false);
1647 }
1648
1649 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1650                             struct list_head *list)
1652 {
1653         /*
1654          * Preemption doesn't flush the plug list, so it's possible that
1655          * ctx->cpu is offline by now.
1656          */
1657         spin_lock(&ctx->lock);
1658         while (!list_empty(list)) {
1659                 struct request *rq;
1660
1661                 rq = list_first_entry(list, struct request, queuelist);
1662                 BUG_ON(rq->mq_ctx != ctx);
1663                 list_del_init(&rq->queuelist);
1664                 __blk_mq_insert_req_list(hctx, rq, false);
1665         }
1666         blk_mq_hctx_mark_pending(hctx, ctx);
1667         spin_unlock(&ctx->lock);
1668 }
1669
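/*
 * Comparator for list_sort(): order plugged requests by software queue
 * (pointer order) first, then by start sector within the same queue, so
 * that blk_mq_flush_plug_list() below can peel off runs of requests
 * sharing an mq_ctx and insert each run in a single batch.
 */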
1670 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1671 {
1672         struct request *rqa = container_of(a, struct request, queuelist);
1673         struct request *rqb = container_of(b, struct request, queuelist);
1674
1675         return !(rqa->mq_ctx < rqb->mq_ctx ||
1676                  (rqa->mq_ctx == rqb->mq_ctx &&
1677                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1678 }
1679
1680 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1681 {
1682         struct blk_mq_ctx *this_ctx;
1683         struct request_queue *this_q;
1684         struct request *rq;
1685         LIST_HEAD(list);
1686         LIST_HEAD(ctx_list);
1687         unsigned int depth;
1688
1689         list_splice_init(&plug->mq_list, &list);
1690
1691         list_sort(NULL, &list, plug_ctx_cmp);
1692
1693         this_q = NULL;
1694         this_ctx = NULL;
1695         depth = 0;
1696
1697         while (!list_empty(&list)) {
1698                 rq = list_entry_rq(list.next);
1699                 list_del_init(&rq->queuelist);
1700                 BUG_ON(!rq->q);
1701                 if (rq->mq_ctx != this_ctx) {
1702                         if (this_ctx) {
1703                                 trace_block_unplug(this_q, depth, from_schedule);
1704                                 blk_mq_sched_insert_requests(this_q, this_ctx,
1705                                                                 &ctx_list,
1706                                                                 from_schedule);
1707                         }
1708
1709                         this_ctx = rq->mq_ctx;
1710                         this_q = rq->q;
1711                         depth = 0;
1712                 }
1713
1714                 depth++;
1715                 list_add_tail(&rq->queuelist, &ctx_list);
1716         }
1717
1718         /*
1719          * If 'this_ctx' is set, we know we have entries to complete
1720          * on 'ctx_list'. Do those.
1721          */
1722         if (this_ctx) {
1723                 trace_block_unplug(this_q, depth, from_schedule);
1724                 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1725                                                 from_schedule);
1726         }
1727 }
1728
1729 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1730 {
1731         blk_init_request_from_bio(rq, bio);
1732
1733         blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
1734
1735         blk_account_io_start(rq, true);
1736 }
1737
1738 static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1739                                    struct blk_mq_ctx *ctx,
1740                                    struct request *rq)
1741 {
1742         spin_lock(&ctx->lock);
1743         __blk_mq_insert_request(hctx, rq, false);
1744         spin_unlock(&ctx->lock);
1745 }
1746
1747 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1748 {
1749         if (rq->tag != -1)
1750                 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1751
1752         return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1753 }
1754
1755 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1756                                             struct request *rq,
1757                                             blk_qc_t *cookie)
1758 {
1759         struct request_queue *q = rq->q;
1760         struct blk_mq_queue_data bd = {
1761                 .rq = rq,
1762                 .last = true,
1763         };
1764         blk_qc_t new_cookie;
1765         blk_status_t ret;
1766
1767         new_cookie = request_to_qc_t(hctx, rq);
1768
1769         /*
1770          * On BLK_STS_OK we are done, and on a hard error the caller may
1771          * kill the request. On a busy (resource) return, requeue the
1772          * request so it is dispatched again later.
1773          */
1774         ret = q->mq_ops->queue_rq(hctx, &bd);
1775         switch (ret) {
1776         case BLK_STS_OK:
1777                 *cookie = new_cookie;
1778                 break;
1779         case BLK_STS_RESOURCE:
1780         case BLK_STS_DEV_RESOURCE:
1781                 __blk_mq_requeue_request(rq);
1782                 break;
1783         default:
1784                 *cookie = BLK_QC_T_NONE;
1785                 break;
1786         }
1787
1788         return ret;
1789 }
1790
1791 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1792                                                 struct request *rq,
1793                                                 blk_qc_t *cookie,
1794                                                 bool bypass_insert)
1795 {
1796         struct request_queue *q = rq->q;
1797         bool run_queue = true;
1798
1799         /*
1800          * An RCU or SRCU read lock is needed before checking the quiesced flag.
1801          *
1802          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
1803          * blk_mq_request_issue_directly() and return BLK_STS_OK to the
1804          * caller, so the driver won't try to dispatch the request again.
1805          */
1806         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1807                 run_queue = false;
1808                 bypass_insert = false;
1809                 goto insert;
1810         }
1811
1812         if (q->elevator && !bypass_insert)
1813                 goto insert;
1814
1815         if (!blk_mq_get_driver_tag(rq, NULL, false))
1816                 goto insert;
1817
1818         if (!blk_mq_get_dispatch_budget(hctx)) {
1819                 blk_mq_put_driver_tag(rq);
1820                 goto insert;
1821         }
1822
1823         return __blk_mq_issue_directly(hctx, rq, cookie);
1824 insert:
1825         if (bypass_insert)
1826                 return BLK_STS_RESOURCE;
1827
1828         blk_mq_sched_insert_request(rq, false, run_queue, false);
1829         return BLK_STS_OK;
1830 }
1831
1832 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1833                 struct request *rq, blk_qc_t *cookie)
1834 {
1835         blk_status_t ret;
1836         int srcu_idx;
1837
1838         might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1839
1840         hctx_lock(hctx, &srcu_idx);
1841
1842         ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
1843         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1844                 blk_mq_sched_insert_request(rq, false, true, false);
1845         else if (ret != BLK_STS_OK)
1846                 blk_mq_end_request(rq, ret);
1847
1848         hctx_unlock(hctx, srcu_idx);
1849 }
1850
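/*
 * Issue @rq directly to the driver, bypassing the I/O scheduler.  With
 * 'bypass_insert' set, a busy return does *not* insert the request
 * anywhere; the caller (e.g. a request stacking driver) must handle
 * BLK_STS_RESOURCE itself.
 */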
1851 blk_status_t blk_mq_request_issue_directly(struct request *rq)
1852 {
1853         blk_status_t ret;
1854         int srcu_idx;
1855         blk_qc_t unused_cookie;
1856         struct blk_mq_ctx *ctx = rq->mq_ctx;
1857         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1858
1859         hctx_lock(hctx, &srcu_idx);
1860         ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
1861         hctx_unlock(hctx, srcu_idx);
1862
1863         return ret;
1864 }
1865
1866 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1867 {
1868         const int is_sync = op_is_sync(bio->bi_opf);
1869         const int is_flush_fua = op_is_flush(bio->bi_opf);
1870         struct blk_mq_alloc_data data = { .flags = 0 };
1871         struct request *rq;
1872         unsigned int request_count = 0;
1873         struct blk_plug *plug;
1874         struct request *same_queue_rq = NULL;
1875         blk_qc_t cookie;
1876         unsigned int wb_acct;
1877
1878         blk_queue_bounce(q, &bio);
1879
1880         blk_queue_split(q, &bio);
1881
1882         if (!bio_integrity_prep(bio))
1883                 return BLK_QC_T_NONE;
1884
1885         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1886             blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1887                 return BLK_QC_T_NONE;
1888
1889         if (blk_mq_sched_bio_merge(q, bio))
1890                 return BLK_QC_T_NONE;
1891
1892         wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1893
1894         trace_block_getrq(q, bio, bio->bi_opf);
1895
1896         rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
1897         if (unlikely(!rq)) {
1898                 __wbt_done(q->rq_wb, wb_acct);
1899                 if (bio->bi_opf & REQ_NOWAIT)
1900                         bio_wouldblock_error(bio);
1901                 return BLK_QC_T_NONE;
1902         }
1903
1904         wbt_track(&rq->issue_stat, wb_acct);
1905
1906         cookie = request_to_qc_t(data.hctx, rq);
1907
1908         plug = current->plug;
1909         if (unlikely(is_flush_fua)) {
1910                 blk_mq_put_ctx(data.ctx);
1911                 blk_mq_bio_to_request(rq, bio);
1912
1913                 /* bypass scheduler for flush rq */
1914                 blk_insert_flush(rq);
1915                 blk_mq_run_hw_queue(data.hctx, true);
1916         } else if (plug && q->nr_hw_queues == 1) {
1917                 struct request *last = NULL;
1918
1919                 blk_mq_put_ctx(data.ctx);
1920                 blk_mq_bio_to_request(rq, bio);
1921
1922                 /*
1923                  * @request_count may have become stale if we were scheduled
1924                  * out, so check the list again.
1925                  */
1926                 if (list_empty(&plug->mq_list))
1927                         request_count = 0;
1928                 else if (blk_queue_nomerges(q))
1929                         request_count = blk_plug_queued_count(q);
1930
1931                 if (!request_count)
1932                         trace_block_plug(q);
1933                 else
1934                         last = list_entry_rq(plug->mq_list.prev);
1935
1936                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1937                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1938                         blk_flush_plug_list(plug, false);
1939                         trace_block_plug(q);
1940                 }
1941
1942                 list_add_tail(&rq->queuelist, &plug->mq_list);
1943         } else if (plug && !blk_queue_nomerges(q)) {
1944                 blk_mq_bio_to_request(rq, bio);
1945
1946                 /*
1947                  * We do limited plugging. If the bio can be merged, do that.
1948                  * Otherwise the existing request in the plug list will be
1949                  * issued, so the plug list holds one request at most.
1950                  * The plug list might get flushed before this; if that
1951                  * happens, the plug list is empty and same_queue_rq is stale.
1952                  */
1953                 if (list_empty(&plug->mq_list))
1954                         same_queue_rq = NULL;
1955                 if (same_queue_rq)
1956                         list_del_init(&same_queue_rq->queuelist);
1957                 list_add_tail(&rq->queuelist, &plug->mq_list);
1958
1959                 blk_mq_put_ctx(data.ctx);
1960
1961                 if (same_queue_rq) {
1962                         data.hctx = blk_mq_map_queue(q,
1963                                         same_queue_rq->mq_ctx->cpu);
1964                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1965                                         &cookie);
1966                 }
1967         } else if (q->nr_hw_queues > 1 && is_sync) {
1968                 blk_mq_put_ctx(data.ctx);
1969                 blk_mq_bio_to_request(rq, bio);
1970                 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
1971         } else if (q->elevator) {
1972                 blk_mq_put_ctx(data.ctx);
1973                 blk_mq_bio_to_request(rq, bio);
1974                 blk_mq_sched_insert_request(rq, false, true, true);
1975         } else {
1976                 blk_mq_put_ctx(data.ctx);
1977                 blk_mq_bio_to_request(rq, bio);
1978                 blk_mq_queue_io(data.hctx, data.ctx, rq);
1979                 blk_mq_run_hw_queue(data.hctx, true);
1980         }
1981
1982         return cookie;
1983 }
1984
1985 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1986                      unsigned int hctx_idx)
1987 {
1988         struct page *page;
1989
1990         if (tags->rqs && set->ops->exit_request) {
1991                 int i;
1992
1993                 for (i = 0; i < tags->nr_tags; i++) {
1994                         struct request *rq = tags->static_rqs[i];
1995
1996                         if (!rq)
1997                                 continue;
1998                         set->ops->exit_request(set, rq, hctx_idx);
1999                         tags->static_rqs[i] = NULL;
2000                 }
2001         }
2002
2003         while (!list_empty(&tags->page_list)) {
2004                 page = list_first_entry(&tags->page_list, struct page, lru);
2005                 list_del_init(&page->lru);
2006                 /*
2007                  * Remove the kmemleak object previously allocated in
2008                  * blk_mq_alloc_rqs().
2009                  */
2010                 kmemleak_free(page_address(page));
2011                 __free_pages(page, page->private);
2012         }
2013 }
2014
2015 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
2016 {
2017         kfree(tags->rqs);
2018         tags->rqs = NULL;
2019         kfree(tags->static_rqs);
2020         tags->static_rqs = NULL;
2021
2022         blk_mq_free_tags(tags);
2023 }
2024
2025 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2026                                         unsigned int hctx_idx,
2027                                         unsigned int nr_tags,
2028                                         unsigned int reserved_tags)
2029 {
2030         struct blk_mq_tags *tags;
2031         int node;
2032
2033         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
2034         if (node == NUMA_NO_NODE)
2035                 node = set->numa_node;
2036
2037         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
2038                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
2039         if (!tags)
2040                 return NULL;
2041
2042         tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
2043                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2044                                  node);
2045         if (!tags->rqs) {
2046                 blk_mq_free_tags(tags);
2047                 return NULL;
2048         }
2049
2050         tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
2051                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2052                                  node);
2053         if (!tags->static_rqs) {
2054                 kfree(tags->rqs);
2055                 blk_mq_free_tags(tags);
2056                 return NULL;
2057         }
2058
2059         return tags;
2060 }
2061
2062 static size_t order_to_size(unsigned int order)
2063 {
2064         return (size_t)PAGE_SIZE << order;
2065 }
2066
2067 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2068                                unsigned int hctx_idx, int node)
2069 {
2070         int ret;
2071
2072         if (set->ops->init_request) {
2073                 ret = set->ops->init_request(set, rq, hctx_idx, node);
2074                 if (ret)
2075                         return ret;
2076         }
2077
2078         seqcount_init(&rq->gstate_seq);
2079         u64_stats_init(&rq->aborted_gstate_sync);
2080         return 0;
2081 }
2082
2083 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2084                      unsigned int hctx_idx, unsigned int depth)
2085 {
2086         unsigned int i, j, entries_per_page, max_order = 4;
2087         size_t rq_size, left;
2088         int node;
2089
2090         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
2091         if (node == NUMA_NO_NODE)
2092                 node = set->numa_node;
2093
2094         INIT_LIST_HEAD(&tags->page_list);
2095
2096         /*
2097          * rq_size is the size of the request plus driver payload, rounded
2098          * to the cacheline size
2099          */
2100         rq_size = round_up(sizeof(struct request) + set->cmd_size,
2101                                 cache_line_size());
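        /*
         * For example (illustrative numbers): with a 64-byte cache line
         * and set->cmd_size == 32, a 300-byte struct request would give
         * rq_size = round_up(300 + 32, 64) = 384 bytes per request.
         */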
2102         left = rq_size * depth;
2103
2104         for (i = 0; i < depth; ) {
2105                 int this_order = max_order;
2106                 struct page *page;
2107                 int to_do;
2108                 void *p;
2109
2110                 while (this_order && left < order_to_size(this_order - 1))
2111                         this_order--;
2112
2113                 do {
2114                         page = alloc_pages_node(node,
2115                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2116                                 this_order);
2117                         if (page)
2118                                 break;
2119                         if (!this_order--)
2120                                 break;
2121                         if (order_to_size(this_order) < rq_size)
2122                                 break;
2123                 } while (1);
2124
2125                 if (!page)
2126                         goto fail;
2127
2128                 page->private = this_order;
2129                 list_add_tail(&page->lru, &tags->page_list);
2130
2131                 p = page_address(page);
2132                 /*
2133                  * Allow kmemleak to scan these pages, as they may contain
2134                  * pointers to additional allocations made via ops->init_request().
2135                  */
2136                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2137                 entries_per_page = order_to_size(this_order) / rq_size;
2138                 to_do = min(entries_per_page, depth - i);
2139                 left -= to_do * rq_size;
2140                 for (j = 0; j < to_do; j++) {
2141                         struct request *rq = p;
2142
2143                         tags->static_rqs[i] = rq;
2144                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2145                                 tags->static_rqs[i] = NULL;
2146                                 goto fail;
2147                         }
2148
2149                         p += rq_size;
2150                         i++;
2151                 }
2152         }
2153         return 0;
2154
2155 fail:
2156         blk_mq_free_rqs(set, tags, hctx_idx);
2157         return -ENOMEM;
2158 }
2159
2160 /*
2161  * 'cpu' is going away. Splice any existing rq_list entries from this
2162  * software queue to the hw queue dispatch list, and ensure that it
2163  * gets run.
2164  */
2165 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2166 {
2167         struct blk_mq_hw_ctx *hctx;
2168         struct blk_mq_ctx *ctx;
2169         LIST_HEAD(tmp);
2170
2171         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2172         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2173
2174         spin_lock(&ctx->lock);
2175         if (!list_empty(&ctx->rq_list)) {
2176                 list_splice_init(&ctx->rq_list, &tmp);
2177                 blk_mq_hctx_clear_pending(hctx, ctx);
2178         }
2179         spin_unlock(&ctx->lock);
2180
2181         if (list_empty(&tmp))
2182                 return 0;
2183
2184         spin_lock(&hctx->lock);
2185         list_splice_tail_init(&tmp, &hctx->dispatch);
2186         spin_unlock(&hctx->lock);
2187
2188         blk_mq_run_hw_queue(hctx, true);
2189         return 0;
2190 }
2191
2192 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2193 {
2194         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2195                                             &hctx->cpuhp_dead);
2196 }
2197
2198 /* hctx->ctxs will be freed in queue's release handler */
2199 static void blk_mq_exit_hctx(struct request_queue *q,
2200                 struct blk_mq_tag_set *set,
2201                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2202 {
2203         blk_mq_debugfs_unregister_hctx(hctx);
2204
2205         if (blk_mq_hw_queue_mapped(hctx))
2206                 blk_mq_tag_idle(hctx);
2207
2208         if (set->ops->exit_request)
2209                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2210
2211         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2212
2213         if (set->ops->exit_hctx)
2214                 set->ops->exit_hctx(hctx, hctx_idx);
2215
2216         if (hctx->flags & BLK_MQ_F_BLOCKING)
2217                 cleanup_srcu_struct(hctx->srcu);
2218
2219         blk_mq_remove_cpuhp(hctx);
2220         blk_free_flush_queue(hctx->fq);
2221         sbitmap_free(&hctx->ctx_map);
2222 }
2223
2224 static void blk_mq_exit_hw_queues(struct request_queue *q,
2225                 struct blk_mq_tag_set *set, int nr_queue)
2226 {
2227         struct blk_mq_hw_ctx *hctx;
2228         unsigned int i;
2229
2230         queue_for_each_hw_ctx(q, hctx, i) {
2231                 if (i == nr_queue)
2232                         break;
2233                 blk_mq_exit_hctx(q, set, hctx, i);
2234         }
2235 }
2236
2237 static int blk_mq_init_hctx(struct request_queue *q,
2238                 struct blk_mq_tag_set *set,
2239                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2240 {
2241         int node;
2242
2243         node = hctx->numa_node;
2244         if (node == NUMA_NO_NODE)
2245                 node = hctx->numa_node = set->numa_node;
2246
2247         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2248         spin_lock_init(&hctx->lock);
2249         INIT_LIST_HEAD(&hctx->dispatch);
2250         hctx->queue = q;
2251         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2252
2253         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2254
2255         hctx->tags = set->tags[hctx_idx];
2256
2257         /*
2258          * Allocate space for all possible cpus to avoid allocation at
2259          * runtime
2260          */
2261         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2262                                         GFP_KERNEL, node);
2263         if (!hctx->ctxs)
2264                 goto unregister_cpu_notifier;
2265
2266         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2267                               node))
2268                 goto free_ctxs;
2269
2270         hctx->nr_ctx = 0;
2271
2272         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2273         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2274
2275         if (set->ops->init_hctx &&
2276             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2277                 goto free_bitmap;
2278
2279         if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2280                 goto exit_hctx;
2281
2282         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2283         if (!hctx->fq)
2284                 goto sched_exit_hctx;
2285
2286         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
2287                 goto free_fq;
2288
2289         if (hctx->flags & BLK_MQ_F_BLOCKING)
2290                 init_srcu_struct(hctx->srcu);
2291
2292         blk_mq_debugfs_register_hctx(q, hctx);
2293
2294         return 0;
2295
2296  free_fq:
2297         blk_free_flush_queue(hctx->fq);
2298  sched_exit_hctx:
2299         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2300  exit_hctx:
2301         if (set->ops->exit_hctx)
2302                 set->ops->exit_hctx(hctx, hctx_idx);
2303  free_bitmap:
2304         sbitmap_free(&hctx->ctx_map);
2305  free_ctxs:
2306         kfree(hctx->ctxs);
2307  unregister_cpu_notifier:
2308         blk_mq_remove_cpuhp(hctx);
2309         return -1;
2310 }
2311
2312 static void blk_mq_init_cpu_queues(struct request_queue *q,
2313                                    unsigned int nr_hw_queues)
2314 {
2315         unsigned int i;
2316
2317         for_each_possible_cpu(i) {
2318                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2319                 struct blk_mq_hw_ctx *hctx;
2320
2321                 __ctx->cpu = i;
2322                 spin_lock_init(&__ctx->lock);
2323                 INIT_LIST_HEAD(&__ctx->rq_list);
2324                 __ctx->queue = q;
2325
2326                 /*
2327                  * Set local node, IFF we have more than one hw queue. If
2328                  * not, we remain on the home node of the device
2329                  */
2330                 hctx = blk_mq_map_queue(q, i);
2331                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2332                         hctx->numa_node = local_memory_node(cpu_to_node(i));
2333         }
2334 }
2335
2336 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2337 {
2338         int ret = 0;
2339
2340         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2341                                         set->queue_depth, set->reserved_tags);
2342         if (!set->tags[hctx_idx])
2343                 return false;
2344
2345         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2346                                 set->queue_depth);
2347         if (!ret)
2348                 return true;
2349
2350         blk_mq_free_rq_map(set->tags[hctx_idx]);
2351         set->tags[hctx_idx] = NULL;
2352         return false;
2353 }
2354
2355 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2356                                          unsigned int hctx_idx)
2357 {
2358         if (set->tags[hctx_idx]) {
2359                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2360                 blk_mq_free_rq_map(set->tags[hctx_idx]);
2361                 set->tags[hctx_idx] = NULL;
2362         }
2363 }
2364
2365 static void blk_mq_map_swqueue(struct request_queue *q)
2366 {
2367         unsigned int i, hctx_idx;
2368         struct blk_mq_hw_ctx *hctx;
2369         struct blk_mq_ctx *ctx;
2370         struct blk_mq_tag_set *set = q->tag_set;
2371
2372         /*
2373          * Avoid others reading an incomplete hctx->cpumask through sysfs
2374          */
2375         mutex_lock(&q->sysfs_lock);
2376
2377         queue_for_each_hw_ctx(q, hctx, i) {
2378                 cpumask_clear(hctx->cpumask);
2379                 hctx->nr_ctx = 0;
2380         }
2381
2382         /*
2383          * Map software to hardware queues.
2384          *
2385          * If the cpu isn't present, it is mapped to the first hctx.
2386          */
2387         for_each_possible_cpu(i) {
2388                 hctx_idx = q->mq_map[i];
2389                 /* unmapped hw queue can be remapped after CPU topo changed */
2390                 if (!set->tags[hctx_idx] &&
2391                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2392                         /*
2393                          * If tag initialization fails for some hctx,
2394                          * that hctx won't be brought online.  In this
2395                          * case, remap the current ctx to hctx[0], which
2396                          * is guaranteed to always have tags allocated.
2397                          */
2398                         q->mq_map[i] = 0;
2399                 }
2400
2401                 ctx = per_cpu_ptr(q->queue_ctx, i);
2402                 hctx = blk_mq_map_queue(q, i);
2403
2404                 cpumask_set_cpu(i, hctx->cpumask);
2405                 ctx->index_hw = hctx->nr_ctx;
2406                 hctx->ctxs[hctx->nr_ctx++] = ctx;
2407         }
2408
2409         mutex_unlock(&q->sysfs_lock);
2410
2411         queue_for_each_hw_ctx(q, hctx, i) {
2412                 /*
2413                  * If no software queues are mapped to this hardware queue,
2414                  * disable it and free the request entries.
2415                  */
2416                 if (!hctx->nr_ctx) {
2417                         /* Never unmap queue 0.  We need it as a
2418                          * fallback in case a later remap fails to
2419                          * allocate tags.
2420                          */
2421                         if (i && set->tags[i])
2422                                 blk_mq_free_map_and_requests(set, i);
2423
2424                         hctx->tags = NULL;
2425                         continue;
2426                 }
2427
2428                 hctx->tags = set->tags[i];
2429                 WARN_ON(!hctx->tags);
2430
2431                 /*
2432                  * Set the map size to the number of mapped software queues.
2433                  * This is more accurate and more efficient than looping
2434                  * over all possibly mapped software queues.
2435                  */
2436                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2437
2438                 /*
2439                  * Initialize batch roundrobin counts
2440                  */
2441                 hctx->next_cpu = cpumask_first_and(hctx->cpumask,
2442                                 cpu_online_mask);
2443                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2444         }
2445 }
2446
2447 /*
2448  * Caller needs to ensure that we're either frozen/quiesced, or that
2449  * the queue isn't live yet.
2450  */
2451 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2452 {
2453         struct blk_mq_hw_ctx *hctx;
2454         int i;
2455
2456         queue_for_each_hw_ctx(q, hctx, i) {
2457                 if (shared) {
2458                         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2459                                 atomic_inc(&q->shared_hctx_restart);
2460                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2461                 } else {
2462                         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2463                                 atomic_dec(&q->shared_hctx_restart);
2464                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2465                 }
2466         }
2467 }
2468
2469 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2470                                         bool shared)
2471 {
2472         struct request_queue *q;
2473
2474         lockdep_assert_held(&set->tag_list_lock);
2475
2476         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2477                 blk_mq_freeze_queue(q);
2478                 queue_set_hctx_shared(q, shared);
2479                 blk_mq_unfreeze_queue(q);
2480         }
2481 }
2482
2483 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2484 {
2485         struct blk_mq_tag_set *set = q->tag_set;
2486
2487         mutex_lock(&set->tag_list_lock);
2488         list_del_rcu(&q->tag_set_list);
2489         INIT_LIST_HEAD(&q->tag_set_list);
2490         if (list_is_singular(&set->tag_list)) {
2491                 /* just transitioned to unshared */
2492                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2493                 /* update existing queue */
2494                 blk_mq_update_tag_set_depth(set, false);
2495         }
2496         mutex_unlock(&set->tag_list_lock);
2497
2498         synchronize_rcu();
2499 }
2500
2501 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2502                                      struct request_queue *q)
2503 {
2504         q->tag_set = set;
2505
2506         mutex_lock(&set->tag_list_lock);
2507
2508         /*
2509          * Check to see if we're transitioning to shared (from 1 to 2 queues).
2510          */
2511         if (!list_empty(&set->tag_list) &&
2512             !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2513                 set->flags |= BLK_MQ_F_TAG_SHARED;
2514                 /* update existing queue */
2515                 blk_mq_update_tag_set_depth(set, true);
2516         }
2517         if (set->flags & BLK_MQ_F_TAG_SHARED)
2518                 queue_set_hctx_shared(q, true);
2519         list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2520
2521         mutex_unlock(&set->tag_list_lock);
2522 }
2523
2524 /*
2525  * This is the actual release handler for mq, but we run it from the
2526  * request queue's release handler to avoid use-after-free headaches:
2527  * q->mq_kobj shouldn't have been introduced, but we can't group the
2528  * ctx/hctx kobjects without it.
2529  */
2530 void blk_mq_release(struct request_queue *q)
2531 {
2532         struct blk_mq_hw_ctx *hctx;
2533         unsigned int i;
2534
2535         /* hctx kobj stays in hctx */
2536         queue_for_each_hw_ctx(q, hctx, i) {
2537                 if (!hctx)
2538                         continue;
2539                 kobject_put(&hctx->kobj);
2540         }
2541
2542         q->mq_map = NULL;
2543
2544         kfree(q->queue_hw_ctx);
2545
2546         /*
2547          * Release .mq_kobj and the sw queues' kobjects now, because
2548          * both share their lifetime with the request queue.
2549          */
2550         blk_mq_sysfs_deinit(q);
2551
2552         free_percpu(q->queue_ctx);
2553 }
2554
2555 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2556 {
2557         struct request_queue *uninit_q, *q;
2558
2559         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2560         if (!uninit_q)
2561                 return ERR_PTR(-ENOMEM);
2562
2563         q = blk_mq_init_allocated_queue(set, uninit_q);
2564         if (IS_ERR(q))
2565                 blk_cleanup_queue(uninit_q);
2566
2567         return q;
2568 }
2569 EXPORT_SYMBOL(blk_mq_init_queue);
2570
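/*
 * struct blk_mq_hw_ctx is allocated with a variable size: for
 * BLK_MQ_F_BLOCKING tag sets, a struct srcu_struct is carried in the
 * flexible tail right after the fixed part of the structure.  The
 * BUILD_BUG_ON below verifies that 'srcu' really is that aligned tail.
 */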
2571 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2572 {
2573         int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2574
2575         BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2576                            __alignof__(struct blk_mq_hw_ctx)) !=
2577                      sizeof(struct blk_mq_hw_ctx));
2578
2579         if (tag_set->flags & BLK_MQ_F_BLOCKING)
2580                 hw_ctx_size += sizeof(struct srcu_struct);
2581
2582         return hw_ctx_size;
2583 }
2584
2585 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2586                                                 struct request_queue *q)
2587 {
2588         int i, j;
2589         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2590
2591         blk_mq_sysfs_unregister(q);
2592
2593         /* protect against switching io scheduler */
2594         mutex_lock(&q->sysfs_lock);
2595         for (i = 0; i < set->nr_hw_queues; i++) {
2596                 int node;
2597
2598                 if (hctxs[i])
2599                         continue;
2600
2601                 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2602                 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
2603                                         GFP_KERNEL, node);
2604                 if (!hctxs[i])
2605                         break;
2606
2607                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2608                                                 node)) {
2609                         kfree(hctxs[i]);
2610                         hctxs[i] = NULL;
2611                         break;
2612                 }
2613
2614                 atomic_set(&hctxs[i]->nr_active, 0);
2615                 hctxs[i]->numa_node = node;
2616                 hctxs[i]->queue_num = i;
2617
2618                 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2619                         free_cpumask_var(hctxs[i]->cpumask);
2620                         kfree(hctxs[i]);
2621                         hctxs[i] = NULL;
2622                         break;
2623                 }
2624                 blk_mq_hctx_kobj_init(hctxs[i]);
2625         }
2626         for (j = i; j < q->nr_hw_queues; j++) {
2627                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2628
2629                 if (hctx) {
2630                         if (hctx->tags)
2631                                 blk_mq_free_map_and_requests(set, j);
2632                         blk_mq_exit_hctx(q, set, hctx, j);
2633                         kobject_put(&hctx->kobj);
2634                         hctxs[j] = NULL;
2636                 }
2637         }
2638         q->nr_hw_queues = i;
2639         mutex_unlock(&q->sysfs_lock);
2640         blk_mq_sysfs_register(q);
2641 }
2642
2643 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2644                                                   struct request_queue *q)
2645 {
2646         /* mark the queue as mq asap */
2647         q->mq_ops = set->ops;
2648
2649         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2650                                              blk_mq_poll_stats_bkt,
2651                                              BLK_MQ_POLL_STATS_BKTS, q);
2652         if (!q->poll_cb)
2653                 goto err_exit;
2654
2655         q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2656         if (!q->queue_ctx)
2657                 goto err_exit;
2658
2659         /* init q->mq_kobj and sw queues' kobjects */
2660         blk_mq_sysfs_init(q);
2661
2662         q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2663                                                 GFP_KERNEL, set->numa_node);
2664         if (!q->queue_hw_ctx)
2665                 goto err_percpu;
2666
2667         q->mq_map = set->mq_map;
2668
2669         blk_mq_realloc_hw_ctxs(set, q);
2670         if (!q->nr_hw_queues)
2671                 goto err_hctxs;
2672
2673         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2674         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2675
2676         q->nr_queues = nr_cpu_ids;
2677
2678         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2679
2680         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2681                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2682
2683         q->sg_reserved_size = INT_MAX;
2684
2685         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2686         INIT_LIST_HEAD(&q->requeue_list);
2687         spin_lock_init(&q->requeue_lock);
2688
2689         blk_queue_make_request(q, blk_mq_make_request);
2690         if (q->mq_ops->poll)
2691                 q->poll_fn = blk_mq_poll;
2692
2693         /*
2694          * Do this after blk_queue_make_request() overrides it...
2695          */
2696         q->nr_requests = set->queue_depth;
2697
2698         /*
2699          * Default to classic polling
2700          */
2701         q->poll_nsec = -1;
2702
2703         if (set->ops->complete)
2704                 blk_queue_softirq_done(q, set->ops->complete);
2705
2706         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2707         blk_mq_add_queue_tag_set(set, q);
2708         blk_mq_map_swqueue(q);
2709
2710         if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2711                 int ret;
2712
2713                 ret = blk_mq_sched_init(q);
2714                 if (ret)
2715                         return ERR_PTR(ret);
2716         }
2717
2718         return q;
2719
2720 err_hctxs:
2721         kfree(q->queue_hw_ctx);
2722 err_percpu:
2723         free_percpu(q->queue_ctx);
2724 err_exit:
2725         q->mq_ops = NULL;
2726         return ERR_PTR(-ENOMEM);
2727 }
2728 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2729
2730 void blk_mq_free_queue(struct request_queue *q)
2731 {
2732         struct blk_mq_tag_set   *set = q->tag_set;
2733
2734         blk_mq_del_queue_tag_set(q);
2735         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2736 }
2737
2738 /* Basically redo blk_mq_init_queue with queue frozen */
2739 static void blk_mq_queue_reinit(struct request_queue *q)
2740 {
2741         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2742
2743         blk_mq_debugfs_unregister_hctxs(q);
2744         blk_mq_sysfs_unregister(q);
2745
2746         /*
2747          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2748          * we should change hctx numa_node according to the new topology (this
2749          * involves freeing and re-allocating memory, worth doing?)
2750          */
2751         blk_mq_map_swqueue(q);
2752
2753         blk_mq_sysfs_register(q);
2754         blk_mq_debugfs_register_hctxs(q);
2755 }
2756
2757 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2758 {
2759         int i;
2760
2761         for (i = 0; i < set->nr_hw_queues; i++)
2762                 if (!__blk_mq_alloc_rq_map(set, i))
2763                         goto out_unwind;
2764
2765         return 0;
2766
2767 out_unwind:
2768         while (--i >= 0)
2769                 blk_mq_free_rq_map(set->tags[i]);
2770
2771         return -ENOMEM;
2772 }
2773
2774 /*
2775  * Allocate the request maps associated with this tag_set. Note that this
2776  * may reduce the depth asked for, if memory is tight. set->queue_depth
2777  * will be updated to reflect the allocated depth.
2778  */
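/*
 * For example (illustrative numbers only): a request for a depth of
 * 1024 that fails may be retried at 512, then 256, and so on; the loop
 * gives up once the depth would drop below set->reserved_tags +
 * BLK_MQ_TAG_MIN.
 */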
2779 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2780 {
2781         unsigned int depth;
2782         int err;
2783
2784         depth = set->queue_depth;
2785         do {
2786                 err = __blk_mq_alloc_rq_maps(set);
2787                 if (!err)
2788                         break;
2789
2790                 set->queue_depth >>= 1;
2791                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2792                         err = -ENOMEM;
2793                         break;
2794                 }
2795         } while (set->queue_depth);
2796
2797         if (!set->queue_depth || err) {
2798                 pr_err("blk-mq: failed to allocate request map\n");
2799                 return -ENOMEM;
2800         }
2801
2802         if (depth != set->queue_depth)
2803                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2804                                                 depth, set->queue_depth);
2805
2806         return 0;
2807 }
2808
2809 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2810 {
2811         if (set->ops->map_queues) {
2812                 int cpu;
2813                 /*
2814                  * transport .map_queues is usually done in the following
2815                  * way:
2816                  *
2817                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
2818                  *      mask = get_cpu_mask(queue)
2819                  *      for_each_cpu(cpu, mask)
2820                  *              set->mq_map[cpu] = queue;
2821                  * }
2822                  *
2823                  * When we need to remap, the table has to be cleared first
2824                  * to kill stale mappings, since a CPU might not be mapped
2825                  * to any hw queue.
2826                  */
2827                 for_each_possible_cpu(cpu)
2828                         set->mq_map[cpu] = 0;
2829
2830                 return set->ops->map_queues(set);
2831         } else
2832                 return blk_mq_map_queues(set);
2833 }
2834
2835 /*
2836  * Alloc a tag set to be associated with one or more request queues.
2837  * May fail with EINVAL for various error conditions. May adjust the
2838  * requested depth down, if it is too large. In that case, the actual
2839  * depth used will be stored in set->queue_depth.
2840  */
2841 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2842 {
2843         int ret;
2844
2845         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2846
2847         if (!set->nr_hw_queues)
2848                 return -EINVAL;
2849         if (!set->queue_depth)
2850                 return -EINVAL;
2851         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2852                 return -EINVAL;
2853
2854         if (!set->ops->queue_rq)
2855                 return -EINVAL;
2856
2857         if (!set->ops->get_budget ^ !set->ops->put_budget)
2858                 return -EINVAL;
2859
2860         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2861                 pr_info("blk-mq: reduced tag depth to %u\n",
2862                         BLK_MQ_MAX_DEPTH);
2863                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2864         }
2865
2866         /*
2867          * If a crashdump is active, then we are potentially in a very
2868          * memory constrained environment. Limit us to 1 queue and
2869          * 64 tags to prevent using too much memory.
2870          */
2871         if (is_kdump_kernel()) {
2872                 set->nr_hw_queues = 1;
2873                 set->queue_depth = min(64U, set->queue_depth);
2874         }
2875         /*
2876          * There is no use for more h/w queues than cpus.
2877          */
2878         if (set->nr_hw_queues > nr_cpu_ids)
2879                 set->nr_hw_queues = nr_cpu_ids;
2880
2881         set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2882                                  GFP_KERNEL, set->numa_node);
2883         if (!set->tags)
2884                 return -ENOMEM;
2885
2886         ret = -ENOMEM;
2887         set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2888                         GFP_KERNEL, set->numa_node);
2889         if (!set->mq_map)
2890                 goto out_free_tags;
2891
2892         ret = blk_mq_update_queue_map(set);
2893         if (ret)
2894                 goto out_free_mq_map;
2895
2896         ret = blk_mq_alloc_rq_maps(set);
2897         if (ret)
2898                 goto out_free_mq_map;
2899
2900         mutex_init(&set->tag_list_lock);
2901         INIT_LIST_HEAD(&set->tag_list);
2902
2903         return 0;
2904
2905 out_free_mq_map:
2906         kfree(set->mq_map);
2907         set->mq_map = NULL;
2908 out_free_tags:
2909         kfree(set->tags);
2910         set->tags = NULL;
2911         return ret;
2912 }
2913 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
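
/*
 * Minimal setup sketch (hypothetical driver code; the names and the
 * error handling are illustrative only):
 *
 *	struct blk_mq_tag_set *set = &my_dev->tag_set;
 *
 *	set->ops = &my_mq_ops;			// must provide .queue_rq
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);	// per-request driver payload
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(set))
 *		goto out_err;
 *	q = blk_mq_init_queue(set);		// paired with blk_cleanup_queue()
 */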
2914
2915 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2916 {
2917         int i;
2918
2919         for (i = 0; i < nr_cpu_ids; i++)
2920                 blk_mq_free_map_and_requests(set, i);
2921
2922         kfree(set->mq_map);
2923         set->mq_map = NULL;
2924
2925         kfree(set->tags);
2926         set->tags = NULL;
2927 }
2928 EXPORT_SYMBOL(blk_mq_free_tag_set);
2929
2930 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2931 {
2932         struct blk_mq_tag_set *set = q->tag_set;
2933         struct blk_mq_hw_ctx *hctx;
2934         int i, ret;
2935
2936         if (!set)
2937                 return -EINVAL;
2938
2939         blk_mq_freeze_queue(q);
2940         blk_mq_quiesce_queue(q);
2941
2942         ret = 0;
2943         queue_for_each_hw_ctx(q, hctx, i) {
2944                 if (!hctx->tags)
2945                         continue;
2946                 /*
2947                  * If we're using an MQ scheduler, just update the scheduler
2948                  * queue depth. This is similar to what the old code would do.
2949                  */
2950                 if (!hctx->sched_tags) {
2951                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
2952                                                         false);
2953                 } else {
2954                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2955                                                         nr, true);
2956                 }
2957                 if (ret)
2958                         break;
2959         }
2960
2961         if (!ret)
2962                 q->nr_requests = nr;
2963
2964         blk_mq_unquiesce_queue(q);
2965         blk_mq_unfreeze_queue(q);
2966
2967         return ret;
2968 }
2969
2970 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2971                                                         int nr_hw_queues)
2972 {
2973         struct request_queue *q;
2974
2975         lockdep_assert_held(&set->tag_list_lock);
2976
2977         if (nr_hw_queues > nr_cpu_ids)
2978                 nr_hw_queues = nr_cpu_ids;
2979         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2980                 return;
2981
2982         list_for_each_entry(q, &set->tag_list, tag_set_list)
2983                 blk_mq_freeze_queue(q);
2984
2985         set->nr_hw_queues = nr_hw_queues;
2986         blk_mq_update_queue_map(set);
2987         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2988                 blk_mq_realloc_hw_ctxs(set, q);
2989                 blk_mq_queue_reinit(q);
2990         }
2991
2992         list_for_each_entry(q, &set->tag_list, tag_set_list)
2993                 blk_mq_unfreeze_queue(q);
2994 }
2995
2996 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2997 {
2998         mutex_lock(&set->tag_list_lock);
2999         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
3000         mutex_unlock(&set->tag_list_lock);
3001 }
3002 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
3003
3004 /* Enable polling stats and return whether they were already enabled. */
3005 static bool blk_poll_stats_enable(struct request_queue *q)
3006 {
3007         if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3008             test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
3009                 return true;
3010         blk_stat_add_callback(q, q->poll_cb);
3011         return false;
3012 }
3013
3014 static void blk_mq_poll_stats_start(struct request_queue *q)
3015 {
3016         /*
3017          * We don't arm the callback if polling stats are not enabled or the
3018          * callback is already active.
3019          */
3020         if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3021             blk_stat_is_active(q->poll_cb))
3022                 return;
3023
3024         blk_stat_activate_msecs(q->poll_cb, 100);
3025 }
3026
3027 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3028 {
3029         struct request_queue *q = cb->data;
3030         int bucket;
3031
3032         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3033                 if (cb->stat[bucket].nr_samples)
3034                         q->poll_stat[bucket] = cb->stat[bucket];
3035         }
3036 }
3037
3038 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3039                                        struct blk_mq_hw_ctx *hctx,
3040                                        struct request *rq)
3041 {
3042         unsigned long ret = 0;
3043         int bucket;
3044
3045         /*
3046          * If stats collection isn't on, don't sleep but turn it on for
3047          * future users
3048          */
3049         if (!blk_poll_stats_enable(q))
3050                 return 0;
3051
3052         /*
3053          * As an optimistic guess, use half of the mean service time
3054          * for this type of request. We can (and should) make this smarter.
3055          * For instance, if the completion latencies are tight, we can
3056          * get closer than just half the mean. This is especially
3057          * important on devices where the completion latencies are longer
3058          * than ~10 usec. We do use the stats for the relevant IO size,
3059          * if available, which leads to better estimates.
3060          */
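        /*
         * For example: a mean completion time of 9 usecs in this bucket
         * yields a pre-poll sleep of (9000 + 1) / 2 = 4500 nsecs.
         */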
3061         bucket = blk_mq_poll_stats_bkt(rq);
3062         if (bucket < 0)
3063                 return ret;
3064
3065         if (q->poll_stat[bucket].nr_samples)
3066                 ret = (q->poll_stat[bucket].mean + 1) / 2;
3067
3068         return ret;
3069 }
3070
static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
				     struct blk_mq_hw_ctx *hctx,
				     struct request *rq)
{
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
		return false;

	/*
	 * poll_nsec can be:
	 *
	 * -1:	don't ever hybrid sleep
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec == -1)
		return false;
	else if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, hctx, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * Sleep for the target duration. When q->poll_nsec is 0, this is
	 * the stats-based estimate computed by blk_mq_poll_nsecs() above,
	 * i.e. roughly half the mean completion time for this bucket.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	hrtimer_init_sleeper(&hs, current);
	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_start_expires(&hs.timer, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		/*
		 * After the first arming the stored expiry is absolute, so
		 * restart in ABS mode to keep the same overall deadline if
		 * we loop again after an early wakeup.
		 */
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);
	return true;
}

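/*
 * Poll for the completion of one request: hybrid-sleep first if allowed,
 * then spin on ->poll() until the request completes, a signal is pending,
 * or the scheduler needs the CPU back.
 */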
static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct request_queue *q = hctx->queue;
	long state;

	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop.
	 */
	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
		return true;

	hctx->poll_considered++;

	state = current->state;
	while (!need_resched()) {
		int ret;

		hctx->poll_invoked++;

		ret = q->mq_ops->poll(hctx, rq->tag);
		if (ret > 0) {
			hctx->poll_success++;
			set_current_state(TASK_RUNNING);
			return true;
		}

		if (signal_pending_state(state, current))
			set_current_state(TASK_RUNNING);

		if (current->state == TASK_RUNNING)
			return true;
		if (ret < 0)
			break;
		cpu_relax();
	}

	__set_current_state(TASK_RUNNING);
	return false;
}

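/*
 * Polling entry point for this queue: decode the completion cookie into
 * the hardware queue and request it refers to, then poll for the
 * completion. Returns false if polling is disabled on the queue or the
 * request is already gone.
 *
 * A caller typically obtains the cookie at submission time and loops until
 * the IO it cares about is done, along these lines (illustrative sketch
 * only; 'bio_done' stands in for whatever completion state the caller
 * tracks):
 *
 *	blk_qc_t cookie = submit_bio(bio);
 *	while (!bio_done)
 *		blk_poll(q, cookie);
 */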
static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return false;

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
	if (!blk_qc_t_is_internal(cookie))
		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
	else {
		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
		if (!rq)
			return false;
	}

	return __blk_mq_poll(hctx, rq);
}

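/*
 * Boot-time init: register the CPUHP_BLK_MQ_DEAD hotplug state so that
 * requests left on the software queues of an offlined CPU get moved and
 * redispatched rather than stranded.
 */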
static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	return 0;
}
subsys_initcall(blk_mq_init);