/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return sbitmap_any_bit_set(&hctx->ctx_map) ||
                        !list_empty_careful(&hctx->dispatch) ||
                        blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
                sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

void blk_mq_freeze_queue_start(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout)
{
        return wait_event_timeout(q->mq_freeze_wq,
                                        percpu_ref_is_zero(&q->q_usage_counter),
                                        timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero.  For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
        int freeze_depth;

        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
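
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): updates to queue or tag state are typically bracketed by a
 * freeze/unfreeze pair, so that no request is in flight while they
 * happen:
 *
 *	blk_mq_freeze_queue(q);
 *	... safely modify queue data structures ...
 *	blk_mq_unfreeze_queue(q);
 */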

/**
 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Additionally, new queue_rq() calls may
 * still occur unless the queue has been stopped first.
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
        bool rcu = false;

        blk_mq_stop_hw_queues(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
                        synchronize_srcu(&hctx->queue_rq_srcu);
                else
                        rcu = true;
        }
        if (rcu)
                synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
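
/*
 * A minimal caller sketch (hypothetical): a driver that must be certain
 * no ->queue_rq() invocation is still running, e.g. before tearing down
 * a resource, quiesces, does its work, then restarts the queues that
 * blk_mq_quiesce_queue() stopped:
 *
 *	blk_mq_quiesce_queue(q);
 *	... no queue_rq() calls are in flight here ...
 *	blk_mq_start_stopped_hw_queues(q, true);
 */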

void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);

        /*
         * If we are called because the queue has now been marked as
         * dying, we need to ensure that processes currently waiting on
         * the queue are notified as well.
         */
        wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                        struct request *rq, unsigned int op)
{
        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = q;
        rq->mq_ctx = ctx;
        rq->cmd_flags = op;
        if (blk_queue_io_stat(q))
                rq->rq_flags |= RQF_IO_STAT;
        /* do not touch atomic flags, it needs atomic ops against the timer */
        rq->cpu = -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
#endif
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->special = NULL;
        /* tag was already set */
        rq->errors = 0;
        rq->extra_len = 0;

        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;

        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;

        ctx->rq_dispatched[op_is_sync(op)]++;
}
EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);

struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
                                       unsigned int op)
{
        struct request *rq;
        unsigned int tag;

        tag = blk_mq_get_tag(data);
        if (tag != BLK_MQ_TAG_FAIL) {
                struct blk_mq_tags *tags = blk_mq_tags_from_data(data);

                rq = tags->static_rqs[tag];

                if (data->flags & BLK_MQ_REQ_INTERNAL) {
                        rq->tag = -1;
                        rq->internal_tag = tag;
                } else {
                        if (blk_mq_tag_busy(data->hctx)) {
                                rq->rq_flags = RQF_MQ_INFLIGHT;
                                atomic_inc(&data->hctx->nr_active);
                        }
                        rq->tag = tag;
                        rq->internal_tag = -1;
                        data->hctx->tags->rqs[rq->tag] = rq;
                }

                blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
                return rq;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                unsigned int flags)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        int ret;

        ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
        if (ret)
                return ERR_PTR(ret);

        rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

        blk_mq_put_ctx(alloc_data.ctx);
        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
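
/*
 * Sketch of a typical caller (illustrative only; the setup step depends
 * on the driver): allocate a request outside the bio path, execute it
 * synchronously, then free it:
 *
 *	struct request *rq = blk_mq_alloc_request(q, WRITE, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up the request payload ...
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_mq_free_request(rq);
 */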

struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
                unsigned int flags, unsigned int hctx_idx)
{
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        unsigned int cpu;
        int ret;

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context.  No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, true);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not, tell the caller that it should skip this queue.
         */
        alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
        if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
                blk_queue_exit(q);
                return ERR_PTR(-EXDEV);
        }
        cpu = cpumask_first(alloc_data.hctx->cpumask);
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

        rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

        blk_queue_exit(q);

        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);

        return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                             struct request *rq)
{
        const int sched_tag = rq->internal_tag;
        struct request_queue *q = rq->q;

        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);

        wbt_done(q->rq_wb, &rq->issue_stat);
        rq->rq_flags = 0;

        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
        if (rq->tag != -1)
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_sched_completed_request(hctx, rq);
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
}

static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
                                     struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        ctx->rq_completed[rq_is_sync(rq)]++;
        __blk_mq_finish_request(hctx, ctx, rq);
}

void blk_mq_finish_request(struct request *rq)
{
        blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}

void blk_mq_free_request(struct request *rq)
{
        blk_mq_sched_put_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                wbt_done(rq->q->rq_wb, &rq->issue_stat);
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

static void blk_mq_ipi_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

static void blk_mq_stat_add(struct request *rq)
{
        if (rq->rq_flags & RQF_STATS) {
                /*
                 * We could use rq->mq_ctx here, but there's less of a risk
                 * of races if we have the completion event add the stats
                 * to the local software queue.
                 */
                struct blk_mq_ctx *ctx;

                ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
                blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
        }
}

static void __blk_mq_complete_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_stat_add(rq);

        if (!q->softirq_done_fn)
                blk_mq_end_request(rq, rq->errors);
        else
                blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:         the request being processed
 *
 * Description:
 *      Ends all I/O on a request. It does not handle partial completions.
 *      The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq, int error)
{
        struct request_queue *q = rq->q;

        if (unlikely(blk_should_fake_timeout(q)))
                return;
        if (!blk_mark_rq_complete(rq)) {
                rq->errors = error;
                __blk_mq_complete_request(rq);
        }
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
        return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_sched_started_request(rq);

        trace_block_rq_issue(q, rq);

        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                blk_stat_set_issue_time(&rq->issue_stat);
                rq->rq_flags |= RQF_STATS;
                wbt_issue(q->rq_wb, &rq->issue_stat);
        }

        blk_add_timer(rq);

        /*
         * Ensure that ->deadline is visible before we set the started
         * flag and clear the completed flag.
         */
        smp_mb__before_atomic();

        /*
         * Mark us as started and clear complete. Complete might have been
         * set if requeue raced with timeout, which then marked it as
         * complete. So be sure to clear complete again when we start
         * the request, otherwise we'll ignore the completion event.
         */
        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
                clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears.  We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);
        blk_mq_sched_requeue_request(rq);

        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
                if (q->dma_drain_size && blk_rq_bytes(rq))
                        rq->nr_phys_segments--;
        }
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
        __blk_mq_requeue_request(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
                if (!(rq->rq_flags & RQF_SOFTBARRIER))
                        continue;

                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, true, false, false, true);
        }

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_sched_insert_request(rq, false, false, false, true);
        }

        blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
         * request head insertion from the workqueue.
         */
        BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);
        if (at_head) {
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->requeue_list);
        } else {
                list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
{
        kblockd_schedule_delayed_work(&q->requeue_work,
                                      msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
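
/*
 * Intended use, sketched (hypothetical driver error path): park the
 * request on the requeue list without kicking it, then kick the list
 * after a delay so the device has time to recover; the 100ms value is
 * only an example:
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(rq->q, 100);
 */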

void blk_mq_abort_requeue_list(struct request_queue *q)
{
        unsigned long flags;
        LIST_HEAD(rq_list);

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        while (!list_empty(&rq_list)) {
                struct request *rq;

                rq = list_first_entry(&rq_list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->errors = -EIO;
                blk_mq_end_request(rq, rq->errors);
        }
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag < tags->nr_tags) {
                prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
        }

        return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);
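
/*
 * Typical lookup from a driver completion path (illustrative; the
 * completion structure "cqe" and its tag field are hypothetical):
 *
 *	struct request *rq = blk_mq_tag_to_rq(hctx->tags, cqe->tag);
 *	if (rq)
 *		blk_mq_complete_request(rq, 0);
 */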

struct blk_mq_timeout_data {
        unsigned long next;
        unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
        const struct blk_mq_ops *ops = req->q->mq_ops;
        enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

        /*
         * We know that complete is set at this point. If STARTED isn't set
         * anymore, then the request isn't active and the "timeout" should
         * just be ignored. This can happen due to the bitflag ordering.
         * Timeout first checks if STARTED is set, and if it is, assumes
         * the request is active. But if we race with completion, then
         * both flags will get cleared. So check here again, and ignore
         * a timeout event with a request that isn't active.
         */
        if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
                return;

        if (ops->timeout)
                ret = ops->timeout(req, reserved);

        switch (ret) {
        case BLK_EH_HANDLED:
                __blk_mq_complete_request(req);
                break;
        case BLK_EH_RESET_TIMER:
                blk_add_timer(req);
                blk_clear_rq_complete(req);
                break;
        case BLK_EH_NOT_HANDLED:
                break;
        default:
                printk(KERN_ERR "block: bad eh return: %d\n", ret);
                break;
        }
}
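
/*
 * The ops->timeout() call above expects a driver handler shaped roughly
 * like this (hypothetical code; my_abort() is an assumed driver helper):
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq,
 *						   bool reserved)
 *	{
 *		if (my_abort(rq) == 0)
 *			return BLK_EH_HANDLED;
 *		return BLK_EH_RESET_TIMER;
 *	}
 */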

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
{
        struct blk_mq_timeout_data *data = priv;

        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                return;

        if (time_after_eq(jiffies, rq->deadline)) {
                if (!blk_mark_rq_complete(rq))
                        blk_mq_rq_timed_out(rq, reserved);
        } else if (!data->next_set || time_after(data->next, rq->deadline)) {
                data->next = rq->deadline;
                data->next_set = 1;
        }
}

static void blk_mq_timeout_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, timeout_work);
        struct blk_mq_timeout_data data = {
                .next           = 0,
                .next_set       = 0,
        };
        int i;

        /* A deadlock might occur if a request is stuck requiring a
         * timeout at the same time a queue freeze is waiting for
         * completion, since the timeout code would not be able to
         * acquire the queue reference here.
         *
         * That's why we don't use blk_queue_enter here; instead, we use
         * percpu_ref_tryget directly, because we need to be able to
         * obtain a reference even in the short window between the queue
         * starting to freeze, by dropping the first reference in
         * blk_mq_freeze_queue_start, and the moment the last request is
         * consumed, marked by the instant q_usage_counter reaches
         * zero.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
                struct blk_mq_hw_ctx *hctx;

                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
                                blk_mq_tag_idle(hctx);
                }
        }
        blk_queue_exit(q);
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
                                 struct blk_mq_ctx *ctx, struct bio *bio)
{
        struct request *rq;
        int checked = 8;

        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
                bool merged = false;

                if (!checked--)
                        break;

                if (!blk_rq_merge_ok(rq, bio))
                        continue;

                switch (blk_try_merge(rq, bio)) {
                case ELEVATOR_BACK_MERGE:
                        if (blk_mq_sched_allow_merge(q, rq, bio))
                                merged = bio_attempt_back_merge(q, rq, bio);
                        break;
                case ELEVATOR_FRONT_MERGE:
                        if (blk_mq_sched_allow_merge(q, rq, bio))
                                merged = bio_attempt_front_merge(q, rq, bio);
                        break;
                case ELEVATOR_DISCARD_MERGE:
                        merged = bio_attempt_discard_merge(q, rq, bio);
                        break;
                default:
                        continue;
                }

                if (merged)
                        ctx->rq_merged++;
                return merged;
        }

        return false;
}

struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_busy_ctx_data *flush_data = data;
        struct blk_mq_hw_ctx *hctx = flush_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

        sbitmap_clear_bit(sb, bitnr);
        spin_lock(&ctx->lock);
        list_splice_tail_init(&ctx->rq_list, flush_data->list);
        spin_unlock(&ctx->lock);
        return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct flush_busy_ctx_data data = {
                .hctx = hctx,
                .list = list,
        };

        sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

static inline unsigned int queued_to_index(unsigned int queued)
{
        if (!queued)
                return 0;

        return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
                           bool wait)
{
        struct blk_mq_alloc_data data = {
                .q = rq->q,
                .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
                .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
        };

        if (rq->tag != -1)
                goto done;

        if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
                data.flags |= BLK_MQ_REQ_RESERVED;

        rq->tag = blk_mq_get_tag(&data);
        if (rq->tag >= 0) {
                if (blk_mq_tag_busy(data.hctx)) {
                        rq->rq_flags |= RQF_MQ_INFLIGHT;
                        atomic_inc(&data.hctx->nr_active);
                }
                data.hctx->tags->rqs[rq->tag] = rq;
        }

done:
        if (hctx)
                *hctx = data.hctx;
        return rq->tag != -1;
}

static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                    struct request *rq)
{
        blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = -1;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                atomic_dec(&hctx->nr_active);
        }
}

static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(hctx, rq);
}

static void blk_mq_put_driver_tag(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx;

        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
        __blk_mq_put_driver_tag(hctx, rq);
}

/*
 * If we fail getting a driver tag because all the driver tags are already
 * assigned and on the dispatch list, BUT the first entry does not have a
 * tag, then we could deadlock. For that case, move entries with assigned
 * driver tags to the front, leaving the set of tagged requests in the
 * same order, and the untagged set in the same order.
 */
static bool reorder_tags_to_front(struct list_head *list)
{
        struct request *rq, *tmp, *first = NULL;

        list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
                if (rq == first)
                        break;
                if (rq->tag != -1) {
                        list_move(&rq->queuelist, list);
                        if (!first)
                                first = rq;
                }
        }

        return first != NULL;
}

static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
                                void *key)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

        list_del(&wait->task_list);
        clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
        blk_mq_run_hw_queue(hctx, true);
        return 1;
}

static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
{
        struct sbq_wait_state *ws;

        /*
         * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
         * The thread which wins the race to grab this bit adds the hardware
         * queue to the wait queue.
         */
        if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
            test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
                return false;

        init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
        ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);

        /*
         * As soon as this returns, it's no longer safe to fiddle with
         * hctx->dispatch_wait, since a completion can wake up the wait queue
         * and unlock the bit.
         */
        add_wait_queue(&ws->wait, &hctx->dispatch_wait);
        return true;
}

bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
        LIST_HEAD(driver_list);
        struct list_head *dptr;
        int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;

        if (list_empty(list))
                return false;

        /*
         * Start off with dptr being NULL, so we start the first request
         * immediately, even if we have more pending.
         */
        dptr = NULL;

        /*
         * Now process all the entries, sending them to the driver.
         */
        errors = queued = 0;
        do {
                struct blk_mq_queue_data bd;

                rq = list_first_entry(list, struct request, queuelist);
                if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
                        if (!queued && reorder_tags_to_front(list))
                                continue;

                        /*
                         * The initial allocation attempt failed, so we need to
                         * rerun the hardware queue when a tag is freed.
                         */
                        if (blk_mq_dispatch_wait_add(hctx)) {
                                /*
                                 * It's possible that a tag was freed in the
                                 * window between the allocation failure and
                                 * adding the hardware queue to the wait queue.
                                 */
                                if (!blk_mq_get_driver_tag(rq, &hctx, false))
                                        break;
                        } else {
                                break;
                        }
                }

                list_del_init(&rq->queuelist);

                bd.rq = rq;
                bd.list = dptr;

                /*
                 * Flag last if we have no more requests, or if we have more
                 * but can't assign a driver tag to it.
                 */
                if (list_empty(list))
                        bd.last = true;
                else {
                        struct request *nxt;

                        nxt = list_first_entry(list, struct request, queuelist);
                        bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
                }

                ret = q->mq_ops->queue_rq(hctx, &bd);
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;
                        break;
                case BLK_MQ_RQ_QUEUE_BUSY:
                        blk_mq_put_driver_tag_hctx(hctx, rq);
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                case BLK_MQ_RQ_QUEUE_ERROR:
                        errors++;
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                        break;
                }

                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
                        break;

                /*
                 * We've done the first request. If we have more than 1
                 * left in the list, set dptr to defer issue.
                 */
                if (!dptr && list->next != list->prev)
                        dptr = &driver_list;
        } while (!list_empty(list));

        hctx->dispatched[queued_to_index(queued)]++;

        /*
         * Any items that need requeuing? Stuff them into hctx->dispatch,
         * that is where we will continue on next queue run.
         */
        if (!list_empty(list)) {
                /*
                 * If we got a driver tag for the next request already,
                 * free it again.
                 */
                rq = list_first_entry(list, struct request, queuelist);
                blk_mq_put_driver_tag(rq);

                spin_lock(&hctx->lock);
                list_splice_init(list, &hctx->dispatch);
                spin_unlock(&hctx->lock);

                /*
                 * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
                 * it's possible the queue is stopped and restarted again
                 * before this. Queue restart will dispatch requests. And since
                 * requests in rq_list aren't added into hctx->dispatch yet,
                 * the requests in rq_list might get lost.
                 *
                 * blk_mq_run_hw_queue() already checks the STOPPED bit
                 *
                 * If RESTART or TAG_WAITING is set, then let completion restart
                 * the queue instead of potentially looping here.
                 */
                if (!blk_mq_sched_needs_restart(hctx) &&
                    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
                        blk_mq_run_hw_queue(hctx, true);
        }

        return (queued + errors) != 0;
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        int srcu_idx;

        WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
                cpu_online(hctx->next_cpu));

        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                rcu_read_lock();
                blk_mq_sched_dispatch_requests(hctx);
                rcu_read_unlock();
        } else {
                srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
                blk_mq_sched_dispatch_requests(hctx);
                srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
        }
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->queue->nr_hw_queues == 1)
                return WORK_CPU_UNBOUND;

        if (--hctx->next_cpu_batch <= 0) {
                int next_cpu;

                next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
                if (next_cpu >= nr_cpu_ids)
                        next_cpu = cpumask_first(hctx->cpumask);

                hctx->next_cpu = next_cpu;
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }

        return hctx->next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
                                        unsigned long msecs)
{
        if (unlikely(blk_mq_hctx_stopped(hctx) ||
                     !blk_mq_hw_queue_mapped(hctx)))
                return;

        if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
                int cpu = get_cpu();
                if (cpumask_test_cpu(cpu, hctx->cpumask)) {
                        __blk_mq_run_hw_queue(hctx);
                        put_cpu();
                        return;
                }

                put_cpu();
        }

        if (msecs == 0)
                kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
                                         &hctx->run_work);
        else
                kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
                                                 &hctx->delayed_run_work,
                                                 msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
        __blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
        __blk_mq_delay_run_hw_queue(hctx, async, 0);
}

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (!blk_mq_hctx_has_pending(hctx) ||
                    blk_mq_hctx_stopped(hctx))
                        continue;

                blk_mq_run_hw_queue(hctx, async);
        }
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);
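
/*
 * Illustrative: drivers typically call this after new work becomes
 * possible, e.g. once an out-of-resources condition clears; passing
 * async == true avoids running the queues from the caller's context:
 *
 *	blk_mq_run_hw_queues(q, true);
 */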

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hctx_stopped(hctx))
                        return true;

        return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        cancel_work(&hctx->run_work);
        cancel_delayed_work(&hctx->delay_work);
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

        blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
        if (!blk_mq_hctx_stopped(hctx))
                return;

        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
        blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
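
/*
 * Common stop/restart pattern, sketched (hypothetical driver code;
 * out_of_device_resources() is an assumed helper): stop the queue when
 * ->queue_rq() runs out of device resources, then restart all stopped
 * queues from the replenish path:
 *
 *	if (out_of_device_resources(dev)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *	}
 *	... later, when resources are available again ...
 *	blk_mq_start_stopped_hw_queues(q, true);
 */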

static void blk_mq_run_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, run_work);

        __blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delayed_run_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);

        __blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

        if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
                __blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
        if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
                return;

        blk_mq_stop_hw_queue(hctx);
        kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
                        &hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);
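
/*
 * Illustrative use in a ->queue_rq() that knows the device needs time
 * (hypothetical; the 10ms delay is only an example): park the queue
 * briefly instead of retrying in a tight loop:
 *
 *	blk_mq_delay_queue(hctx, 10);
 *	return BLK_MQ_RQ_QUEUE_BUSY;
 */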

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
                                            struct request *rq,
                                            bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        trace_block_rq_insert(hctx->queue, rq);

        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                             bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        __blk_mq_insert_req_list(hctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list)

{
        /*
         * Preemption doesn't flush the plug list, so it's possible that
         * ctx->cpu is offline now.
         */
        spin_lock(&ctx->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                BUG_ON(rq->mq_ctx != ctx);
                list_del_init(&rq->queuelist);
                __blk_mq_insert_req_list(hctx, rq, false);
        }
        blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return !(rqa->mq_ctx < rqb->mq_ctx ||
                 (rqa->mq_ctx == rqb->mq_ctx &&
                  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
        struct blk_mq_ctx *this_ctx;
        struct request_queue *this_q;
        struct request *rq;
        LIST_HEAD(list);
        LIST_HEAD(ctx_list);
        unsigned int depth;

        list_splice_init(&plug->mq_list, &list);

        list_sort(NULL, &list, plug_ctx_cmp);

        this_q = NULL;
        this_ctx = NULL;
        depth = 0;

        while (!list_empty(&list)) {
                rq = list_entry_rq(list.next);
                list_del_init(&rq->queuelist);
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
                                trace_block_unplug(this_q, depth, from_schedule);
                                blk_mq_sched_insert_requests(this_q, this_ctx,
                                                                &ctx_list,
                                                                from_schedule);
                        }

                        this_ctx = rq->mq_ctx;
                        this_q = rq->q;
                        depth = 0;
                }

                depth++;
                list_add_tail(&rq->queuelist, &ctx_list);
        }

        /*
         * If 'this_ctx' is set, we know we have entries to complete
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
                trace_block_unplug(this_q, depth, from_schedule);
                blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
                                                from_schedule);
        }
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
        init_request_from_bio(rq, bio);

        blk_account_io_start(rq, true);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
        return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
                !blk_queue_nomerges(hctx->queue);
}

static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
{
        if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
insert_rq:
                __blk_mq_insert_request(hctx, rq, false);
                spin_unlock(&ctx->lock);
                return false;
        } else {
                struct request_queue *q = hctx->queue;

                spin_lock(&ctx->lock);
                if (!blk_mq_attempt_merge(q, ctx, bio)) {
                        blk_mq_bio_to_request(rq, bio);
                        goto insert_rq;
                }

                spin_unlock(&ctx->lock);
                __blk_mq_finish_request(hctx, ctx, rq);
                return true;
        }
}

static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        if (rq->tag != -1)
                return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

        return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}

1455
1456 static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
1457                                       bool may_sleep)
1458 {
1459         struct request_queue *q = rq->q;
1460         struct blk_mq_queue_data bd = {
1461                 .rq = rq,
1462                 .list = NULL,
1463                 .last = 1
1464         };
1465         struct blk_mq_hw_ctx *hctx;
1466         blk_qc_t new_cookie;
1467         int ret;
1468
1469         if (q->elevator)
1470                 goto insert;
1471
1472         if (!blk_mq_get_driver_tag(rq, &hctx, false))
1473                 goto insert;
1474
1475         new_cookie = request_to_qc_t(hctx, rq);
1476
1477         /*
         * If the queue returns OK, we are done. On error, kill it. For any
         * other return (busy), just add it to our list as we previously
         * would have done.
         */
        ret = q->mq_ops->queue_rq(hctx, &bd);
        if (ret == BLK_MQ_RQ_QUEUE_OK) {
                *cookie = new_cookie;
                return;
        }

        __blk_mq_requeue_request(rq);

        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
                *cookie = BLK_QC_T_NONE;
                rq->errors = -EIO;
                blk_mq_end_request(rq, rq->errors);
                return;
        }

insert:
        blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
        const int is_sync = op_is_sync(bio->bi_opf);
        const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct blk_mq_alloc_data data = { .flags = 0 };
        struct request *rq;
        unsigned int request_count = 0, srcu_idx;
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
        blk_qc_t cookie;
        unsigned int wb_acct;

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }

        blk_queue_split(q, &bio, q->bio_split);

        if (!is_flush_fua && !blk_queue_nomerges(q) &&
            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return BLK_QC_T_NONE;

        if (blk_mq_sched_bio_merge(q, bio))
                return BLK_QC_T_NONE;

        wb_acct = wbt_wait(q->rq_wb, bio, NULL);

        trace_block_getrq(q, bio, bio->bi_opf);

        rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
        if (unlikely(!rq)) {
                __wbt_done(q->rq_wb, wb_acct);
                return BLK_QC_T_NONE;
        }

        wbt_track(&rq->issue_stat, wb_acct);

        cookie = request_to_qc_t(data.hctx, rq);

        if (unlikely(is_flush_fua)) {
                if (q->elevator)
                        goto elv_insert;
                blk_mq_bio_to_request(rq, bio);
                blk_insert_flush(rq);
                goto run_queue;
        }

        plug = current->plug;
        /*
         * If the driver supports deferred issue based on 'last', then
         * queue it up like normal since we can potentially save some
         * CPU this way.
         */
        if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
            !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
                struct request *old_rq = NULL;

                blk_mq_bio_to_request(rq, bio);

                /*
                 * We do limited plugging. If the bio can be merged, do that.
                 * Otherwise the existing request in the plug list will be
                 * issued. So the plug list will have one request at most.
                 */
                if (plug) {
                        /*
                         * The plug list might get flushed before this. If that
                         * happens, same_queue_rq is invalid and the plug list
                         * is empty.
                         */
                        if (same_queue_rq && !list_empty(&plug->mq_list)) {
                                old_rq = same_queue_rq;
                                list_del_init(&old_rq->queuelist);
                        }
                        list_add_tail(&rq->queuelist, &plug->mq_list);
                } else /* is_sync */
                        old_rq = rq;
                blk_mq_put_ctx(data.ctx);
                if (!old_rq)
                        goto done;

                if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
                        rcu_read_lock();
                        blk_mq_try_issue_directly(old_rq, &cookie, false);
                        rcu_read_unlock();
                } else {
                        srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
                        blk_mq_try_issue_directly(old_rq, &cookie, true);
                        srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
                }
                goto done;
        }

        if (q->elevator) {
elv_insert:
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
                                                !is_sync || is_flush_fua, true);
                goto done;
        }
        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
                /*
                 * For a SYNC request, send it to the hardware immediately. For
                 * an ASYNC request, just ensure that we run it later on. The
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
        blk_mq_put_ctx(data.ctx);
done:
        return cookie;
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
        const int is_sync = op_is_sync(bio->bi_opf);
        const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct blk_plug *plug;
        unsigned int request_count = 0;
        struct blk_mq_alloc_data data = { .flags = 0 };
        struct request *rq;
        blk_qc_t cookie;
        unsigned int wb_acct;

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }

        blk_queue_split(q, &bio, q->bio_split);

        if (!is_flush_fua && !blk_queue_nomerges(q)) {
                if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
                        return BLK_QC_T_NONE;
        } else
                request_count = blk_plug_queued_count(q);

        if (blk_mq_sched_bio_merge(q, bio))
                return BLK_QC_T_NONE;

        wb_acct = wbt_wait(q->rq_wb, bio, NULL);

        trace_block_getrq(q, bio, bio->bi_opf);

        rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
        if (unlikely(!rq)) {
                __wbt_done(q->rq_wb, wb_acct);
                return BLK_QC_T_NONE;
        }

        wbt_track(&rq->issue_stat, wb_acct);
1669
1670         cookie = request_to_qc_t(data.hctx, rq);
1671
1672         if (unlikely(is_flush_fua)) {
1673                 if (q->elevator)
1674                         goto elv_insert;
1675                 blk_mq_bio_to_request(rq, bio);
1676                 blk_insert_flush(rq);
1677                 goto run_queue;
1678         }
1679
1680         /*
1681          * If a task plug exists, use it to temporarily store requests
1682          * until the task is either done or scheduled away. Since the
1683          * plug is per-task, this is completely lockless.
1684          */
1685         plug = current->plug;
1686         if (plug) {
1687                 struct request *last = NULL;
1688
1689                 blk_mq_bio_to_request(rq, bio);
1690
1691                 /*
1692                  * @request_count may have become stale if the task was
1693                  * scheduled out, so check the list again.
1694                  */
1695                 if (list_empty(&plug->mq_list))
1696                         request_count = 0;
1697                 if (!request_count)
1698                         trace_block_plug(q);
1699                 else
1700                         last = list_entry_rq(plug->mq_list.prev);
1701
1702                 blk_mq_put_ctx(data.ctx);
1703
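                     /*
                      * Flush the plug once it holds BLK_MAX_REQUEST_COUNT
                      * requests, or once the most recently queued request is
                      * large, to bound how much I/O a plug can hold back.
                      */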
1704                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1705                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1706                         blk_flush_plug_list(plug, false);
1707                         trace_block_plug(q);
1708                 }
1709
1710                 list_add_tail(&rq->queuelist, &plug->mq_list);
1711                 return cookie;
1712         }
1713
1714         if (q->elevator) {
1715 elv_insert:
1716                 blk_mq_put_ctx(data.ctx);
1717                 blk_mq_bio_to_request(rq, bio);
1718                 blk_mq_sched_insert_request(rq, false, true,
1719                                                 !is_sync || is_flush_fua, true);
1720                 goto done;
1721         }
1722         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1723                 /*
1724                  * For a SYNC request, send it to the hardware immediately. For
1725                  * an ASYNC request, just ensure that we run it later on. The
1726                  * latter allows for merging opportunities and more efficient
1727                  * dispatching.
1728                  */
1729 run_queue:
1730                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1731         }
1732
1733         blk_mq_put_ctx(data.ctx);
1734 done:
1735         return cookie;
1736 }
1737
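     /*
      * Free the requests in a tag map: give the driver a chance to release
      * per-request state via ->exit_request(), then free the backing pages
      * that the requests were carved out of.
      */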
1738 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1739                      unsigned int hctx_idx)
1740 {
1741         struct page *page;
1742
1743         if (tags->rqs && set->ops->exit_request) {
1744                 int i;
1745
1746                 for (i = 0; i < tags->nr_tags; i++) {
1747                         struct request *rq = tags->static_rqs[i];
1748
1749                         if (!rq)
1750                                 continue;
1751                         set->ops->exit_request(set->driver_data, rq,
1752                                                 hctx_idx, i);
1753                         tags->static_rqs[i] = NULL;
1754                 }
1755         }
1756
1757         while (!list_empty(&tags->page_list)) {
1758                 page = list_first_entry(&tags->page_list, struct page, lru);
1759                 list_del_init(&page->lru);
1760                 /*
1761                  * Remove kmemleak object previously allocated in
1762                  * blk_mq_init_rq_map().
1763                  */
1764                 kmemleak_free(page_address(page));
1765                 __free_pages(page, page->private);
1766         }
1767 }
1768
1769 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1770 {
1771         kfree(tags->rqs);
1772         tags->rqs = NULL;
1773         kfree(tags->static_rqs);
1774         tags->static_rqs = NULL;
1775
1776         blk_mq_free_tags(tags);
1777 }
1778
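     /*
      * Allocate the tag map and the rqs[]/static_rqs[] request pointer
      * arrays for one hardware queue, on the NUMA node that the hardware
      * queue maps to (falling back to set->numa_node).
      */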
1779 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1780                                         unsigned int hctx_idx,
1781                                         unsigned int nr_tags,
1782                                         unsigned int reserved_tags)
1783 {
1784         struct blk_mq_tags *tags;
1785         int node;
1786
1787         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1788         if (node == NUMA_NO_NODE)
1789                 node = set->numa_node;
1790
1791         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
1792                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1793         if (!tags)
1794                 return NULL;
1795
1796         tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1797                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1798                                  node);
1799         if (!tags->rqs) {
1800                 blk_mq_free_tags(tags);
1801                 return NULL;
1802         }
1803
1804         tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1805                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1806                                  node);
1807         if (!tags->static_rqs) {
1808                 kfree(tags->rqs);
1809                 blk_mq_free_tags(tags);
1810                 return NULL;
1811         }
1812
1813         return tags;
1814 }
1815
1816 static size_t order_to_size(unsigned int order)
1817 {
1818         return (size_t)PAGE_SIZE << order;
1819 }
1820
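     /*
      * Allocate the actual requests for one hardware queue. Requests are
      * carved out of page chunks of at most 2^max_order pages, falling back
      * to smaller orders if higher-order allocations fail. As a rough
      * example (assuming 4 KiB pages and an rq_size that rounds up to 512
      * bytes), an order-4 chunk holds 65536 / 512 = 128 requests.
      */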
1821 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1822                      unsigned int hctx_idx, unsigned int depth)
1823 {
1824         unsigned int i, j, entries_per_page, max_order = 4;
1825         size_t rq_size, left;
1826         int node;
1827
1828         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1829         if (node == NUMA_NO_NODE)
1830                 node = set->numa_node;
1831
1832         INIT_LIST_HEAD(&tags->page_list);
1833
1834         /*
1835          * rq_size is the size of the request plus driver payload, rounded
1836          * to the cacheline size
1837          */
1838         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1839                                 cache_line_size());
1840         left = rq_size * depth;
1841
1842         for (i = 0; i < depth; ) {
1843                 int this_order = max_order;
1844                 struct page *page;
1845                 int to_do;
1846                 void *p;
1847
1848                 while (this_order && left < order_to_size(this_order - 1))
1849                         this_order--;
1850
1851                 do {
1852                         page = alloc_pages_node(node,
1853                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1854                                 this_order);
1855                         if (page)
1856                                 break;
1857                         if (!this_order--)
1858                                 break;
1859                         if (order_to_size(this_order) < rq_size)
1860                                 break;
1861                 } while (1);
1862
1863                 if (!page)
1864                         goto fail;
1865
1866                 page->private = this_order;
1867                 list_add_tail(&page->lru, &tags->page_list);
1868
1869                 p = page_address(page);
1870                 /*
1871                  * Allow kmemleak to scan these pages as they contain pointers
1872                  * to additional allocations like via ops->init_request().
1873                  */
1874                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1875                 entries_per_page = order_to_size(this_order) / rq_size;
1876                 to_do = min(entries_per_page, depth - i);
1877                 left -= to_do * rq_size;
1878                 for (j = 0; j < to_do; j++) {
1879                         struct request *rq = p;
1880
1881                         tags->static_rqs[i] = rq;
1882                         if (set->ops->init_request) {
1883                                 if (set->ops->init_request(set->driver_data,
1884                                                 rq, hctx_idx, i,
1885                                                 node)) {
1886                                         tags->static_rqs[i] = NULL;
1887                                         goto fail;
1888                                 }
1889                         }
1890
1891                         p += rq_size;
1892                         i++;
1893                 }
1894         }
1895         return 0;
1896
1897 fail:
1898         blk_mq_free_rqs(set, tags, hctx_idx);
1899         return -ENOMEM;
1900 }
1901
1902 /*
1903  * 'cpu' is going away. Splice any existing rq_list entries from this
1904  * software queue to the hw queue dispatch list, and ensure that it
1905  * gets run.
1906  */
1907 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1908 {
1909         struct blk_mq_hw_ctx *hctx;
1910         struct blk_mq_ctx *ctx;
1911         LIST_HEAD(tmp);
1912
1913         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1914         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1915
1916         spin_lock(&ctx->lock);
1917         if (!list_empty(&ctx->rq_list)) {
1918                 list_splice_init(&ctx->rq_list, &tmp);
1919                 blk_mq_hctx_clear_pending(hctx, ctx);
1920         }
1921         spin_unlock(&ctx->lock);
1922
1923         if (list_empty(&tmp))
1924                 return 0;
1925
1926         spin_lock(&hctx->lock);
1927         list_splice_tail_init(&tmp, &hctx->dispatch);
1928         spin_unlock(&hctx->lock);
1929
1930         blk_mq_run_hw_queue(hctx, true);
1931         return 0;
1932 }
1933
1934 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1935 {
1936         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1937                                             &hctx->cpuhp_dead);
1938 }
1939
1940 /* hctx->ctxs will be freed in queue's release handler */
1941 static void blk_mq_exit_hctx(struct request_queue *q,
1942                 struct blk_mq_tag_set *set,
1943                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1944 {
1945         unsigned flush_start_tag = set->queue_depth;
1946
1947         blk_mq_tag_idle(hctx);
1948
1949         if (set->ops->exit_request)
1950                 set->ops->exit_request(set->driver_data,
1951                                        hctx->fq->flush_rq, hctx_idx,
1952                                        flush_start_tag + hctx_idx);
1953
1954         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1955
1956         if (set->ops->exit_hctx)
1957                 set->ops->exit_hctx(hctx, hctx_idx);
1958
1959         if (hctx->flags & BLK_MQ_F_BLOCKING)
1960                 cleanup_srcu_struct(&hctx->queue_rq_srcu);
1961
1962         blk_mq_remove_cpuhp(hctx);
1963         blk_free_flush_queue(hctx->fq);
1964         sbitmap_free(&hctx->ctx_map);
1965 }
1966
1967 static void blk_mq_exit_hw_queues(struct request_queue *q,
1968                 struct blk_mq_tag_set *set, int nr_queue)
1969 {
1970         struct blk_mq_hw_ctx *hctx;
1971         unsigned int i;
1972
1973         queue_for_each_hw_ctx(q, hctx, i) {
1974                 if (i == nr_queue)
1975                         break;
1976                 blk_mq_exit_hctx(q, set, hctx, i);
1977         }
1978 }
1979
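     /*
      * Set up a single hardware queue: the run/delay work items, the ctx
      * map and ctx pointer array, driver and scheduler per-hctx state, and
      * the flush queue. The error labels unwind these steps in reverse.
      */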
1980 static int blk_mq_init_hctx(struct request_queue *q,
1981                 struct blk_mq_tag_set *set,
1982                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1983 {
1984         int node;
1985         unsigned flush_start_tag = set->queue_depth;
1986
1987         node = hctx->numa_node;
1988         if (node == NUMA_NO_NODE)
1989                 node = hctx->numa_node = set->numa_node;
1990
1991         INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1992         INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
1993         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1994         spin_lock_init(&hctx->lock);
1995         INIT_LIST_HEAD(&hctx->dispatch);
1996         hctx->queue = q;
1997         hctx->queue_num = hctx_idx;
1998         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1999
2000         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2001
2002         hctx->tags = set->tags[hctx_idx];
2003
2004         /*
2005          * Allocate space for all possible cpus to avoid allocation at
2006          * runtime
2007          */
2008         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
2009                                         GFP_KERNEL, node);
2010         if (!hctx->ctxs)
2011                 goto unregister_cpu_notifier;
2012
2013         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2014                               node))
2015                 goto free_ctxs;
2016
2017         hctx->nr_ctx = 0;
2018
2019         if (set->ops->init_hctx &&
2020             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2021                 goto free_bitmap;
2022
2023         if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2024                 goto exit_hctx;
2025
2026         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2027         if (!hctx->fq)
2028                 goto sched_exit_hctx;
2029
2030         if (set->ops->init_request &&
2031             set->ops->init_request(set->driver_data,
2032                                    hctx->fq->flush_rq, hctx_idx,
2033                                    flush_start_tag + hctx_idx, node))
2034                 goto free_fq;
2035
2036         if (hctx->flags & BLK_MQ_F_BLOCKING)
2037                 init_srcu_struct(&hctx->queue_rq_srcu);
2038
2039         return 0;
2040
2041  free_fq:
2042         blk_free_flush_queue(hctx->fq);
2043  sched_exit_hctx:
2044         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2045  exit_hctx:
2046         if (set->ops->exit_hctx)
2047                 set->ops->exit_hctx(hctx, hctx_idx);
2048  free_bitmap:
2049         sbitmap_free(&hctx->ctx_map);
2050  free_ctxs:
2051         kfree(hctx->ctxs);
2052  unregister_cpu_notifier:
2053         blk_mq_remove_cpuhp(hctx);
2054         return -1;
2055 }
2056
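     /*
      * Initialize the per-CPU software queues and, when there is more than
      * one hardware queue, pick a local NUMA node for each hardware queue
      * that an online CPU maps to.
      */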
2057 static void blk_mq_init_cpu_queues(struct request_queue *q,
2058                                    unsigned int nr_hw_queues)
2059 {
2060         unsigned int i;
2061
2062         for_each_possible_cpu(i) {
2063                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2064                 struct blk_mq_hw_ctx *hctx;
2065
2066                 __ctx->cpu = i;
2067                 spin_lock_init(&__ctx->lock);
2068                 INIT_LIST_HEAD(&__ctx->rq_list);
2069                 __ctx->queue = q;
2070                 blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
2071                 blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
2072
2073                 /* If the cpu isn't online, it is mapped to the first hctx */
2074                 if (!cpu_online(i))
2075                         continue;
2076
2077                 hctx = blk_mq_map_queue(q, i);
2078
2079                 /*
2080                  * Set local node, IFF we have more than one hw queue. If
2081                  * not, we remain on the home node of the device
2082                  */
2083                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2084                         hctx->numa_node = local_memory_node(cpu_to_node(i));
2085         }
2086 }
2087
2088 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2089 {
2090         int ret = 0;
2091
2092         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2093                                         set->queue_depth, set->reserved_tags);
2094         if (!set->tags[hctx_idx])
2095                 return false;
2096
2097         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2098                                 set->queue_depth);
2099         if (!ret)
2100                 return true;
2101
2102         blk_mq_free_rq_map(set->tags[hctx_idx]);
2103         set->tags[hctx_idx] = NULL;
2104         return false;
2105 }
2106
2107 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2108                                          unsigned int hctx_idx)
2109 {
2110         if (set->tags[hctx_idx]) {
2111                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2112                 blk_mq_free_rq_map(set->tags[hctx_idx]);
2113                 set->tags[hctx_idx] = NULL;
2114         }
2115 }
2116
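     /*
      * (Re)build the ctx -> hctx mapping for all online CPUs, allocating
      * tag maps for hardware queues that gained their first mapping and
      * releasing them for hardware queues that no longer have any software
      * queues mapped.
      */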
2117 static void blk_mq_map_swqueue(struct request_queue *q,
2118                                const struct cpumask *online_mask)
2119 {
2120         unsigned int i, hctx_idx;
2121         struct blk_mq_hw_ctx *hctx;
2122         struct blk_mq_ctx *ctx;
2123         struct blk_mq_tag_set *set = q->tag_set;
2124
2125         /*
2126          * Avoid others reading an incomplete hctx->cpumask through sysfs.
2127          */
2128         mutex_lock(&q->sysfs_lock);
2129
2130         queue_for_each_hw_ctx(q, hctx, i) {
2131                 cpumask_clear(hctx->cpumask);
2132                 hctx->nr_ctx = 0;
2133         }
2134
2135         /*
2136          * Map software to hardware queues
2137          */
2138         for_each_possible_cpu(i) {
2139                 /* If the cpu isn't online, it is mapped to the first hctx */
2140                 if (!cpumask_test_cpu(i, online_mask))
2141                         continue;
2142
2143                 hctx_idx = q->mq_map[i];
2144                 /* unmapped hw queue can be remapped after CPU topo changed */
2145                 if (!set->tags[hctx_idx] &&
2146                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2147                         /*
2148                          * If tag initialization fails for some hctx,
2149                          * that hctx won't be brought online.  In this
2150                          * case, remap the current ctx to hctx[0], which
2151                          * is guaranteed to always have tags allocated.
2152                          */
2153                         q->mq_map[i] = 0;
2154                 }
2155
2156                 ctx = per_cpu_ptr(q->queue_ctx, i);
2157                 hctx = blk_mq_map_queue(q, i);
2158
2159                 cpumask_set_cpu(i, hctx->cpumask);
2160                 ctx->index_hw = hctx->nr_ctx;
2161                 hctx->ctxs[hctx->nr_ctx++] = ctx;
2162         }
2163
2164         mutex_unlock(&q->sysfs_lock);
2165
2166         queue_for_each_hw_ctx(q, hctx, i) {
2167                 /*
2168                  * If no software queues are mapped to this hardware queue,
2169                  * disable it and free the request entries.
2170                  */
2171                 if (!hctx->nr_ctx) {
2172                         /* Never unmap queue 0.  We need it as a
2173                          * fallback in case allocation fails during
2174                          * a remap.
2175                          */
2176                         if (i && set->tags[i])
2177                                 blk_mq_free_map_and_requests(set, i);
2178
2179                         hctx->tags = NULL;
2180                         continue;
2181                 }
2182
2183                 hctx->tags = set->tags[i];
2184                 WARN_ON(!hctx->tags);
2185
2186                 /*
2187                  * Set the map size to the number of mapped software queues.
2188                  * This is more accurate and more efficient than looping
2189                  * over all possibly mapped software queues.
2190                  */
2191                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2192
2193                 /*
2194                  * Initialize batch roundrobin counts
2195                  */
2196                 hctx->next_cpu = cpumask_first(hctx->cpumask);
2197                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2198         }
2199 }
2200
2201 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2202 {
2203         struct blk_mq_hw_ctx *hctx;
2204         int i;
2205
2206         queue_for_each_hw_ctx(q, hctx, i) {
2207                 if (shared)
2208                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2209                 else
2210                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2211         }
2212 }
2213
2214 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
2215 {
2216         struct request_queue *q;
2217
2218         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2219                 blk_mq_freeze_queue(q);
2220                 queue_set_hctx_shared(q, shared);
2221                 blk_mq_unfreeze_queue(q);
2222         }
2223 }
2224
2225 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2226 {
2227         struct blk_mq_tag_set *set = q->tag_set;
2228
2229         mutex_lock(&set->tag_list_lock);
2230         list_del_init(&q->tag_set_list);
2231         if (list_is_singular(&set->tag_list)) {
2232                 /* just transitioned to unshared */
2233                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2234                 /* update existing queue */
2235                 blk_mq_update_tag_set_depth(set, false);
2236         }
2237         mutex_unlock(&set->tag_list_lock);
2238 }
2239
2240 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2241                                      struct request_queue *q)
2242 {
2243         q->tag_set = set;
2244
2245         mutex_lock(&set->tag_list_lock);
2246
2247         /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2248         if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2249                 set->flags |= BLK_MQ_F_TAG_SHARED;
2250                 /* update existing queue */
2251                 blk_mq_update_tag_set_depth(set, true);
2252         }
2253         if (set->flags & BLK_MQ_F_TAG_SHARED)
2254                 queue_set_hctx_shared(q, true);
2255         list_add_tail(&q->tag_set_list, &set->tag_list);
2256
2257         mutex_unlock(&set->tag_list_lock);
2258 }
2259
2260 /*
2261  * This is the actual release handler for mq, but we call it from the
2262  * request queue's release handler to avoid use-after-free. It is a
2263  * headache because q->mq_kobj shouldn't have been introduced, but we
2264  * can't group the ctx/hctx kobjects without it.
2265  */
2266 void blk_mq_release(struct request_queue *q)
2267 {
2268         struct blk_mq_hw_ctx *hctx;
2269         unsigned int i;
2270
2271         /* hctx kobj stays in hctx */
2272         queue_for_each_hw_ctx(q, hctx, i) {
2273                 if (!hctx)
2274                         continue;
2275                 kobject_put(&hctx->kobj);
2276         }
2277
2278         q->mq_map = NULL;
2279
2280         kfree(q->queue_hw_ctx);
2281
2282         /*
2283          * release .mq_kobj and sw queue's kobject now because
2284          * both share lifetime with request queue.
2285          */
2286         blk_mq_sysfs_deinit(q);
2287
2288         free_percpu(q->queue_ctx);
2289 }
2290
2291 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2292 {
2293         struct request_queue *uninit_q, *q;
2294
2295         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2296         if (!uninit_q)
2297                 return ERR_PTR(-ENOMEM);
2298
2299         q = blk_mq_init_allocated_queue(set, uninit_q);
2300         if (IS_ERR(q))
2301                 blk_cleanup_queue(uninit_q);
2302
2303         return q;
2304 }
2305 EXPORT_SYMBOL(blk_mq_init_queue);
2306
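     /*
      * Allocate and initialize any hardware queues that don't exist yet,
      * and tear down the ones beyond the (possibly reduced)
      * set->nr_hw_queues. Used both at queue init time and when the
      * hardware queue count changes.
      */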
2307 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2308                                                 struct request_queue *q)
2309 {
2310         int i, j;
2311         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2312
2313         blk_mq_sysfs_unregister(q);
2314         for (i = 0; i < set->nr_hw_queues; i++) {
2315                 int node;
2316
2317                 if (hctxs[i])
2318                         continue;
2319
2320                 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2321                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2322                                         GFP_KERNEL, node);
2323                 if (!hctxs[i])
2324                         break;
2325
2326                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2327                                                 node)) {
2328                         kfree(hctxs[i]);
2329                         hctxs[i] = NULL;
2330                         break;
2331                 }
2332
2333                 atomic_set(&hctxs[i]->nr_active, 0);
2334                 hctxs[i]->numa_node = node;
2335                 hctxs[i]->queue_num = i;
2336
2337                 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2338                         free_cpumask_var(hctxs[i]->cpumask);
2339                         kfree(hctxs[i]);
2340                         hctxs[i] = NULL;
2341                         break;
2342                 }
2343                 blk_mq_hctx_kobj_init(hctxs[i]);
2344         }
2345         for (j = i; j < q->nr_hw_queues; j++) {
2346                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2347
2348                 if (hctx) {
2349                         if (hctx->tags)
2350                                 blk_mq_free_map_and_requests(set, j);
2351                         blk_mq_exit_hctx(q, set, hctx, j);
2352                         kobject_put(&hctx->kobj);
2353                         hctxs[j] = NULL;
2354
2355                 }
2356         }
2357         q->nr_hw_queues = i;
2358         blk_mq_sysfs_register(q);
2359 }
2360
2361 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2362                                                   struct request_queue *q)
2363 {
2364         /* mark the queue as mq asap */
2365         q->mq_ops = set->ops;
2366
2367         q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2368         if (!q->queue_ctx)
2369                 goto err_exit;
2370
2371         /* init q->mq_kobj and sw queues' kobjects */
2372         blk_mq_sysfs_init(q);
2373
2374         q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2375                                                 GFP_KERNEL, set->numa_node);
2376         if (!q->queue_hw_ctx)
2377                 goto err_percpu;
2378
2379         q->mq_map = set->mq_map;
2380
2381         blk_mq_realloc_hw_ctxs(set, q);
2382         if (!q->nr_hw_queues)
2383                 goto err_hctxs;
2384
2385         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2386         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2387
2388         q->nr_queues = nr_cpu_ids;
2389
2390         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2391
2392         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2393                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2394
2395         q->sg_reserved_size = INT_MAX;
2396
2397         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2398         INIT_LIST_HEAD(&q->requeue_list);
2399         spin_lock_init(&q->requeue_lock);
2400
2401         if (q->nr_hw_queues > 1)
2402                 blk_queue_make_request(q, blk_mq_make_request);
2403         else
2404                 blk_queue_make_request(q, blk_sq_make_request);
2405
2406         /*
2407          * Do this after blk_queue_make_request() overrides it...
2408          */
2409         q->nr_requests = set->queue_depth;
2410
2411         /*
2412          * Default to classic polling
2413          */
2414         q->poll_nsec = -1;
2415
2416         if (set->ops->complete)
2417                 blk_queue_softirq_done(q, set->ops->complete);
2418
2419         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2420
2421         get_online_cpus();
2422         mutex_lock(&all_q_mutex);
2423
2424         list_add_tail(&q->all_q_node, &all_q_list);
2425         blk_mq_add_queue_tag_set(set, q);
2426         blk_mq_map_swqueue(q, cpu_online_mask);
2427
2428         mutex_unlock(&all_q_mutex);
2429         put_online_cpus();
2430
2431         if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2432                 int ret;
2433
2434                 ret = blk_mq_sched_init(q);
2435                 if (ret)
2436                         return ERR_PTR(ret);
2437         }
2438
2439         return q;
2440
2441 err_hctxs:
2442         kfree(q->queue_hw_ctx);
2443 err_percpu:
2444         free_percpu(q->queue_ctx);
2445 err_exit:
2446         q->mq_ops = NULL;
2447         return ERR_PTR(-ENOMEM);
2448 }
2449 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2450
2451 void blk_mq_free_queue(struct request_queue *q)
2452 {
2453         struct blk_mq_tag_set   *set = q->tag_set;
2454
2455         mutex_lock(&all_q_mutex);
2456         list_del_init(&q->all_q_node);
2457         mutex_unlock(&all_q_mutex);
2458
2459         wbt_exit(q);
2460
2461         blk_mq_del_queue_tag_set(q);
2462
2463         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2464 }
2465
2466 /* Basically redo blk_mq_init_queue with queue frozen */
2467 static void blk_mq_queue_reinit(struct request_queue *q,
2468                                 const struct cpumask *online_mask)
2469 {
2470         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2471
2472         blk_mq_sysfs_unregister(q);
2473
2474         /*
2475          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2476          * we should change hctx numa_node according to the new topology (this
2477          * involves freeing and re-allocating memory, is it worth doing?)
2478          */
2479
2480         blk_mq_map_swqueue(q, online_mask);
2481
2482         blk_mq_sysfs_register(q);
2483 }
2484
2485 /*
2486  * New online cpumask which is going to be set in this hotplug event.
2487  * Declare this cpumask as global, as cpu-hotplug operations are invoked
2488  * one-by-one and dynamically allocating it could result in a failure.
2489  */
2490 static struct cpumask cpuhp_online_new;
2491
2492 static void blk_mq_queue_reinit_work(void)
2493 {
2494         struct request_queue *q;
2495
2496         mutex_lock(&all_q_mutex);
2497         /*
2498          * We need to freeze and reinit all existing queues.  Freezing
2499          * involves synchronous wait for an RCU grace period and doing it
2500          * one by one may take a long time.  Start freezing all queues in
2501          * one swoop and then wait for the completions so that freezing can
2502          * take place in parallel.
2503          */
2504         list_for_each_entry(q, &all_q_list, all_q_node)
2505                 blk_mq_freeze_queue_start(q);
2506         list_for_each_entry(q, &all_q_list, all_q_node)
2507                 blk_mq_freeze_queue_wait(q);
2508
2509         list_for_each_entry(q, &all_q_list, all_q_node)
2510                 blk_mq_queue_reinit(q, &cpuhp_online_new);
2511
2512         list_for_each_entry(q, &all_q_list, all_q_node)
2513                 blk_mq_unfreeze_queue(q);
2514
2515         mutex_unlock(&all_q_mutex);
2516 }
2517
2518 static int blk_mq_queue_reinit_dead(unsigned int cpu)
2519 {
2520         cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2521         blk_mq_queue_reinit_work();
2522         return 0;
2523 }
2524
2525 /*
2526  * Before a hot-added CPU starts handling requests, new mappings must be
2527  * established.  Otherwise, requests in the hw queue might never be
2528  * dispatched.
2529  *
2530  * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2531  * for CPU0, and ctx1 for CPU1).
2532  *
2533  * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list,
2534  * which sets bit0 in the pending bitmap as ctx1->index_hw is still zero.
2535  *
2536  * Then, while running the hw queue, blk_mq_flush_busy_ctxs() finds bit0 set
2537  * in the pending bitmap and tries to retrieve requests in
2538  * hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] is a pointer to ctx0, so the
2539  * request in ctx1->rq_list is ignored.
2540  */
2541 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2542 {
2543         cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2544         cpumask_set_cpu(cpu, &cpuhp_online_new);
2545         blk_mq_queue_reinit_work();
2546         return 0;
2547 }
2548
2549 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2550 {
2551         int i;
2552
2553         for (i = 0; i < set->nr_hw_queues; i++)
2554                 if (!__blk_mq_alloc_rq_map(set, i))
2555                         goto out_unwind;
2556
2557         return 0;
2558
2559 out_unwind:
2560         while (--i >= 0)
2561                 blk_mq_free_rq_map(set->tags[i]);
2562
2563         return -ENOMEM;
2564 }
2565
2566 /*
2567  * Allocate the request maps associated with this tag_set. Note that this
2568  * may reduce the depth asked for, if memory is tight. set->queue_depth
2569  * will be updated to reflect the allocated depth.
2570  */
2571 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2572 {
2573         unsigned int depth;
2574         int err;
2575
2576         depth = set->queue_depth;
2577         do {
2578                 err = __blk_mq_alloc_rq_maps(set);
2579                 if (!err)
2580                         break;
2581
2582                 set->queue_depth >>= 1;
2583                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2584                         err = -ENOMEM;
2585                         break;
2586                 }
2587         } while (set->queue_depth);
2588
2589         if (!set->queue_depth || err) {
2590                 pr_err("blk-mq: failed to allocate request map\n");
2591                 return -ENOMEM;
2592         }
2593
2594         if (depth != set->queue_depth)
2595                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2596                                                 depth, set->queue_depth);
2597
2598         return 0;
2599 }
2600
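     /* Prefer the driver's own queue mapping, if it provides one. */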
2601 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2602 {
2603         if (set->ops->map_queues)
2604                 return set->ops->map_queues(set);
2605         else
2606                 return blk_mq_map_queues(set);
2607 }
2608
2609 /*
2610  * Alloc a tag set to be associated with one or more request queues.
2611  * May fail with EINVAL for various error conditions. May adjust the
2612  * requested depth down, if it is too large. In that case, the adjusted
2613  * value will be stored in set->queue_depth.
2614  */
2615 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2616 {
2617         int ret;
2618
2619         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2620
2621         if (!set->nr_hw_queues)
2622                 return -EINVAL;
2623         if (!set->queue_depth)
2624                 return -EINVAL;
2625         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2626                 return -EINVAL;
2627
2628         if (!set->ops->queue_rq)
2629                 return -EINVAL;
2630
2631         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2632                 pr_info("blk-mq: reduced tag depth to %u\n",
2633                         BLK_MQ_MAX_DEPTH);
2634                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2635         }
2636
2637         /*
2638          * If a crashdump is active, then we are potentially in a very
2639          * memory constrained environment. Limit us to 1 queue and
2640          * 64 tags to prevent using too much memory.
2641          */
2642         if (is_kdump_kernel()) {
2643                 set->nr_hw_queues = 1;
2644                 set->queue_depth = min(64U, set->queue_depth);
2645         }
2646         /*
2647          * There is no use for more h/w queues than cpus.
2648          */
2649         if (set->nr_hw_queues > nr_cpu_ids)
2650                 set->nr_hw_queues = nr_cpu_ids;
2651
2652         set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2653                                  GFP_KERNEL, set->numa_node);
2654         if (!set->tags)
2655                 return -ENOMEM;
2656
2657         ret = -ENOMEM;
2658         set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2659                         GFP_KERNEL, set->numa_node);
2660         if (!set->mq_map)
2661                 goto out_free_tags;
2662
2663         ret = blk_mq_update_queue_map(set);
2664         if (ret)
2665                 goto out_free_mq_map;
2666
2667         ret = blk_mq_alloc_rq_maps(set);
2668         if (ret)
2669                 goto out_free_mq_map;
2670
2671         mutex_init(&set->tag_list_lock);
2672         INIT_LIST_HEAD(&set->tag_list);
2673
2674         return 0;
2675
2676 out_free_mq_map:
2677         kfree(set->mq_map);
2678         set->mq_map = NULL;
2679 out_free_tags:
2680         kfree(set->tags);
2681         set->tags = NULL;
2682         return ret;
2683 }
2684 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
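     /*
      * Typical driver usage of blk_mq_alloc_tag_set() together with
      * blk_mq_init_queue() (a minimal sketch; the ops structure, depth and
      * cmd_size below are hypothetical, not taken from this file):
      *
      *	static struct blk_mq_tag_set my_tag_set = {
      *		.ops		= &my_mq_ops,
      *		.nr_hw_queues	= 1,
      *		.queue_depth	= 64,
      *		.numa_node	= NUMA_NO_NODE,
      *		.cmd_size	= sizeof(struct my_cmd),
      *		.flags		= BLK_MQ_F_SHOULD_MERGE,
      *	};
      *
      *	ret = blk_mq_alloc_tag_set(&my_tag_set);
      *	if (ret)
      *		return ret;
      *	q = blk_mq_init_queue(&my_tag_set);
      *	if (IS_ERR(q)) {
      *		blk_mq_free_tag_set(&my_tag_set);
      *		return PTR_ERR(q);
      *	}
      */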
2685
2686 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2687 {
2688         int i;
2689
2690         for (i = 0; i < nr_cpu_ids; i++)
2691                 blk_mq_free_map_and_requests(set, i);
2692
2693         kfree(set->mq_map);
2694         set->mq_map = NULL;
2695
2696         kfree(set->tags);
2697         set->tags = NULL;
2698 }
2699 EXPORT_SYMBOL(blk_mq_free_tag_set);
2700
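     /*
      * Adjust the depth of a live queue. With an I/O scheduler attached
      * this resizes the scheduler tags; without one, the hardware tag maps
      * themselves are resized, capped at the tag set's queue depth.
      */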
2701 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2702 {
2703         struct blk_mq_tag_set *set = q->tag_set;
2704         struct blk_mq_hw_ctx *hctx;
2705         int i, ret;
2706
2707         if (!set)
2708                 return -EINVAL;
2709
2710         blk_mq_freeze_queue(q);
2711         blk_mq_quiesce_queue(q);
2712
2713         ret = 0;
2714         queue_for_each_hw_ctx(q, hctx, i) {
2715                 if (!hctx->tags)
2716                         continue;
2717                 /*
2718                  * If we're using an MQ scheduler, just update the scheduler
2719                  * queue depth. This is similar to what the old code would do.
2720                  */
2721                 if (!hctx->sched_tags) {
2722                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2723                                                         min(nr, set->queue_depth),
2724                                                         false);
2725                 } else {
2726                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2727                                                         nr, true);
2728                 }
2729                 if (ret)
2730                         break;
2731         }
2732
2733         if (!ret)
2734                 q->nr_requests = nr;
2735
2736         blk_mq_unfreeze_queue(q);
2737         blk_mq_start_stopped_hw_queues(q, true);
2738
2739         return ret;
2740 }
2741
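     /*
      * Change the number of hardware queues for every queue sharing this
      * tag set. All queues in the set are frozen while the hardware
      * contexts and mappings are rebuilt.
      */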
2742 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2743 {
2744         struct request_queue *q;
2745
2746         if (nr_hw_queues > nr_cpu_ids)
2747                 nr_hw_queues = nr_cpu_ids;
2748         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2749                 return;
2750
2751         list_for_each_entry(q, &set->tag_list, tag_set_list)
2752                 blk_mq_freeze_queue(q);
2753
2754         set->nr_hw_queues = nr_hw_queues;
2755         blk_mq_update_queue_map(set);
2756         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2757                 blk_mq_realloc_hw_ctxs(set, q);
2758
2759                 /*
2760                  * Manually set the make_request_fn as blk_queue_make_request
2761                  * resets a lot of the queue settings.
2762                  */
2763                 if (q->nr_hw_queues > 1)
2764                         q->make_request_fn = blk_mq_make_request;
2765                 else
2766                         q->make_request_fn = blk_sq_make_request;
2767
2768                 blk_mq_queue_reinit(q, cpu_online_mask);
2769         }
2770
2771         list_for_each_entry(q, &set->tag_list, tag_set_list)
2772                 blk_mq_unfreeze_queue(q);
2773 }
2774 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2775
2776 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2777                                        struct blk_mq_hw_ctx *hctx,
2778                                        struct request *rq)
2779 {
2780         struct blk_rq_stat stat[2];
2781         unsigned long ret = 0;
2782
2783         /*
2784          * If stats collection isn't on, don't sleep but turn it on for
2785          * future users
2786          */
2787         if (!blk_stat_enable(q))
2788                 return 0;
2789
2790         /*
2791          * We don't have to do this once per IO; this should be optimized
2792          * to just use the current window of stats until it changes.
2793          */
2794         memset(&stat, 0, sizeof(stat));
2795         blk_hctx_stat_get(hctx, stat);
2796
2797         /*
2798          * As an optimistic guess, use half of the mean service time
2799          * for this type of request. We can (and should) make this smarter.
2800          * For instance, if the completion latencies are tight, we can
2801          * get closer than just half the mean. This is especially
2802          * important on devices where the completion latencies are longer
2803          * than ~10 usec.
2804          */
2805         if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2806                 ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2807         else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2808                 ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2809
2810         return ret;
2811 }
2812
2813 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2814                                      struct blk_mq_hw_ctx *hctx,
2815                                      struct request *rq)
2816 {
2817         struct hrtimer_sleeper hs;
2818         enum hrtimer_mode mode;
2819         unsigned int nsecs;
2820         ktime_t kt;
2821
2822         if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2823                 return false;
2824
2825         /*
2826          * poll_nsec can be:
2827          *
2828          * -1:  don't ever hybrid sleep
2829          *  0:  use half of prev avg
2830          * >0:  use this specific value
2831          */
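             /* q->poll_nsec is tunable via the io_poll_delay sysfs attribute. */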
2832         if (q->poll_nsec == -1)
2833                 return false;
2834         else if (q->poll_nsec > 0)
2835                 nsecs = q->poll_nsec;
2836         else
2837                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2838
2839         if (!nsecs)
2840                 return false;
2841
2842         set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2843
2844         /*
2845          * This will be replaced with the stats tracking code, using
2846          * 'avg_completion_time / 2' as the pre-sleep target.
2847          */
2848         kt = nsecs;
2849
2850         mode = HRTIMER_MODE_REL;
2851         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2852         hrtimer_set_expires(&hs.timer, kt);
2853
2854         hrtimer_init_sleeper(&hs, current);
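             /*
              * Sleep until the timer fires, the request completes, or a
              * signal arrives. After the first pass the timer mode switches
              * to absolute, so a restarted sleep keeps the original deadline.
              */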
2855         do {
2856                 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2857                         break;
2858                 set_current_state(TASK_UNINTERRUPTIBLE);
2859                 hrtimer_start_expires(&hs.timer, mode);
2860                 if (hs.task)
2861                         io_schedule();
2862                 hrtimer_cancel(&hs.timer);
2863                 mode = HRTIMER_MODE_ABS;
2864         } while (hs.task && !signal_pending(current));
2865
2866         __set_current_state(TASK_RUNNING);
2867         destroy_hrtimer_on_stack(&hs.timer);
2868         return true;
2869 }
2870
2871 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2872 {
2873         struct request_queue *q = hctx->queue;
2874         long state;
2875
2876         /*
2877          * If we sleep, have the caller restart the poll loop to reset
2878          * the state. Like for the other success return cases, the
2879          * caller is responsible for checking if the IO completed. If
2880          * the IO isn't complete, we'll get called again and will go
2881          * straight to the busy poll loop.
2882          */
2883         if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2884                 return true;
2885
2886         hctx->poll_considered++;
2887
2888         state = current->state;
2889         while (!need_resched()) {
2890                 int ret;
2891
2892                 hctx->poll_invoked++;
2893
2894                 ret = q->mq_ops->poll(hctx, rq->tag);
2895                 if (ret > 0) {
2896                         hctx->poll_success++;
2897                         set_current_state(TASK_RUNNING);
2898                         return true;
2899                 }
2900
2901                 if (signal_pending_state(state, current))
2902                         set_current_state(TASK_RUNNING);
2903
2904                 if (current->state == TASK_RUNNING)
2905                         return true;
2906                 if (ret < 0)
2907                         break;
2908                 cpu_relax();
2909         }
2910
2911         return false;
2912 }
2913
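     /*
      * Poll for completion of the request matching @cookie, after flushing
      * the task's plug so the request is actually issued. Returns false if
      * polling isn't enabled for this queue or the request can't be found.
      */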
2914 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2915 {
2916         struct blk_mq_hw_ctx *hctx;
2917         struct blk_plug *plug;
2918         struct request *rq;
2919
2920         if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2921             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2922                 return false;
2923
2924         plug = current->plug;
2925         if (plug)
2926                 blk_flush_plug_list(plug, false);
2927
2928         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2929         if (!blk_qc_t_is_internal(cookie))
2930                 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2931         else {
2932                 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2933                 /*
2934                  * With scheduling, if the request has completed, we'll
2935                  * get a NULL return here, as we clear the sched tag when
2936                  * that happens. The request still remains valid, like always,
2937                  * so we should be safe with just the NULL check.
2938                  */
2939                 if (!rq)
2940                         return false;
2941         }
2942
2943         return __blk_mq_poll(hctx, rq);
2944 }
2945 EXPORT_SYMBOL_GPL(blk_mq_poll);
2946
2947 void blk_mq_disable_hotplug(void)
2948 {
2949         mutex_lock(&all_q_mutex);
2950 }
2951
2952 void blk_mq_enable_hotplug(void)
2953 {
2954         mutex_unlock(&all_q_mutex);
2955 }
2956
2957 static int __init blk_mq_init(void)
2958 {
2959         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2960                                 blk_mq_hctx_notify_dead);
2961
2962         cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2963                                   blk_mq_queue_reinit_prepare,
2964                                   blk_mq_queue_reinit_dead);
2965         return 0;
2966 }
2967 subsys_initcall(blk_mq_init);