block/blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23
24 #include <trace/events/block.h>
25
26 #include <linux/blk-mq.h>
27 #include "blk.h"
28 #include "blk-mq.h"
29 #include "blk-mq-tag.h"
30
31 static DEFINE_MUTEX(all_q_mutex);
32 static LIST_HEAD(all_q_list);
33
34 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
35
36 /*
37  * Check if any of the ctx's have pending work in this hardware queue
38  */
39 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
40 {
41         unsigned int i;
42
43         for (i = 0; i < hctx->ctx_map.map_size; i++)
44                 if (hctx->ctx_map.map[i].word)
45                         return true;
46
47         return false;
48 }
49
50 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
51                                               struct blk_mq_ctx *ctx)
52 {
53         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
54 }
55
56 #define CTX_TO_BIT(hctx, ctx)   \
57         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
58
59 /*
60  * Mark this ctx as having pending work in this hardware queue
61  */
62 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
63                                      struct blk_mq_ctx *ctx)
64 {
65         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
66
67         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
68                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
69 }
70
71 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
72                                       struct blk_mq_ctx *ctx)
73 {
74         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
75
76         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
77 }
78
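/*
 * Take a reference on the queue's usage counter so it can't be frozen
 * away under us. If the queue is currently frozen, wait for the freeze
 * to end; if the queue is dying, fail with -ENODEV.
 */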
79 static int blk_mq_queue_enter(struct request_queue *q)
80 {
81         while (true) {
82                 int ret;
83
84                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
85                         return 0;
86
87                 ret = wait_event_interruptible(q->mq_freeze_wq,
88                                 !q->mq_freeze_depth || blk_queue_dying(q));
89                 if (blk_queue_dying(q))
90                         return -ENODEV;
91                 if (ret)
92                         return ret;
93         }
94 }
95
96 static void blk_mq_queue_exit(struct request_queue *q)
97 {
98         percpu_ref_put(&q->mq_usage_counter);
99 }
100
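/*
 * Release callback for the usage counter: runs once the counter hits
 * zero after percpu_ref_kill(), waking anyone blocked in
 * blk_mq_freeze_queue().
 */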
101 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
102 {
103         struct request_queue *q =
104                 container_of(ref, struct request_queue, mq_usage_counter);
105
106         wake_up_all(&q->mq_freeze_wq);
107 }
108
109 /*
110  * Guarantee no request is in use, so we can change any data structure of
111  * the queue afterward.
112  */
113 void blk_mq_freeze_queue(struct request_queue *q)
114 {
115         bool freeze;
116
117         spin_lock_irq(q->queue_lock);
118         freeze = !q->mq_freeze_depth++;
119         spin_unlock_irq(q->queue_lock);
120
121         if (freeze) {
122                 percpu_ref_kill(&q->mq_usage_counter);
123                 blk_mq_run_queues(q, false);
124         }
125         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
126 }
127
128 static void blk_mq_unfreeze_queue(struct request_queue *q)
129 {
130         bool wake;
131
132         spin_lock_irq(q->queue_lock);
133         wake = !--q->mq_freeze_depth;
134         WARN_ON_ONCE(q->mq_freeze_depth < 0);
135         spin_unlock_irq(q->queue_lock);
136         if (wake) {
137                 percpu_ref_reinit(&q->mq_usage_counter);
138                 wake_up_all(&q->mq_freeze_wq);
139         }
140 }
141
142 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
143 {
144         return blk_mq_has_free_tags(hctx->tags);
145 }
146 EXPORT_SYMBOL(blk_mq_can_queue);
147
148 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
149                                struct request *rq, unsigned int rw_flags)
150 {
151         if (blk_queue_io_stat(q))
152                 rw_flags |= REQ_IO_STAT;
153
154         INIT_LIST_HEAD(&rq->queuelist);
155         /* csd/requeue_work/fifo_time is initialized before use */
156         rq->q = q;
157         rq->mq_ctx = ctx;
158         rq->cmd_flags |= rw_flags;
159         /* do not touch the atomic flags, they need atomic ops against the timer */
160         rq->cpu = -1;
161         INIT_HLIST_NODE(&rq->hash);
162         RB_CLEAR_NODE(&rq->rb_node);
163         rq->rq_disk = NULL;
164         rq->part = NULL;
165         rq->start_time = jiffies;
166 #ifdef CONFIG_BLK_CGROUP
167         rq->rl = NULL;
168         set_start_time_ns(rq);
169         rq->io_start_time_ns = 0;
170 #endif
171         rq->nr_phys_segments = 0;
172 #if defined(CONFIG_BLK_DEV_INTEGRITY)
173         rq->nr_integrity_segments = 0;
174 #endif
175         rq->special = NULL;
176         /* tag was already set */
177         rq->errors = 0;
178
179         rq->cmd = rq->__cmd;
180
181         rq->extra_len = 0;
182         rq->sense_len = 0;
183         rq->resid_len = 0;
184         rq->sense = NULL;
185
186         INIT_LIST_HEAD(&rq->timeout_list);
187         rq->timeout = 0;
188
189         rq->end_io = NULL;
190         rq->end_io_data = NULL;
191         rq->next_rq = NULL;
192
193         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
194 }
195
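/*
 * Grab a tag for this hardware queue and initialize the request backing
 * it. Returns NULL if no tag could be obtained; the caller decides
 * whether to retry.
 */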
196 static struct request *
197 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
198 {
199         struct request *rq;
200         unsigned int tag;
201
202         tag = blk_mq_get_tag(data);
203         if (tag != BLK_MQ_TAG_FAIL) {
204                 rq = data->hctx->tags->rqs[tag];
205
206                 rq->cmd_flags = 0;
207                 if (blk_mq_tag_busy(data->hctx)) {
208                         rq->cmd_flags = REQ_MQ_INFLIGHT;
209                         atomic_inc(&data->hctx->nr_active);
210                 }
211
212                 rq->tag = tag;
213                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
214                 return rq;
215         }
216
217         return NULL;
218 }
219
220 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
221                 bool reserved)
222 {
223         struct blk_mq_ctx *ctx;
224         struct blk_mq_hw_ctx *hctx;
225         struct request *rq;
226         struct blk_mq_alloc_data alloc_data;
227
228         if (blk_mq_queue_enter(q))
229                 return NULL;
230
231         ctx = blk_mq_get_ctx(q);
232         hctx = q->mq_ops->map_queue(q, ctx->cpu);
233         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
234                         reserved, ctx, hctx);
235
236         rq = __blk_mq_alloc_request(&alloc_data, rw);
237         if (!rq && (gfp & __GFP_WAIT)) {
238                 __blk_mq_run_hw_queue(hctx);
239                 blk_mq_put_ctx(ctx);
240
241                 ctx = blk_mq_get_ctx(q);
242                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
243                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
244                                 hctx);
245                 rq = __blk_mq_alloc_request(&alloc_data, rw);
246                 ctx = alloc_data.ctx;
247         }
248         blk_mq_put_ctx(ctx);
249         return rq;
250 }
251 EXPORT_SYMBOL(blk_mq_alloc_request);
252
253 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
254                                   struct blk_mq_ctx *ctx, struct request *rq)
255 {
256         const int tag = rq->tag;
257         struct request_queue *q = rq->q;
258
259         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
260                 atomic_dec(&hctx->nr_active);
261
262         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
263         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
264         blk_mq_queue_exit(q);
265 }
266
267 void blk_mq_free_request(struct request *rq)
268 {
269         struct blk_mq_ctx *ctx = rq->mq_ctx;
270         struct blk_mq_hw_ctx *hctx;
271         struct request_queue *q = rq->q;
272
273         ctx->rq_completed[rq_is_sync(rq)]++;
274
275         hctx = q->mq_ops->map_queue(q, ctx->cpu);
276         __blk_mq_free_request(hctx, ctx, rq);
277 }
278
279 /*
280  * Clone all relevant state from a request that has been put on hold in
281  * the flush state machine into the preallocated flush request that hangs
282  * off the request queue.
283  *
284  * To the driver the flush request should be invisible, which is why we are
285  * impersonating the original request here.
286  */
287 void blk_mq_clone_flush_request(struct request *flush_rq,
288                 struct request *orig_rq)
289 {
290         struct blk_mq_hw_ctx *hctx =
291                 orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
292
293         flush_rq->mq_ctx = orig_rq->mq_ctx;
294         flush_rq->tag = orig_rq->tag;
295         memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
296                 hctx->cmd_size);
297 }
298
299 inline void __blk_mq_end_io(struct request *rq, int error)
300 {
301         blk_account_io_done(rq);
302
303         if (rq->end_io) {
304                 rq->end_io(rq, error);
305         } else {
306                 if (unlikely(blk_bidi_rq(rq)))
307                         blk_mq_free_request(rq->next_rq);
308                 blk_mq_free_request(rq);
309         }
310 }
311 EXPORT_SYMBOL(__blk_mq_end_io);
312
313 void blk_mq_end_io(struct request *rq, int error)
314 {
315         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
316                 BUG();
317         __blk_mq_end_io(rq, error);
318 }
319 EXPORT_SYMBOL(blk_mq_end_io);
320
321 static void __blk_mq_complete_request_remote(void *data)
322 {
323         struct request *rq = data;
324
325         rq->q->softirq_done_fn(rq);
326 }
327
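/*
 * Complete the request on the CPU it was submitted from, using an IPI if
 * that CPU doesn't share a cache with the current one. Otherwise (or if
 * QUEUE_FLAG_SAME_COMP isn't set) complete locally.
 */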
328 static void blk_mq_ipi_complete_request(struct request *rq)
329 {
330         struct blk_mq_ctx *ctx = rq->mq_ctx;
331         bool shared = false;
332         int cpu;
333
334         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
335                 rq->q->softirq_done_fn(rq);
336                 return;
337         }
338
339         cpu = get_cpu();
340         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
341                 shared = cpus_share_cache(cpu, ctx->cpu);
342
343         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
344                 rq->csd.func = __blk_mq_complete_request_remote;
345                 rq->csd.info = rq;
346                 rq->csd.flags = 0;
347                 smp_call_function_single_async(ctx->cpu, &rq->csd);
348         } else {
349                 rq->q->softirq_done_fn(rq);
350         }
351         put_cpu();
352 }
353
354 void __blk_mq_complete_request(struct request *rq)
355 {
356         struct request_queue *q = rq->q;
357
358         if (!q->softirq_done_fn)
359                 blk_mq_end_io(rq, rq->errors);
360         else
361                 blk_mq_ipi_complete_request(rq);
362 }
363
364 /**
365  * blk_mq_complete_request - end I/O on a request
366  * @rq:         the request being processed
367  *
368  * Description:
369  *      Ends all I/O on a request. It does not handle partial completions.
370  *      The actual completion happens out-of-order, through an IPI handler.
371  **/
372 void blk_mq_complete_request(struct request *rq)
373 {
374         struct request_queue *q = rq->q;
375
376         if (unlikely(blk_should_fake_timeout(q)))
377                 return;
378         if (!blk_mark_rq_complete(rq))
379                 __blk_mq_complete_request(rq);
380 }
381 EXPORT_SYMBOL(blk_mq_complete_request);
382
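/*
 * Get a request ready for dispatch to the driver: arm the timeout timer,
 * mark it STARTED (clearing any stale COMPLETE bit), account for a DMA
 * drain segment if needed, and flag the last request in a series with
 * REQ_END.
 */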
383 static void blk_mq_start_request(struct request *rq, bool last)
384 {
385         struct request_queue *q = rq->q;
386
387         trace_block_rq_issue(q, rq);
388
389         rq->resid_len = blk_rq_bytes(rq);
390         if (unlikely(blk_bidi_rq(rq)))
391                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
392
393         blk_add_timer(rq);
394
395         /*
396          * Mark us as started and clear complete. Complete might have been
397          * set if requeue raced with timeout, which then marked it as
398          * complete. So be sure to clear complete again when we start
399          * the request, otherwise we'll ignore the completion event.
400          */
401         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
402                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
403         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
404                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
405
406         if (q->dma_drain_size && blk_rq_bytes(rq)) {
407                 /*
408                  * Make sure space for the drain appears.  We know we can do
409                  * this because max_hw_segments has been adjusted to be one
410                  * fewer than the device can handle.
411                  */
412                 rq->nr_phys_segments++;
413         }
414
415         /*
416          * Flag the last request in the series so that drivers know when IO
417          * should be kicked off, if they don't do it on a per-request basis.
418          *
419          * Note: the flag isn't the only condition on which drivers should kick
420          * off IO. If the drive is busy, the last request might not have the bit set.
421          */
422         if (last)
423                 rq->cmd_flags |= REQ_END;
424 }
425
426 static void __blk_mq_requeue_request(struct request *rq)
427 {
428         struct request_queue *q = rq->q;
429
430         trace_block_rq_requeue(q, rq);
431         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
432
433         rq->cmd_flags &= ~REQ_END;
434
435         if (q->dma_drain_size && blk_rq_bytes(rq))
436                 rq->nr_phys_segments--;
437 }
438
439 void blk_mq_requeue_request(struct request *rq)
440 {
441         __blk_mq_requeue_request(rq);
442         blk_clear_rq_complete(rq);
443
444         BUG_ON(blk_queued_rq(rq));
445         blk_mq_add_to_requeue_list(rq, true);
446 }
447 EXPORT_SYMBOL(blk_mq_requeue_request);
448
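/*
 * Work handler for the requeue list: requests flagged REQ_SOFTBARRIER are
 * reinserted at the head of their queue, everything else at the tail,
 * after which the hardware queues are kicked.
 */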
449 static void blk_mq_requeue_work(struct work_struct *work)
450 {
451         struct request_queue *q =
452                 container_of(work, struct request_queue, requeue_work);
453         LIST_HEAD(rq_list);
454         struct request *rq, *next;
455         unsigned long flags;
456
457         spin_lock_irqsave(&q->requeue_lock, flags);
458         list_splice_init(&q->requeue_list, &rq_list);
459         spin_unlock_irqrestore(&q->requeue_lock, flags);
460
461         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
462                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
463                         continue;
464
465                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
466                 list_del_init(&rq->queuelist);
467                 blk_mq_insert_request(rq, true, false, false);
468         }
469
470         while (!list_empty(&rq_list)) {
471                 rq = list_entry(rq_list.next, struct request, queuelist);
472                 list_del_init(&rq->queuelist);
473                 blk_mq_insert_request(rq, false, false, false);
474         }
475
476         blk_mq_run_queues(q, false);
477 }
478
479 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
480 {
481         struct request_queue *q = rq->q;
482         unsigned long flags;
483
484         /*
485          * We abuse this flag that is otherwise used by the I/O scheduler to
486          * request head insertion from the workqueue.
487          */
488         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
489
490         spin_lock_irqsave(&q->requeue_lock, flags);
491         if (at_head) {
492                 rq->cmd_flags |= REQ_SOFTBARRIER;
493                 list_add(&rq->queuelist, &q->requeue_list);
494         } else {
495                 list_add_tail(&rq->queuelist, &q->requeue_list);
496         }
497         spin_unlock_irqrestore(&q->requeue_lock, flags);
498 }
499 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
500
501 void blk_mq_kick_requeue_list(struct request_queue *q)
502 {
503         kblockd_schedule_work(&q->requeue_work);
504 }
505 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
506
507 static inline bool is_flush_request(struct request *rq, unsigned int tag)
508 {
509         return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
510                         rq->q->flush_rq->tag == tag);
511 }
512
513 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
514 {
515         struct request *rq = tags->rqs[tag];
516
517         if (!is_flush_request(rq, tag))
518                 return rq;
519
520         return rq->q->flush_rq;
521 }
522 EXPORT_SYMBOL(blk_mq_tag_to_rq);
523
524 struct blk_mq_timeout_data {
525         struct blk_mq_hw_ctx *hctx;
526         unsigned long *next;
527         unsigned int *next_set;
528 };
529
530 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
531 {
532         struct blk_mq_timeout_data *data = __data;
533         struct blk_mq_hw_ctx *hctx = data->hctx;
534         unsigned int tag;
535
536         /* It may not be in flight yet (this is where
537          * the REQ_ATOM_STARTED flag comes in). The requests are
538          * statically allocated, so we know it's always safe to access the
539          * memory associated with a bit offset into ->rqs[].
540          */
541         tag = 0;
542         do {
543                 struct request *rq;
544
545                 tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
546                 if (tag >= hctx->tags->nr_tags)
547                         break;
548
549                 rq = blk_mq_tag_to_rq(hctx->tags, tag++);
550                 if (rq->q != hctx->queue)
551                         continue;
552                 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
553                         continue;
554
555                 blk_rq_check_expired(rq, data->next, data->next_set);
556         } while (1);
557 }
558
559 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
560                                         unsigned long *next,
561                                         unsigned int *next_set)
562 {
563         struct blk_mq_timeout_data data = {
564                 .hctx           = hctx,
565                 .next           = next,
566                 .next_set       = next_set,
567         };
568
569         /*
570          * Ask the tagging code to iterate busy requests, so we can
571          * check them for timeout.
572          */
573         blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
574 }
575
576 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
577 {
578         struct request_queue *q = rq->q;
579
580         /*
581          * We know that complete is set at this point. If STARTED isn't set
582          * anymore, then the request isn't active and the "timeout" should
583          * just be ignored. This can happen due to the bitflag ordering.
584          * Timeout first checks if STARTED is set, and if it is, assumes
585          * the request is active. But if we race with completion, then
586          * both flags will get cleared. So check here again, and ignore
587          * a timeout event with a request that isn't active.
588          */
589         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
590                 return BLK_EH_NOT_HANDLED;
591
592         if (!q->mq_ops->timeout)
593                 return BLK_EH_RESET_TIMER;
594
595         return q->mq_ops->timeout(rq);
596 }
597
598 static void blk_mq_rq_timer(unsigned long data)
599 {
600         struct request_queue *q = (struct request_queue *) data;
601         struct blk_mq_hw_ctx *hctx;
602         unsigned long next = 0;
603         int i, next_set = 0;
604
605         queue_for_each_hw_ctx(q, hctx, i) {
606                 /*
607                  * If no software queues are currently mapped to this
608                  * hardware queue, there's nothing to check
609                  */
610                 if (!hctx->nr_ctx || !hctx->tags)
611                         continue;
612
613                 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
614         }
615
616         if (next_set) {
617                 next = blk_rq_timeout(round_jiffies_up(next));
618                 mod_timer(&q->timeout, next);
619         } else {
620                 queue_for_each_hw_ctx(q, hctx, i)
621                         blk_mq_tag_idle(hctx);
622         }
623 }
624
625 /*
626  * Reverse check our software queue for entries that we could potentially
627  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
628  * too much time checking for merges.
629  */
630 static bool blk_mq_attempt_merge(struct request_queue *q,
631                                  struct blk_mq_ctx *ctx, struct bio *bio)
632 {
633         struct request *rq;
634         int checked = 8;
635
636         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
637                 int el_ret;
638
639                 if (!checked--)
640                         break;
641
642                 if (!blk_rq_merge_ok(rq, bio))
643                         continue;
644
645                 el_ret = blk_try_merge(rq, bio);
646                 if (el_ret == ELEVATOR_BACK_MERGE) {
647                         if (bio_attempt_back_merge(q, rq, bio)) {
648                                 ctx->rq_merged++;
649                                 return true;
650                         }
651                         break;
652                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
653                         if (bio_attempt_front_merge(q, rq, bio)) {
654                                 ctx->rq_merged++;
655                                 return true;
656                         }
657                         break;
658                 }
659         }
660
661         return false;
662 }
663
664 /*
665  * Process software queues that have been marked busy, splicing them
666  * to the for-dispatch list.
667  */
668 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
669 {
670         struct blk_mq_ctx *ctx;
671         int i;
672
673         for (i = 0; i < hctx->ctx_map.map_size; i++) {
674                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
675                 unsigned int off, bit;
676
677                 if (!bm->word)
678                         continue;
679
680                 bit = 0;
681                 off = i * hctx->ctx_map.bits_per_word;
682                 do {
683                         bit = find_next_bit(&bm->word, bm->depth, bit);
684                         if (bit >= bm->depth)
685                                 break;
686
687                         ctx = hctx->ctxs[bit + off];
688                         clear_bit(bit, &bm->word);
689                         spin_lock(&ctx->lock);
690                         list_splice_tail_init(&ctx->rq_list, list);
691                         spin_unlock(&ctx->lock);
692
693                         bit++;
694                 } while (1);
695         }
696 }
697
698 /*
699  * Run this hardware queue, pulling any software queues mapped to it in.
700  * Note that this function currently has various problems around ordering
701  * of IO. In particular, we'd like FIFO behaviour on handling existing
702  * items on the hctx->dispatch list. Ignore that for now.
703  */
704 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
705 {
706         struct request_queue *q = hctx->queue;
707         struct request *rq;
708         LIST_HEAD(rq_list);
709         int queued;
710
711         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
712
713         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
714                 return;
715
716         hctx->run++;
717
718         /*
719          * Touch any software queue that has pending entries.
720          */
721         flush_busy_ctxs(hctx, &rq_list);
722
723         /*
724          * If we have previous entries on our dispatch list, grab them
725          * and stuff them at the front for more fair dispatch.
726          */
727         if (!list_empty_careful(&hctx->dispatch)) {
728                 spin_lock(&hctx->lock);
729                 if (!list_empty(&hctx->dispatch))
730                         list_splice_init(&hctx->dispatch, &rq_list);
731                 spin_unlock(&hctx->lock);
732         }
733
734         /*
735          * Now process all the entries, sending them to the driver.
736          */
737         queued = 0;
738         while (!list_empty(&rq_list)) {
739                 int ret;
740
741                 rq = list_first_entry(&rq_list, struct request, queuelist);
742                 list_del_init(&rq->queuelist);
743
744                 blk_mq_start_request(rq, list_empty(&rq_list));
745
746                 ret = q->mq_ops->queue_rq(hctx, rq);
747                 switch (ret) {
748                 case BLK_MQ_RQ_QUEUE_OK:
749                         queued++;
750                         continue;
751                 case BLK_MQ_RQ_QUEUE_BUSY:
752                         list_add(&rq->queuelist, &rq_list);
753                         __blk_mq_requeue_request(rq);
754                         break;
755                 default:
756                         pr_err("blk-mq: bad return on queue: %d\n", ret);
757                 case BLK_MQ_RQ_QUEUE_ERROR:
758                         rq->errors = -EIO;
759                         blk_mq_end_io(rq, rq->errors);
760                         break;
761                 }
762
763                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
764                         break;
765         }
766
767         if (!queued)
768                 hctx->dispatched[0]++;
769         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
770                 hctx->dispatched[ilog2(queued) + 1]++;
771
772         /*
773          * Any items that need requeuing? Stuff them into hctx->dispatch,
774          * that is where we will continue on next queue run.
775          */
776         if (!list_empty(&rq_list)) {
777                 spin_lock(&hctx->lock);
778                 list_splice(&rq_list, &hctx->dispatch);
779                 spin_unlock(&hctx->lock);
780         }
781 }
782
783 /*
784  * It'd be great if the workqueue API had a way to pass
785  * in a mask and had some smarts for more clever placement.
786  * For now we just round-robin here, switching for every
787  * BLK_MQ_CPU_WORK_BATCH queued items.
788  */
789 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
790 {
791         int cpu = hctx->next_cpu;
792
793         if (--hctx->next_cpu_batch <= 0) {
794                 int next_cpu;
795
796                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
797                 if (next_cpu >= nr_cpu_ids)
798                         next_cpu = cpumask_first(hctx->cpumask);
799
800                 hctx->next_cpu = next_cpu;
801                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
802         }
803
804         return cpu;
805 }
806
807 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
808 {
809         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
810                 return;
811
812         if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
813                 __blk_mq_run_hw_queue(hctx);
814         else if (hctx->queue->nr_hw_queues == 1)
815                 kblockd_schedule_delayed_work(&hctx->run_work, 0);
816         else {
817                 unsigned int cpu;
818
819                 cpu = blk_mq_hctx_next_cpu(hctx);
820                 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
821         }
822 }
823
824 void blk_mq_run_queues(struct request_queue *q, bool async)
825 {
826         struct blk_mq_hw_ctx *hctx;
827         int i;
828
829         queue_for_each_hw_ctx(q, hctx, i) {
830                 if ((!blk_mq_hctx_has_pending(hctx) &&
831                     list_empty_careful(&hctx->dispatch)) ||
832                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
833                         continue;
834
835                 preempt_disable();
836                 blk_mq_run_hw_queue(hctx, async);
837                 preempt_enable();
838         }
839 }
840 EXPORT_SYMBOL(blk_mq_run_queues);
841
842 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
843 {
844         cancel_delayed_work(&hctx->run_work);
845         cancel_delayed_work(&hctx->delay_work);
846         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
847 }
848 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
849
850 void blk_mq_stop_hw_queues(struct request_queue *q)
851 {
852         struct blk_mq_hw_ctx *hctx;
853         int i;
854
855         queue_for_each_hw_ctx(q, hctx, i)
856                 blk_mq_stop_hw_queue(hctx);
857 }
858 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
859
860 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
861 {
862         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
863
864         preempt_disable();
865         blk_mq_run_hw_queue(hctx, false);
866         preempt_enable();
867 }
868 EXPORT_SYMBOL(blk_mq_start_hw_queue);
869
870 void blk_mq_start_hw_queues(struct request_queue *q)
871 {
872         struct blk_mq_hw_ctx *hctx;
873         int i;
874
875         queue_for_each_hw_ctx(q, hctx, i)
876                 blk_mq_start_hw_queue(hctx);
877 }
878 EXPORT_SYMBOL(blk_mq_start_hw_queues);
879
880
881 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
882 {
883         struct blk_mq_hw_ctx *hctx;
884         int i;
885
886         queue_for_each_hw_ctx(q, hctx, i) {
887                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
888                         continue;
889
890                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
891                 preempt_disable();
892                 blk_mq_run_hw_queue(hctx, async);
893                 preempt_enable();
894         }
895 }
896 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
897
898 static void blk_mq_run_work_fn(struct work_struct *work)
899 {
900         struct blk_mq_hw_ctx *hctx;
901
902         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
903
904         __blk_mq_run_hw_queue(hctx);
905 }
906
907 static void blk_mq_delay_work_fn(struct work_struct *work)
908 {
909         struct blk_mq_hw_ctx *hctx;
910
911         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
912
913         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
914                 __blk_mq_run_hw_queue(hctx);
915 }
916
917 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
918 {
919         unsigned long tmo = msecs_to_jiffies(msecs);
920
921         if (hctx->queue->nr_hw_queues == 1)
922                 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
923         else {
924                 unsigned int cpu;
925
926                 cpu = blk_mq_hctx_next_cpu(hctx);
927                 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
928         }
929 }
930 EXPORT_SYMBOL(blk_mq_delay_queue);
931
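/*
 * Add a request to a software queue and mark that queue as having pending
 * work in its hardware queue. Caller must hold ctx->lock.
 */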
932 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
933                                     struct request *rq, bool at_head)
934 {
935         struct blk_mq_ctx *ctx = rq->mq_ctx;
936
937         trace_block_rq_insert(hctx->queue, rq);
938
939         if (at_head)
940                 list_add(&rq->queuelist, &ctx->rq_list);
941         else
942                 list_add_tail(&rq->queuelist, &ctx->rq_list);
943
944         blk_mq_hctx_mark_pending(hctx, ctx);
945 }
946
947 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
948                 bool async)
949 {
950         struct request_queue *q = rq->q;
951         struct blk_mq_hw_ctx *hctx;
952         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
953
954         current_ctx = blk_mq_get_ctx(q);
955         if (!cpu_online(ctx->cpu))
956                 rq->mq_ctx = ctx = current_ctx;
957
958         hctx = q->mq_ops->map_queue(q, ctx->cpu);
959
960         if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
961             !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
962                 blk_insert_flush(rq);
963         } else {
964                 spin_lock(&ctx->lock);
965                 __blk_mq_insert_request(hctx, rq, at_head);
966                 spin_unlock(&ctx->lock);
967         }
968
969         if (run_queue)
970                 blk_mq_run_hw_queue(hctx, async);
971
972         blk_mq_put_ctx(current_ctx);
973 }
974
975 static void blk_mq_insert_requests(struct request_queue *q,
976                                      struct blk_mq_ctx *ctx,
977                                      struct list_head *list,
978                                      int depth,
979                                      bool from_schedule)
980
981 {
982         struct blk_mq_hw_ctx *hctx;
983         struct blk_mq_ctx *current_ctx;
984
985         trace_block_unplug(q, depth, !from_schedule);
986
987         current_ctx = blk_mq_get_ctx(q);
988
989         if (!cpu_online(ctx->cpu))
990                 ctx = current_ctx;
991         hctx = q->mq_ops->map_queue(q, ctx->cpu);
992
993         /*
994          * preemption doesn't flush the plug list, so it's possible ctx->cpu is
995          * offline now
996          */
997         spin_lock(&ctx->lock);
998         while (!list_empty(list)) {
999                 struct request *rq;
1000
1001                 rq = list_first_entry(list, struct request, queuelist);
1002                 list_del_init(&rq->queuelist);
1003                 rq->mq_ctx = ctx;
1004                 __blk_mq_insert_request(hctx, rq, false);
1005         }
1006         spin_unlock(&ctx->lock);
1007
1008         blk_mq_run_hw_queue(hctx, from_schedule);
1009         blk_mq_put_ctx(current_ctx);
1010 }
1011
1012 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1013 {
1014         struct request *rqa = container_of(a, struct request, queuelist);
1015         struct request *rqb = container_of(b, struct request, queuelist);
1016
1017         return !(rqa->mq_ctx < rqb->mq_ctx ||
1018                  (rqa->mq_ctx == rqb->mq_ctx &&
1019                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1020 }
1021
1022 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1023 {
1024         struct blk_mq_ctx *this_ctx;
1025         struct request_queue *this_q;
1026         struct request *rq;
1027         LIST_HEAD(list);
1028         LIST_HEAD(ctx_list);
1029         unsigned int depth;
1030
1031         list_splice_init(&plug->mq_list, &list);
1032
1033         list_sort(NULL, &list, plug_ctx_cmp);
1034
1035         this_q = NULL;
1036         this_ctx = NULL;
1037         depth = 0;
1038
1039         while (!list_empty(&list)) {
1040                 rq = list_entry_rq(list.next);
1041                 list_del_init(&rq->queuelist);
1042                 BUG_ON(!rq->q);
1043                 if (rq->mq_ctx != this_ctx) {
1044                         if (this_ctx) {
1045                                 blk_mq_insert_requests(this_q, this_ctx,
1046                                                         &ctx_list, depth,
1047                                                         from_schedule);
1048                         }
1049
1050                         this_ctx = rq->mq_ctx;
1051                         this_q = rq->q;
1052                         depth = 0;
1053                 }
1054
1055                 depth++;
1056                 list_add_tail(&rq->queuelist, &ctx_list);
1057         }
1058
1059         /*
1060          * If 'this_ctx' is set, we know we have entries to complete
1061          * on 'ctx_list'. Do those.
1062          */
1063         if (this_ctx) {
1064                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1065                                        from_schedule);
1066         }
1067 }
1068
1069 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1070 {
1071         init_request_from_bio(rq, bio);
1072
1073         if (blk_do_io_stat(rq))
1074                 blk_account_io_start(rq, 1);
1075 }
1076
1077 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1078 {
1079         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1080                 !blk_queue_nomerges(hctx->queue);
1081 }
1082
1083 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1084                                          struct blk_mq_ctx *ctx,
1085                                          struct request *rq, struct bio *bio)
1086 {
1087         if (!hctx_allow_merges(hctx)) {
1088                 blk_mq_bio_to_request(rq, bio);
1089                 spin_lock(&ctx->lock);
1090 insert_rq:
1091                 __blk_mq_insert_request(hctx, rq, false);
1092                 spin_unlock(&ctx->lock);
1093                 return false;
1094         } else {
1095                 struct request_queue *q = hctx->queue;
1096
1097                 spin_lock(&ctx->lock);
1098                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1099                         blk_mq_bio_to_request(rq, bio);
1100                         goto insert_rq;
1101                 }
1102
1103                 spin_unlock(&ctx->lock);
1104                 __blk_mq_free_request(hctx, ctx, rq);
1105                 return true;
1106         }
1107 }
1108
1109 struct blk_map_ctx {
1110         struct blk_mq_hw_ctx *hctx;
1111         struct blk_mq_ctx *ctx;
1112 };
1113
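/*
 * Map a bio to a software/hardware queue pair and allocate a request for
 * it. If no tag is immediately available, run the hardware queue and
 * retry the allocation with __GFP_WAIT.
 */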
1114 static struct request *blk_mq_map_request(struct request_queue *q,
1115                                           struct bio *bio,
1116                                           struct blk_map_ctx *data)
1117 {
1118         struct blk_mq_hw_ctx *hctx;
1119         struct blk_mq_ctx *ctx;
1120         struct request *rq;
1121         int rw = bio_data_dir(bio);
1122         struct blk_mq_alloc_data alloc_data;
1123
1124         if (unlikely(blk_mq_queue_enter(q))) {
1125                 bio_endio(bio, -EIO);
1126                 return NULL;
1127         }
1128
1129         ctx = blk_mq_get_ctx(q);
1130         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1131
1132         if (rw_is_sync(bio->bi_rw))
1133                 rw |= REQ_SYNC;
1134
1135         trace_block_getrq(q, bio, rw);
1136         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1137                         hctx);
1138         rq = __blk_mq_alloc_request(&alloc_data, rw);
1139         if (unlikely(!rq)) {
1140                 __blk_mq_run_hw_queue(hctx);
1141                 blk_mq_put_ctx(ctx);
1142                 trace_block_sleeprq(q, bio, rw);
1143
1144                 ctx = blk_mq_get_ctx(q);
1145                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1146                 blk_mq_set_alloc_data(&alloc_data, q,
1147                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1148                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1149                 ctx = alloc_data.ctx;
1150                 hctx = alloc_data.hctx;
1151         }
1152
1153         hctx->queued++;
1154         data->hctx = hctx;
1155         data->ctx = ctx;
1156         return rq;
1157 }
1158
1159 /*
1160  * Multiple hardware queue variant. This will not use per-process plugs,
1161  * but will attempt to bypass the hctx queueing if we can go straight to
1162  * hardware for SYNC IO.
1163  */
1164 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1165 {
1166         const int is_sync = rw_is_sync(bio->bi_rw);
1167         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1168         struct blk_map_ctx data;
1169         struct request *rq;
1170
1171         blk_queue_bounce(q, &bio);
1172
1173         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1174                 bio_endio(bio, -EIO);
1175                 return;
1176         }
1177
1178         rq = blk_mq_map_request(q, bio, &data);
1179         if (unlikely(!rq))
1180                 return;
1181
1182         if (unlikely(is_flush_fua)) {
1183                 blk_mq_bio_to_request(rq, bio);
1184                 blk_insert_flush(rq);
1185                 goto run_queue;
1186         }
1187
1188         if (is_sync) {
1189                 int ret;
1190
1191                 blk_mq_bio_to_request(rq, bio);
1192                 blk_mq_start_request(rq, true);
1193
1194                 /*
1195                  * If the driver accepted the request, we are done. On error,
1196                  * kill it. Anything else (busy) is added back to our list, as
1197                  * we previously would have done.
1198                  */
1199                 ret = q->mq_ops->queue_rq(data.hctx, rq);
1200                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1201                         goto done;
1202                 else {
1203                         __blk_mq_requeue_request(rq);
1204
1205                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1206                                 rq->errors = -EIO;
1207                                 blk_mq_end_io(rq, rq->errors);
1208                                 goto done;
1209                         }
1210                 }
1211         }
1212
1213         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1214                 /*
1215                  * For a SYNC request, send it to the hardware immediately. For
1216                  * an ASYNC request, just ensure that we run it later on. The
1217                  * latter allows for merging opportunities and more efficient
1218                  * dispatching.
1219                  */
1220 run_queue:
1221                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1222         }
1223 done:
1224         blk_mq_put_ctx(data.ctx);
1225 }
1226
1227 /*
1228  * Single hardware queue variant. This will attempt to use any per-process
1229  * plug for merging and IO deferral.
1230  */
1231 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1232 {
1233         const int is_sync = rw_is_sync(bio->bi_rw);
1234         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1235         unsigned int use_plug, request_count = 0;
1236         struct blk_map_ctx data;
1237         struct request *rq;
1238
1239         /*
1240          * Use the task plug only for async, non-flush IO. Sync IO goes
1241          * straight to the hardware queue.
1242          */
1243         use_plug = !is_flush_fua && !is_sync;
1244
1245         blk_queue_bounce(q, &bio);
1246
1247         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1248                 bio_endio(bio, -EIO);
1249                 return;
1250         }
1251
1252         if (use_plug && !blk_queue_nomerges(q) &&
1253             blk_attempt_plug_merge(q, bio, &request_count))
1254                 return;
1255
1256         rq = blk_mq_map_request(q, bio, &data);
1257         if (unlikely(!rq))
1258                 return;
1259
1260         if (unlikely(is_flush_fua)) {
1261                 blk_mq_bio_to_request(rq, bio);
1262                 blk_insert_flush(rq);
1263                 goto run_queue;
1264         }
1265
1266         /*
1267          * If a task plug exists, use it: since it is completely lockless we
1268          * can stash requests there until the task is either done submitting
1269          * or scheduled away.
1270          */
1271         if (use_plug) {
1272                 struct blk_plug *plug = current->plug;
1273
1274                 if (plug) {
1275                         blk_mq_bio_to_request(rq, bio);
1276                         if (list_empty(&plug->mq_list))
1277                                 trace_block_plug(q);
1278                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1279                                 blk_flush_plug_list(plug, false);
1280                                 trace_block_plug(q);
1281                         }
1282                         list_add_tail(&rq->queuelist, &plug->mq_list);
1283                         blk_mq_put_ctx(data.ctx);
1284                         return;
1285                 }
1286         }
1287
1288         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1289                 /*
1290                  * For a SYNC request, send it to the hardware immediately. For
1291                  * an ASYNC request, just ensure that we run it later on. The
1292                  * latter allows for merging opportunities and more efficient
1293                  * dispatching.
1294                  */
1295 run_queue:
1296                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1297         }
1298
1299         blk_mq_put_ctx(data.ctx);
1300 }
1301
1302 /*
1303  * Default mapping to a software queue, since we use one per CPU.
1304  */
1305 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1306 {
1307         return q->queue_hw_ctx[q->mq_map[cpu]];
1308 }
1309 EXPORT_SYMBOL(blk_mq_map_queue);
1310
1311 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1312                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1313 {
1314         struct page *page;
1315
1316         if (tags->rqs && set->ops->exit_request) {
1317                 int i;
1318
1319                 for (i = 0; i < tags->nr_tags; i++) {
1320                         if (!tags->rqs[i])
1321                                 continue;
1322                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1323                                                 hctx_idx, i);
1324                 }
1325         }
1326
1327         while (!list_empty(&tags->page_list)) {
1328                 page = list_first_entry(&tags->page_list, struct page, lru);
1329                 list_del_init(&page->lru);
1330                 __free_pages(page, page->private);
1331         }
1332
1333         kfree(tags->rqs);
1334
1335         blk_mq_free_tags(tags);
1336 }
1337
1338 static size_t order_to_size(unsigned int order)
1339 {
1340         return (size_t)PAGE_SIZE << order;
1341 }
1342
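/*
 * Allocate the tag map and the request structures (including driver
 * payload) for one hardware queue, carving the requests out of as few
 * page allocations as possible.
 */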
1343 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1344                 unsigned int hctx_idx)
1345 {
1346         struct blk_mq_tags *tags;
1347         unsigned int i, j, entries_per_page, max_order = 4;
1348         size_t rq_size, left;
1349
1350         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1351                                 set->numa_node);
1352         if (!tags)
1353                 return NULL;
1354
1355         INIT_LIST_HEAD(&tags->page_list);
1356
1357         tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1358                                         GFP_KERNEL, set->numa_node);
1359         if (!tags->rqs) {
1360                 blk_mq_free_tags(tags);
1361                 return NULL;
1362         }
1363
1364         /*
1365          * rq_size is the size of the request plus driver payload, rounded
1366          * to the cacheline size
1367          */
1368         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1369                                 cache_line_size());
1370         left = rq_size * set->queue_depth;
1371
1372         for (i = 0; i < set->queue_depth; ) {
1373                 int this_order = max_order;
1374                 struct page *page;
1375                 int to_do;
1376                 void *p;
1377
1378                 while (left < order_to_size(this_order - 1) && this_order)
1379                         this_order--;
1380
1381                 do {
1382                         page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1383                                                 this_order);
1384                         if (page)
1385                                 break;
1386                         if (!this_order--)
1387                                 break;
1388                         if (order_to_size(this_order) < rq_size)
1389                                 break;
1390                 } while (1);
1391
1392                 if (!page)
1393                         goto fail;
1394
1395                 page->private = this_order;
1396                 list_add_tail(&page->lru, &tags->page_list);
1397
1398                 p = page_address(page);
1399                 entries_per_page = order_to_size(this_order) / rq_size;
1400                 to_do = min(entries_per_page, set->queue_depth - i);
1401                 left -= to_do * rq_size;
1402                 for (j = 0; j < to_do; j++) {
1403                         tags->rqs[i] = p;
1404                         if (set->ops->init_request) {
1405                                 if (set->ops->init_request(set->driver_data,
1406                                                 tags->rqs[i], hctx_idx, i,
1407                                                 set->numa_node))
1408                                         goto fail;
1409                         }
1410
1411                         p += rq_size;
1412                         i++;
1413                 }
1414         }
1415
1416         return tags;
1417
1418 fail:
1419         pr_warn("%s: failed to allocate requests\n", __func__);
1420         blk_mq_free_rq_map(set, tags, hctx_idx);
1421         return NULL;
1422 }
1423
1424 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1425 {
1426         kfree(bitmap->map);
1427 }
1428
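/*
 * Allocate the per-hardware-queue bitmap tracking which software queues
 * have pending work. Only 8 bits are used per word, and each word lives
 * in its own cacheline-aligned blk_align_bitmap to limit contention.
 */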
1429 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1430 {
1431         unsigned int bpw = 8, total, num_maps, i;
1432
1433         bitmap->bits_per_word = bpw;
1434
1435         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1436         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1437                                         GFP_KERNEL, node);
1438         if (!bitmap->map)
1439                 return -ENOMEM;
1440
1441         bitmap->map_size = num_maps;
1442
1443         total = nr_cpu_ids;
1444         for (i = 0; i < num_maps; i++) {
1445                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1446                 total -= bitmap->map[i].depth;
1447         }
1448
1449         return 0;
1450 }
1451
1452 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1453 {
1454         struct request_queue *q = hctx->queue;
1455         struct blk_mq_ctx *ctx;
1456         LIST_HEAD(tmp);
1457
1458         /*
1459          * Move ctx entries to new CPU, if this one is going away.
1460          */
1461         ctx = __blk_mq_get_ctx(q, cpu);
1462
1463         spin_lock(&ctx->lock);
1464         if (!list_empty(&ctx->rq_list)) {
1465                 list_splice_init(&ctx->rq_list, &tmp);
1466                 blk_mq_hctx_clear_pending(hctx, ctx);
1467         }
1468         spin_unlock(&ctx->lock);
1469
1470         if (list_empty(&tmp))
1471                 return NOTIFY_OK;
1472
1473         ctx = blk_mq_get_ctx(q);
1474         spin_lock(&ctx->lock);
1475
1476         while (!list_empty(&tmp)) {
1477                 struct request *rq;
1478
1479                 rq = list_first_entry(&tmp, struct request, queuelist);
1480                 rq->mq_ctx = ctx;
1481                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1482         }
1483
1484         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1485         blk_mq_hctx_mark_pending(hctx, ctx);
1486
1487         spin_unlock(&ctx->lock);
1488
1489         blk_mq_run_hw_queue(hctx, true);
1490         blk_mq_put_ctx(ctx);
1491         return NOTIFY_OK;
1492 }
1493
1494 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1495 {
1496         struct request_queue *q = hctx->queue;
1497         struct blk_mq_tag_set *set = q->tag_set;
1498
1499         if (set->tags[hctx->queue_num])
1500                 return NOTIFY_OK;
1501
1502         set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1503         if (!set->tags[hctx->queue_num])
1504                 return NOTIFY_STOP;
1505
1506         hctx->tags = set->tags[hctx->queue_num];
1507         return NOTIFY_OK;
1508 }
1509
1510 static int blk_mq_hctx_notify(void *data, unsigned long action,
1511                               unsigned int cpu)
1512 {
1513         struct blk_mq_hw_ctx *hctx = data;
1514
1515         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1516                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1517         else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1518                 return blk_mq_hctx_cpu_online(hctx, cpu);
1519
1520         return NOTIFY_OK;
1521 }
1522
1523 static void blk_mq_exit_hw_queues(struct request_queue *q,
1524                 struct blk_mq_tag_set *set, int nr_queue)
1525 {
1526         struct blk_mq_hw_ctx *hctx;
1527         unsigned int i;
1528
1529         queue_for_each_hw_ctx(q, hctx, i) {
1530                 if (i == nr_queue)
1531                         break;
1532
1533                 blk_mq_tag_idle(hctx);
1534
1535                 if (set->ops->exit_hctx)
1536                         set->ops->exit_hctx(hctx, i);
1537
1538                 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1539                 kfree(hctx->ctxs);
1540                 blk_mq_free_bitmap(&hctx->ctx_map);
1541         }
1542
1543 }
1544
1545 static void blk_mq_free_hw_queues(struct request_queue *q,
1546                 struct blk_mq_tag_set *set)
1547 {
1548         struct blk_mq_hw_ctx *hctx;
1549         unsigned int i;
1550
1551         queue_for_each_hw_ctx(q, hctx, i) {
1552                 free_cpumask_var(hctx->cpumask);
1553                 kfree(hctx);
1554         }
1555 }
1556
1557 static int blk_mq_init_hw_queues(struct request_queue *q,
1558                 struct blk_mq_tag_set *set)
1559 {
1560         struct blk_mq_hw_ctx *hctx;
1561         unsigned int i;
1562
1563         /*
1564          * Initialize hardware queues
1565          */
1566         queue_for_each_hw_ctx(q, hctx, i) {
1567                 int node;
1568
1569                 node = hctx->numa_node;
1570                 if (node == NUMA_NO_NODE)
1571                         node = hctx->numa_node = set->numa_node;
1572
1573                 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1574                 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1575                 spin_lock_init(&hctx->lock);
1576                 INIT_LIST_HEAD(&hctx->dispatch);
1577                 hctx->queue = q;
1578                 hctx->queue_num = i;
1579                 hctx->flags = set->flags;
1580                 hctx->cmd_size = set->cmd_size;
1581
1582                 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1583                                                 blk_mq_hctx_notify, hctx);
1584                 blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1585
1586                 hctx->tags = set->tags[i];
1587
1588                 /*
1589                  * Allocate space for all possible cpus to avoid allocation at
1590                  * runtime
1591                  */
1592                 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1593                                                 GFP_KERNEL, node);
1594                 if (!hctx->ctxs)
1595                         break;
1596
1597                 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1598                         break;
1599
1600                 hctx->nr_ctx = 0;
1601
1602                 if (set->ops->init_hctx &&
1603                     set->ops->init_hctx(hctx, set->driver_data, i))
1604                         break;
1605         }
1606
1607         if (i == q->nr_hw_queues)
1608                 return 0;
1609
1610         /*
1611          * Init failed
1612          */
1613         blk_mq_exit_hw_queues(q, set, i);
1614
1615         return 1;
1616 }
1617
1618 static void blk_mq_init_cpu_queues(struct request_queue *q,
1619                                    unsigned int nr_hw_queues)
1620 {
1621         unsigned int i;
1622
1623         for_each_possible_cpu(i) {
1624                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1625                 struct blk_mq_hw_ctx *hctx;
1626
1627                 memset(__ctx, 0, sizeof(*__ctx));
1628                 __ctx->cpu = i;
1629                 spin_lock_init(&__ctx->lock);
1630                 INIT_LIST_HEAD(&__ctx->rq_list);
1631                 __ctx->queue = q;
1632
1633                 /* If the cpu isn't online, the cpu is mapped to the first hctx */
1634                 if (!cpu_online(i))
1635                         continue;
1636
1637                 hctx = q->mq_ops->map_queue(q, i);
1638                 cpumask_set_cpu(i, hctx->cpumask);
1639                 hctx->nr_ctx++;
1640
1641                 /*
1642                  * Set local node, IFF we have more than one hw queue. If
1643                  * not, we remain on the home node of the device
1644                  */
1645                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1646                         hctx->numa_node = cpu_to_node(i);
1647         }
1648 }
1649
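/*
 * (Re)build the software-to-hardware queue mapping: clear the old per-hctx
 * state, assign each online CPU's ctx to the hctx returned by map_queue(),
 * and release the tag maps of hardware queues that end up with no ctxs.
 */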
1650 static void blk_mq_map_swqueue(struct request_queue *q)
1651 {
1652         unsigned int i;
1653         struct blk_mq_hw_ctx *hctx;
1654         struct blk_mq_ctx *ctx;
1655
1656         queue_for_each_hw_ctx(q, hctx, i) {
1657                 cpumask_clear(hctx->cpumask);
1658                 hctx->nr_ctx = 0;
1659         }
1660
1661         /*
1662          * Map software to hardware queues
1663          */
1664         queue_for_each_ctx(q, ctx, i) {
1665                 /* If the cpu isn't online, the cpu is mapped to the first hctx */
1666                 if (!cpu_online(i))
1667                         continue;
1668
1669                 hctx = q->mq_ops->map_queue(q, i);
1670                 cpumask_set_cpu(i, hctx->cpumask);
1671                 ctx->index_hw = hctx->nr_ctx;
1672                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1673         }
1674
1675         queue_for_each_hw_ctx(q, hctx, i) {
1676                 /*
1677                  * If no software queues are mapped to this hardware queue,
1678                  * disable it and free the request entries.
1679                  */
1680                 if (!hctx->nr_ctx) {
1681                         struct blk_mq_tag_set *set = q->tag_set;
1682
1683                         if (set->tags[i]) {
1684                                 blk_mq_free_rq_map(set, set->tags[i], i);
1685                                 set->tags[i] = NULL;
1686                                 hctx->tags = NULL;
1687                         }
1688                         continue;
1689                 }
1690
1691                 /*
1692                  * Initialize batch round-robin counts
1693                  */
1694                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1695                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1696         }
1697 }
1698
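/*
 * Propagate the "shared tags" state to every queue attached to this tag set.
 * A tag_list whose next and prev point at the same node holds at most one
 * queue, so BLK_MQ_F_TAG_SHARED only needs to be set once a second queue
 * joins the set.  Each queue is frozen while its hctx flags are updated.
 */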
1699 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1700 {
1701         struct blk_mq_hw_ctx *hctx;
1702         struct request_queue *q;
1703         bool shared;
1704         int i;
1705
1706         if (set->tag_list.next == set->tag_list.prev)
1707                 shared = false;
1708         else
1709                 shared = true;
1710
1711         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1712                 blk_mq_freeze_queue(q);
1713
1714                 queue_for_each_hw_ctx(q, hctx, i) {
1715                         if (shared)
1716                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1717                         else
1718                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1719                 }
1720                 blk_mq_unfreeze_queue(q);
1721         }
1722 }
1723
1724 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1725 {
1726         struct blk_mq_tag_set *set = q->tag_set;
1727
1728         mutex_lock(&set->tag_list_lock);
1729         list_del_init(&q->tag_set_list);
1730         blk_mq_update_tag_set_depth(set);
1731         mutex_unlock(&set->tag_list_lock);
1732 }
1733
1734 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1735                                      struct request_queue *q)
1736 {
1737         q->tag_set = set;
1738
1739         mutex_lock(&set->tag_list_lock);
1740         list_add_tail(&q->tag_set_list, &set->tag_list);
1741         blk_mq_update_tag_set_depth(set);
1742         mutex_unlock(&set->tag_list_lock);
1743 }
1744
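/*
 * Allocate and initialize a blk-mq request queue for the given tag set:
 * per-cpu software queues, hardware queue structures, the cpu-to-queue map
 * and the flush request are all set up here.  Returns the new queue, or an
 * ERR_PTR() value on failure.
 */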
1745 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1746 {
1747         struct blk_mq_hw_ctx **hctxs;
1748         struct blk_mq_ctx __percpu *ctx;
1749         struct request_queue *q;
1750         unsigned int *map;
1751         int i;
1752
1753         ctx = alloc_percpu(struct blk_mq_ctx);
1754         if (!ctx)
1755                 return ERR_PTR(-ENOMEM);
1756
1757         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1758                         set->numa_node);
1759
1760         if (!hctxs)
1761                 goto err_percpu;
1762
1763         map = blk_mq_make_queue_map(set);
1764         if (!map)
1765                 goto err_map;
1766
1767         for (i = 0; i < set->nr_hw_queues; i++) {
1768                 int node = blk_mq_hw_queue_to_node(map, i);
1769
1770                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1771                                         GFP_KERNEL, node);
1772                 if (!hctxs[i])
1773                         goto err_hctxs;
1774
1775                 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1776                         goto err_hctxs;
1777
1778                 atomic_set(&hctxs[i]->nr_active, 0);
1779                 hctxs[i]->numa_node = node;
1780                 hctxs[i]->queue_num = i;
1781         }
1782
1783         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1784         if (!q)
1785                 goto err_hctxs;
1786
1787         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release))
1788                 goto err_map;
1789
1790         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1791         blk_queue_rq_timeout(q, 30000);
1792
1793         q->nr_queues = nr_cpu_ids;
1794         q->nr_hw_queues = set->nr_hw_queues;
1795         q->mq_map = map;
1796
1797         q->queue_ctx = ctx;
1798         q->queue_hw_ctx = hctxs;
1799
1800         q->mq_ops = set->ops;
1801         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1802
1803         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1804                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1805
1806         q->sg_reserved_size = INT_MAX;
1807
1808         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1809         INIT_LIST_HEAD(&q->requeue_list);
1810         spin_lock_init(&q->requeue_lock);
1811
1812         if (q->nr_hw_queues > 1)
1813                 blk_queue_make_request(q, blk_mq_make_request);
1814         else
1815                 blk_queue_make_request(q, blk_sq_make_request);
1816
1817         blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1818         if (set->timeout)
1819                 blk_queue_rq_timeout(q, set->timeout);
1820
1821         /*
1822          * Do this after blk_queue_make_request() overrides it...
1823          */
1824         q->nr_requests = set->queue_depth;
1825
1826         if (set->ops->complete)
1827                 blk_queue_softirq_done(q, set->ops->complete);
1828
1829         blk_mq_init_flush(q);
1830         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1831
1832         q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1833                                 set->cmd_size, cache_line_size()),
1834                                 GFP_KERNEL);
1835         if (!q->flush_rq)
1836                 goto err_hw;
1837
1838         if (blk_mq_init_hw_queues(q, set))
1839                 goto err_flush_rq;
1840
1841         mutex_lock(&all_q_mutex);
1842         list_add_tail(&q->all_q_node, &all_q_list);
1843         mutex_unlock(&all_q_mutex);
1844
1845         blk_mq_add_queue_tag_set(set, q);
1846
1847         blk_mq_map_swqueue(q);
1848
1849         return q;
1850
1851 err_flush_rq:
1852         kfree(q->flush_rq);
1853 err_hw:
1854         blk_cleanup_queue(q);
1855 err_hctxs:
1856         kfree(map);
1857         for (i = 0; i < set->nr_hw_queues; i++) {
1858                 if (!hctxs[i])
1859                         break;
1860                 free_cpumask_var(hctxs[i]->cpumask);
1861                 kfree(hctxs[i]);
1862         }
1863 err_map:
1864         kfree(hctxs);
1865 err_percpu:
1866         free_percpu(ctx);
1867         return ERR_PTR(-ENOMEM);
1868 }
1869 EXPORT_SYMBOL(blk_mq_init_queue);
1870
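/*
 * Counterpart of blk_mq_init_queue(): detach the queue from its tag set and
 * release the hardware queues, software queues and cpu map allocated for it.
 */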
1871 void blk_mq_free_queue(struct request_queue *q)
1872 {
1873         struct blk_mq_tag_set   *set = q->tag_set;
1874
1875         blk_mq_del_queue_tag_set(q);
1876
1877         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1878         blk_mq_free_hw_queues(q, set);
1879
1880         percpu_ref_exit(&q->mq_usage_counter);
1881
1882         free_percpu(q->queue_ctx);
1883         kfree(q->queue_hw_ctx);
1884         kfree(q->mq_map);
1885
1886         q->queue_ctx = NULL;
1887         q->queue_hw_ctx = NULL;
1888         q->mq_map = NULL;
1889
1890         mutex_lock(&all_q_mutex);
1891         list_del_init(&q->all_q_node);
1892         mutex_unlock(&all_q_mutex);
1893 }
1894
1895 /* Basically redo blk_mq_init_queue() with the queue frozen */
1896 static void blk_mq_queue_reinit(struct request_queue *q)
1897 {
1898         blk_mq_freeze_queue(q);
1899
1900         blk_mq_sysfs_unregister(q);
1901
1902         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1903
1904         /*
1905          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1906          * we should change each hctx's numa_node according to the new topology
1907          * (this involves freeing and re-allocating memory; is it worth doing?)
1908          */
1909
1910         blk_mq_map_swqueue(q);
1911
1912         blk_mq_sysfs_register(q);
1913
1914         blk_mq_unfreeze_queue(q);
1915 }
1916
1917 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1918                                       unsigned long action, void *hcpu)
1919 {
1920         struct request_queue *q;
1921
1922         /*
1923          * Before the new mappings are established, a hot-added CPU might
1924          * already have started handling requests. This doesn't break anything,
1925          * as we map offline CPUs to the first hardware queue. We re-init the
1926          * queues below to get optimal settings.
1927          */
1928         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1929             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1930                 return NOTIFY_OK;
1931
1932         mutex_lock(&all_q_mutex);
1933         list_for_each_entry(q, &all_q_list, all_q_node)
1934                 blk_mq_queue_reinit(q);
1935         mutex_unlock(&all_q_mutex);
1936         return NOTIFY_OK;
1937 }
1938
1939 /*
1940  * Alloc a tag set to be associated with one or more request queues.
1941  * May fail with EINVAL for various error conditions. May adjust the
1942  * requested depth down, if it is too large. In that case, the adjusted
1943  * value will be stored in set->queue_depth.
1944  */
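/*
 * A minimal usage sketch (hypothetical; "my_dev", "my_mq_ops" and
 * "struct my_cmd" are illustrative driver names, not part of this file).
 * The ops must provide at least .queue_rq and .map_queue (typically
 * blk_mq_map_queue), and the tag set must stay allocated for as long as
 * any queue created from it exists, so drivers usually embed it in their
 * device structure:
 *
 *	my_dev->tag_set.ops		= &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues	= 1;
 *	my_dev->tag_set.queue_depth	= 64;
 *	my_dev->tag_set.numa_node	= NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size	= sizeof(struct my_cmd);
 *	my_dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *	my_dev->tag_set.driver_data	= my_dev;
 *
 *	ret = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (ret)
 *		return ret;
 *
 *	my_dev->queue = blk_mq_init_queue(&my_dev->tag_set);
 *	if (IS_ERR(my_dev->queue)) {
 *		blk_mq_free_tag_set(&my_dev->tag_set);
 *		return PTR_ERR(my_dev->queue);
 *	}
 */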
1945 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1946 {
1947         int i;
1948
1949         if (!set->nr_hw_queues)
1950                 return -EINVAL;
1951         if (!set->queue_depth)
1952                 return -EINVAL;
1953         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1954                 return -EINVAL;
1955
1956         if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
1957                 return -EINVAL;
1958
1959         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
1960                 pr_info("blk-mq: reduced tag depth to %u\n",
1961                         BLK_MQ_MAX_DEPTH);
1962                 set->queue_depth = BLK_MQ_MAX_DEPTH;
1963         }
1964
1965         set->tags = kmalloc_node(set->nr_hw_queues *
1966                                  sizeof(struct blk_mq_tags *),
1967                                  GFP_KERNEL, set->numa_node);
1968         if (!set->tags)
1969                 goto out;
1970
1971         for (i = 0; i < set->nr_hw_queues; i++) {
1972                 set->tags[i] = blk_mq_init_rq_map(set, i);
1973                 if (!set->tags[i])
1974                         goto out_unwind;
1975         }
1976
1977         mutex_init(&set->tag_list_lock);
1978         INIT_LIST_HEAD(&set->tag_list);
1979
1980         return 0;
1981
1982 out_unwind:
1983         while (--i >= 0)
1984                 blk_mq_free_rq_map(set, set->tags[i], i);
1985 out:
1986         return -ENOMEM;
1987 }
1988 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
1989
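/*
 * Counterpart of blk_mq_alloc_tag_set(): free the per-hardware-queue rq maps
 * and the tags array.  Callers are expected to have freed any queues created
 * from this set first.
 */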
1990 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
1991 {
1992         int i;
1993
1994         for (i = 0; i < set->nr_hw_queues; i++) {
1995                 if (set->tags[i])
1996                         blk_mq_free_rq_map(set, set->tags[i], i);
1997         }
1998
1999         kfree(set->tags);
2000 }
2001 EXPORT_SYMBOL(blk_mq_free_tag_set);
2002
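/*
 * Change the depth of a live queue (e.g. when nr_requests is rewritten
 * through sysfs).  The new value may not exceed the depth the tag set was
 * originally allocated with.
 */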
2003 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2004 {
2005         struct blk_mq_tag_set *set = q->tag_set;
2006         struct blk_mq_hw_ctx *hctx;
2007         int i, ret;
2008
2009         if (!set || nr > set->queue_depth)
2010                 return -EINVAL;
2011
2012         ret = 0;
2013         queue_for_each_hw_ctx(q, hctx, i) {
2014                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2015                 if (ret)
2016                         break;
2017         }
2018
2019         if (!ret)
2020                 q->nr_requests = nr;
2021
2022         return ret;
2023 }
2024
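/*
 * blk_mq_disable_hotplug() / blk_mq_enable_hotplug() bracket code that must
 * not race with the CPU-hotplug notifier above: holding all_q_mutex keeps
 * blk_mq_queue_reinit_notify() from re-mapping the queues in the meantime.
 */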
2025 void blk_mq_disable_hotplug(void)
2026 {
2027         mutex_lock(&all_q_mutex);
2028 }
2029
2030 void blk_mq_enable_hotplug(void)
2031 {
2032         mutex_unlock(&all_q_mutex);
2033 }
2034
2035 static int __init blk_mq_init(void)
2036 {
2037         blk_mq_cpu_init();
2038
2039         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2040
2041         return 0;
2042 }
2043 subsys_initcall(blk_mq_init);