1 #include <linux/kernel.h>
2 #include <linux/module.h>
3 #include <linux/backing-dev.h>
5 #include <linux/blkdev.h>
7 #include <linux/init.h>
8 #include <linux/slab.h>
9 #include <linux/workqueue.h>
10 #include <linux/smp.h>
11 #include <linux/llist.h>
12 #include <linux/list_sort.h>
13 #include <linux/cpu.h>
14 #include <linux/cache.h>
15 #include <linux/sched/sysctl.h>
16 #include <linux/delay.h>
18 #include <trace/events/block.h>
20 #include <linux/blk-mq.h>
23 #include "blk-mq-tag.h"
25 static DEFINE_MUTEX(all_q_mutex);
26 static LIST_HEAD(all_q_list);
28 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
30 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
33 return per_cpu_ptr(q->queue_ctx, cpu);
37 * This assumes per-cpu software queues. They could be per-node
38 * as well, for instance. For now this is hardcoded as-is. Note that we don't
39 * care about preemption, since we know the ctx's are persistent. This does
40 * mean that we can't rely on ctx always matching the currently running CPU.
42 static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
44 return __blk_mq_get_ctx(q, get_cpu());
47 static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
53 * Check if any of the ctx's have pending work in this hardware queue
55 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
59 for (i = 0; i < hctx->ctx_map.map_size; i++)
60 if (hctx->ctx_map.map[i].word)
66 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
67 struct blk_mq_ctx *ctx)
69 return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
72 #define CTX_TO_BIT(hctx, ctx) \
73 ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
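/*
 * Worked example (illustrative): blk_mq_alloc_bitmap() below uses
 * bits_per_word == 8, so a ctx with index_hw == 19 maps to word
 * 19 / 8 == 2 in get_bm() and to bit 19 & 7 == 3 via CTX_TO_BIT();
 * marking that ctx pending therefore sets bit 3 of ctx_map.map[2].word.
 */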
76 * Mark this ctx as having pending work in this hardware queue
78 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
79 struct blk_mq_ctx *ctx)
81 struct blk_align_bitmap *bm = get_bm(hctx, ctx);
83 if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
84 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
87 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
88 struct blk_mq_ctx *ctx)
90 struct blk_align_bitmap *bm = get_bm(hctx, ctx);
92 clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
95 static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
96 struct blk_mq_ctx *ctx,
97 gfp_t gfp, bool reserved)
102 tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
103 if (tag != BLK_MQ_TAG_FAIL) {
104 rq = hctx->tags->rqs[tag];
107 if (blk_mq_tag_busy(hctx)) {
108 rq->cmd_flags = REQ_MQ_INFLIGHT;
109 atomic_inc(&hctx->nr_active);
119 static int blk_mq_queue_enter(struct request_queue *q)
123 __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
125 /* we can't freeze the queue while it is still initializing */
126 if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
129 __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
131 spin_lock_irq(q->queue_lock);
132 ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
133 !blk_queue_bypass(q) || blk_queue_dying(q),
135 /* inc usage count with the lock held, so freeze_queue can't run concurrently */
136 if (!ret && !blk_queue_dying(q))
137 __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
138 else if (blk_queue_dying(q))
140 spin_unlock_irq(q->queue_lock);
145 static void blk_mq_queue_exit(struct request_queue *q)
147 __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
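/*
 * Usage sketch (illustrative): each request holds a reference on
 * mq_usage_counter for its lifetime, taken when it is set up and
 * dropped when it is freed, roughly:
 *
 *	if (blk_mq_queue_enter(q))
 *		return NULL;		(queue is frozen or dying)
 *	...allocate and issue the request...
 *	blk_mq_queue_exit(q);		(from __blk_mq_free_request)
 *
 * which is what lets __blk_mq_drain_queue() below wait for the counter
 * to drop to zero.
 */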
150 static void __blk_mq_drain_queue(struct request_queue *q)
155 spin_lock_irq(q->queue_lock);
156 count = percpu_counter_sum(&q->mq_usage_counter);
157 spin_unlock_irq(q->queue_lock);
161 blk_mq_run_queues(q, false);
167 * Guarantee no request is in use, so we can change any data structure of
168 * the queue afterward.
170 static void blk_mq_freeze_queue(struct request_queue *q)
174 spin_lock_irq(q->queue_lock);
175 drain = !q->bypass_depth++;
176 queue_flag_set(QUEUE_FLAG_BYPASS, q);
177 spin_unlock_irq(q->queue_lock);
180 __blk_mq_drain_queue(q);
183 void blk_mq_drain_queue(struct request_queue *q)
185 __blk_mq_drain_queue(q);
188 static void blk_mq_unfreeze_queue(struct request_queue *q)
192 spin_lock_irq(q->queue_lock);
193 if (!--q->bypass_depth) {
194 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
197 WARN_ON_ONCE(q->bypass_depth < 0);
198 spin_unlock_irq(q->queue_lock);
200 wake_up_all(&q->mq_freeze_wq);
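/*
 * Illustrative pattern (not taken from a specific caller): code that
 * must change per-queue state with no requests in flight brackets the
 * update with the pair above:
 *
 *	blk_mq_freeze_queue(q);
 *	...modify queue data structures...
 *	blk_mq_unfreeze_queue(q);
 *
 * blk_mq_update_tag_set_depth() further down does exactly this.
 */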
203 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
205 return blk_mq_has_free_tags(hctx->tags);
207 EXPORT_SYMBOL(blk_mq_can_queue);
209 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
210 struct request *rq, unsigned int rw_flags)
212 if (blk_queue_io_stat(q))
213 rw_flags |= REQ_IO_STAT;
215 INIT_LIST_HEAD(&rq->queuelist);
216 /* csd/requeue_work/fifo_time is initialized before use */
219 rq->cmd_flags |= rw_flags;
221 /* do not touch atomic flags, it needs atomic ops against the timer */
224 rq->__sector = (sector_t) -1;
227 INIT_HLIST_NODE(&rq->hash);
228 RB_CLEAR_NODE(&rq->rb_node);
229 memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
232 rq->start_time = jiffies;
233 #ifdef CONFIG_BLK_CGROUP
235 set_start_time_ns(rq);
236 rq->io_start_time_ns = 0;
238 rq->nr_phys_segments = 0;
239 #if defined(CONFIG_BLK_DEV_INTEGRITY)
240 rq->nr_integrity_segments = 0;
244 /* tag was already set */
246 memset(rq->__cmd, 0, sizeof(rq->__cmd));
248 rq->cmd_len = BLK_MAX_CDB;
256 INIT_LIST_HEAD(&rq->timeout_list);
260 rq->end_io_data = NULL;
263 ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
266 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
273 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
274 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
276 rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT,
279 blk_mq_rq_ctx_init(q, ctx, rq, rw);
283 if (gfp & __GFP_WAIT) {
284 __blk_mq_run_hw_queue(hctx);
291 blk_mq_wait_for_tags(hctx, reserved);
297 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
301 if (blk_mq_queue_enter(q))
304 rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
306 blk_mq_put_ctx(rq->mq_ctx);
309 EXPORT_SYMBOL(blk_mq_alloc_request);
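/*
 * Usage sketch (illustrative, hypothetical caller): a driver that needs
 * a request outside of the normal bio path can allocate one directly and
 * return it with blk_mq_free_request():
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	...fill in the command payload, execute, wait for completion...
 *	blk_mq_free_request(rq);
 */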
311 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
316 if (blk_mq_queue_enter(q))
319 rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
321 blk_mq_put_ctx(rq->mq_ctx);
324 EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
326 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
327 struct blk_mq_ctx *ctx, struct request *rq)
329 const int tag = rq->tag;
330 struct request_queue *q = rq->q;
332 if (rq->cmd_flags & REQ_MQ_INFLIGHT)
333 atomic_dec(&hctx->nr_active);
335 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
336 blk_mq_put_tag(hctx, tag, &ctx->last_tag);
337 blk_mq_queue_exit(q);
340 void blk_mq_free_request(struct request *rq)
342 struct blk_mq_ctx *ctx = rq->mq_ctx;
343 struct blk_mq_hw_ctx *hctx;
344 struct request_queue *q = rq->q;
346 ctx->rq_completed[rq_is_sync(rq)]++;
348 hctx = q->mq_ops->map_queue(q, ctx->cpu);
349 __blk_mq_free_request(hctx, ctx, rq);
353 * Clone all relevant state from a request that has been put on hold in
354 * the flush state machine into the preallocated flush request that hangs
355 * off the request queue.
357 * For a driver the flush request should be invisible, that's why we are
358 * impersonating the original request here.
360 void blk_mq_clone_flush_request(struct request *flush_rq,
361 struct request *orig_rq)
363 struct blk_mq_hw_ctx *hctx =
364 orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
366 flush_rq->mq_ctx = orig_rq->mq_ctx;
367 flush_rq->tag = orig_rq->tag;
368 memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
372 inline void __blk_mq_end_io(struct request *rq, int error)
374 blk_account_io_done(rq);
377 rq->end_io(rq, error);
379 if (unlikely(blk_bidi_rq(rq)))
380 blk_mq_free_request(rq->next_rq);
381 blk_mq_free_request(rq);
384 EXPORT_SYMBOL(__blk_mq_end_io);
386 void blk_mq_end_io(struct request *rq, int error)
388 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
390 __blk_mq_end_io(rq, error);
392 EXPORT_SYMBOL(blk_mq_end_io);
394 static void __blk_mq_complete_request_remote(void *data)
396 struct request *rq = data;
398 rq->q->softirq_done_fn(rq);
401 void __blk_mq_complete_request(struct request *rq)
403 struct blk_mq_ctx *ctx = rq->mq_ctx;
407 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
408 rq->q->softirq_done_fn(rq);
413 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
414 shared = cpus_share_cache(cpu, ctx->cpu);
416 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
417 rq->csd.func = __blk_mq_complete_request_remote;
420 smp_call_function_single_async(ctx->cpu, &rq->csd);
422 rq->q->softirq_done_fn(rq);
428 * blk_mq_complete_request - end I/O on a request
429 * @rq: the request being processed
432 * Ends all I/O on a request. It does not handle partial completions.
433 * The actual completion happens out-of-order, through an IPI handler.
435 void blk_mq_complete_request(struct request *rq)
437 if (unlikely(blk_should_fake_timeout(rq->q)))
439 if (!blk_mark_rq_complete(rq))
440 __blk_mq_complete_request(rq);
442 EXPORT_SYMBOL(blk_mq_complete_request);
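/*
 * Illustrative sketch (hypothetical driver): the typical caller is a
 * driver's IRQ handler, which maps the completed hardware tag back to
 * its request and lets the IPI machinery above finish it on the
 * submitting CPU when appropriate:
 *
 *	rq = blk_mq_tag_to_rq(hctx->tags, completed_tag);
 *	rq->errors = hw_status ? -EIO : 0;
 *	blk_mq_complete_request(rq);
 *
 * completed_tag and hw_status stand in for driver-specific state.
 */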
444 static void blk_mq_start_request(struct request *rq, bool last)
446 struct request_queue *q = rq->q;
448 trace_block_rq_issue(q, rq);
450 rq->resid_len = blk_rq_bytes(rq);
451 if (unlikely(blk_bidi_rq(rq)))
452 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
455 * Just mark start time and set the started bit. Due to memory
456 * ordering, we know we'll see the correct deadline as long as
457 * REQ_ATOM_STARTED is seen. Use the default queue timeout,
458 * unless one has been set in the request.
461 rq->deadline = jiffies + q->rq_timeout;
463 rq->deadline = jiffies + rq->timeout;
466 * Mark us as started and clear complete. Complete might have been
467 * set if requeue raced with timeout, which then marked it as
468 * complete. So be sure to clear complete again when we start
469 * the request, otherwise we'll ignore the completion event.
471 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
472 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
474 if (q->dma_drain_size && blk_rq_bytes(rq)) {
476 * Make sure space for the drain appears. We know we can do
477 * this because max_hw_segments has been adjusted to be one
478 * fewer than the device can handle.
480 rq->nr_phys_segments++;
484 * Flag the last request in the series so that drivers know when IO
485 * should be kicked off, if they don't do it on a per-request basis.
487 * Note: the flag isn't the only condition on which drivers should kick off IO.
488 * If the drive is busy, the last request might not have the bit set.
491 rq->cmd_flags |= REQ_END;
494 static void __blk_mq_requeue_request(struct request *rq)
496 struct request_queue *q = rq->q;
498 trace_block_rq_requeue(q, rq);
499 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
501 rq->cmd_flags &= ~REQ_END;
503 if (q->dma_drain_size && blk_rq_bytes(rq))
504 rq->nr_phys_segments--;
507 void blk_mq_requeue_request(struct request *rq)
509 __blk_mq_requeue_request(rq);
510 blk_clear_rq_complete(rq);
512 BUG_ON(blk_queued_rq(rq));
513 blk_mq_insert_request(rq, true, true, false);
515 EXPORT_SYMBOL(blk_mq_requeue_request);
517 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
519 return tags->rqs[tag];
521 EXPORT_SYMBOL(blk_mq_tag_to_rq);
523 struct blk_mq_timeout_data {
524 struct blk_mq_hw_ctx *hctx;
526 unsigned int *next_set;
529 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
531 struct blk_mq_timeout_data *data = __data;
532 struct blk_mq_hw_ctx *hctx = data->hctx;
535 /* It may not be in flight yet (this is where
536 * the REQ_ATOM_STARTED flag comes in). The requests are
537 * statically allocated, so we know it's always safe to access the
538 * memory associated with a bit offset into ->rqs[].
544 tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
545 if (tag >= hctx->tags->nr_tags)
548 rq = blk_mq_tag_to_rq(hctx->tags, tag++);
549 if (rq->q != hctx->queue)
551 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
554 blk_rq_check_expired(rq, data->next, data->next_set);
558 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
560 unsigned int *next_set)
562 struct blk_mq_timeout_data data = {
565 .next_set = next_set,
569 * Ask the tagging code to iterate busy requests, so we can
570 * check them for timeout.
572 blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
575 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
577 struct request_queue *q = rq->q;
580 * We know that complete is set at this point. If STARTED isn't set
581 * anymore, then the request isn't active and the "timeout" should
582 * just be ignored. This can happen due to the bitflag ordering.
583 * Timeout first checks if STARTED is set, and if it is, assumes
584 * the request is active. But if we race with completion, then
585 * both flags will get cleared. So check here again, and ignore
586 * a timeout event with a request that isn't active.
588 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
589 return BLK_EH_NOT_HANDLED;
591 if (!q->mq_ops->timeout)
592 return BLK_EH_RESET_TIMER;
594 return q->mq_ops->timeout(rq);
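/*
 * Illustrative sketch (hypothetical driver): the ->timeout hook invoked
 * above decides whether the request should be retried or failed:
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq)
 *	{
 *		if (my_dev_still_working_on(rq))
 *			return BLK_EH_RESET_TIMER;
 *		rq->errors = -ETIMEDOUT;
 *		return BLK_EH_HANDLED;
 *	}
 *
 * my_dev_still_working_on() is an assumption; the return codes are real.
 */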
597 static void blk_mq_rq_timer(unsigned long data)
599 struct request_queue *q = (struct request_queue *) data;
600 struct blk_mq_hw_ctx *hctx;
601 unsigned long next = 0;
604 queue_for_each_hw_ctx(q, hctx, i) {
606 * If no software queues are currently mapped to this
607 * hardware queue, there's nothing to check
609 if (!hctx->nr_ctx || !hctx->tags)
612 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
616 next = blk_rq_timeout(round_jiffies_up(next));
617 mod_timer(&q->timeout, next);
619 queue_for_each_hw_ctx(q, hctx, i)
620 blk_mq_tag_idle(hctx);
625 * Reverse check our software queue for entries that we could potentially
626 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
627 * too much time checking for merges.
629 static bool blk_mq_attempt_merge(struct request_queue *q,
630 struct blk_mq_ctx *ctx, struct bio *bio)
635 list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
641 if (!blk_rq_merge_ok(rq, bio))
644 el_ret = blk_try_merge(rq, bio);
645 if (el_ret == ELEVATOR_BACK_MERGE) {
646 if (bio_attempt_back_merge(q, rq, bio)) {
651 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
652 if (bio_attempt_front_merge(q, rq, bio)) {
664 * Process software queues that have been marked busy, splicing them
665 * to the for-dispatch list.
667 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
669 struct blk_mq_ctx *ctx;
672 for (i = 0; i < hctx->ctx_map.map_size; i++) {
673 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
674 unsigned int off, bit;
680 off = i * hctx->ctx_map.bits_per_word;
682 bit = find_next_bit(&bm->word, bm->depth, bit);
683 if (bit >= bm->depth)
686 ctx = hctx->ctxs[bit + off];
687 clear_bit(bit, &bm->word);
688 spin_lock(&ctx->lock);
689 list_splice_tail_init(&ctx->rq_list, list);
690 spin_unlock(&ctx->lock);
698 * Run this hardware queue, pulling any software queues mapped to it in.
699 * Note that this function currently has various problems around ordering
700 * of IO. In particular, we'd like FIFO behaviour on handling existing
701 * items on the hctx->dispatch list. Ignore that for now.
703 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
705 struct request_queue *q = hctx->queue;
710 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
712 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
718 * Touch any software queue that has pending entries.
720 flush_busy_ctxs(hctx, &rq_list);
723 * If we have previous entries on our dispatch list, grab them
724 * and stuff them at the front for more fair dispatch.
726 if (!list_empty_careful(&hctx->dispatch)) {
727 spin_lock(&hctx->lock);
728 if (!list_empty(&hctx->dispatch))
729 list_splice_init(&hctx->dispatch, &rq_list);
730 spin_unlock(&hctx->lock);
734 * Now process all the entries, sending them to the driver.
737 while (!list_empty(&rq_list)) {
740 rq = list_first_entry(&rq_list, struct request, queuelist);
741 list_del_init(&rq->queuelist);
743 blk_mq_start_request(rq, list_empty(&rq_list));
745 ret = q->mq_ops->queue_rq(hctx, rq);
747 case BLK_MQ_RQ_QUEUE_OK:
750 case BLK_MQ_RQ_QUEUE_BUSY:
751 list_add(&rq->queuelist, &rq_list);
752 __blk_mq_requeue_request(rq);
755 pr_err("blk-mq: bad return on queue: %d\n", ret);
756 case BLK_MQ_RQ_QUEUE_ERROR:
758 blk_mq_end_io(rq, rq->errors);
762 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
767 hctx->dispatched[0]++;
768 else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
769 hctx->dispatched[ilog2(queued) + 1]++;
772 * Any items that need requeuing? Stuff them into hctx->dispatch,
773 * that is where we will continue on next queue run.
775 if (!list_empty(&rq_list)) {
776 spin_lock(&hctx->lock);
777 list_splice(&rq_list, &hctx->dispatch);
778 spin_unlock(&hctx->lock);
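/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * ->queue_rq() hook driven by the dispatch loop above returns one of the
 * codes handled in the switch:
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *	{
 *		struct my_dev *dev = hctx->driver_data;
 *
 *		if (!my_dev_has_free_slot(dev))
 *			return BLK_MQ_RQ_QUEUE_BUSY;	(requeued above)
 *		if (my_dev_submit(dev, rq))
 *			return BLK_MQ_RQ_QUEUE_ERROR;	(request ended with -EIO)
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 * my_dev and its helpers are assumptions; only the return codes are real.
 */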
783 * It'd be great if the workqueue API had a way to pass
784 * in a mask and had some smarts for more clever placement.
785 * For now we just round-robin here, switching for every
786 * BLK_MQ_CPU_WORK_BATCH queued items.
788 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
790 int cpu = hctx->next_cpu;
792 if (--hctx->next_cpu_batch <= 0) {
795 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
796 if (next_cpu >= nr_cpu_ids)
797 next_cpu = cpumask_first(hctx->cpumask);
799 hctx->next_cpu = next_cpu;
800 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
806 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
808 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
811 if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
812 __blk_mq_run_hw_queue(hctx);
813 else if (hctx->queue->nr_hw_queues == 1)
814 kblockd_schedule_delayed_work(&hctx->run_work, 0);
818 cpu = blk_mq_hctx_next_cpu(hctx);
819 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
823 void blk_mq_run_queues(struct request_queue *q, bool async)
825 struct blk_mq_hw_ctx *hctx;
828 queue_for_each_hw_ctx(q, hctx, i) {
829 if ((!blk_mq_hctx_has_pending(hctx) &&
830 list_empty_careful(&hctx->dispatch)) ||
831 test_bit(BLK_MQ_S_STOPPED, &hctx->state))
835 blk_mq_run_hw_queue(hctx, async);
839 EXPORT_SYMBOL(blk_mq_run_queues);
841 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
843 cancel_delayed_work(&hctx->run_work);
844 cancel_delayed_work(&hctx->delay_work);
845 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
847 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
849 void blk_mq_stop_hw_queues(struct request_queue *q)
851 struct blk_mq_hw_ctx *hctx;
854 queue_for_each_hw_ctx(q, hctx, i)
855 blk_mq_stop_hw_queue(hctx);
857 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
859 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
861 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
864 __blk_mq_run_hw_queue(hctx);
867 EXPORT_SYMBOL(blk_mq_start_hw_queue);
869 void blk_mq_start_hw_queues(struct request_queue *q)
871 struct blk_mq_hw_ctx *hctx;
874 queue_for_each_hw_ctx(q, hctx, i)
875 blk_mq_start_hw_queue(hctx);
877 EXPORT_SYMBOL(blk_mq_start_hw_queues);
880 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
882 struct blk_mq_hw_ctx *hctx;
885 queue_for_each_hw_ctx(q, hctx, i) {
886 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
889 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
891 blk_mq_run_hw_queue(hctx, async);
895 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
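/*
 * Usage sketch (illustrative, hypothetical driver): a driver that runs
 * out of hardware resources usually stops the queue before reporting
 * BUSY from ->queue_rq(), and restarts it from its completion path once
 * resources are available again:
 *
 *	if (out_of_resources) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *	}
 *
 *	blk_mq_start_stopped_hw_queues(q, true);	(in the completion handler)
 *
 * out_of_resources stands in for driver-specific state.
 */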
897 static void blk_mq_run_work_fn(struct work_struct *work)
899 struct blk_mq_hw_ctx *hctx;
901 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
903 __blk_mq_run_hw_queue(hctx);
906 static void blk_mq_delay_work_fn(struct work_struct *work)
908 struct blk_mq_hw_ctx *hctx;
910 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
912 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
913 __blk_mq_run_hw_queue(hctx);
916 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
918 unsigned long tmo = msecs_to_jiffies(msecs);
920 if (hctx->queue->nr_hw_queues == 1)
921 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
925 cpu = blk_mq_hctx_next_cpu(hctx);
926 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
929 EXPORT_SYMBOL(blk_mq_delay_queue);
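/*
 * Usage sketch (illustrative): rather than rerunning a starved queue
 * immediately, a driver can stop it and back off for a few milliseconds;
 * the delayed work above restarts a stopped queue when it fires:
 *
 *	blk_mq_stop_hw_queue(hctx);
 *	blk_mq_delay_queue(hctx, 3);	(3 ms is just an example value)
 *	return BLK_MQ_RQ_QUEUE_BUSY;
 */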
931 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
932 struct request *rq, bool at_head)
934 struct blk_mq_ctx *ctx = rq->mq_ctx;
936 trace_block_rq_insert(hctx->queue, rq);
939 list_add(&rq->queuelist, &ctx->rq_list);
941 list_add_tail(&rq->queuelist, &ctx->rq_list);
943 blk_mq_hctx_mark_pending(hctx, ctx);
946 * We do this early, to ensure we are on the right CPU.
951 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
954 struct request_queue *q = rq->q;
955 struct blk_mq_hw_ctx *hctx;
956 struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
958 current_ctx = blk_mq_get_ctx(q);
959 if (!cpu_online(ctx->cpu))
960 rq->mq_ctx = ctx = current_ctx;
962 hctx = q->mq_ops->map_queue(q, ctx->cpu);
964 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
965 !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
966 blk_insert_flush(rq);
968 spin_lock(&ctx->lock);
969 __blk_mq_insert_request(hctx, rq, at_head);
970 spin_unlock(&ctx->lock);
974 blk_mq_run_hw_queue(hctx, async);
976 blk_mq_put_ctx(current_ctx);
979 static void blk_mq_insert_requests(struct request_queue *q,
980 struct blk_mq_ctx *ctx,
981 struct list_head *list,
986 struct blk_mq_hw_ctx *hctx;
987 struct blk_mq_ctx *current_ctx;
989 trace_block_unplug(q, depth, !from_schedule);
991 current_ctx = blk_mq_get_ctx(q);
993 if (!cpu_online(ctx->cpu))
995 hctx = q->mq_ops->map_queue(q, ctx->cpu);
998 * preemption doesn't flush the plug list, so it's possible ctx->cpu is offline now
1001 spin_lock(&ctx->lock);
1002 while (!list_empty(list)) {
1005 rq = list_first_entry(list, struct request, queuelist);
1006 list_del_init(&rq->queuelist);
1008 __blk_mq_insert_request(hctx, rq, false);
1010 spin_unlock(&ctx->lock);
1012 blk_mq_run_hw_queue(hctx, from_schedule);
1013 blk_mq_put_ctx(current_ctx);
1016 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1018 struct request *rqa = container_of(a, struct request, queuelist);
1019 struct request *rqb = container_of(b, struct request, queuelist);
1021 return !(rqa->mq_ctx < rqb->mq_ctx ||
1022 (rqa->mq_ctx == rqb->mq_ctx &&
1023 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1026 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1028 struct blk_mq_ctx *this_ctx;
1029 struct request_queue *this_q;
1032 LIST_HEAD(ctx_list);
1035 list_splice_init(&plug->mq_list, &list);
1037 list_sort(NULL, &list, plug_ctx_cmp);
1043 while (!list_empty(&list)) {
1044 rq = list_entry_rq(list.next);
1045 list_del_init(&rq->queuelist);
1047 if (rq->mq_ctx != this_ctx) {
1049 blk_mq_insert_requests(this_q, this_ctx,
1054 this_ctx = rq->mq_ctx;
1060 list_add_tail(&rq->queuelist, &ctx_list);
1064 * If 'this_ctx' is set, we know we have entries to complete
1065 * on 'ctx_list'. Do those.
1068 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1073 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1075 init_request_from_bio(rq, bio);
1076 blk_account_io_start(rq, 1);
1079 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1080 struct blk_mq_ctx *ctx,
1081 struct request *rq, struct bio *bio)
1083 struct request_queue *q = hctx->queue;
1085 if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1086 blk_mq_bio_to_request(rq, bio);
1087 spin_lock(&ctx->lock);
1089 __blk_mq_insert_request(hctx, rq, false);
1090 spin_unlock(&ctx->lock);
1093 spin_lock(&ctx->lock);
1094 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1095 blk_mq_bio_to_request(rq, bio);
1099 spin_unlock(&ctx->lock);
1100 __blk_mq_free_request(hctx, ctx, rq);
1105 struct blk_map_ctx {
1106 struct blk_mq_hw_ctx *hctx;
1107 struct blk_mq_ctx *ctx;
1110 static struct request *blk_mq_map_request(struct request_queue *q,
1112 struct blk_map_ctx *data)
1114 struct blk_mq_hw_ctx *hctx;
1115 struct blk_mq_ctx *ctx;
1117 int rw = bio_data_dir(bio);
1119 if (unlikely(blk_mq_queue_enter(q))) {
1120 bio_endio(bio, -EIO);
1124 ctx = blk_mq_get_ctx(q);
1125 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1127 if (rw_is_sync(bio->bi_rw))
1130 trace_block_getrq(q, bio, rw);
1131 rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false);
1133 blk_mq_rq_ctx_init(q, ctx, rq, rw);
1135 blk_mq_put_ctx(ctx);
1136 trace_block_sleeprq(q, bio, rw);
1137 rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
1140 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1150 * Multiple hardware queue variant. This will not use per-process plugs,
1151 * but will attempt to bypass the hctx queueing if we can go straight to
1152 * hardware for SYNC IO.
1154 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1156 const int is_sync = rw_is_sync(bio->bi_rw);
1157 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1158 struct blk_map_ctx data;
1161 blk_queue_bounce(q, &bio);
1163 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1164 bio_endio(bio, -EIO);
1168 rq = blk_mq_map_request(q, bio, &data);
1172 if (unlikely(is_flush_fua)) {
1173 blk_mq_bio_to_request(rq, bio);
1174 blk_insert_flush(rq);
1181 blk_mq_bio_to_request(rq, bio);
1182 blk_mq_start_request(rq, true);
1185 * For OK queue, we are done. For error, kill it. Any other
1186 * error (busy), just add it to our list as we previously would have done.
1189 ret = q->mq_ops->queue_rq(data.hctx, rq);
1190 if (ret == BLK_MQ_RQ_QUEUE_OK)
1193 __blk_mq_requeue_request(rq);
1195 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1197 blk_mq_end_io(rq, rq->errors);
1203 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1205 * For a SYNC request, send it to the hardware immediately. For
1206 * an ASYNC request, just ensure that we run it later on. The
1207 * latter allows for merging opportunities and more efficient
1211 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1214 blk_mq_put_ctx(data.ctx);
1218 * Single hardware queue variant. This will attempt to use any per-process
1219 * plug for merging and IO deferral.
1221 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1223 const int is_sync = rw_is_sync(bio->bi_rw);
1224 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1225 unsigned int use_plug, request_count = 0;
1226 struct blk_map_ctx data;
1230 * In this single hardware queue variant, only async, non-flush IO goes
1231 * through the per-task plug; sync IO is sent straight to the hardware queue.
1233 use_plug = !is_flush_fua && !is_sync;
1235 blk_queue_bounce(q, &bio);
1237 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1238 bio_endio(bio, -EIO);
1242 if (use_plug && !blk_queue_nomerges(q) &&
1243 blk_attempt_plug_merge(q, bio, &request_count))
1246 rq = blk_mq_map_request(q, bio, &data);
1248 if (unlikely(is_flush_fua)) {
1249 blk_mq_bio_to_request(rq, bio);
1250 blk_insert_flush(rq);
1255 * A task plug currently exists. Since this is completely lockless,
1256 * utilize that to temporarily store requests until the task is
1257 * either done or scheduled away.
1260 struct blk_plug *plug = current->plug;
1263 blk_mq_bio_to_request(rq, bio);
1264 if (list_empty(&plug->mq_list))
1265 trace_block_plug(q);
1266 else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1267 blk_flush_plug_list(plug, false);
1268 trace_block_plug(q);
1270 list_add_tail(&rq->queuelist, &plug->mq_list);
1271 blk_mq_put_ctx(data.ctx);
1276 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1278 * For a SYNC request, send it to the hardware immediately. For
1279 * an ASYNC request, just ensure that we run it later on. The
1280 * latter allows for merging opportunities and more efficient
1284 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1287 blk_mq_put_ctx(data.ctx);
1291 * Default mapping to a software queue, since we use one per CPU.
1293 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1295 return q->queue_hw_ctx[q->mq_map[cpu]];
1297 EXPORT_SYMBOL(blk_mq_map_queue);
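/*
 * Worked example (illustrative): on a 4-CPU machine with two hardware
 * queues, blk_mq_make_queue_map() might produce mq_map[] = { 0, 0, 1, 1 },
 * in which case IO submitted on CPUs 2 and 3 is dispatched through
 * queue_hw_ctx[1]. The exact spread depends on the CPU topology.
 */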
1299 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
1300 unsigned int hctx_index)
1302 return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
1305 EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
1307 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
1308 unsigned int hctx_index)
1312 EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
1314 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1315 struct blk_mq_tags *tags, unsigned int hctx_idx)
1319 if (tags->rqs && set->ops->exit_request) {
1322 for (i = 0; i < tags->nr_tags; i++) {
1325 set->ops->exit_request(set->driver_data, tags->rqs[i],
1330 while (!list_empty(&tags->page_list)) {
1331 page = list_first_entry(&tags->page_list, struct page, lru);
1332 list_del_init(&page->lru);
1333 __free_pages(page, page->private);
1338 blk_mq_free_tags(tags);
1341 static size_t order_to_size(unsigned int order)
1343 return (size_t)PAGE_SIZE << order;
1346 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1347 unsigned int hctx_idx)
1349 struct blk_mq_tags *tags;
1350 unsigned int i, j, entries_per_page, max_order = 4;
1351 size_t rq_size, left;
1353 tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1358 INIT_LIST_HEAD(&tags->page_list);
1360 tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1361 GFP_KERNEL, set->numa_node);
1363 blk_mq_free_tags(tags);
1368 * rq_size is the size of the request plus driver payload, rounded
1369 * to the cacheline size
1371 rq_size = round_up(sizeof(struct request) + set->cmd_size,
1373 left = rq_size * set->queue_depth;
1375 for (i = 0; i < set->queue_depth; ) {
1376 int this_order = max_order;
1381 while (left < order_to_size(this_order - 1) && this_order)
1385 page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1391 if (order_to_size(this_order) < rq_size)
1398 page->private = this_order;
1399 list_add_tail(&page->lru, &tags->page_list);
1401 p = page_address(page);
1402 entries_per_page = order_to_size(this_order) / rq_size;
1403 to_do = min(entries_per_page, set->queue_depth - i);
1404 left -= to_do * rq_size;
1405 for (j = 0; j < to_do; j++) {
1407 if (set->ops->init_request) {
1408 if (set->ops->init_request(set->driver_data,
1409 tags->rqs[i], hctx_idx, i,
1422 pr_warn("%s: failed to allocate requests\n", __func__);
1423 blk_mq_free_rq_map(set, tags, hctx_idx);
1427 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1432 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1434 unsigned int bpw = 8, total, num_maps, i;
1436 bitmap->bits_per_word = bpw;
1438 num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1439 bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1444 bitmap->map_size = num_maps;
1447 for (i = 0; i < num_maps; i++) {
1448 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1449 total -= bitmap->map[i].depth;
1455 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1457 struct request_queue *q = hctx->queue;
1458 struct blk_mq_ctx *ctx;
1462 * Move ctx entries to new CPU, if this one is going away.
1464 ctx = __blk_mq_get_ctx(q, cpu);
1466 spin_lock(&ctx->lock);
1467 if (!list_empty(&ctx->rq_list)) {
1468 list_splice_init(&ctx->rq_list, &tmp);
1469 blk_mq_hctx_clear_pending(hctx, ctx);
1471 spin_unlock(&ctx->lock);
1473 if (list_empty(&tmp))
1476 ctx = blk_mq_get_ctx(q);
1477 spin_lock(&ctx->lock);
1479 while (!list_empty(&tmp)) {
1482 rq = list_first_entry(&tmp, struct request, queuelist);
1484 list_move_tail(&rq->queuelist, &ctx->rq_list);
1487 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1488 blk_mq_hctx_mark_pending(hctx, ctx);
1490 spin_unlock(&ctx->lock);
1492 blk_mq_run_hw_queue(hctx, true);
1493 blk_mq_put_ctx(ctx);
1497 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1499 struct request_queue *q = hctx->queue;
1500 struct blk_mq_tag_set *set = q->tag_set;
1502 if (set->tags[hctx->queue_num])
1505 set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1506 if (!set->tags[hctx->queue_num])
1509 hctx->tags = set->tags[hctx->queue_num];
1513 static int blk_mq_hctx_notify(void *data, unsigned long action,
1516 struct blk_mq_hw_ctx *hctx = data;
1518 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1519 return blk_mq_hctx_cpu_offline(hctx, cpu);
1520 else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1521 return blk_mq_hctx_cpu_online(hctx, cpu);
1526 static int blk_mq_init_hw_queues(struct request_queue *q,
1527 struct blk_mq_tag_set *set)
1529 struct blk_mq_hw_ctx *hctx;
1533 * Initialize hardware queues
1535 queue_for_each_hw_ctx(q, hctx, i) {
1538 node = hctx->numa_node;
1539 if (node == NUMA_NO_NODE)
1540 node = hctx->numa_node = set->numa_node;
1542 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1543 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1544 spin_lock_init(&hctx->lock);
1545 INIT_LIST_HEAD(&hctx->dispatch);
1547 hctx->queue_num = i;
1548 hctx->flags = set->flags;
1549 hctx->cmd_size = set->cmd_size;
1551 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1552 blk_mq_hctx_notify, hctx);
1553 blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1555 hctx->tags = set->tags[i];
1558 * Allocate space for all possible cpus to avoid allocation in the hot path later on
1561 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1566 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1571 if (set->ops->init_hctx &&
1572 set->ops->init_hctx(hctx, set->driver_data, i))
1576 if (i == q->nr_hw_queues)
1582 queue_for_each_hw_ctx(q, hctx, j) {
1586 if (set->ops->exit_hctx)
1587 set->ops->exit_hctx(hctx, j);
1589 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1591 blk_mq_free_bitmap(&hctx->ctx_map);
1597 static void blk_mq_init_cpu_queues(struct request_queue *q,
1598 unsigned int nr_hw_queues)
1602 for_each_possible_cpu(i) {
1603 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1604 struct blk_mq_hw_ctx *hctx;
1606 memset(__ctx, 0, sizeof(*__ctx));
1608 spin_lock_init(&__ctx->lock);
1609 INIT_LIST_HEAD(&__ctx->rq_list);
1612 /* If the cpu isn't online, it is mapped to the first hctx */
1616 hctx = q->mq_ops->map_queue(q, i);
1617 cpumask_set_cpu(i, hctx->cpumask);
1621 * Set local node, IFF we have more than one hw queue. If
1622 * not, we remain on the home node of the device
1624 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1625 hctx->numa_node = cpu_to_node(i);
1629 static void blk_mq_map_swqueue(struct request_queue *q)
1632 struct blk_mq_hw_ctx *hctx;
1633 struct blk_mq_ctx *ctx;
1635 queue_for_each_hw_ctx(q, hctx, i) {
1636 cpumask_clear(hctx->cpumask);
1641 * Map software to hardware queues
1643 queue_for_each_ctx(q, ctx, i) {
1644 /* If the cpu isn't online, it is mapped to the first hctx */
1648 hctx = q->mq_ops->map_queue(q, i);
1649 cpumask_set_cpu(i, hctx->cpumask);
1650 ctx->index_hw = hctx->nr_ctx;
1651 hctx->ctxs[hctx->nr_ctx++] = ctx;
1654 queue_for_each_hw_ctx(q, hctx, i) {
1656 * If no software queues are mapped to this hardware queue,
1657 * disable it and free the request entries
1659 if (!hctx->nr_ctx) {
1660 struct blk_mq_tag_set *set = q->tag_set;
1663 blk_mq_free_rq_map(set, set->tags[i], i);
1664 set->tags[i] = NULL;
1671 * Initialize batch round-robin counts
1673 hctx->next_cpu = cpumask_first(hctx->cpumask);
1674 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1678 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1680 struct blk_mq_hw_ctx *hctx;
1681 struct request_queue *q;
1685 if (set->tag_list.next == set->tag_list.prev)
1690 list_for_each_entry(q, &set->tag_list, tag_set_list) {
1691 blk_mq_freeze_queue(q);
1693 queue_for_each_hw_ctx(q, hctx, i) {
1695 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1697 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1699 blk_mq_unfreeze_queue(q);
1703 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1705 struct blk_mq_tag_set *set = q->tag_set;
1707 blk_mq_freeze_queue(q);
1709 mutex_lock(&set->tag_list_lock);
1710 list_del_init(&q->tag_set_list);
1711 blk_mq_update_tag_set_depth(set);
1712 mutex_unlock(&set->tag_list_lock);
1714 blk_mq_unfreeze_queue(q);
1717 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1718 struct request_queue *q)
1722 mutex_lock(&set->tag_list_lock);
1723 list_add_tail(&q->tag_set_list, &set->tag_list);
1724 blk_mq_update_tag_set_depth(set);
1725 mutex_unlock(&set->tag_list_lock);
1728 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1730 struct blk_mq_hw_ctx **hctxs;
1731 struct blk_mq_ctx *ctx;
1732 struct request_queue *q;
1735 ctx = alloc_percpu(struct blk_mq_ctx);
1737 return ERR_PTR(-ENOMEM);
1739 hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1745 for (i = 0; i < set->nr_hw_queues; i++) {
1746 hctxs[i] = set->ops->alloc_hctx(set, i);
1750 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1753 atomic_set(&hctxs[i]->nr_active, 0);
1754 hctxs[i]->numa_node = NUMA_NO_NODE;
1755 hctxs[i]->queue_num = i;
1758 q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1762 q->mq_map = blk_mq_make_queue_map(set);
1766 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1767 blk_queue_rq_timeout(q, 30000);
1769 q->nr_queues = nr_cpu_ids;
1770 q->nr_hw_queues = set->nr_hw_queues;
1773 q->queue_hw_ctx = hctxs;
1775 q->mq_ops = set->ops;
1776 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1778 q->sg_reserved_size = INT_MAX;
1780 if (q->nr_hw_queues > 1)
1781 blk_queue_make_request(q, blk_mq_make_request);
1783 blk_queue_make_request(q, blk_sq_make_request);
1785 blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1787 blk_queue_rq_timeout(q, set->timeout);
1790 * Do this after blk_queue_make_request() overrides it...
1792 q->nr_requests = set->queue_depth;
1794 if (set->ops->complete)
1795 blk_queue_softirq_done(q, set->ops->complete);
1797 blk_mq_init_flush(q);
1798 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1800 q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1801 set->cmd_size, cache_line_size()),
1806 if (blk_mq_init_hw_queues(q, set))
1809 mutex_lock(&all_q_mutex);
1810 list_add_tail(&q->all_q_node, &all_q_list);
1811 mutex_unlock(&all_q_mutex);
1813 blk_mq_add_queue_tag_set(set, q);
1815 blk_mq_map_swqueue(q);
1824 blk_cleanup_queue(q);
1826 for (i = 0; i < set->nr_hw_queues; i++) {
1829 free_cpumask_var(hctxs[i]->cpumask);
1830 set->ops->free_hctx(hctxs[i], i);
1835 return ERR_PTR(-ENOMEM);
1837 EXPORT_SYMBOL(blk_mq_init_queue);
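/*
 * Initialization sketch (illustrative, hypothetical driver): the caller
 * fills in a blk_mq_tag_set, registers it, then creates the queue.
 * my_queue_rq, struct my_cmd and the chosen depth are assumptions; the
 * mandatory ops mirror the checks in blk_mq_alloc_tag_set() below:
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *		.alloc_hctx	= blk_mq_alloc_single_hw_queue,
 *		.free_hctx	= blk_mq_free_single_hw_queue,
 *	};
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */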
1839 void blk_mq_free_queue(struct request_queue *q)
1841 struct blk_mq_hw_ctx *hctx;
1844 blk_mq_del_queue_tag_set(q);
1846 queue_for_each_hw_ctx(q, hctx, i) {
1848 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1849 if (q->mq_ops->exit_hctx)
1850 q->mq_ops->exit_hctx(hctx, i);
1851 free_cpumask_var(hctx->cpumask);
1852 q->mq_ops->free_hctx(hctx, i);
1855 free_percpu(q->queue_ctx);
1856 kfree(q->queue_hw_ctx);
1859 q->queue_ctx = NULL;
1860 q->queue_hw_ctx = NULL;
1863 mutex_lock(&all_q_mutex);
1864 list_del_init(&q->all_q_node);
1865 mutex_unlock(&all_q_mutex);
1868 /* Basically redo blk_mq_init_queue with queue frozen */
1869 static void blk_mq_queue_reinit(struct request_queue *q)
1871 blk_mq_freeze_queue(q);
1873 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1876 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1877 * we should change hctx numa_node according to the new topology (this
1878 * involves freeing and re-allocating memory; is it worth doing?)
1881 blk_mq_map_swqueue(q);
1883 blk_mq_unfreeze_queue(q);
1886 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1887 unsigned long action, void *hcpu)
1889 struct request_queue *q;
1892 * Before new mappings are established, a hotadded cpu might already
1893 * have started handling requests. This doesn't break anything, as we map
1894 * offline CPUs to the first hardware queue. We will re-init the queue
1895 * below to get optimal settings.
1897 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1898 action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1901 mutex_lock(&all_q_mutex);
1902 list_for_each_entry(q, &all_q_list, all_q_node)
1903 blk_mq_queue_reinit(q);
1904 mutex_unlock(&all_q_mutex);
1908 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1912 if (!set->nr_hw_queues)
1914 if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
1916 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1919 if (!set->nr_hw_queues ||
1920 !set->ops->queue_rq || !set->ops->map_queue ||
1921 !set->ops->alloc_hctx || !set->ops->free_hctx)
1925 set->tags = kmalloc_node(set->nr_hw_queues *
1926 sizeof(struct blk_mq_tags *),
1927 GFP_KERNEL, set->numa_node);
1931 for (i = 0; i < set->nr_hw_queues; i++) {
1932 set->tags[i] = blk_mq_init_rq_map(set, i);
1937 mutex_init(&set->tag_list_lock);
1938 INIT_LIST_HEAD(&set->tag_list);
1944 blk_mq_free_rq_map(set, set->tags[i], i);
1948 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
1950 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
1954 for (i = 0; i < set->nr_hw_queues; i++) {
1956 blk_mq_free_rq_map(set, set->tags[i], i);
1961 EXPORT_SYMBOL(blk_mq_free_tag_set);
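/*
 * Teardown sketch (illustrative): the converse of queue setup; the queue
 * is destroyed first, then the tag set that backed it:
 *
 *	blk_cleanup_queue(q);
 *	blk_mq_free_tag_set(set);
 */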
1963 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
1965 struct blk_mq_tag_set *set = q->tag_set;
1966 struct blk_mq_hw_ctx *hctx;
1969 if (!set || nr > set->queue_depth)
1973 queue_for_each_hw_ctx(q, hctx, i) {
1974 ret = blk_mq_tag_update_depth(hctx->tags, nr);
1980 q->nr_requests = nr;
1985 void blk_mq_disable_hotplug(void)
1987 mutex_lock(&all_q_mutex);
1990 void blk_mq_enable_hotplug(void)
1992 mutex_unlock(&all_q_mutex);
1995 static int __init blk_mq_init(void)
1999 /* Must be called after percpu_counter_hotcpu_callback() */
2000 hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
2004 subsys_initcall(blk_mq_init);