Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 828f4bd193f256ff8ed41e39bb7620f09895bfd5..572966f495966b7fe8ce486ad4e5912a0d044eb5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -348,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_sched_completed_request(hctx, rq);
-       blk_mq_sched_restart_queues(hctx);
+       blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
 }
 
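Neither restart helper's body appears in this diff; only the call site in __blk_mq_finish_request() changes. As a hedged sketch of the general pattern (not the actual blk_mq_sched_restart() implementation): dispatch marks a hardware queue for restart when it stalls, and the completion path tests and clears that mark and re-runs the queue.

/*
 * Hedged sketch only; the real blk_mq_sched_restart() body is not part of
 * this diff.  It illustrates the test-and-clear-then-rerun pattern that the
 * completion path above relies on.
 */
static bool example_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
                clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
                blk_mq_run_hw_queue(hctx, true);        /* async re-run */
                return true;
        }
        return false;
}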
@@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
        return hctx->next_cpu;
 }
 
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+                                       unsigned long msecs)
 {
        if (unlikely(blk_mq_hctx_stopped(hctx) ||
                     !blk_mq_hw_queue_mapped(hctx)))
@@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                put_cpu();
        }
 
-       kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+       if (msecs == 0)
+               kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+                                        &hctx->run_work);
+       else
+               kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+                                                &hctx->delayed_run_work,
+                                                msecs_to_jiffies(msecs));
+}
+
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+       __blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+       __blk_mq_delay_run_hw_queue(hctx, async, 0);
 }
 
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
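blk_mq_run_hw_queue() is now a thin wrapper around __blk_mq_delay_run_hw_queue(), and the exported blk_mq_delay_run_hw_queue() lets a driver re-kick a hardware queue after a delay instead of immediately. A hedged usage sketch from a hypothetical driver follows; the mydrv_* names and helpers and the 3 ms delay are illustrative, and the return codes assume this kernel generation's BLK_MQ_RQ_QUEUE_* values.

/* Hypothetical driver callback; mydrv_* names and helpers are illustrative. */
static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        struct mydrv_queue *mq = hctx->driver_data;

        if (!mydrv_can_queue(mq)) {
                /*
                 * Transient resource shortage: ask blk-mq to retry dispatch
                 * in ~3 ms instead of re-running the queue immediately.
                 */
                blk_mq_delay_run_hw_queue(hctx, 3);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        mydrv_submit(mq, bd->rq);
        return BLK_MQ_RQ_QUEUE_OK;
}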
@@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
        __blk_mq_run_hw_queue(hctx);
 }
 
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+       struct blk_mq_hw_ctx *hctx;
+
+       hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+       __blk_mq_run_hw_queue(hctx);
+}
+
 static void blk_mq_delay_work_fn(struct work_struct *work)
 {
        struct blk_mq_hw_ctx *hctx;
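blk_mq_delayed_run_work_fn() recovers its hctx with container_of() on the embedded delayed_run_work, mirroring the existing run_work and delay_work handlers. Not shown in this diff is who drains a still-pending delayed run before the hctx goes away; a hedged sketch of that assumed pairing:

/*
 * Assumption, not part of this diff: a stop/teardown path would cancel any
 * pending delayed run before the hardware context is torn down.
 */
static void example_drain_delayed_run(struct blk_mq_hw_ctx *hctx)
{
        cancel_delayed_work_sync(&hctx->delayed_run_work);
}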
@@ -1924,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
                                       hctx->fq->flush_rq, hctx_idx,
                                       flush_start_tag + hctx_idx);
 
+       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
 
@@ -1960,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
                node = hctx->numa_node = set->numa_node;
 
        INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+       INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
        INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
        spin_lock_init(&hctx->lock);
        INIT_LIST_HEAD(&hctx->dispatch);
@@ -1990,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
            set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
                goto free_bitmap;
 
+       if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+               goto exit_hctx;
+
        hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
        if (!hctx->fq)
-               goto exit_hctx;
+               goto sched_exit_hctx;
 
        if (set->ops->init_request &&
            set->ops->init_request(set->driver_data,
@@ -2007,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
  free_fq:
        kfree(hctx->fq);
+ sched_exit_hctx:
+       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
  exit_hctx:
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
@@ -2233,8 +2268,6 @@ void blk_mq_release(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
 
-       blk_mq_sched_teardown(q);
-
        /* hctx kobj stays in hctx */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx)
@@ -2565,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
        return 0;
 }
 
+static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+{
+       if (set->ops->map_queues)
+               return set->ops->map_queues(set);
+       else
+               return blk_mq_map_queues(set);
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -2619,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
        if (!set->mq_map)
                goto out_free_tags;
 
-       if (set->ops->map_queues)
-               ret = set->ops->map_queues(set);
-       else
-               ret = blk_mq_map_queues(set);
+       ret = blk_mq_update_queue_map(set);
        if (ret)
                goto out_free_mq_map;
 
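blk_mq_update_queue_map() centralizes the choice between a driver-supplied ->map_queues callback and the default blk_mq_map_queues() spreading, with blk_mq_alloc_tag_set() converted to use it here. A driver that wants its own CPU-to-queue layout supplies the callback; a hedged, hypothetical example (the round-robin policy and mydrv_* names are illustrative only):

/* Hypothetical ->map_queues callback; policy and names are illustrative. */
static int mydrv_map_queues(struct blk_mq_tag_set *set)
{
        unsigned int cpu;

        /* Spread possible CPUs round-robin across the hardware queues. */
        for_each_possible_cpu(cpu)
                set->mq_map[cpu] = cpu % set->nr_hw_queues;

        return 0;
}

static struct blk_mq_ops mydrv_mq_ops = {
        .queue_rq       = mydrv_queue_rq,       /* hypothetical handler from the earlier sketch */
        .map_queues     = mydrv_map_queues,     /* omit to fall back to blk_mq_map_queues() */
};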
@@ -2714,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
                blk_mq_freeze_queue(q);
 
        set->nr_hw_queues = nr_hw_queues;
+       blk_mq_update_queue_map(set);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_realloc_hw_ctxs(set, q);
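The added blk_mq_update_queue_map() call makes blk_mq_update_nr_hw_queues() refresh the CPU mapping before the hardware contexts are reallocated, so a queue-count change picks up the new spread. A hedged usage sketch from a hypothetical driver resizing its shared tag set (struct mydrv and the helper name are illustrative):

static void mydrv_set_queue_count(struct mydrv *dev, int nr_queues)
{
        /*
         * blk_mq_update_nr_hw_queues() freezes the queues sharing the tag
         * set (see the context above), remaps CPUs, and reallocates the
         * hardware contexts; the caller just hands it the new count.
         */
        blk_mq_update_nr_hw_queues(&dev->tag_set, nr_queues);
}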