blk-mq: remove ->map_queue
author Christoph Hellwig <hch@lst.de>
Wed, 14 Sep 2016 14:18:54 +0000 (16:18 +0200)
committer Jens Axboe <axboe@fb.com>
Thu, 15 Sep 2016 14:42:03 +0000 (08:42 -0600)
All drivers use the default mapping, so provide an inline version of it.
If we ever need another queue mapping we can add an optional method back,
although supporting it will also require major changes to the queue setup
code.

This provides better code generation, and better debuggability as well.
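
For reference, the per-driver ->map_queue method is replaced by this static
inline helper, added to block/blk-mq.h by this patch; it keeps the same
default one-software-queue-per-CPU mapping:

	/* Default mapping to a software queue, since we use one per CPU. */
	static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
			int cpu)
	{
		return q->queue_hw_ctx[q->mq_map[cpu]];
	}

Callers that previously did q->mq_ops->map_queue(q, cpu) now call
blk_mq_map_queue(q, cpu) directly, and drivers simply drop the
.map_queue = blk_mq_map_queue initializer from their blk_mq_ops.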

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
18 files changed:
block/blk-flush.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h
block/blk.h
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/md/dm-rq.c
drivers/mtd/ubi/block.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c
drivers/scsi/scsi_lib.c
include/linux/blk-mq.h

index d308def812db9b3794fc10e07da9a303dd31eb70..6a14b68b91358bdd28d90fb5ab23100a77af31dd 100644 (file)
@@ -232,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 
                /* release the tag's ownership to the req cloned from */
                spin_lock_irqsave(&fq->mq_flush_lock, flags);
-               hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
+               hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
        }
@@ -325,7 +325,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
                flush_rq->tag = first_rq->tag;
                fq->orig_rq = first_rq;
 
-               hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
+               hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
                blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
        }
 
@@ -358,7 +358,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
 
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
+       hctx = blk_mq_map_queue(q, ctx->cpu);
 
        /*
         * After populating an empty queue, kick it to avoid stall.  Read
index 729bac3a673b7a89577669c1a65e70fc60309d5a..16028130289f9d8ab55dc00db94e555501c71bfb 100644 (file)
@@ -301,8 +301,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
                io_schedule();
 
                data->ctx = blk_mq_get_ctx(data->q);
-               data->hctx = data->q->mq_ops->map_queue(data->q,
-                               data->ctx->cpu);
+               data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
                if (data->flags & BLK_MQ_REQ_RESERVED) {
                        bt = &data->hctx->tags->breserved_tags;
                } else {
@@ -726,7 +725,7 @@ u32 blk_mq_unique_tag(struct request *rq)
        int hwq = 0;
 
        if (q->mq_ops) {
-               hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+               hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }
 
index c9499f118ef60fd920fdb1e5f12faea12ba7784d..6e077a9d61a8a26e18ad00944c70e4b5ea53461c 100644 (file)
@@ -245,7 +245,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                return ERR_PTR(ret);
 
        ctx = blk_mq_get_ctx(q);
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
+       hctx = blk_mq_map_queue(q, ctx->cpu);
        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 
        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
@@ -254,7 +254,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                blk_mq_put_ctx(ctx);
 
                ctx = blk_mq_get_ctx(q);
-               hctx = q->mq_ops->map_queue(q, ctx->cpu);
+               hctx = blk_mq_map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
                rq =  __blk_mq_alloc_request(&alloc_data, rw, 0);
                ctx = alloc_data.ctx;
@@ -338,11 +338,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
 
 void blk_mq_free_request(struct request *rq)
 {
-       struct blk_mq_hw_ctx *hctx;
-       struct request_queue *q = rq->q;
-
-       hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
-       blk_mq_free_hctx_request(hctx, rq);
+       blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
@@ -1074,9 +1070,7 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct request_queue *q = rq->q;
-       struct blk_mq_hw_ctx *hctx;
-
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
        spin_lock(&ctx->lock);
        __blk_mq_insert_request(hctx, rq, at_head);
@@ -1093,12 +1087,10 @@ static void blk_mq_insert_requests(struct request_queue *q,
                                     bool from_schedule)
 
 {
-       struct blk_mq_hw_ctx *hctx;
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
        trace_block_unplug(q, depth, !from_schedule);
 
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
        /*
         * preemption doesn't flush plug list, so it's possible ctx->cpu is
         * offline now
@@ -1232,7 +1224,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 
        blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
+       hctx = blk_mq_map_queue(q, ctx->cpu);
 
        if (rw_is_sync(bio_op(bio), bio->bi_opf))
                op_flags |= REQ_SYNC;
@@ -1246,7 +1238,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
                trace_block_sleeprq(q, bio, op);
 
                ctx = blk_mq_get_ctx(q);
-               hctx = q->mq_ops->map_queue(q, ctx->cpu);
+               hctx = blk_mq_map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
                rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
                ctx = alloc_data.ctx;
@@ -1263,8 +1255,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 {
        int ret;
        struct request_queue *q = rq->q;
-       struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
-                       rq->mq_ctx->cpu);
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
        struct blk_mq_queue_data bd = {
                .rq = rq,
                .list = NULL,
@@ -1468,15 +1459,6 @@ run_queue:
        return cookie;
 }
 
-/*
- * Default mapping to a software queue, since we use one per CPU.
- */
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
-{
-       return q->queue_hw_ctx[q->mq_map[cpu]];
-}
-EXPORT_SYMBOL(blk_mq_map_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1810,7 +1792,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
                if (!cpu_online(i))
                        continue;
 
-               hctx = q->mq_ops->map_queue(q, i);
+               hctx = blk_mq_map_queue(q, i);
 
                /*
                 * Set local node, IFF we have more than one hw queue. If
@@ -1848,7 +1830,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
                        continue;
 
                ctx = per_cpu_ptr(q->queue_ctx, i);
-               hctx = q->mq_ops->map_queue(q, i);
+               hctx = blk_mq_map_queue(q, i);
 
                cpumask_set_cpu(i, hctx->cpumask);
                ctx->index_hw = hctx->nr_ctx;
@@ -2313,7 +2295,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
        if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
                return -EINVAL;
 
-       if (!set->ops->queue_rq || !set->ops->map_queue)
+       if (!set->ops->queue_rq)
                return -EINVAL;
 
        if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
index 9087b11037b70ae514fd46ea10f659152f291aa2..ec774bf4aea20db1c55d1ad3d74a4dda495d7cbb 100644 (file)
@@ -52,6 +52,12 @@ extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
                                   const struct cpumask *online_mask);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+               int cpu)
+{
+       return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+
 /*
  * sysfs helpers
  */
index c37492f5edaaece9c7603a6844872c3f279cefb0..74444c49078fc7911289f9d8a65939399d8cb126 100644 (file)
@@ -39,14 +39,9 @@ extern struct ida blk_queue_ida;
 static inline struct blk_flush_queue *blk_get_flush_queue(
                struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-       struct blk_mq_hw_ctx *hctx;
-
-       if (!q->mq_ops)
-               return q->fq;
-
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-       return hctx->fq;
+       if (q->mq_ops)
+               return blk_mq_map_queue(q, ctx->cpu)->fq;
+       return q->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)
index c9f2107f7095148dc5e4027d7cb8fdc4794aad01..cbdb3b162718878a84e9eb69aebb8b0a05616e7a 100644 (file)
@@ -1703,7 +1703,6 @@ static int loop_init_request(void *data, struct request *rq,
 
 static struct blk_mq_ops loop_mq_ops = {
        .queue_rq       = loop_queue_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_request   = loop_init_request,
 };
 
index 88c46853dbb54b497065c7ffdea5bb5891281064..3cfd879267b2dd364e3bac2603a645b4f575809f 100644 (file)
@@ -3895,7 +3895,6 @@ exit_handler:
 
 static struct blk_mq_ops mtip_mq_ops = {
        .queue_rq       = mtip_queue_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_request   = mtip_init_cmd,
        .exit_request   = mtip_free_cmd,
        .complete       = mtip_softirq_done_fn,
index 75a7f88d67176af54c19df1768ce770b6d4f11ce..7d3b7d6e5149fd0868a066c234c57256288b13d2 100644 (file)
@@ -393,7 +393,6 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
 static struct blk_mq_ops null_mq_ops = {
        .queue_rq       = null_queue_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_hctx      = null_init_hctx,
        .complete       = null_softirq_done_fn,
 };
index 6c6519f6492a4198c78cae1eaad5e33e03efd2d9..c1f84df7838b8ce4c7850b4b0184f488f8d5bc59 100644 (file)
@@ -3621,7 +3621,6 @@ static int rbd_init_request(void *data, struct request *rq,
 
 static struct blk_mq_ops rbd_mq_ops = {
        .queue_rq       = rbd_queue_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_request   = rbd_init_request,
 };
 
index 93b1aaa5ba3be26d5de4d0a7b461ecc2fe7beb61..2dc5c96c186aa3455ea124aa2bb824e889e1e15f 100644 (file)
@@ -542,7 +542,6 @@ static int virtblk_init_request(void *data, struct request *rq,
 
 static struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
-       .map_queue      = blk_mq_map_queue,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
 };
index 88ef6d4729b46594b8542fa81467a45763f2f343..9908597c5209e19f118b01437ce0f362cab8f289 100644 (file)
@@ -909,7 +909,6 @@ out_busy:
 
 static struct blk_mq_ops blkfront_mq_ops = {
        .queue_rq = blkif_queue_rq,
-       .map_queue = blk_mq_map_queue,
 };
 
 static void blkif_set_queue_limits(struct blkfront_info *info)
index 1ca7463e8bb2b26c799f63f5d54a4a33d570704c..d1c3645d5ce1c1b77b95a21f8215a819d2c22961 100644 (file)
@@ -908,7 +908,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
-       .map_queue = blk_mq_map_queue,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
 };
index ebf46ad2d513edf5422e10d998043a14b9714747..d1e6931c132f79a5a7a57557c75644170c6ee4a6 100644 (file)
@@ -351,7 +351,6 @@ static int ubiblock_init_request(void *data, struct request *req,
 static struct blk_mq_ops ubiblock_mq_ops = {
        .queue_rq       = ubiblock_queue_rq,
        .init_request   = ubiblock_init_request,
-       .map_queue      = blk_mq_map_queue,
 };
 
 static DEFINE_IDR(ubiblock_minor_idr);
index 8dcf5a960951805b09d650b2cc243ceaeff6a5bb..086fd7e4511919e7b2ac3cea61705291e5969321 100644 (file)
@@ -1131,7 +1131,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 static struct blk_mq_ops nvme_mq_admin_ops = {
        .queue_rq       = nvme_queue_rq,
        .complete       = nvme_complete_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_hctx      = nvme_admin_init_hctx,
        .exit_hctx      = nvme_admin_exit_hctx,
        .init_request   = nvme_admin_init_request,
@@ -1141,7 +1140,6 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
 static struct blk_mq_ops nvme_mq_ops = {
        .queue_rq       = nvme_queue_rq,
        .complete       = nvme_complete_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_hctx      = nvme_init_hctx,
        .init_request   = nvme_init_request,
        .timeout        = nvme_timeout,
index ab545fb347a0f5081c27dc1d9c72c47789107cbe..9bbd8866363b55b53d487e981d6eefdf686ced7f 100644 (file)
@@ -1531,7 +1531,6 @@ static void nvme_rdma_complete_rq(struct request *rq)
 static struct blk_mq_ops nvme_rdma_mq_ops = {
        .queue_rq       = nvme_rdma_queue_rq,
        .complete       = nvme_rdma_complete_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_request   = nvme_rdma_init_request,
        .exit_request   = nvme_rdma_exit_request,
        .reinit_request = nvme_rdma_reinit_request,
@@ -1543,7 +1542,6 @@ static struct blk_mq_ops nvme_rdma_mq_ops = {
 static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
        .queue_rq       = nvme_rdma_queue_rq,
        .complete       = nvme_rdma_complete_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_request   = nvme_rdma_init_admin_request,
        .exit_request   = nvme_rdma_exit_admin_request,
        .reinit_request = nvme_rdma_reinit_request,
index 395e60dad83542ff5300107a68bf73caeedaf6dd..d5df77d686b26a40414f192a1c74388651141fd1 100644 (file)
@@ -273,7 +273,6 @@ static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static struct blk_mq_ops nvme_loop_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_hctx,
        .timeout        = nvme_loop_timeout,
@@ -282,7 +281,6 @@ static struct blk_mq_ops nvme_loop_mq_ops = {
 static struct blk_mq_ops nvme_loop_admin_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
-       .map_queue      = blk_mq_map_queue,
        .init_request   = nvme_loop_init_admin_request,
        .init_hctx      = nvme_loop_init_admin_hctx,
        .timeout        = nvme_loop_timeout,
index c71344aebdbb1d5cf1f07e082b48000fb8ad0c64..2cca9cffc63fde351f8ced6687951c0be2f86d6a 100644 (file)
@@ -2077,7 +2077,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 }
 
 static struct blk_mq_ops scsi_mq_ops = {
-       .map_queue      = blk_mq_map_queue,
        .queue_rq       = scsi_queue_rq,
        .complete       = scsi_softirq_done,
        .timeout        = scsi_timeout,
index deda16a9bde43e498944c54b4ca8e2f8b82bfa81..f01379f2b0ac52998b2984ba0788784eff277e50 100644 (file)
@@ -91,7 +91,6 @@ struct blk_mq_queue_data {
 };
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -113,11 +112,6 @@ struct blk_mq_ops {
         */
        queue_rq_fn             *queue_rq;
 
-       /*
-        * Map to specific hardware queue
-        */
-       map_queue_fn            *map_queue;
-
        /*
         * Called on request timeout
         */
@@ -223,7 +217,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);