block: add queue_is_mq() helper
author:    Jens Axboe <axboe@kernel.dk>
           Thu, 15 Nov 2018 19:22:51 +0000 (12:22 -0700)
committer: Jens Axboe <axboe@kernel.dk>
           Fri, 16 Nov 2018 15:34:06 +0000 (08:34 -0700)
Various spots check for q->mq_ops being non-NULL, but provide
a helper to do this instead.

Where the ->mq_ops != NULL check is redundant, remove it.

Since mq == rq-based now that legacy is gone, get rid of the
queue_is_rq_based() and just use queue_is_mq() everywhere.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
14 files changed:
block/blk-cgroup.c
block/blk-core.c
block/blk-flush.c
block/blk-mq.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk-wbt.c
block/blk-zoned.c
block/bsg.c
block/elevator.c
block/genhd.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
include/linux/blkdev.h

index 0f6b4461416519bbed77f69749e7bbb58b0008c5..63d226a084cdefca691928585a30f60b7776735d 100644 (file)
@@ -1324,7 +1324,7 @@ int blkcg_activate_policy(struct request_queue *q,
        if (blkcg_policy_enabled(q, pol))
                return 0;
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_freeze_queue(q);
 pd_prealloc:
        if (!pd_prealloc) {
@@ -1363,7 +1363,7 @@ pd_prealloc:
 
        spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
@@ -1387,7 +1387,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
        if (!blkcg_policy_enabled(q, pol))
                return;
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_freeze_queue(q);
 
        spin_lock_irq(&q->queue_lock);
@@ -1405,7 +1405,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
        spin_unlock_irq(&q->queue_lock);
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
index 92b6b200e9fb2044a6ec363d55db5231cef24b51..0b684a520a11e8ea241a4541833bed08d19996d0 100644 (file)
@@ -232,7 +232,7 @@ void blk_sync_queue(struct request_queue *q)
        del_timer_sync(&q->timeout);
        cancel_work_sync(&q->timeout_work);
 
-       if (q->mq_ops) {
+       if (queue_is_mq(q)) {
                struct blk_mq_hw_ctx *hctx;
                int i;
 
@@ -281,7 +281,7 @@ void blk_set_queue_dying(struct request_queue *q)
         */
        blk_freeze_queue_start(q);
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_wake_waiters(q);
 
        /* Make blk_queue_enter() reexamine the DYING flag. */
@@ -356,7 +356,7 @@ void blk_cleanup_queue(struct request_queue *q)
         * blk_freeze_queue() should be enough for cases of passthrough
         * request.
         */
-       if (q->mq_ops && blk_queue_init_done(q))
+       if (queue_is_mq(q) && blk_queue_init_done(q))
                blk_mq_quiesce_queue(q);
 
        /* for synchronous bio-based driver finish in-flight integrity i/o */
@@ -374,7 +374,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
        blk_exit_queue(q);
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_free_queue(q);
 
        percpu_ref_exit(&q->q_usage_counter);
@@ -982,7 +982,7 @@ generic_make_request_checks(struct bio *bio)
         * For a REQ_NOWAIT based request, return -EOPNOTSUPP
         * if queue is not a request based queue.
         */
-       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
                goto not_supported;
 
        if (should_fail_bio(bio))
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
  */
 int blk_lld_busy(struct request_queue *q)
 {
-       if (q->mq_ops && q->mq_ops->busy)
+       if (queue_is_mq(q) && q->mq_ops->busy)
                return q->mq_ops->busy(q);
 
        return 0;
index fcd18b158fd6fd5bdd6c6455be4b3f4d4b8990ae..a3fc7191c69428a2f81b107f827a847e10be1e98 100644 (file)
@@ -273,8 +273,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
         * assigned to empty flushes, and we deadlock if we are expecting
         * other requests to make progress. Don't defer for that case.
         */
-       if (!list_empty(&fq->flush_data_in_flight) &&
-           !(q->mq_ops && q->elevator) &&
+       if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;
index 3b823891b3ef86ebe8b1c4d501b7ed7fd18a6c67..32b246ed44c0ac38b01bb8aeb161b63f52cc42a9 100644 (file)
@@ -150,7 +150,7 @@ void blk_freeze_queue_start(struct request_queue *q)
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
-               if (q->mq_ops)
+               if (queue_is_mq(q))
                        blk_mq_run_hw_queues(q, false);
        }
 }
index 1e370207a20e639055afa0ccd4f52478de64a9f3..80eef48fddc80756de9c3e5005a9a9f929cce3d4 100644 (file)
@@ -68,7 +68,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        unsigned long nr;
        int ret, err;
 
-       if (!q->mq_ops)
+       if (!queue_is_mq(q))
                return -EINVAL;
 
        ret = queue_var_store(&nr, page, count);
@@ -835,12 +835,12 @@ static void __blk_release_queue(struct work_struct *work)
 
        blk_queue_free_zone_bitmaps(q);
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_release(q);
 
        blk_trace_shutdown(q);
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_debugfs_unregister(q);
 
        bioset_exit(&q->bio_split);
@@ -914,7 +914,7 @@ int blk_register_queue(struct gendisk *disk)
                goto unlock;
        }
 
-       if (q->mq_ops) {
+       if (queue_is_mq(q)) {
                __blk_mq_register_dev(dev, q);
                blk_mq_debugfs_register(q);
        }
@@ -925,7 +925,7 @@ int blk_register_queue(struct gendisk *disk)
 
        blk_throtl_register_queue(q);
 
-       if ((q->mq_ops && q->elevator)) {
+       if (q->elevator) {
                ret = elv_register_queue(q);
                if (ret) {
                        mutex_unlock(&q->sysfs_lock);
@@ -974,7 +974,7 @@ void blk_unregister_queue(struct gendisk *disk)
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_unregister_dev(disk_to_dev(disk), q);
        mutex_unlock(&q->sysfs_lock);
 
@@ -983,7 +983,7 @@ void blk_unregister_queue(struct gendisk *disk)
        blk_trace_remove_sysfs(disk_to_dev(disk));
 
        mutex_lock(&q->sysfs_lock);
-       if (q->mq_ops && q->elevator)
+       if (q->elevator)
                elv_unregister_queue(q);
        mutex_unlock(&q->sysfs_lock);
 
index d0a23f0bb3ed09a256bef656af5d553fffbeace7..8f0a104770ee166d1c9d721ea0e46991512ee194 100644 (file)
@@ -2456,7 +2456,7 @@ void blk_throtl_register_queue(struct request_queue *q)
        td->throtl_slice = DFL_THROTL_SLICE_HD;
 #endif
 
-       td->track_bio_latency = !queue_is_rq_based(q);
+       td->track_bio_latency = !queue_is_mq(q);
        if (!td->track_bio_latency)
                blk_stat_enable_accounting(q);
 }
index 9f142b84dc85704de776f6a64d76cc2bd0e35e33..d051ebfb4852fdab4b0f5cf787cc2a0d15cdba5d 100644 (file)
@@ -701,7 +701,7 @@ void wbt_enable_default(struct request_queue *q)
        if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
                return;
 
-       if (q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ))
+       if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
                wbt_init(q);
 }
 EXPORT_SYMBOL_GPL(wbt_enable_default);
index 13ba2011a306a8c4c52fe07bc7a5b147e86472a6..e9c332b1d9da4e2cf080b6900dd7ba5bb2dc4ace 100644 (file)
@@ -421,7 +421,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
         * BIO based queues do not use a scheduler so only q->nr_zones
         * needs to be updated so that the sysfs exposed value is correct.
         */
-       if (!queue_is_rq_based(q)) {
+       if (!queue_is_mq(q)) {
                q->nr_zones = nr_zones;
                return 0;
        }
index 9a442c23a715e2f8b53b8bb4209af912a3f45af3..44f6028b9567b46c50cbf3d9a57bade0b157e95a 100644 (file)
@@ -471,7 +471,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
        /*
         * we need a proper transport to send commands, not a stacked device
         */
-       if (!queue_is_rq_based(q))
+       if (!queue_is_mq(q))
                return 0;
 
        bcd = &q->bsg_dev;
index 7964362706827a346b34caca15a5ec735f9b4215..f05e90d4e695a3f5f5a04ae89c997cd11ad1263c 100644 (file)
@@ -667,7 +667,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
        /*
         * Special case for mq, turn off scheduling
         */
-       if (q->mq_ops && !strncmp(name, "none", 4))
+       if (!strncmp(name, "none", 4))
                return elevator_switch(q, NULL);
 
        strlcpy(elevator_name, name, sizeof(elevator_name));
@@ -685,8 +685,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
 
 static inline bool elv_support_iosched(struct request_queue *q)
 {
-       if (q->mq_ops && q->tag_set && (q->tag_set->flags &
-                               BLK_MQ_F_NO_SCHED))
+       if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
                return false;
        return true;
 }
@@ -696,7 +695,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 {
        int ret;
 
-       if (!q->mq_ops || !elv_support_iosched(q))
+       if (!queue_is_mq(q) || !elv_support_iosched(q))
                return count;
 
        ret = __elevator_change(q, name);
@@ -713,7 +712,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
        struct elevator_type *__e;
        int len = 0;
 
-       if (!queue_is_rq_based(q))
+       if (!queue_is_mq(q))
                return sprintf(name, "none\n");
 
        if (!q->elevator)
@@ -732,7 +731,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
        }
        spin_unlock(&elv_list_lock);
 
-       if (q->mq_ops && q->elevator)
+       if (q->elevator)
                len += sprintf(name+len, "none");
 
        len += sprintf(len+name, "\n");
index cff6bdf27226bb597066bc377af3760b38a8250a..0145bcb0cc76d69ad412ff706a30e16f73ea8bf0 100644 (file)
@@ -47,7 +47,7 @@ static void disk_release_events(struct gendisk *disk);
 
 void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 {
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                return;
 
        atomic_inc(&part->in_flight[rw]);
@@ -57,7 +57,7 @@ void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 
 void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 {
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                return;
 
        atomic_dec(&part->in_flight[rw]);
@@ -68,7 +68,7 @@ void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
 void part_in_flight(struct request_queue *q, struct hd_struct *part,
                    unsigned int inflight[2])
 {
-       if (q->mq_ops) {
+       if (queue_is_mq(q)) {
                blk_mq_in_flight(q, part, inflight);
                return;
        }
@@ -85,7 +85,7 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
 void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                       unsigned int inflight[2])
 {
-       if (q->mq_ops) {
+       if (queue_is_mq(q)) {
                blk_mq_in_flight_rw(q, part, inflight);
                return;
        }
index 7cd36e4d131015c530bd6117e12733d8f0727cc8..1f1fe9a618ea91c0de69251068a0787df9c6866b 100644 (file)
@@ -43,7 +43,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
 
 int dm_request_based(struct mapped_device *md)
 {
-       return queue_is_rq_based(md->queue);
+       return queue_is_mq(md->queue);
 }
 
 void dm_start_queue(struct request_queue *q)
index 9038c302d5c29c44f60923f881d1cae9f82e68c3..844f7d0f2ef84b76b224f956cc0d24bb6f266902 100644 (file)
@@ -919,12 +919,12 @@ static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
        struct request_queue *q = bdev_get_queue(dev->bdev);
        struct verify_rq_based_data *v = data;
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                v->mq_count++;
        else
                v->sq_count++;
 
-       return queue_is_rq_based(q);
+       return queue_is_mq(q);
 }
 
 static int dm_table_determine_type(struct dm_table *t)
index 1d185f1fc3332c7a542cde3fa27ffbf81071ce5b..41aaa05e42c1e2798b54d77fda7b80d5cc75bd83 100644 (file)
@@ -656,11 +656,7 @@ static inline bool blk_account_rq(struct request *rq)
 
 #define rq_data_dir(rq)                (op_is_write(req_op(rq)) ? WRITE : READ)
 
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
 {
        return q->mq_ops;
 }