block: split elevator_switch
author: Christoph Hellwig <hch@lst.de>
Sun, 30 Oct 2022 10:07:14 +0000 (11:07 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Tue, 1 Nov 2022 15:12:24 +0000 (09:12 -0600)
Split an elevator_disable helper from elevator_switch for the case where
we want to switch to no scheduler at all.  This includes removing the
pointless elevator_switch_mq helper and removing the switch to no
schedule logic from blk_mq_init_sched.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20221030100714.876891-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-sched.c
block/blk-mq.c
block/blk.h
block/elevator.c

index 68227240fdea37fc2ecf70cafea38b19b2be1c80..23d1a90fec4271ca498ebec1add6ad93e13d6744 100644 (file)
@@ -564,13 +564,6 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
        unsigned long i;
        int ret;
 
-       if (!e) {
-               blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
-               q->elevator = NULL;
-               q->nr_requests = q->tag_set->queue_depth;
-               return 0;
-       }
-
        /*
         * Default to double of smaller one between hw queue_depth and 128,
         * since we don't split into sync/async like the old code did.
index 623e8a506539c57008576c837e603a11cf57715a..a78538586a40541aa0f89312fe566b8201ef0861 100644 (file)
@@ -4588,7 +4588,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
        __elevator_get(qe->type);
        qe->type = q->elevator->type;
        list_add(&qe->node, head);
-       elevator_switch(q, NULL);
+       elevator_disable(q);
        mutex_unlock(&q->sysfs_lock);
 
        return true;
index 7f9e089ab1f7567e8e4b9b822a7b5d578a8723d1..f1398fb96cec9b437739da96830429daf2cc20ec 100644 (file)
@@ -278,6 +278,7 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 void blk_insert_flush(struct request *rq);
 
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
+void elevator_disable(struct request_queue *q);
 void elevator_exit(struct request_queue *q);
 int elv_register_queue(struct request_queue *q, bool uevent);
 void elv_unregister_queue(struct request_queue *q);
index 5781f5d50bb8e741623332156b49cf6c2958ef84..800e0038be0d73f661c34abd526ccd1048488ce5 100644 (file)
@@ -554,39 +554,6 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
-static int elevator_switch_mq(struct request_queue *q,
-                             struct elevator_type *new_e)
-{
-       int ret;
-
-       lockdep_assert_held(&q->sysfs_lock);
-
-       if (q->elevator) {
-               elv_unregister_queue(q);
-               elevator_exit(q);
-       }
-
-       ret = blk_mq_init_sched(q, new_e);
-       if (ret)
-               goto out;
-
-       if (new_e) {
-               ret = elv_register_queue(q, true);
-               if (ret) {
-                       elevator_exit(q);
-                       goto out;
-               }
-       }
-
-       if (new_e)
-               blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
-       else
-               blk_add_trace_msg(q, "elv switch: none");
-
-out:
-       return ret;
-}
-
 static inline bool elv_support_iosched(struct request_queue *q)
 {
        if (!queue_is_mq(q) ||
@@ -691,19 +658,51 @@ void elevator_init_mq(struct request_queue *q)
  */
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
-       int err;
+       int ret;
 
        lockdep_assert_held(&q->sysfs_lock);
 
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
 
-       err = elevator_switch_mq(q, new_e);
+       if (q->elevator) {
+               elv_unregister_queue(q);
+               elevator_exit(q);
+       }
 
+       ret = blk_mq_init_sched(q, new_e);
+       if (ret)
+               goto out_unfreeze;
+
+       ret = elv_register_queue(q, true);
+       if (ret) {
+               elevator_exit(q);
+               goto out_unfreeze;
+       }
+       blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+
+out_unfreeze:
        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
+       return ret;
+}
+
+void elevator_disable(struct request_queue *q)
+{
+       lockdep_assert_held(&q->sysfs_lock);
 
-       return err;
+       blk_mq_freeze_queue(q);
+       blk_mq_quiesce_queue(q);
+
+       elv_unregister_queue(q);
+       elevator_exit(q);
+       blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
+       q->elevator = NULL;
+       q->nr_requests = q->tag_set->queue_depth;
+       blk_add_trace_msg(q, "elv switch: none");
+
+       blk_mq_unquiesce_queue(q);
+       blk_mq_unfreeze_queue(q);
 }
 
 /*
@@ -722,9 +721,9 @@ static int elevator_change(struct request_queue *q, const char *elevator_name)
         * Special case for mq, turn off scheduling
         */
        if (!strncmp(elevator_name, "none", 4)) {
-               if (!q->elevator)
-                       return 0;
-               return elevator_switch(q, NULL);
+               if (q->elevator)
+                       elevator_disable(q);
+               return 0;
        }
 
        if (q->elevator && elevator_match(q->elevator->type, elevator_name))