block: return whether or not to unplug through boolean
author     Jens Axboe <axboe@kernel.dk>	Mon, 18 Oct 2021 16:07:09 +0000 (10:07 -0600)
committer  Jens Axboe <axboe@kernel.dk>	Tue, 19 Oct 2021 11:55:04 +0000 (05:55 -0600)
Instead of returning the same-queue request through a request pointer,
use a boolean to indicate that such a request exists on the plug list.
The caller can then fetch it with list_last_entry(), since the flagged
request is by definition the last entry added to the plug.
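
As a condensed illustration (not part of the patch; adapted from the
blk_mq_submit_bio() hunks below, with allocation, error handling and the
other submission branches elided), the new convention looks roughly like:

	bool same_queue_rq = false;

	/* sets same_queue_rq if the last plug entry belongs to @q */
	if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
		return;

	/* ... allocate and set up the new request 'rq' ... */

	if (plug && !blk_queue_nomerges(q)) {
		struct request *next_rq = NULL;

		if (same_queue_rq) {
			/* re-fetch: the flagged request is the last plug entry */
			next_rq = list_last_entry(&plug->mq_list,
						  struct request, queuelist);
			list_del_init(&next_rq->queuelist);
			plug->rq_count--;
		}
		blk_add_rq_to_plug(plug, rq);
		if (next_rq)
			blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
	}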

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-merge.c
block/blk-mq.c
block/blk.h

diff --git a/block/blk-merge.c b/block/blk-merge.c
index ec727234ac482cff3e57b07f30934a9bd59430c6..c273b58378ce06f65511ecc0e4e5051af8140503 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -1067,9 +1067,8 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
  * @nr_segs: number of segments in @bio
- * @same_queue_rq: pointer to &struct request that gets filled in when
- * another request associated with @q is found on the plug list
- * (optional, may be %NULL)
+ * @same_queue_rq: output value, will be true if there's an existing request
+ * from the passed in @q already in the plug list
  *
  * Determine whether @bio being queued on @q can be merged with the previous
  * request on %current's plugged list.  Returns %true if merge was successful,
@@ -1085,7 +1084,7 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-               unsigned int nr_segs, struct request **same_queue_rq)
+               unsigned int nr_segs, bool *same_queue_rq)
 {
        struct blk_plug *plug;
        struct request *rq;
@@ -1096,12 +1095,12 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
        /* check the previously added entry for a quick merge attempt */
        rq = list_last_entry(&plug->mq_list, struct request, queuelist);
-       if (rq->q == q && same_queue_rq) {
+       if (rq->q == q) {
                /*
                 * Only blk-mq multiple hardware queues case checks the rq in
                 * the same queue, there should be only one such rq in a queue
                 */
-               *same_queue_rq = rq;
+               *same_queue_rq = true;
        }
        if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == BIO_MERGE_OK)
                return true;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 59809ec243031e3d292c95cfbf4cfd6be0e6d718..335ec3a7eab7bfc181680a958178accf5dd4a3ab 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2432,7 +2432,7 @@ void blk_mq_submit_bio(struct bio *bio)
        const int is_flush_fua = op_is_flush(bio->bi_opf);
        struct request *rq;
        struct blk_plug *plug;
-       struct request *same_queue_rq = NULL;
+       bool same_queue_rq = false;
        unsigned int nr_segs = 1;
        blk_status_t ret;
 
@@ -2525,6 +2525,8 @@ void blk_mq_submit_bio(struct bio *bio)
                /* Insert the request at the IO scheduler queue */
                blk_mq_sched_insert_request(rq, false, true, true);
        } else if (plug && !blk_queue_nomerges(q)) {
+               struct request *next_rq = NULL;
+
                /*
                 * We do limited plugging. If the bio can be merged, do that.
                 * Otherwise the existing request in the plug list will be
@@ -2532,19 +2534,19 @@ void blk_mq_submit_bio(struct bio *bio)
                 * The plug list might get flushed before this. If that happens,
                 * the plug list is empty, and same_queue_rq is invalid.
                 */
-               if (list_empty(&plug->mq_list))
-                       same_queue_rq = NULL;
                if (same_queue_rq) {
-                       list_del_init(&same_queue_rq->queuelist);
+                       next_rq = list_last_entry(&plug->mq_list,
+                                                       struct request,
+                                                       queuelist);
+                       list_del_init(&next_rq->queuelist);
                        plug->rq_count--;
                }
                blk_add_rq_to_plug(plug, rq);
                trace_block_plug(q);
 
-               if (same_queue_rq) {
+               if (next_rq) {
                        trace_block_unplug(q, 1, true);
-                       blk_mq_try_issue_directly(same_queue_rq->mq_hctx,
-                                                 same_queue_rq);
+                       blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
                }
        } else if ((q->nr_hw_queues > 1 && is_sync) ||
                   !rq->mq_hctx->dispatch_busy) {
diff --git a/block/blk.h b/block/blk.h
index e80350327e6dc556b6e88640c466d6c1dbf8dbfa..b9729c12fd6213883778d23016c9ae4a52eddc2b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -218,7 +218,7 @@ void blk_add_timer(struct request *req);
 void blk_print_req_error(struct request *req, blk_status_t status);
 
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-               unsigned int nr_segs, struct request **same_queue_rq);
+               unsigned int nr_segs, bool *same_queue_rq);
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
                        struct bio *bio, unsigned int nr_segs);