Merge tag 'block-6.9-20240315' of git://git.kernel.dk/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Mar 2024 21:55:50 +0000 (14:55 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Mar 2024 21:55:50 +0000 (14:55 -0700)
Pull block fixes from Jens Axboe:

 - Revert of a change for mq-deadline that went into the 6.8 release,
   causing a performance regression for some (Bart)

 - Revert of the interruptible discard handling. This needs more work
   since the ioctl and fs path aren't properly split, and will happen
   for the 6.10 kernel release. For 6.9, do the minimal revert
   (Christoph)

 - Fix for an issue with the timestamp caching code (me)

 - kerneldoc fix (Jiapeng)

* tag 'block-6.9-20240315' of git://git.kernel.dk/linux:
  block: fix mismatched kerneldoc function name
  Revert "blk-lib: check for kill signal"
  Revert "block/mq-deadline: use correct way to throttling write requests"
  block: limit block time caching to in_task() context

block/blk-lib.c
block/blk-settings.c
block/blk.h
block/mq-deadline.c

index dc8e35d0a51d6de0d4c7bfb2d4ce2f8cbb91a5d1..a6954eafb8c8af324971bd2d293fdceb2d481303 100644 (file)
@@ -35,26 +35,6 @@ static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
        return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
 }
 
-static void await_bio_endio(struct bio *bio)
-{
-       complete(bio->bi_private);
-       bio_put(bio);
-}
-
-/*
- * await_bio_chain - ends @bio and waits for every chained bio to complete
- */
-static void await_bio_chain(struct bio *bio)
-{
-       DECLARE_COMPLETION_ONSTACK_MAP(done,
-                       bio->bi_bdev->bd_disk->lockdep_map);
-
-       bio->bi_private = &done;
-       bio->bi_end_io = await_bio_endio;
-       bio_endio(bio);
-       blk_wait_io(&done);
-}
-
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
 {
@@ -97,10 +77,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 * is disabled.
                 */
                cond_resched();
-               if (fatal_signal_pending(current)) {
-                       await_bio_chain(bio);
-                       return -EINTR;
-               }
        }
 
        *biop = bio;
@@ -167,10 +143,6 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                nr_sects -= len;
                sector += len;
                cond_resched();
-               if (fatal_signal_pending(current)) {
-                       await_bio_chain(bio);
-                       return -EINTR;
-               }
        }
 
        *biop = bio;
@@ -215,10 +187,6 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
                                break;
                }
                cond_resched();
-               if (fatal_signal_pending(current)) {
-                       await_bio_chain(bio);
-                       return -EINTR;
-               }
        }
 
        *biop = bio;
@@ -309,7 +277,7 @@ retry:
                bio_put(bio);
        }
        blk_finish_plug(&plug);
-       if (ret && ret != -EINTR && try_write_zeroes) {
+       if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
@@ -361,12 +329,6 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
                sector += len;
                nr_sects -= len;
                cond_resched();
-               if (fatal_signal_pending(current)) {
-                       await_bio_chain(bio);
-                       ret = -EINTR;
-                       bio = NULL;
-                       break;
-               }
        }
        if (bio) {
                ret = submit_bio_wait(bio);
index e160d56e8edaa19f2e9e72dacc2552fcc652244a..3c7d8d638ab59dc9704aa01217c9b940b5941e4b 100644 (file)
@@ -267,7 +267,7 @@ int queue_limits_commit_update(struct request_queue *q,
 EXPORT_SYMBOL_GPL(queue_limits_commit_update);
 
 /**
- * queue_limits_commit_set - apply queue limits to queue
+ * queue_limits_set - apply queue limits to queue
  * @q:         queue to update
  * @lim:       limits to apply
  *
index a19b7b42e6503cd5ca5e03aba41b894a891929b8..5cac4e29ae1744fe4dd370e05290df0f31f3f4ba 100644 (file)
@@ -534,7 +534,7 @@ static inline u64 blk_time_get_ns(void)
 {
        struct blk_plug *plug = current->plug;
 
-       if (!plug)
+       if (!plug || !in_task())
                return ktime_get_ns();
 
        /*
index f958e79277b8bc24e8f26b5375c29f0558ba09ad..02a916ba62ee750d4ad29127604b7d4a0cb474d7 100644 (file)
@@ -646,9 +646,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;
-       unsigned int shift = tags->bitmap_tags.sb.shift;
 
-       dd->async_depth = max(1U, 3 * (1U << shift)  / 4);
+       dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
 
        sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
 }