Merge tag 'ata-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal...
diff --git a/block/blk-core.c b/block/blk-core.c
index b5098355d8b27349d3ac08b6728afbd392fe2725..82b5b2c53f1ee8e91150a03d3b4fa0453cbfad06 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -570,7 +570,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
                return BLK_STS_NOTSUPP;
 
        /* The bio sector must point to the start of a sequential zone */
-       if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
+       if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector) ||
            !bio_zone_is_seq(bio))
                return BLK_STS_IOERR;
 
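
The hunk above replaces the open-coded alignment test with the bdev_is_zone_start() helper, which states the intent ("does this sector sit at the start of a zone?") directly. As a minimal sketch of the equivalent logic, assuming the zone size in sectors is a power of two so a bitmask works as a modulo (the function name below is hypothetical; the real helper lives in include/linux/blkdev.h and may differ in detail):

#include <linux/blkdev.h>

/*
 * Illustrative only: equivalent of the open-coded check removed above,
 * assuming bdev_zone_sectors() returns a power of two.
 */
static inline bool sketch_bdev_is_zone_start(struct block_device *bdev,
					     sector_t sector)
{
	return (sector & (bdev_zone_sectors(bdev) - 1)) == 0;
}
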
@@ -684,6 +684,18 @@ static void __submit_bio_noacct_mq(struct bio *bio)
 
 void submit_bio_noacct_nocheck(struct bio *bio)
 {
+       blk_cgroup_bio_start(bio);
+       blkcg_bio_issue_init(bio);
+
+       if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+               trace_block_bio_queue(bio);
+               /*
+                * Now that enqueuing has been traced, we need to trace
+                * completion as well.
+                */
+               bio_set_flag(bio, BIO_TRACE_COMPLETION);
+       }
+
        /*
         * We only want one ->submit_bio to be active at a time, else stack
         * usage with stacked devices could be a problem.  Use current->bio_list
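
This hunk, together with the removal further down in submit_bio_noacct(), moves cgroup accounting and the block_bio_queue tracepoint into submit_bio_noacct_nocheck(), so bios submitted through this path are accounted and traced even when the checks in submit_bio_noacct() are skipped. The BIO_TRACE_COMPLETION flag set here is what later makes the completion side emit a matching trace event. A hedged sketch of that pairing, roughly the pattern bio_endio() follows (the function name below is hypothetical, not the kernel's exact code):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

/*
 * Sketch only: completion tracing fires once for bios whose queueing
 * was traced above, then the flag is cleared.
 */
static void sketch_trace_bio_completion(struct bio *bio)
{
	if (bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}
}
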
@@ -741,12 +753,16 @@ void submit_bio_noacct(struct bio *bio)
         * Filter flush bio's early so that bio based drivers without flush
         * support don't have to worry about them.
         */
-       if (op_is_flush(bio->bi_opf) &&
-           !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-               bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
-               if (!bio_sectors(bio)) {
-                       status = BLK_STS_OK;
+       if (op_is_flush(bio->bi_opf)) {
+               if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
+                                bio_op(bio) != REQ_OP_ZONE_APPEND))
                        goto end_io;
+               if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
+                       bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
+                       if (!bio_sectors(bio)) {
+                               status = BLK_STS_OK;
+                               goto end_io;
+                       }
                }
        }
 
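
The reworked flush handling above first warns (once) and fails the bio if REQ_PREFLUSH/REQ_FUA is set on anything other than a write or zone append, and only then, as before, strips the flush flags on queues without a volatile write cache and completes data-less flushes immediately. For context, a hedged sketch of how a caller typically issues an empty flush bio; blkdev_issue_flush() does something along these lines, but the helper name here is hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative caller: synchronously flush a device's volatile write
 * cache with a data-less REQ_OP_WRITE | REQ_PREFLUSH bio.
 */
static int sketch_issue_flush(struct block_device *bdev)
{
	struct bio bio;
	int ret;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);
	return ret;
}
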
@@ -788,17 +804,6 @@ void submit_bio_noacct(struct bio *bio)
 
        if (blk_throtl_bio(bio))
                return;
-
-       blk_cgroup_bio_start(bio);
-       blkcg_bio_issue_init(bio);
-
-       if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-               trace_block_bio_queue(bio);
-               /* Now that enqueuing has been traced, we need to trace
-                * completion as well.
-                */
-               bio_set_flag(bio, BIO_TRACE_COMPLETION);
-       }
        submit_bio_noacct_nocheck(bio);
        return;
 
@@ -869,7 +874,16 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
         */
        blk_flush_plug(current->plug, false);
 
-       if (bio_queue_enter(bio))
+       /*
+        * We need to be able to enter a frozen queue, similar to how
+        * timeouts also need to do that. If that is blocked, then we can
+        * have pending IO when a queue freeze is started, and then the
+        * wait for the freeze to finish will wait for polled requests to
+        * time out as the poller is prevented from entering the queue and
+        * completing them. As long as we prevent new IO from being queued,
+        * that should be all that matters.
+        */
+       if (!percpu_ref_tryget(&q->q_usage_counter))
                return 0;
        if (queue_is_mq(q)) {
                ret = blk_mq_poll(q, cookie, iob, flags);
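
The final hunk stops bio_poll() from going through bio_queue_enter(), which may block or bail out while a queue freeze is in progress, and instead only takes a percpu reference on q_usage_counter. That way the poller can still enter the queue and reap pending polled completions during a freeze, while new I/O submission remains blocked. For context, a hedged sketch of a synchronous caller driving bio_poll(), loosely modelled on the direct I/O path (the helper name and the 'done' flag are hypothetical; 'done' would be set from the bio's ->bi_end_io handler):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical caller: spin on bio_poll() until the bio completes,
 * yielding the CPU whenever no completion was found.
 */
static void sketch_poll_until_done(struct bio *bio, bool *done)
{
	while (!READ_ONCE(*done)) {
		if (!bio_poll(bio, NULL, 0))
			blk_io_schedule();
	}
}
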