block: make blk_poll() take a parameter on whether to spin or not
author		Jens Axboe <axboe@kernel.dk>
		Mon, 26 Nov 2018 15:24:43 +0000 (08:24 -0700)
committer	Jens Axboe <axboe@kernel.dk>
		Mon, 26 Nov 2018 15:25:53 +0000 (08:25 -0700)
blk_poll() has always kept spinning until it found an IO. This is
fine for SYNC polling, since we need to find one request we have
pending, but in preparation for ASYNC polling it can be beneficial
to just check if we have any entries available or not.

Existing callers are converted to pass in 'spin == true', to retain
the old behavior.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
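
As a hedged illustration of the two modes (not part of the patch; 'done', 'found', 'q' and 'cookie' stand in for caller state):

    /* sync: spin until the IO we are waiting on completes (old behavior) */
    while (!READ_ONCE(done)) {
            if (!blk_poll(q, cookie, true))
                    io_schedule();
    }

    /* async-style: one non-spinning pass, reap whatever has completed */
    found = blk_poll(q, cookie, false);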
block/blk-core.c
block/blk-mq.c
drivers/nvme/host/multipath.c
fs/block_dev.c
fs/direct-io.c
fs/iomap.c
include/linux/blkdev.h
mm/page_io.c

diff --git a/block/blk-core.c b/block/blk-core.c
index 03c4202b69bf0057be334445d2e6ca1226abb0a4..9af56dbb84f111aaa6591101ba0a7b886e1e9339 100644
@@ -1277,19 +1277,22 @@ EXPORT_SYMBOL(submit_bio);
  * blk_poll - poll for IO completions
  * @q:  the queue
  * @cookie: cookie passed back at IO submission time
+ * @spin: whether to spin for completions
  *
  * Description:
  *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found.
+ *    completed entries found. If @spin is true, then blk_poll will continue
+ *    looping until at least one completion is found, unless the task is
+ *    otherwise marked running (or we need to reschedule).
  */
-int blk_poll(struct request_queue *q, blk_qc_t cookie)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
        if (!q->poll_fn || !blk_qc_t_valid(cookie))
                return 0;
 
        if (current->plug)
                blk_flush_plug_list(current->plug, false);
-       return q->poll_fn(q, cookie);
+       return q->poll_fn(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
 
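This mirrors the pattern the converted callers below use: a return of 0 covers both "no completions found" and "queue does not support polling", so a caller can simply fall back to sleeping. Minimal sketch (caller state names illustrative):

    /* nothing reaped (or polling unsupported): wait for the IRQ path */
    if (!blk_poll(bdev_get_queue(bdev), qc, true))
            io_schedule();
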
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b66cca3ce1e51e6081236918581f76d5681a057b..c2751f0a3ccc0f777fff08e473a52ac0f93348f6 100644
@@ -38,7 +38,7 @@
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
@@ -3352,7 +3352,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
        return blk_mq_poll_hybrid_sleep(q, hctx, rq);
 }
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
        struct blk_mq_hw_ctx *hctx;
        long state;
@@ -3392,7 +3392,7 @@ static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 
                if (current->state == TASK_RUNNING)
                        return 1;
-               if (ret < 0)
+               if (ret < 0 || !spin)
                        break;
                cpu_relax();
        }
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f9eeb3b5863290e1c94f09b91dbb58598176d286..ffebdd0ae34befbb2762fda384199fb667cbff11 100644
@@ -220,7 +220,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
        return ret;
 }
 
-static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
+static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc, bool spin)
 {
        struct nvme_ns_head *head = q->queuedata;
        struct nvme_ns *ns;
@@ -230,7 +230,7 @@ static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
        srcu_idx = srcu_read_lock(&head->srcu);
        ns = srcu_dereference(head->current_path[numa_node_id()], &head->srcu);
        if (likely(ns && nvme_path_is_optimized(ns)))
-               found = ns->queue->poll_fn(q, qc);
+               found = ns->queue->poll_fn(q, qc, spin);
        srcu_read_unlock(&head->srcu, srcu_idx);
        return found;
 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 64ba27b8b7549e43caf5d7c85ab42fe18107f72a..d233a59ea364e61d0bfd064c9fc559ca1b339ec1 100644
@@ -243,7 +243,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                        break;
 
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(bdev), qc))
+                   !blk_poll(bdev_get_queue(bdev), qc, true))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
@@ -423,7 +423,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                        break;
 
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(bdev), qc))
+                   !blk_poll(bdev_get_queue(bdev), qc, true))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index ea07d5a34317dcf2a644a5942d4fa8736c7a23b1..a5a4e5a1423eee7c4e292e2ec1cd2b80490ee342 100644
@@ -518,7 +518,7 @@ static struct bio *dio_await_one(struct dio *dio)
                dio->waiter = current;
                spin_unlock_irqrestore(&dio->bio_lock, flags);
                if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(dio->bio_disk->queue, dio->bio_cookie))
+                   !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
                        io_schedule();
                /* wake up sets us TASK_RUNNING */
                spin_lock_irqsave(&dio->bio_lock, flags);
diff --git a/fs/iomap.c b/fs/iomap.c
index c5df035ace6f3058fd3472e2c302cdeb63a4203e..74c1f37f0fd6b591a717727602ead631eb5e546a 100644
@@ -1896,7 +1896,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !dio->submit.last_queue ||
                            !blk_poll(dio->submit.last_queue,
-                                        dio->submit.cookie))
+                                        dio->submit.cookie, true))
                                io_schedule();
                }
                __set_current_state(TASK_RUNNING);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3015e9b5ae37b7406e9d97e4ffcbbcbab391978..e3c0a8ec16a79eac9c744effb9143775fa3914e0 100644
@@ -283,7 +283,7 @@ static inline unsigned short req_get_ioprio(struct request *req)
 struct blk_queue_ctx;
 
 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef int (poll_q_fn) (struct request_queue *q, blk_qc_t);
+typedef int (poll_q_fn) (struct request_queue *q, blk_qc_t, bool spin);
 
 struct bio_vec;
 typedef int (dma_drain_needed_fn)(struct request *);
@@ -867,7 +867,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 int blk_status_to_errno(blk_status_t status);
 blk_status_t errno_to_blk_status(int errno);
 
-int blk_poll(struct request_queue *q, blk_qc_t cookie);
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
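
With the typedef change, any driver that installs q->poll_fn must use the three-argument form. A hypothetical sketch of a conforming implementation (the mydrv_* names are illustrative, not a real driver):

    static int mydrv_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
    {
            int found;

            do {
                    /* mydrv_reap_completions() is a stand-in for the
                     * driver's completion-queue scan */
                    found = mydrv_reap_completions(q, cookie);
            } while (!found && spin && !need_resched());

            return found;
    }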
diff --git a/mm/page_io.c b/mm/page_io.c
index a7271fa481f6e30f7217e191ef329b726866a231..5bdfd21c1bd9f061b950867bc62ada20c2b69352 100644
@@ -410,7 +410,7 @@ int swap_readpage(struct page *page, bool synchronous)
                if (!READ_ONCE(bio->bi_private))
                        break;
 
-               if (!blk_poll(disk->queue, qc))
+               if (!blk_poll(disk->queue, qc, true))
                        break;
        }
        __set_current_state(TASK_RUNNING);