block: move poll code to blk-mq
author Jens Axboe <axboe@fb.com>
Fri, 4 Nov 2016 15:34:34 +0000 (09:34 -0600)
committer Jens Axboe <axboe@fb.com>
Fri, 11 Nov 2016 20:40:25 +0000 (13:40 -0700)
The poll code is blk-mq specific, so move it to blk-mq.c. This
is a prep patch for improving the polling code.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
block/blk-core.c
block/blk-mq.c
drivers/nvme/target/io-cmd.c
fs/direct-io.c
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 59f8129a42950e0d23e0e4cc28d4ab8e8e67b63e..eea246567884bc37d45d40fe75ba60bbfc12221b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3312,52 +3312,6 @@ void blk_finish_plug(struct blk_plug *plug)
 }
 EXPORT_SYMBOL(blk_finish_plug);
 
-bool blk_poll(struct request_queue *q, blk_qc_t cookie)
-{
-       struct blk_plug *plug;
-       long state;
-       unsigned int queue_num;
-       struct blk_mq_hw_ctx *hctx;
-
-       if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
-           !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
-               return false;
-
-       queue_num = blk_qc_t_to_queue_num(cookie);
-       hctx = q->queue_hw_ctx[queue_num];
-       hctx->poll_considered++;
-
-       plug = current->plug;
-       if (plug)
-               blk_flush_plug_list(plug, false);
-
-       state = current->state;
-       while (!need_resched()) {
-               int ret;
-
-               hctx->poll_invoked++;
-
-               ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie));
-               if (ret > 0) {
-                       hctx->poll_success++;
-                       set_current_state(TASK_RUNNING);
-                       return true;
-               }
-
-               if (signal_pending_state(state, current))
-                       set_current_state(TASK_RUNNING);
-
-               if (current->state == TASK_RUNNING)
-                       return true;
-               if (ret < 0)
-                       break;
-               cpu_relax();
-       }
-
-       return false;
-}
-EXPORT_SYMBOL_GPL(blk_poll);
-
 #ifdef CONFIG_PM
 /**
  * blk_pm_runtime_init - Block layer runtime PM initialization routine
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 77110aed24ea075710949c8ef3a4598732a46375..ae8df5ec20d3860663bad8bdd2ef271f8eadc491 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2461,6 +2461,60 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
+static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+       struct request_queue *q = hctx->queue;
+       long state;
+
+       hctx->poll_considered++;
+
+       state = current->state;
+       while (!need_resched()) {
+               int ret;
+
+               hctx->poll_invoked++;
+
+               ret = q->mq_ops->poll(hctx, rq->tag);
+               if (ret > 0) {
+                       hctx->poll_success++;
+                       set_current_state(TASK_RUNNING);
+                       return true;
+               }
+
+               if (signal_pending_state(state, current))
+                       set_current_state(TASK_RUNNING);
+
+               if (current->state == TASK_RUNNING)
+                       return true;
+               if (ret < 0)
+                       break;
+               cpu_relax();
+       }
+
+       return false;
+}
+
+bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+{
+       struct blk_mq_hw_ctx *hctx;
+       struct blk_plug *plug;
+       struct request *rq;
+
+       if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
+           !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+               return false;
+
+       plug = current->plug;
+       if (plug)
+               blk_flush_plug_list(plug, false);
+
+       hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+       rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+
+       return __blk_mq_poll(hctx, rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_poll);
+
 void blk_mq_disable_hotplug(void)
 {
        mutex_lock(&all_q_mutex);
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index c2784cfc5e298b3d15d9351c17d5af8e058ff6db..ef52b1e701447ee9544ea664ac8163573de7cfb3 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -96,7 +96,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
 
        cookie = submit_bio(bio);
 
-       blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+       blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
 }
 
 static void nvmet_execute_flush(struct nvmet_req *req)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index a5138c564019a4ca863b0540f7503f0a55ecd27e..835e23a4ee4b44b1c2d5a38b76fa50c1cf4b1b83 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -457,7 +457,7 @@ static struct bio *dio_await_one(struct dio *dio)
                dio->waiter = current;
                spin_unlock_irqrestore(&dio->bio_lock, flags);
                if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
+                   !blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
                        io_schedule();
                /* wake up sets us TASK_RUNNING */
                spin_lock_irqsave(&dio->bio_lock, flags);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 15da9e430f90a0659afe6e19750e9f657f99ab61..bab18ee5810d3ac40c2d51ace9b80e947164769d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -952,7 +952,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                  struct request *, int, rq_end_io_fn *);
 
-bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {