io_uring: don't take ctx refs in task_work handler
author: Pavel Begunkov <asml.silence@gmail.com>
Fri, 19 Mar 2021 17:22:29 +0000 (17:22 +0000)
committer: Jens Axboe <axboe@kernel.dk>
Sun, 11 Apr 2021 23:41:58 +0000 (17:41 -0600)
__tctx_task_work() guarantees that ctx won't be killed while running
task_works, so we can remove now unnecessary ctx pinning for internally
armed polling.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 962a3580c49fdf7ca20db7cf644aa1c019966dfe..12e2ec7cfba4faa0a8587e85bdffbeeab8153d1b 100644 (file)
@@ -4882,7 +4882,6 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
        req->result = mask;
        req->task_work.func = func;
-       percpu_ref_get(&req->ctx->refs);
 
        /*
         * If this fails, then the task is exiting. When a task exits, the
@@ -4982,8 +4981,6 @@ static void io_poll_task_func(struct callback_head *cb)
                if (nxt)
                        __io_req_task_submit(nxt);
        }
-
-       percpu_ref_put(&ctx->refs);
 }
 
 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
@@ -5090,7 +5087,6 @@ static void io_async_task_func(struct callback_head *cb)
 
        if (io_poll_rewait(req, &apoll->poll)) {
                spin_unlock_irq(&ctx->completion_lock);
-               percpu_ref_put(&ctx->refs);
                return;
        }
 
@@ -5106,7 +5102,6 @@ static void io_async_task_func(struct callback_head *cb)
        else
                __io_req_task_cancel(req, -ECANCELED);
 
-       percpu_ref_put(&ctx->refs);
        kfree(apoll->double_poll);
        kfree(apoll);
 }