io_uring: cache poll/double-poll state with a request flag
author    Jens Axboe <axboe@kernel.dk>
Wed, 16 Mar 2022 22:59:10 +0000 (16:59 -0600)
committer Jens Axboe <axboe@kernel.dk>
Wed, 16 Mar 2022 22:59:10 +0000 (16:59 -0600)
With commit "io_uring: cache req->apoll->events in req->cflags" applied,
io_poll_remove_entries() is now the only place that dips into req->apoll
when it isn't strictly necessary.

Mark poll and double-poll with their own flags, so we know whether we
need to look at apoll->double_poll at all. This avoids pulling in those
cachelines when we don't need them. In the common case the poll wake
handler has already removed these entries while they were still hot off
the completion path.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
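
To make the mechanism concrete outside the kernel, here is a minimal
userspace sketch of the same pattern. Everything prefixed toy_ (and the
TOY_EPOLLONESHOT stand-in) is invented for this illustration; only the
two REQ_F_* flag names and the flag-gating logic mirror the patch:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two new request flags; the bit values are arbitrary. */
#define REQ_F_SINGLE_POLL	(1U << 0)
#define REQ_F_DOUBLE_POLL	(1U << 1)
#define TOY_EPOLLONESHOT	(1U << 2)	/* invented stand-in for EPOLLONESHOT */

struct toy_poll {
	unsigned int events;
	bool queued;			/* models poll->head != NULL */
};

struct toy_req {
	unsigned int flags;
	struct toy_poll poll;
	struct toy_poll double_poll;
};

/* Models __io_queue_proc(): arming an entry sets the matching flag. */
static void toy_arm(struct toy_req *req, bool second)
{
	struct toy_poll *p = second ? &req->double_poll : &req->poll;

	p->queued = true;
	req->flags |= second ? REQ_F_DOUBLE_POLL : REQ_F_SINGLE_POLL;
}

/* Models io_poll_wake(): a oneshot hit unhooks the entry and drops the flag. */
static void toy_wake(struct toy_req *req, int mask)
{
	if (mask && (req->poll.events & TOY_EPOLLONESHOT)) {
		req->poll.queued = false;
		req->flags &= ~REQ_F_SINGLE_POLL;
	}
}

/* Models io_poll_remove_entries(): the flags gate every entry touch. */
static void toy_remove_entries(struct toy_req *req)
{
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;			/* fast path: nothing armed, touch nothing */

	if (req->flags & REQ_F_SINGLE_POLL)
		req->poll.queued = false;
	if (req->flags & REQ_F_DOUBLE_POLL)
		req->double_poll.queued = false;
}

int main(void)
{
	struct toy_req req = { .poll.events = TOY_EPOLLONESHOT };

	toy_arm(&req, false);
	toy_wake(&req, 1);		/* oneshot wake clears REQ_F_SINGLE_POLL */
	toy_remove_entries(&req);	/* no flag set, takes the fast path */
	printf("flags after teardown: %#x\n", req.flags);
	return 0;
}
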
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bfddad7a14ef2351efabf9e33ffc679780711666..5b5f48f0f81e754ca91e0290ad1afc6c12ec64c3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -771,6 +771,8 @@ enum {
        REQ_F_ARM_LTIMEOUT_BIT,
        REQ_F_ASYNC_DATA_BIT,
        REQ_F_SKIP_LINK_CQES_BIT,
+       REQ_F_SINGLE_POLL_BIT,
+       REQ_F_DOUBLE_POLL_BIT,
        /* keep async read/write and isreg together and in order */
        REQ_F_SUPPORT_NOWAIT_BIT,
        REQ_F_ISREG_BIT,
@@ -829,6 +831,10 @@ enum {
        REQ_F_ASYNC_DATA        = BIT(REQ_F_ASYNC_DATA_BIT),
        /* don't post CQEs while failing linked requests */
        REQ_F_SKIP_LINK_CQES    = BIT(REQ_F_SKIP_LINK_CQES_BIT),
+       /* single poll may be active */
+       REQ_F_SINGLE_POLL       = BIT(REQ_F_SINGLE_POLL_BIT),
+       /* double poll may be active */
+       REQ_F_DOUBLE_POLL       = BIT(REQ_F_DOUBLE_POLL_BIT),
 };
 
 struct async_poll {
@@ -5823,8 +5829,12 @@ static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
 
 static void io_poll_remove_entries(struct io_kiocb *req)
 {
-       struct io_poll_iocb *poll = io_poll_get_single(req);
-       struct io_poll_iocb *poll_double = io_poll_get_double(req);
+       /*
+        * Nothing to do if neither of those flags is set. Avoid dipping
+        * into the poll/apoll/double cachelines if we can.
+        */
+       if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
+               return;
 
        /*
         * While we hold the waitqueue lock and the waitqueue is nonempty,
@@ -5842,9 +5852,10 @@ static void io_poll_remove_entries(struct io_kiocb *req)
         * In that case, only RCU prevents the queue memory from being freed.
         */
        rcu_read_lock();
-       io_poll_remove_entry(poll);
-       if (poll_double)
-               io_poll_remove_entry(poll_double);
+       if (req->flags & REQ_F_SINGLE_POLL)
+               io_poll_remove_entry(io_poll_get_single(req));
+       if (req->flags & REQ_F_DOUBLE_POLL)
+               io_poll_remove_entry(io_poll_get_double(req));
        rcu_read_unlock();
 }
 
@@ -6026,6 +6037,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
+                       req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask, poll->events);
        }
@@ -6062,12 +6074,14 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                        pt->error = -ENOMEM;
                        return;
                }
+               req->flags |= REQ_F_DOUBLE_POLL;
                io_init_poll_iocb(poll, first->events, first->wait.func);
                *poll_ptr = poll;
                if (req->opcode == IORING_OP_POLL_ADD)
                        req->flags |= REQ_F_ASYNC_DATA;
        }
 
+       req->flags |= REQ_F_SINGLE_POLL;
        pt->nr_entries++;
        poll->head = head;
        poll->wait.private = req;
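
A note on the design: the comments deliberately say "may be active"
rather than "is active". A set flag only promises that an entry might
still be queued, so the worst a stale flag costs is the cacheline pull
the old code paid unconditionally; a clear flag, on the other hand, can
always be trusted, which is what makes the early return in
io_poll_remove_entries() safe.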