// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	/* ring mapped provided buffers */
	__u8 is_mapped;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
	/* bl is visible from an RCU point of view for lookup */
	__u8 is_ready;
};

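/* A single legacy provided buffer, tracked via ->buf_list in its group */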
struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

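/*
 * Select a buffer from the request's buffer group: returns its userspace
 * address and trims *len to the buffer size.
 */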
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

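/* IORING_OP_REMOVE_BUFFERS */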
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

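/* IORING_OP_PROVIDE_BUFFERS */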
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

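/* io_uring_register() handlers for ring-mapped provided buffers */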
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

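/* Slow path for io_put_kbuf(): releases the buffer under whichever lock
 * the issue context requires.
 */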
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

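/* Recycle a classic provided buffer back onto its group's free list */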
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

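/* Look up the kernel address of a ring-mapped buffer group, for mmap() */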
void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
			return true;
		}
	}
	return false;
}

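/* true if the request wants buffer selection and hasn't yet consumed one */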
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

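/* Hand the buffer back for reuse, e.g. when the request will be retried */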
static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

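/*
 * Consume the buffer and return the IORING_CQE_F_BUFFER flags, with the
 * buffer ID encoded, for the CQE. Ring buffers advance ->head; legacy
 * buffers are moved to @list.
 */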
static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}

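/* Variant for the completion path; the caller must hold ->completion_lock */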
static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

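/* Consume the buffer at completion time; locking is chosen via issue_flags */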
static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif