// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	/* nothing to do if the command was never marked cancelable */
	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel it by calling ->uring_cmd() with the
 * IO_URING_F_CANCEL issue flag.
 *
 * The command is guaranteed not to be done when ->uring_cmd() is called
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);

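/*
 * Example (illustrative sketch only, not part of this file): a driver's
 * ->uring_cmd() handler that registers for cancellation before going
 * async. All "mydrv_*" names are hypothetical.
 *
 *	static int mydrv_uring_cmd(struct io_uring_cmd *cmd,
 *				   unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			// io_uring is canceling a previously issued command;
 *			// tear down the hardware side, then complete with
 *			// io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags)
 *			// once the race with normal completion is resolved
 *			return mydrv_abort(cmd);
 *		}
 *
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		mydrv_queue(cmd);		// hypothetical HW submission
 *		return -EIOCBQUEUED;		// completion comes later
 *	}
 */
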
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

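/*
 * Example (illustrative sketch only): deferring completion to task
 * context from hard-IRQ context via io_uring_cmd_complete_in_task(),
 * the flags == 0 wrapper around the helper above. "mydrv_*" names are
 * hypothetical.
 *
 *	static void mydrv_cmd_tw(struct io_uring_cmd *cmd,
 *				 unsigned int issue_flags)
 *	{
 *		// now in task context, safe to post the CQE
 *		io_uring_cmd_done(cmd, 0, 0, issue_flags);
 *	}
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct io_uring_cmd *cmd = data;
 *
 *		// io_uring_cmd_done() must not run here; bounce to task work
 *		io_uring_cmd_complete_in_task(cmd, mydrv_cmd_tw);
 *		return IRQ_HANDLED;
 *	}
 */
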
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

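/*
 * Example (illustrative sketch only): mapping driver results onto the
 * CQE. ret lands in cqe->res; res2 is only visible to userspace as
 * cqe->big_cqe[0] when the ring was created with IORING_SETUP_CQE32,
 * as the code above shows. "mydrv_*" names are hypothetical.
 *
 *	static void mydrv_cmd_finish(struct io_uring_cmd *cmd,
 *				     unsigned int issue_flags)
 *	{
 *		// hypothetical: success status in res, a device cookie
 *		// in res2 for CQE32 rings
 *		io_uring_cmd_done(cmd, 0, 0xdeadbeef, issue_flags);
 *	}
 */
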
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	/* copy the SQE so it stays valid after submission returns */
	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

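/*
 * Example (illustrative sketch only): a handler cooperating with the
 * retry and flag propagation above. "mydrv_*" names are hypothetical.
 *
 *	static int mydrv_issue(struct io_uring_cmd *cmd,
 *			       unsigned int issue_flags)
 *	{
 *		// payload lives in the second SQE half, so require big SQEs
 *		if (!(issue_flags & IO_URING_F_SQE128))
 *			return -EOPNOTSUPP;
 *		// on -EAGAIN the core copies the SQE to async data and
 *		// retries later, so failing transiently here is safe
 *		if (mydrv_queue_full(cmd->file->private_data))
 *			return -EAGAIN;
 *		return -EIOCBQUEUED;	// completion posted later
 *	}
 */
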
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

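/*
 * Example (illustrative sketch only): importing a registered buffer in
 * a driver's read path. Userspace must have set IORING_URING_CMD_FIXED
 * and sqe->buf_index, which io_uring_cmd_prep() resolved to req->imu.
 * "mydrv_*" names are hypothetical.
 *
 *	static int mydrv_read_fixed(struct io_uring_cmd *cmd,
 *				    u64 ubuf, unsigned long len)
 *	{
 *		struct iov_iter iter;
 *		int ret;
 *
 *		ret = io_uring_cmd_import_fixed(ubuf, len, ITER_DEST,
 *						&iter, cmd);
 *		if (ret)
 *			return ret;
 *		return mydrv_dma_to_iter(&iter);  // hypothetical DMA copy
 *	}
 */
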
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
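
/*
 * Example (illustrative userspace sketch, not kernel code): issuing
 * SOCKET_URING_OP_SIOCINQ through liburing, assuming the
 * io_uring_prep_cmd_sock() helper is available (liburing 2.5+). On
 * completion, cqe->res holds the unread byte count, mirroring the
 * "return arg" above.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ, sockfd,
 *			       0, 0, NULL, 0);
 *	io_uring_submit(&ring);
 */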