vfs_io_uring: avoid stack recursion of vfs_io_uring_queue_run()
source3/modules/vfs_io_uring.c
/*
 * Use the io_uring of Linux (>= 5.1)
 *
 * Copyright (C) Volker Lendecke 2008
 * Copyright (C) Jeremy Allison 2010
 * Copyright (C) Stefan Metzmacher 2019
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "includes.h"
#include "system/filesys.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/sys_rw.h"
#include "smbprofile.h"
#include <liburing.h>

struct vfs_io_uring_request;
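/*
 * Requests live on one of two lists in vfs_io_uring_config:
 * "queue" holds requests whose SQE has not yet been handed to the
 * kernel, "pending" holds requests that have been submitted and are
 * waiting for their CQE. _vfs_io_uring_queue_run() moves requests
 * from the former to the latter.
 */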
struct vfs_io_uring_config {
        struct io_uring uring;
        struct tevent_fd *fde;
        /* recursion guard. See comment above vfs_io_uring_queue_run() */
        bool busy;
        /* recursion guard. See comment above vfs_io_uring_queue_run() */
        bool need_retry;
        struct vfs_io_uring_request *queue;
        struct vfs_io_uring_request *pending;
};

struct vfs_io_uring_request {
        struct vfs_io_uring_request *prev, *next;
        struct vfs_io_uring_request **list_head;
        struct vfs_io_uring_config *config;
        struct tevent_req *req;
        struct io_uring_sqe sqe;
        struct io_uring_cqe cqe;
        void (*completion_fn)(struct vfs_io_uring_request *cur,
                              const char *location);
        struct timespec start_time;
        struct timespec end_time;
        SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
};
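/*
 * Hand a finished (or failed) request back to its tevent_req: detach
 * it from whichever list it is still on, stash the CQE and end time
 * for the _recv() function, and invoke the per-operation
 * completion_fn.
 */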
static void vfs_io_uring_finish_req(struct vfs_io_uring_request *cur,
                                    const struct io_uring_cqe *cqe,
                                    struct timespec end_time,
                                    const char *location)
{
        struct tevent_req *req =
                talloc_get_type_abort(cur->req,
                struct tevent_req);
        void *state = _tevent_req_data(req);

        talloc_set_destructor(state, NULL);
        if (cur->list_head != NULL) {
                DLIST_REMOVE((*cur->list_head), cur);
                cur->list_head = NULL;
        }
        cur->cqe = *cqe;

        SMBPROFILE_BYTES_ASYNC_SET_IDLE(cur->profile_bytes);
        cur->end_time = end_time;

        /*
         * We rely on being inside the _send() function
         * or tevent_req_defer_callback() being called
         * already.
         */
        cur->completion_fn(cur, location);
}
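/*
 * Tear down the ring and flush all outstanding requests: everything
 * still on the pending or queue list is completed with a synthetic
 * CQE carrying the error passed in "ret", so no tevent_req is left
 * dangling.
 */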
static void vfs_io_uring_config_destroy(struct vfs_io_uring_config *config,
                                        int ret,
                                        const char *location)
{
        struct vfs_io_uring_request *cur = NULL, *next = NULL;
        struct timespec start_time;
        struct timespec end_time;
        struct io_uring_cqe err_cqe = {
                .res = ret,
        };

        PROFILE_TIMESTAMP(&start_time);

        if (config->uring.ring_fd != -1) {
                /* TODO: cancel queued and pending requests */
                TALLOC_FREE(config->fde);
                io_uring_queue_exit(&config->uring);
                config->uring.ring_fd = -1;
        }

        PROFILE_TIMESTAMP(&end_time);

        for (cur = config->pending; cur != NULL; cur = next) {
                next = cur->next;
                err_cqe.user_data = (uintptr_t)(void *)cur;
                vfs_io_uring_finish_req(cur, &err_cqe, end_time, location);
        }

        for (cur = config->queue; cur != NULL; cur = next) {
                next = cur->next;
                err_cqe.user_data = (uintptr_t)(void *)cur;
                cur->start_time = start_time;
                vfs_io_uring_finish_req(cur, &err_cqe, end_time, location);
        }
}

static int vfs_io_uring_config_destructor(struct vfs_io_uring_config *config)
{
        vfs_io_uring_config_destroy(config, -EUCLEAN, __location__);
        return 0;
}
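/*
 * Installed as the talloc destructor on a request's state once the
 * request has been moved to the kernel submission queue in
 * _vfs_io_uring_queue_run(). If the state is freed while the kernel
 * may still be working on the request, the only safe option left is
 * to shut down the whole ring.
 */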
static int vfs_io_uring_request_state_deny_destructor(void *_state)
{
        struct __vfs_io_uring_generic_state {
                struct vfs_io_uring_request ur;
        } *state = (struct __vfs_io_uring_generic_state *)_state;
        struct vfs_io_uring_request *cur = &state->ur;

        /* our parent is gone */
        cur->req = NULL;

        /* remove ourselves from any list */
        DLIST_REMOVE((*cur->list_head), cur);
        cur->list_head = NULL;

        /*
         * Our state is about to go away,
         * so all we can do is shut down the whole uring.
         * But that's ok as we're most likely called from exit_server()
         */
        vfs_io_uring_config_destroy(cur->config, -ESHUTDOWN, __location__);
        return 0;
}
static void vfs_io_uring_fd_handler(struct tevent_context *ev,
                                    struct tevent_fd *fde,
                                    uint16_t flags,
                                    void *private_data);

static int vfs_io_uring_connect(vfs_handle_struct *handle, const char *service,
                            const char *user)
{
        int ret;
        struct vfs_io_uring_config *config;
        unsigned num_entries;
        bool sqpoll;
        unsigned flags = 0;

        config = talloc_zero(handle->conn, struct vfs_io_uring_config);
        if (config == NULL) {
                DEBUG(0, ("talloc_zero() failed\n"));
                return -1;
        }

        SMB_VFS_HANDLE_SET_DATA(handle, config,
                                NULL, struct vfs_io_uring_config,
                                return -1);

        ret = SMB_VFS_NEXT_CONNECT(handle, service, user);
        if (ret < 0) {
                return ret;
        }

        num_entries = lp_parm_ulong(SNUM(handle->conn),
                                    "io_uring",
                                    "num_entries",
                                    128);
        num_entries = MAX(num_entries, 1);

        sqpoll = lp_parm_bool(SNUM(handle->conn),
                             "io_uring",
                             "sqpoll",
                             false);
        if (sqpoll) {
                flags |= IORING_SETUP_SQPOLL;
        }

        ret = io_uring_queue_init(num_entries, &config->uring, flags);
        if (ret < 0) {
                SMB_VFS_NEXT_DISCONNECT(handle);
                errno = -ret;
                return -1;
        }

        talloc_set_destructor(config, vfs_io_uring_config_destructor);

#ifdef HAVE_IO_URING_RING_DONTFORK
        ret = io_uring_ring_dontfork(&config->uring);
        if (ret < 0) {
                SMB_VFS_NEXT_DISCONNECT(handle);
                errno = -ret;
                return -1;
        }
#endif /* HAVE_IO_URING_RING_DONTFORK */

        config->fde = tevent_add_fd(handle->conn->sconn->ev_ctx,
                                    config,
                                    config->uring.ring_fd,
                                    TEVENT_FD_READ,
                                    vfs_io_uring_fd_handler,
                                    handle);
        if (config->fde == NULL) {
                ret = errno;
                SMB_VFS_NEXT_DISCONNECT(handle);
                errno = ret;
                return -1;
        }

        return 0;
}
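/*
 * For reference, a minimal smb.conf sketch exercising the parameters
 * read above (the share name and path are placeholders; the values
 * shown are the defaults used by the lp_parm_*() calls in
 * vfs_io_uring_connect()):
 *
 *   [data]
 *       path = /srv/samba/data
 *       vfs objects = io_uring
 *       io_uring:num_entries = 128
 *       io_uring:sqpoll = no
 */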
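/*
 * Move as many queued requests as possible into the kernel submission
 * queue, submit them, then reap any completions that have already
 * arrived and finish the corresponding requests. Only ever entered
 * via the vfs_io_uring_queue_run() wrapper below.
 */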
static void _vfs_io_uring_queue_run(struct vfs_io_uring_config *config)
{
        struct vfs_io_uring_request *cur = NULL, *next = NULL;
        struct io_uring_cqe *cqe = NULL;
        unsigned cqhead;
        unsigned nr = 0;
        struct timespec start_time;
        struct timespec end_time;
        int ret;

        PROFILE_TIMESTAMP(&start_time);

        if (config->uring.ring_fd == -1) {
                vfs_io_uring_config_destroy(config, -ESTALE, __location__);
                return;
        }

        for (cur = config->queue; cur != NULL; cur = next) {
                struct io_uring_sqe *sqe = NULL;
                void *state = _tevent_req_data(cur->req);

                next = cur->next;

                sqe = io_uring_get_sqe(&config->uring);
                if (sqe == NULL) {
                        break;
                }

                talloc_set_destructor(state,
                        vfs_io_uring_request_state_deny_destructor);
                DLIST_REMOVE(config->queue, cur);
                *sqe = cur->sqe;
                DLIST_ADD_END(config->pending, cur);
                cur->list_head = &config->pending;
                SMBPROFILE_BYTES_ASYNC_SET_BUSY(cur->profile_bytes);

                cur->start_time = start_time;
        }

        ret = io_uring_submit(&config->uring);
        if (ret == -EAGAIN || ret == -EBUSY) {
                /* We just retry later */
        } else if (ret < 0) {
                vfs_io_uring_config_destroy(config, ret, __location__);
                return;
        }

        PROFILE_TIMESTAMP(&end_time);

        io_uring_for_each_cqe(&config->uring, cqhead, cqe) {
                cur = (struct vfs_io_uring_request *)io_uring_cqe_get_data(cqe);
                vfs_io_uring_finish_req(cur, cqe, end_time, __location__);
                nr++;
        }

        io_uring_cq_advance(&config->uring, nr);
}
/*
 * Wrapper function to prevent recursion which could happen
 * if we called _vfs_io_uring_queue_run() directly without
 * recursion checks.
 *
 * Looking at the pread call, we can have:
 *
 * vfs_io_uring_pread_send()
 *        ->vfs_io_uring_pread_submit()  <-----------------------------------
 *                ->vfs_io_uring_request_submit()                           |
 *                        ->vfs_io_uring_queue_run()                        |
 *                                ->_vfs_io_uring_queue_run()               |
 *                                                                          |
 * But inside _vfs_io_uring_queue_run() looks like:                         |
 *                                                                          |
 * _vfs_io_uring_queue_run() {                                              |
 *      if (THIS_IO_COMPLETED) {                                            |
 *              ->vfs_io_uring_finish_req()                                 |
 *                      ->cur->completion_fn()                              |
 *      }                                                                   |
 * }                                                                        |
 *                                                                          |
 * cur->completion_fn() for pread is set to vfs_io_uring_pread_completion() |
 *                                                                          |
 * vfs_io_uring_pread_completion() {                                        |
 *      if (READ_TERMINATED) {                                              |
 *              -> tevent_req_done() - We're done, go back up the stack.    |
 *              return;                                                     |
 *      }                                                                   |
 *                                                                          |
 *      We have a short read - adjust the io vectors                        |
 *                                                                          |
 *      ->vfs_io_uring_pread_submit() ---------------------------------------
 * }
 *
 * So before calling _vfs_io_uring_queue_run() we bracket it by setting
 * the flag config->busy, and unset it once the retry loop around
 * _vfs_io_uring_queue_run() finally exits.
 *
 * If we end up back in vfs_io_uring_queue_run() we notice we've done so,
 * as config->busy is set, and don't recurse into _vfs_io_uring_queue_run().
 *
 * Instead we set the second flag, config->need_retry, which tells the
 * vfs_io_uring_queue_run() call above us in the stack to loop, and return.
 *
 * When the outer call to _vfs_io_uring_queue_run() returns we are in
 * a loop checking if config->need_retry was set. That happens if
 * the short read case occurs and _vfs_io_uring_queue_run() ended up
 * recursing into vfs_io_uring_queue_run().
 *
 * Once vfs_io_uring_pread_completion() finishes without a short
 * read (the READ_TERMINATED case, where tevent_req_done() is called),
 * config->need_retry is left as false and we exit the loop. We then
 * set config->busy to false, so the next top level call into
 * vfs_io_uring_queue_run() won't think it's a recursed call, and return.
 *
 */
static void vfs_io_uring_queue_run(struct vfs_io_uring_config *config)
{
        if (config->busy) {
                /*
                 * We've recursed due to a short read/write.
                 * Set need_retry to ensure we retry the
                 * io_uring_submit().
                 */
                config->need_retry = true;
                return;
        }

        /*
         * Bracket the loop calling _vfs_io_uring_queue_run()
         * with busy = true / busy = false,
         * so we can detect recursion above.
         */

        config->busy = true;

        do {
                config->need_retry = false;
                _vfs_io_uring_queue_run(config);
        } while (config->need_retry);

        config->busy = false;
}

static void vfs_io_uring_fd_handler(struct tevent_context *ev,
                                    struct tevent_fd *fde,
                                    uint16_t flags,
                                    void *private_data)
{
        vfs_handle_struct *handle = (vfs_handle_struct *)private_data;
        struct vfs_io_uring_config *config = NULL;

        SMB_VFS_HANDLE_GET_DATA(handle, config,
                                struct vfs_io_uring_config,
                                smb_panic(__location__));

        vfs_io_uring_queue_run(config);
}
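/*
 * The pread, pwrite and fsync implementations below all follow the
 * same pattern: create a tevent_req, prepare an SQE that points back
 * at the per-request state, append the request to config->queue and
 * kick vfs_io_uring_queue_run(). If the request already finished
 * inside the _send() function the result is posted immediately;
 * otherwise tevent_req_defer_callback() ensures the completion is
 * delivered through the event loop once the CQE arrives via the fd
 * handler.
 */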
struct vfs_io_uring_pread_state {
        struct vfs_io_uring_request ur;
        struct iovec iov;
        size_t nread;
};

static void vfs_io_uring_pread_completion(struct vfs_io_uring_request *cur,
                                          const char *location);

static struct tevent_req *vfs_io_uring_pread_send(struct vfs_handle_struct *handle,
                                             TALLOC_CTX *mem_ctx,
                                             struct tevent_context *ev,
                                             struct files_struct *fsp,
                                             void *data,
                                             size_t n, off_t offset)
{
        struct tevent_req *req = NULL;
        struct vfs_io_uring_pread_state *state = NULL;
        struct vfs_io_uring_config *config = NULL;
        bool ok;

        SMB_VFS_HANDLE_GET_DATA(handle, config,
                                struct vfs_io_uring_config,
                                smb_panic(__location__));

        req = tevent_req_create(mem_ctx, &state,
                                struct vfs_io_uring_pread_state);
        if (req == NULL) {
                return NULL;
        }
        state->ur.config = config;
        state->ur.req = req;
        state->ur.completion_fn = vfs_io_uring_pread_completion;

        SMBPROFILE_BYTES_ASYNC_START(syscall_asys_pread, profile_p,
                                     state->ur.profile_bytes, n);
        SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ur.profile_bytes);

        ok = sys_valid_io_range(offset, n);
        if (!ok) {
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }

        state->iov.iov_base = (void *)data;
        state->iov.iov_len = n;
        io_uring_prep_readv(&state->ur.sqe,
                            fsp->fh->fd,
                            &state->iov, 1,
                            offset);
        io_uring_sqe_set_data(&state->ur.sqe, &state->ur);
        DLIST_ADD_END(config->queue, &state->ur);
        state->ur.list_head = &config->queue;

        vfs_io_uring_queue_run(config);

        if (!tevent_req_is_in_progress(req)) {
                return tevent_req_post(req, ev);
        }

        tevent_req_defer_callback(req, ev);
        return req;
}

static void vfs_io_uring_pread_completion(struct vfs_io_uring_request *cur,
                                          const char *location)
{
        struct vfs_io_uring_pread_state *state = tevent_req_data(
                cur->req, struct vfs_io_uring_pread_state);

        /*
         * We rely on being inside the _send() function
         * or tevent_req_defer_callback() being called
         * already.
         */

        if (cur->cqe.res < 0) {
                int err = -cur->cqe.res;
                _tevent_req_error(cur->req, err, location);
                return;
        }

        state->nread = state->ur.cqe.res;
        tevent_req_done(cur->req);
}

static ssize_t vfs_io_uring_pread_recv(struct tevent_req *req,
                                  struct vfs_aio_state *vfs_aio_state)
{
        struct vfs_io_uring_pread_state *state = tevent_req_data(
                req, struct vfs_io_uring_pread_state);
        ssize_t ret;

        SMBPROFILE_BYTES_ASYNC_END(state->ur.profile_bytes);
        vfs_aio_state->duration = nsec_time_diff(&state->ur.end_time,
                                                 &state->ur.start_time);

        if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
                tevent_req_received(req);
                return -1;
        }

        vfs_aio_state->error = 0;
        ret = state->nread;

        tevent_req_received(req);
        return ret;
}
struct vfs_io_uring_pwrite_state {
        struct vfs_io_uring_request ur;
        struct iovec iov;
        size_t nwritten;
};

static void vfs_io_uring_pwrite_completion(struct vfs_io_uring_request *cur,
                                           const char *location);

static struct tevent_req *vfs_io_uring_pwrite_send(struct vfs_handle_struct *handle,
                                              TALLOC_CTX *mem_ctx,
                                              struct tevent_context *ev,
                                              struct files_struct *fsp,
                                              const void *data,
                                              size_t n, off_t offset)
{
        struct tevent_req *req = NULL;
        struct vfs_io_uring_pwrite_state *state = NULL;
        struct vfs_io_uring_config *config = NULL;
        bool ok;

        SMB_VFS_HANDLE_GET_DATA(handle, config,
                                struct vfs_io_uring_config,
                                smb_panic(__location__));

        req = tevent_req_create(mem_ctx, &state,
                                struct vfs_io_uring_pwrite_state);
        if (req == NULL) {
                return NULL;
        }
        state->ur.config = config;
        state->ur.req = req;
        state->ur.completion_fn = vfs_io_uring_pwrite_completion;

        SMBPROFILE_BYTES_ASYNC_START(syscall_asys_pwrite, profile_p,
                                     state->ur.profile_bytes, n);
        SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ur.profile_bytes);

        ok = sys_valid_io_range(offset, n);
        if (!ok) {
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }

        state->iov.iov_base = discard_const(data);
        state->iov.iov_len = n;
        io_uring_prep_writev(&state->ur.sqe,
                             fsp->fh->fd,
                             &state->iov, 1,
                             offset);
        io_uring_sqe_set_data(&state->ur.sqe, &state->ur);
        DLIST_ADD_END(config->queue, &state->ur);
        state->ur.list_head = &config->queue;

        vfs_io_uring_queue_run(config);

        if (!tevent_req_is_in_progress(req)) {
                return tevent_req_post(req, ev);
        }

        tevent_req_defer_callback(req, ev);
        return req;
}

static void vfs_io_uring_pwrite_completion(struct vfs_io_uring_request *cur,
                                           const char *location)
{
        struct vfs_io_uring_pwrite_state *state = tevent_req_data(
                cur->req, struct vfs_io_uring_pwrite_state);

        /*
         * We rely on being inside the _send() function
         * or tevent_req_defer_callback() being called
         * already.
         */

        if (cur->cqe.res < 0) {
                int err = -cur->cqe.res;
                _tevent_req_error(cur->req, err, location);
                return;
        }

        state->nwritten = state->ur.cqe.res;
        tevent_req_done(cur->req);
}

static ssize_t vfs_io_uring_pwrite_recv(struct tevent_req *req,
                                   struct vfs_aio_state *vfs_aio_state)
{
        struct vfs_io_uring_pwrite_state *state = tevent_req_data(
                req, struct vfs_io_uring_pwrite_state);
        ssize_t ret;

        SMBPROFILE_BYTES_ASYNC_END(state->ur.profile_bytes);
        vfs_aio_state->duration = nsec_time_diff(&state->ur.end_time,
                                                 &state->ur.start_time);

        if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
                tevent_req_received(req);
                return -1;
        }

        vfs_aio_state->error = 0;
        ret = state->nwritten;

        tevent_req_received(req);
        return ret;
}
struct vfs_io_uring_fsync_state {
        struct vfs_io_uring_request ur;
};

static void vfs_io_uring_fsync_completion(struct vfs_io_uring_request *cur,
                                          const char *location);

static struct tevent_req *vfs_io_uring_fsync_send(struct vfs_handle_struct *handle,
                                             TALLOC_CTX *mem_ctx,
                                             struct tevent_context *ev,
                                             struct files_struct *fsp)
{
        struct tevent_req *req = NULL;
        struct vfs_io_uring_fsync_state *state = NULL;
        struct vfs_io_uring_config *config = NULL;

        SMB_VFS_HANDLE_GET_DATA(handle, config,
                                struct vfs_io_uring_config,
                                smb_panic(__location__));

        req = tevent_req_create(mem_ctx, &state,
                                struct vfs_io_uring_fsync_state);
        if (req == NULL) {
                return NULL;
        }
        state->ur.config = config;
        state->ur.req = req;
        state->ur.completion_fn = vfs_io_uring_fsync_completion;

        SMBPROFILE_BYTES_ASYNC_START(syscall_asys_fsync, profile_p,
                                     state->ur.profile_bytes, 0);
        SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ur.profile_bytes);

        io_uring_prep_fsync(&state->ur.sqe,
                            fsp->fh->fd,
                            0); /* fsync_flags */
        io_uring_sqe_set_data(&state->ur.sqe, &state->ur);
        DLIST_ADD_END(config->queue, &state->ur);
        state->ur.list_head = &config->queue;

        vfs_io_uring_queue_run(config);

        if (!tevent_req_is_in_progress(req)) {
                return tevent_req_post(req, ev);
        }

        tevent_req_defer_callback(req, ev);
        return req;
}

static void vfs_io_uring_fsync_completion(struct vfs_io_uring_request *cur,
                                          const char *location)
{
        /*
         * We rely on being inside the _send() function
         * or tevent_req_defer_callback() being called
         * already.
         */

        if (cur->cqe.res < 0) {
                int err = -cur->cqe.res;
                _tevent_req_error(cur->req, err, location);
                return;
        }

        tevent_req_done(cur->req);
}

static int vfs_io_uring_fsync_recv(struct tevent_req *req,
                              struct vfs_aio_state *vfs_aio_state)
{
        struct vfs_io_uring_fsync_state *state = tevent_req_data(
                req, struct vfs_io_uring_fsync_state);

        SMBPROFILE_BYTES_ASYNC_END(state->ur.profile_bytes);
        vfs_aio_state->duration = nsec_time_diff(&state->ur.end_time,
                                                 &state->ur.start_time);

        if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
                tevent_req_received(req);
                return -1;
        }

        vfs_aio_state->error = 0;

        tevent_req_received(req);
        return 0;
}
static struct vfs_fn_pointers vfs_io_uring_fns = {
        .connect_fn = vfs_io_uring_connect,
        .pread_send_fn = vfs_io_uring_pread_send,
        .pread_recv_fn = vfs_io_uring_pread_recv,
        .pwrite_send_fn = vfs_io_uring_pwrite_send,
        .pwrite_recv_fn = vfs_io_uring_pwrite_recv,
        .fsync_send_fn = vfs_io_uring_fsync_send,
        .fsync_recv_fn = vfs_io_uring_fsync_recv,
};
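/*
 * Module entry point: registers the functions above under the name
 * "io_uring", which is the name used with the "vfs objects" option
 * in smb.conf.
 */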
static_decl_vfs;
NTSTATUS vfs_io_uring_init(TALLOC_CTX *ctx)
{
        return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
                                "io_uring", &vfs_io_uring_fns);
}