vfs_ceph: add fake async pwrite/pread send/recv hooks
author David Disseldorp <ddiss@samba.org>
Wed, 9 May 2018 14:51:34 +0000 (16:51 +0200)
committer Karolin Seeger <kseeger@samba.org>
Mon, 14 May 2018 07:58:07 +0000 (09:58 +0200)
As found by Jeremy, VFS modules that don't provide pread_send() or
pwrite_send() hooks fall back to the vfs_default implementations, which
is catastrophic for VFS modules with non-mounted filesystems such as
vfs_ceph.

Bug: https://bugzilla.samba.org/show_bug.cgi?id=13425

Reported-by: Jeremy Allison <jra@samba.org>
Signed-off-by: David Disseldorp <ddiss@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>
(cherry picked from commit f0e6453b0420fe9d062936d4ddc05f44b40cf2ba)
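
A minimal illustrative sketch of why the fallback breaks (hypothetical
helper names, not Samba source): roughly speaking, the vfs_default async
path ends up issuing a plain pread(2)/pwrite(2) against fsp->fh->fd, but
for vfs_ceph that value is a libcephfs handle rather than a local kernel
file descriptor, so the I/O has to go through the synchronous libcephfs
calls instead.

/*
 * Illustrative sketch only - hypothetical helpers, not Samba code.
 */
#include <errno.h>
#include <unistd.h>
#include <cephfs/libcephfs.h>

/* Roughly what the generic fallback does - wrong for a non-mounted fs. */
static ssize_t local_fd_read(int fd, char *buf, size_t n, off_t offset)
{
	return pread(fd, buf, n, offset);	/* fd is not a local fd here */
}

/* What vfs_ceph must do instead: call the synchronous libcephfs API. */
static ssize_t cephfs_read(struct ceph_mount_info *cmount, int fd,
			   char *buf, size_t n, off_t offset)
{
	int ret = ceph_read(cmount, fd, buf, n, offset);
	if (ret < 0) {
		errno = -ret;	/* libcephfs returns -errno on error */
		return -1;
	}
	return ret;
}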

source3/modules/vfs_ceph.c

index 61df5dedf82eba6df5557ed253b6a7f60fef68a6..857310c2ac3b5541e877da7d6af8fa7e5480405b 100644
@@ -482,6 +482,57 @@ static ssize_t cephwrap_pread(struct vfs_handle_struct *handle, files_struct *fs
        WRAP_RETURN(result);
 }
 
+struct cephwrap_pread_state {
+       ssize_t bytes_read;
+       struct vfs_aio_state vfs_aio_state;
+};
+
+/*
+ * Fake up an async ceph read by calling the synchronous API.
+ */
+static struct tevent_req *cephwrap_pread_send(struct vfs_handle_struct *handle,
+                                             TALLOC_CTX *mem_ctx,
+                                             struct tevent_context *ev,
+                                             struct files_struct *fsp,
+                                             void *data,
+                                             size_t n, off_t offset)
+{
+       struct tevent_req *req = NULL;
+       struct cephwrap_pread_state *state = NULL;
+       int ret = -1;
+
+       DBG_DEBUG("[CEPH] %s\n", __func__);
+       req = tevent_req_create(mem_ctx, &state, struct cephwrap_pread_state);
+       if (req == NULL) {
+               return NULL;
+       }
+
+       ret = ceph_read(handle->data, fsp->fh->fd, data, n, offset);
+       if (ret < 0) {
+               /* ceph returns -errno on error. */
+               tevent_req_error(req, -ret);
+               return tevent_req_post(req, ev);
+       }
+
+       state->bytes_read = ret;
+       tevent_req_done(req);
+       /* Return and schedule the completion of the call. */
+       return tevent_req_post(req, ev);
+}
+
+static ssize_t cephwrap_pread_recv(struct tevent_req *req,
+                                  struct vfs_aio_state *vfs_aio_state)
+{
+       struct cephwrap_pread_state *state =
+               tevent_req_data(req, struct cephwrap_pread_state);
+
+       DBG_DEBUG("[CEPH] %s\n", __func__);
+       if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+               return -1;
+       }
+       *vfs_aio_state = state->vfs_aio_state;
+       return state->bytes_read;
+}
 
 static ssize_t cephwrap_write(struct vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n)
 {
@@ -510,6 +561,58 @@ static ssize_t cephwrap_pwrite(struct vfs_handle_struct *handle, files_struct *f
        WRAP_RETURN(result);
 }
 
+struct cephwrap_pwrite_state {
+       ssize_t bytes_written;
+       struct vfs_aio_state vfs_aio_state;
+};
+
+/*
+ * Fake up an async ceph write by calling the synchronous API.
+ */
+static struct tevent_req *cephwrap_pwrite_send(struct vfs_handle_struct *handle,
+                                              TALLOC_CTX *mem_ctx,
+                                              struct tevent_context *ev,
+                                              struct files_struct *fsp,
+                                              const void *data,
+                                              size_t n, off_t offset)
+{
+       struct tevent_req *req = NULL;
+       struct cephwrap_pwrite_state *state = NULL;
+       int ret = -1;
+
+       DBG_DEBUG("[CEPH] %s\n", __func__);
+       req = tevent_req_create(mem_ctx, &state, struct cephwrap_pwrite_state);
+       if (req == NULL) {
+               return NULL;
+       }
+
+       ret = ceph_write(handle->data, fsp->fh->fd, data, n, offset);
+       if (ret < 0) {
+               /* ceph returns -errno on error. */
+               tevent_req_error(req, -ret);
+               return tevent_req_post(req, ev);
+       }
+
+       state->bytes_written = ret;
+       tevent_req_done(req);
+       /* Return and schedule the completion of the call. */
+       return tevent_req_post(req, ev);
+}
+
+static ssize_t cephwrap_pwrite_recv(struct tevent_req *req,
+                                   struct vfs_aio_state *vfs_aio_state)
+{
+       struct cephwrap_pwrite_state *state =
+               tevent_req_data(req, struct cephwrap_pwrite_state);
+
+       DBG_DEBUG("[CEPH] %s\n", __func__);
+       if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+               return -1;
+       }
+       *vfs_aio_state = state->vfs_aio_state;
+       return state->bytes_written;
+}
+
 static off_t cephwrap_lseek(struct vfs_handle_struct *handle, files_struct *fsp, off_t offset, int whence)
 {
        off_t result = 0;
@@ -571,7 +674,7 @@ static int cephwrap_fsync(struct vfs_handle_struct *handle, files_struct *fsp)
 }
 
 /*
- * Fake up an async ceph fsync by calling the sychronous API.
+ * Fake up an async ceph fsync by calling the synchronous API.
  */
 
 static struct tevent_req *cephwrap_fsync_send(struct vfs_handle_struct *handle,
@@ -1485,8 +1588,12 @@ static struct vfs_fn_pointers ceph_fns = {
        .close_fn = cephwrap_close,
        .read_fn = cephwrap_read,
        .pread_fn = cephwrap_pread,
+       .pread_send_fn = cephwrap_pread_send,
+       .pread_recv_fn = cephwrap_pread_recv,
        .write_fn = cephwrap_write,
        .pwrite_fn = cephwrap_pwrite,
+       .pwrite_send_fn = cephwrap_pwrite_send,
+       .pwrite_recv_fn = cephwrap_pwrite_recv,
        .lseek_fn = cephwrap_lseek,
        .sendfile_fn = cephwrap_sendfile,
        .recvfile_fn = cephwrap_recvfile,
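
For reference, a self-contained toy sketch of the fake-async tevent
pattern the new hooks follow (toy_send/toy_recv are made-up names, not
Samba code): the _send() function does the work synchronously, marks the
request done or failed, and returns via tevent_req_post() so completion
is still delivered through the event loop; the _recv() function then
pulls the result out of the request state.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

struct toy_state {
	int result;
};

static struct tevent_req *toy_send(TALLOC_CTX *mem_ctx,
				   struct tevent_context *ev, int input)
{
	struct tevent_req *req = NULL;
	struct toy_state *state = NULL;

	req = tevent_req_create(mem_ctx, &state, struct toy_state);
	if (req == NULL) {
		return NULL;
	}

	if (input < 0) {
		/* Report failure, then post so the caller sees it async. */
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	/* The "work" happens synchronously, right here. */
	state->result = input * 2;
	tevent_req_done(req);

	/* Schedule completion so it is delivered from the event loop. */
	return tevent_req_post(req, ev);
}

static int toy_recv(struct tevent_req *req, uint64_t *perr)
{
	struct toy_state *state = tevent_req_data(req, struct toy_state);
	enum tevent_req_state req_state;

	if (tevent_req_is_error(req, &req_state, perr)) {
		return -1;
	}
	return state->result;
}

int main(void)
{
	TALLOC_CTX *frame = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(frame);
	struct tevent_req *req = NULL;
	uint64_t err = 0;

	if (ev == NULL) {
		talloc_free(frame);
		return 1;
	}

	req = toy_send(frame, ev, 21);
	/* One poll is enough: the request was already posted as done. */
	if (req == NULL || !tevent_req_poll(req, ev)) {
		talloc_free(frame);
		return 1;
	}
	printf("result=%d\n", toy_recv(req, &err));
	talloc_free(frame);
	return 0;
}

Build against libtalloc and libtevent. The real hooks above use Samba's
tevent_req_is_unix_error() helper to map the stored error to an errno;
this sketch uses the plain tevent_req_is_error() from tevent.h to stay
self-contained.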