s3:vfs: Correctly check if OFD locks should be enabled or not
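
This change moves the "should OFD locks be used?" decision out of the per-call lock
wrappers: vfswrap_lock() and vfswrap_getlock() below now only translate the fcntl
command when fsp->use_ofd_locks is already true, instead of re-evaluating the
"smbd:force process locks" parameter on every call (the flag is presumably set once
where the fsp is initialised, which is outside this diff). As a rough, illustrative
sketch only (hypothetical helper name, not the tree's real map_process_lock_to_ofd_lock()),
the translation to Linux OFD lock commands looks roughly like this:

	/* Sketch: map process-wide fcntl lock ops to their OFD equivalents. */
	#define _GNU_SOURCE
	#include <fcntl.h>

	static int map_to_ofd_lock_sketch(int op)
	{
		switch (op) {
		case F_GETLK:
			return F_OFD_GETLK;
		case F_SETLK:
			return F_OFD_SETLK;
		case F_SETLKW:
			return F_OFD_SETLKW;
		default:
			return op;	/* pass anything else through unchanged */
		}
	}
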
diff --git a/source3/modules/vfs_default.c b/source3/modules/vfs_default.c
index 93ff657855330047ba36a122645c92573fe2f3c6..cb5537e096e6cbe07c2edfe07ec37b3fa5160585 100644
--- a/source3/modules/vfs_default.c
+++ b/source3/modules/vfs_default.c
@@ -34,6 +34,7 @@
 #include "lib/util/sys_rw.h"
 #include "lib/pthreadpool/pthreadpool_tevent.h"
 #include "librpc/gen_ndr/ndr_ioctl.h"
+#include "offload_token.h"
 
 #undef DBGC_CLASS
 #define DBGC_CLASS DBGC_VFS
@@ -128,8 +129,14 @@ static uint32_t vfswrap_fs_capabilities(struct vfs_handle_struct *handle,
        struct vfs_statvfs_struct statbuf;
        int ret;
 
+       smb_fname_cpath = synthetic_smb_fname(talloc_tos(), conn->connectpath,
+                                             NULL, NULL, 0);
+       if (smb_fname_cpath == NULL) {
+               return caps;
+       }
+
        ZERO_STRUCT(statbuf);
-       ret = sys_statvfs(conn->connectpath, &statbuf);
+       ret = SMB_VFS_STATVFS(conn, smb_fname_cpath, &statbuf);
        if (ret == 0) {
                caps = statbuf.FsCapabilities;
        }
@@ -139,12 +146,6 @@ static uint32_t vfswrap_fs_capabilities(struct vfs_handle_struct *handle,
        /* Work out what timestamp resolution we can
         * use when setting a timestamp. */
 
-       smb_fname_cpath = synthetic_smb_fname(talloc_tos(), conn->connectpath,
-                                             NULL, NULL, 0);
-       if (smb_fname_cpath == NULL) {
-               return caps;
-       }
-
        ret = SMB_VFS_STAT(conn, smb_fname_cpath);
        if (ret == -1) {
                TALLOC_FREE(smb_fname_cpath);
@@ -228,10 +229,18 @@ static NTSTATUS vfswrap_get_dfs_referrals(struct vfs_handle_struct *handle,
                                   !handle->conn->sconn->using_smb2,
                                   junction, &consumedcnt, &self_referral);
        if (!NT_STATUS_IS_OK(status)) {
-               vfs_ChDir(handle->conn, handle->conn->connectpath);
+               struct smb_filename connectpath_fname = {
+                       .base_name = handle->conn->connectpath
+               };
+               vfs_ChDir(handle->conn, &connectpath_fname);
                return status;
        }
-       vfs_ChDir(handle->conn, handle->conn->connectpath);
+       {
+               struct smb_filename connectpath_fname = {
+                       .base_name = handle->conn->connectpath
+               };
+               vfs_ChDir(handle->conn, &connectpath_fname);
+       }
 
        if (!self_referral) {
                pathnamep[consumedcnt] = '\0';
@@ -489,7 +498,6 @@ static int vfswrap_mkdir(vfs_handle_struct *handle,
                        mode_t mode)
 {
        int result;
-       bool has_dacl = False;
        const char *path = smb_fname->base_name;
        char *parent = NULL;
 
@@ -497,7 +505,7 @@ static int vfswrap_mkdir(vfs_handle_struct *handle,
 
        if (lp_inherit_acls(SNUM(handle->conn))
            && parent_dirname(talloc_tos(), path, &parent, NULL)
-           && (has_dacl = directory_has_default_acl(handle->conn, parent))) {
+           && directory_has_default_acl(handle->conn, parent)) {
                mode = (0777 & lp_directory_mask(SNUM(handle->conn)));
        }
 
@@ -505,21 +513,6 @@ static int vfswrap_mkdir(vfs_handle_struct *handle,
 
        result = mkdir(path, mode);
 
-       if (result == 0 && !has_dacl) {
-               /*
-                * We need to do this as the default behavior of POSIX ACLs
-                * is to set the mask to be the requested group permission
-                * bits, not the group permission bits to be the requested
-                * group permission bits. This is not what we want, as it will
-                * mess up any inherited ACL bits that were set. JRA.
-                */
-               int saved_errno = errno; /* We may get ENOSYS */
-               if ((SMB_VFS_CHMOD_ACL(handle->conn, smb_fname, mode) == -1) &&
-                               (errno == ENOSYS)) {
-                       errno = saved_errno;
-               }
-       }
-
        END_PROFILE(syscall_mkdir);
        return result;
 }
@@ -545,12 +538,6 @@ static int vfswrap_closedir(vfs_handle_struct *handle, DIR *dirp)
        return result;
 }
 
-static void vfswrap_init_search_op(vfs_handle_struct *handle,
-                                  DIR *dirp)
-{
-       /* Default behavior is a NOOP */
-}
-
 /* File operations */
 
 static int vfswrap_open(vfs_handle_struct *handle,
@@ -611,16 +598,6 @@ static int vfswrap_close(vfs_handle_struct *handle, files_struct *fsp)
        return result;
 }
 
-static ssize_t vfswrap_read(vfs_handle_struct *handle, files_struct *fsp, void *data, size_t n)
-{
-       ssize_t result;
-
-       START_PROFILE_BYTES(syscall_read, n);
-       result = sys_read(fsp->fh->fd, data, n);
-       END_PROFILE_BYTES(syscall_read);
-       return result;
-}
-
 static ssize_t vfswrap_pread(vfs_handle_struct *handle, files_struct *fsp, void *data,
                        size_t n, off_t offset)
 {
@@ -633,48 +610,18 @@ static ssize_t vfswrap_pread(vfs_handle_struct *handle, files_struct *fsp, void
 
        if (result == -1 && errno == ESPIPE) {
                /* Maintain the fiction that pipes can be seeked (sought?) on. */
-               result = SMB_VFS_READ(fsp, data, n);
+               result = sys_read(fsp->fh->fd, data, n);
                fsp->fh->pos = 0;
        }
 
 #else /* HAVE_PREAD */
-       off_t   curr;
-       int lerrno;
-
-       curr = SMB_VFS_LSEEK(fsp, 0, SEEK_CUR);
-       if (curr == -1 && errno == ESPIPE) {
-               /* Maintain the fiction that pipes can be seeked (sought?) on. */
-               result = SMB_VFS_READ(fsp, data, n);
-               fsp->fh->pos = 0;
-               return result;
-       }
-
-       if (SMB_VFS_LSEEK(fsp, offset, SEEK_SET) == -1) {
-               return -1;
-       }
-
-       errno = 0;
-       result = SMB_VFS_READ(fsp, data, n);
-       lerrno = errno;
-
-       SMB_VFS_LSEEK(fsp, curr, SEEK_SET);
-       errno = lerrno;
-
+       errno = ENOSYS;
+       result = -1;
 #endif /* HAVE_PREAD */
 
        return result;
 }
 
-static ssize_t vfswrap_write(vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n)
-{
-       ssize_t result;
-
-       START_PROFILE_BYTES(syscall_write, n);
-       result = sys_write(fsp->fh->fd, data, n);
-       END_PROFILE_BYTES(syscall_write);
-       return result;
-}
-
 static ssize_t vfswrap_pwrite(vfs_handle_struct *handle, files_struct *fsp, const void *data,
                        size_t n, off_t offset)
 {
@@ -687,49 +634,19 @@ static ssize_t vfswrap_pwrite(vfs_handle_struct *handle, files_struct *fsp, cons
 
        if (result == -1 && errno == ESPIPE) {
                /* Maintain the fiction that pipes can be sought on. */
-               result = SMB_VFS_WRITE(fsp, data, n);
+               result = sys_write(fsp->fh->fd, data, n);
        }
 
 #else /* HAVE_PWRITE */
-       off_t   curr;
-       int         lerrno;
-
-       curr = SMB_VFS_LSEEK(fsp, 0, SEEK_CUR);
-       if (curr == -1) {
-               return -1;
-       }
-
-       if (SMB_VFS_LSEEK(fsp, offset, SEEK_SET) == -1) {
-               return -1;
-       }
-
-       result = SMB_VFS_WRITE(fsp, data, n);
-       lerrno = errno;
-
-       SMB_VFS_LSEEK(fsp, curr, SEEK_SET);
-       errno = lerrno;
-
+       errno = ENOSYS;
+       result = -1;
 #endif /* HAVE_PWRITE */
 
        return result;
 }
 
-static int vfswrap_init_pool(struct smbd_server_connection *conn)
-{
-       int ret;
-
-       if (conn->pool != NULL) {
-               return 0;
-       }
-
-       ret = pthreadpool_tevent_init(conn, lp_aio_max_threads(),
-                                     &conn->pool);
-       return ret;
-}
-
 struct vfswrap_pread_state {
        ssize_t ret;
-       int err;
        int fd;
        void *buf;
        size_t count;
@@ -741,6 +658,7 @@ struct vfswrap_pread_state {
 
 static void vfs_pread_do(void *private_data);
 static void vfs_pread_done(struct tevent_req *subreq);
+static int vfs_pread_state_destructor(struct vfswrap_pread_state *state);
 
 static struct tevent_req *vfswrap_pread_send(struct vfs_handle_struct *handle,
                                             TALLOC_CTX *mem_ctx,
@@ -751,18 +669,12 @@ static struct tevent_req *vfswrap_pread_send(struct vfs_handle_struct *handle,
 {
        struct tevent_req *req, *subreq;
        struct vfswrap_pread_state *state;
-       int ret;
 
        req = tevent_req_create(mem_ctx, &state, struct vfswrap_pread_state);
        if (req == NULL) {
                return NULL;
        }
 
-       ret = vfswrap_init_pool(handle->conn->sconn);
-       if (tevent_req_error(req, ret)) {
-               return tevent_req_post(req, ev);
-       }
-
        state->ret = -1;
        state->fd = fsp->fh->fd;
        state->buf = data;
@@ -781,6 +693,8 @@ static struct tevent_req *vfswrap_pread_send(struct vfs_handle_struct *handle,
        }
        tevent_req_set_callback(subreq, vfs_pread_done, req);
 
+       talloc_set_destructor(state, vfs_pread_state_destructor);
+
        return req;
 }
 
@@ -800,7 +714,9 @@ static void vfs_pread_do(void *private_data)
                                   state->offset);
        } while ((state->ret == -1) && (errno == EINTR));
 
-       state->err = errno;
+       if (state->ret == -1) {
+               state->vfs_aio_state.error = errno;
+       }
 
        PROFILE_TIMESTAMP(&end_time);
 
@@ -809,21 +725,35 @@ static void vfs_pread_do(void *private_data)
        SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
 }
 
+static int vfs_pread_state_destructor(struct vfswrap_pread_state *state)
+{
+       return -1;
+}
+
 static void vfs_pread_done(struct tevent_req *subreq)
 {
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
-#ifdef WITH_PROFILE
        struct vfswrap_pread_state *state = tevent_req_data(
                req, struct vfswrap_pread_state);
-#endif
        int ret;
 
        ret = pthreadpool_tevent_job_recv(subreq);
        TALLOC_FREE(subreq);
        SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
-       if (tevent_req_error(req, ret)) {
-               return;
+       talloc_set_destructor(state, NULL);
+       if (ret != 0) {
+               if (ret != EAGAIN) {
+                       tevent_req_error(req, ret);
+                       return;
+               }
+               /*
+                * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+                * means the lower level pthreadpool failed to create a new
+                * thread. Fall back to sync processing in that case to allow
+                * some progress for the client.
+                */
+               vfs_pread_do(state);
        }
 
        tevent_req_done(req);
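
The destructor idiom above is worth spelling out: vfs_pread_state_destructor() always
returns -1, so talloc refuses to free the state while a pthreadpool job may still be
writing into its buffer; the completion callback disarms it with
talloc_set_destructor(state, NULL) once pthreadpool_tevent_job_recv() has returned.
The same pattern is repeated for the pwrite, fsync and getxattrat states below.
A stand-alone illustration of the talloc behaviour (hypothetical names, not Samba code):

	#include <stdio.h>
	#include <talloc.h>

	struct job_state {
		char *buf;	/* imagine a worker thread still writing here */
	};

	/* Returning -1 from a talloc destructor vetoes the free. */
	static int job_state_destructor(struct job_state *s)
	{
		return -1;
	}

	int main(void)
	{
		struct job_state *s = talloc_zero(NULL, struct job_state);

		talloc_set_destructor(s, job_state_destructor);
		printf("free while armed: %d\n", talloc_free(s));	/* -1, refused */

		talloc_set_destructor(s, NULL);				/* job finished */
		printf("free after disarm: %d\n", talloc_free(s));	/* 0, freed */
		return 0;
	}
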
@@ -845,7 +775,6 @@ static ssize_t vfswrap_pread_recv(struct tevent_req *req,
 
 struct vfswrap_pwrite_state {
        ssize_t ret;
-       int err;
        int fd;
        const void *buf;
        size_t count;
@@ -857,6 +786,7 @@ struct vfswrap_pwrite_state {
 
 static void vfs_pwrite_do(void *private_data);
 static void vfs_pwrite_done(struct tevent_req *subreq);
+static int vfs_pwrite_state_destructor(struct vfswrap_pwrite_state *state);
 
 static struct tevent_req *vfswrap_pwrite_send(struct vfs_handle_struct *handle,
                                              TALLOC_CTX *mem_ctx,
@@ -867,18 +797,12 @@ static struct tevent_req *vfswrap_pwrite_send(struct vfs_handle_struct *handle,
 {
        struct tevent_req *req, *subreq;
        struct vfswrap_pwrite_state *state;
-       int ret;
 
        req = tevent_req_create(mem_ctx, &state, struct vfswrap_pwrite_state);
        if (req == NULL) {
                return NULL;
        }
 
-       ret = vfswrap_init_pool(handle->conn->sconn);
-       if (tevent_req_error(req, ret)) {
-               return tevent_req_post(req, ev);
-       }
-
        state->ret = -1;
        state->fd = fsp->fh->fd;
        state->buf = data;
@@ -897,6 +821,8 @@ static struct tevent_req *vfswrap_pwrite_send(struct vfs_handle_struct *handle,
        }
        tevent_req_set_callback(subreq, vfs_pwrite_done, req);
 
+       talloc_set_destructor(state, vfs_pwrite_state_destructor);
+
        return req;
 }
 
@@ -916,7 +842,9 @@ static void vfs_pwrite_do(void *private_data)
                                   state->offset);
        } while ((state->ret == -1) && (errno == EINTR));
 
-       state->err = errno;
+       if (state->ret == -1) {
+               state->vfs_aio_state.error = errno;
+       }
 
        PROFILE_TIMESTAMP(&end_time);
 
@@ -925,21 +853,35 @@ static void vfs_pwrite_do(void *private_data)
        SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
 }
 
+static int vfs_pwrite_state_destructor(struct vfswrap_pwrite_state *state)
+{
+       return -1;
+}
+
 static void vfs_pwrite_done(struct tevent_req *subreq)
 {
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
-#ifdef WITH_PROFILE
        struct vfswrap_pwrite_state *state = tevent_req_data(
                req, struct vfswrap_pwrite_state);
-#endif
        int ret;
 
        ret = pthreadpool_tevent_job_recv(subreq);
        TALLOC_FREE(subreq);
        SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
-       if (tevent_req_error(req, ret)) {
-               return;
+       talloc_set_destructor(state, NULL);
+       if (ret != 0) {
+               if (ret != EAGAIN) {
+                       tevent_req_error(req, ret);
+                       return;
+               }
+               /*
+                * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+                * means the lower level pthreadpool failed to create a new
+                * thread. Fall back to sync processing in that case to allow
+                * some progress for the client.
+                */
+               vfs_pwrite_do(state);
        }
 
        tevent_req_done(req);
@@ -961,15 +903,15 @@ static ssize_t vfswrap_pwrite_recv(struct tevent_req *req,
 
 struct vfswrap_fsync_state {
        ssize_t ret;
-       int err;
        int fd;
 
        struct vfs_aio_state vfs_aio_state;
-       SMBPROFILE_BASIC_ASYNC_STATE(profile_basic);
+       SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
 };
 
 static void vfs_fsync_do(void *private_data);
 static void vfs_fsync_done(struct tevent_req *subreq);
+static int vfs_fsync_state_destructor(struct vfswrap_fsync_state *state);
 
 static struct tevent_req *vfswrap_fsync_send(struct vfs_handle_struct *handle,
                                             TALLOC_CTX *mem_ctx,
@@ -978,23 +920,18 @@ static struct tevent_req *vfswrap_fsync_send(struct vfs_handle_struct *handle,
 {
        struct tevent_req *req, *subreq;
        struct vfswrap_fsync_state *state;
-       int ret;
 
        req = tevent_req_create(mem_ctx, &state, struct vfswrap_fsync_state);
        if (req == NULL) {
                return NULL;
        }
 
-       ret = vfswrap_init_pool(handle->conn->sconn);
-       if (tevent_req_error(req, ret)) {
-               return tevent_req_post(req, ev);
-       }
-
        state->ret = -1;
        state->fd = fsp->fh->fd;
 
-       SMBPROFILE_BASIC_ASYNC_START(syscall_asys_fsync, profile_p,
-                                    state->profile_basic);
+       SMBPROFILE_BYTES_ASYNC_START(syscall_asys_fsync, profile_p,
+                                    state->profile_bytes, 0);
+       SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
 
        subreq = pthreadpool_tevent_job_send(
                state, ev, handle->conn->sconn->pool, vfs_fsync_do, state);
@@ -1003,6 +940,8 @@ static struct tevent_req *vfswrap_fsync_send(struct vfs_handle_struct *handle,
        }
        tevent_req_set_callback(subreq, vfs_fsync_done, req);
 
+       talloc_set_destructor(state, vfs_fsync_state_destructor);
+
        return req;
 }
 
@@ -1013,34 +952,54 @@ static void vfs_fsync_do(void *private_data)
        struct timespec start_time;
        struct timespec end_time;
 
+       SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
+
        PROFILE_TIMESTAMP(&start_time);
 
        do {
                state->ret = fsync(state->fd);
        } while ((state->ret == -1) && (errno == EINTR));
 
-       state->err = errno;
+       if (state->ret == -1) {
+               state->vfs_aio_state.error = errno;
+       }
 
        PROFILE_TIMESTAMP(&end_time);
 
        state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time);
+
+       SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+}
+
+static int vfs_fsync_state_destructor(struct vfswrap_fsync_state *state)
+{
+       return -1;
 }
 
 static void vfs_fsync_done(struct tevent_req *subreq)
 {
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
-#ifdef WITH_PROFILE
        struct vfswrap_fsync_state *state = tevent_req_data(
                req, struct vfswrap_fsync_state);
-#endif
        int ret;
 
        ret = pthreadpool_tevent_job_recv(subreq);
        TALLOC_FREE(subreq);
-       SMBPROFILE_BASIC_ASYNC_END(state->profile_basic);
-       if (tevent_req_error(req, ret)) {
-               return;
+       SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
+       talloc_set_destructor(state, NULL);
+       if (ret != 0) {
+               if (ret != EAGAIN) {
+                       tevent_req_error(req, ret);
+                       return;
+               }
+               /*
+                * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+                * means the lower level pthreadpool failed to create a new
+                * thread. Fall back to sync processing in that case to allow
+                * some progress for the client.
+                */
+               vfs_fsync_do(state);
        }
 
        tevent_req_done(req);
@@ -1131,20 +1090,6 @@ static int vfswrap_rename(vfs_handle_struct *handle,
        return result;
 }
 
-static int vfswrap_fsync(vfs_handle_struct *handle, files_struct *fsp)
-{
-#ifdef HAVE_FSYNC
-       int result;
-
-       START_PROFILE(syscall_fsync);
-       result = fsync(fsp->fh->fd);
-       END_PROFILE(syscall_fsync);
-       return result;
-#else
-       return 0;
-#endif
-}
-
 static int vfswrap_stat(vfs_handle_struct *handle,
                        struct smb_filename *smb_fname)
 {
@@ -1414,6 +1359,7 @@ static NTSTATUS vfswrap_fsctl(struct vfs_handle_struct *handle,
                 * but I have to check that --metze
                 */
                struct dom_sid sid;
+               struct dom_sid_buf buf;
                uid_t uid;
                size_t sid_len;
 
@@ -1433,11 +1379,12 @@ static NTSTATUS vfswrap_fsctl(struct vfs_handle_struct *handle,
                if (!sid_parse(_in_data + 4, sid_len, &sid)) {
                        return NT_STATUS_INVALID_PARAMETER;
                }
-               DEBUGADD(10, ("for SID: %s\n", sid_string_dbg(&sid)));
+               DEBUGADD(10, ("for SID: %s\n",
+                             dom_sid_str_buf(&sid, &buf)));
 
                if (!sid_to_uid(&sid, &uid)) {
                        DEBUG(0,("sid_to_uid: failed, sid[%s] sid_len[%lu]\n",
-                                sid_string_dbg(&sid),
+                                dom_sid_str_buf(&sid, &buf),
                                 (unsigned long)sid_len));
                        uid = (-1);
                }
@@ -1571,6 +1518,142 @@ static NTSTATUS vfswrap_get_dos_attributes(struct vfs_handle_struct *handle,
        return get_ea_dos_attribute(handle->conn, smb_fname, dosmode);
 }
 
+struct vfswrap_get_dos_attributes_state {
+       struct vfs_aio_state aio_state;
+       connection_struct *conn;
+       TALLOC_CTX *mem_ctx;
+       struct tevent_context *ev;
+       files_struct *dir_fsp;
+       struct smb_filename *smb_fname;
+       uint32_t dosmode;
+       bool as_root;
+};
+
+static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq);
+
+static struct tevent_req *vfswrap_get_dos_attributes_send(
+                       TALLOC_CTX *mem_ctx,
+                       struct tevent_context *ev,
+                       struct vfs_handle_struct *handle,
+                       files_struct *dir_fsp,
+                       struct smb_filename *smb_fname)
+{
+       struct tevent_req *req = NULL;
+       struct tevent_req *subreq = NULL;
+       struct vfswrap_get_dos_attributes_state *state = NULL;
+
+       req = tevent_req_create(mem_ctx, &state,
+                               struct vfswrap_get_dos_attributes_state);
+       if (req == NULL) {
+               return NULL;
+       }
+
+       *state = (struct vfswrap_get_dos_attributes_state) {
+               .conn = dir_fsp->conn,
+               .mem_ctx = mem_ctx,
+               .ev = ev,
+               .dir_fsp = dir_fsp,
+               .smb_fname = smb_fname,
+       };
+
+       subreq = SMB_VFS_GETXATTRAT_SEND(state,
+                                        ev,
+                                        dir_fsp,
+                                        smb_fname,
+                                        SAMBA_XATTR_DOS_ATTRIB,
+                                        sizeof(fstring));
+       if (tevent_req_nomem(subreq, req)) {
+               return tevent_req_post(req, ev);
+       }
+       tevent_req_set_callback(subreq,
+                               vfswrap_get_dos_attributes_getxattr_done,
+                               req);
+
+       return req;
+}
+
+static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq)
+{
+       struct tevent_req *req =
+               tevent_req_callback_data(subreq,
+               struct tevent_req);
+       struct vfswrap_get_dos_attributes_state *state =
+               tevent_req_data(req,
+               struct vfswrap_get_dos_attributes_state);
+       ssize_t xattr_size;
+       DATA_BLOB blob = {0};
+       NTSTATUS status;
+
+       xattr_size = SMB_VFS_GETXATTRAT_RECV(subreq,
+                                            &state->aio_state,
+                                            state,
+                                            &blob.data);
+       TALLOC_FREE(subreq);
+       if (xattr_size == -1) {
+               status = map_nt_error_from_unix(state->aio_state.error);
+
+               if (state->as_root) {
+                       tevent_req_nterror(req, status);
+                       return;
+               }
+               if (!NT_STATUS_EQUAL(status, NT_STATUS_ACCESS_DENIED)) {
+                       tevent_req_nterror(req, status);
+                       return;
+               }
+
+               state->as_root = true;
+
+               become_root();
+               subreq = SMB_VFS_GETXATTRAT_SEND(state,
+                                                state->ev,
+                                                state->dir_fsp,
+                                                state->smb_fname,
+                                                SAMBA_XATTR_DOS_ATTRIB,
+                                                sizeof(fstring));
+               unbecome_root();
+               if (tevent_req_nomem(subreq, req)) {
+                       return;
+               }
+               tevent_req_set_callback(subreq,
+                                       vfswrap_get_dos_attributes_getxattr_done,
+                                       req);
+               return;
+       }
+
+       blob.length = xattr_size;
+
+       status = parse_dos_attribute_blob(state->smb_fname,
+                                         blob,
+                                         &state->dosmode);
+       if (!NT_STATUS_IS_OK(status)) {
+               tevent_req_nterror(req, status);
+               return;
+       }
+
+       tevent_req_done(req);
+       return;
+}
+
+static NTSTATUS vfswrap_get_dos_attributes_recv(struct tevent_req *req,
+                                               struct vfs_aio_state *aio_state,
+                                               uint32_t *dosmode)
+{
+       struct vfswrap_get_dos_attributes_state *state =
+               tevent_req_data(req,
+               struct vfswrap_get_dos_attributes_state);
+       NTSTATUS status;
+
+       if (tevent_req_is_nterror(req, &status)) {
+               tevent_req_received(req);
+               return status;
+       }
+
+       *aio_state = state->aio_state;
+       *dosmode = state->dosmode;
+       tevent_req_received(req);
+       return NT_STATUS_OK;
+}
+
 static NTSTATUS vfswrap_fget_dos_attributes(struct vfs_handle_struct *handle,
                                            struct files_struct *fsp,
                                            uint32_t *dosmode)
@@ -1599,139 +1682,266 @@ static NTSTATUS vfswrap_fset_dos_attributes(struct vfs_handle_struct *handle,
        return set_ea_dos_attribute(handle->conn, fsp->fsp_name, dosmode);
 }
 
-struct vfs_cc_state {
-       struct tevent_context *ev;
-       uint8_t *buf;
-       bool read_lck_locked;
-       struct lock_struct read_lck;
-       bool write_lck_locked;
-       struct lock_struct write_lck;
-       struct files_struct *src_fsp;
-       off_t src_off;
-       struct files_struct *dst_fsp;
-       off_t dst_off;
-       off_t to_copy;
-       off_t remaining;
-       size_t next_io_size;
-       uint32_t flags;
-};
+static struct vfs_offload_ctx *vfswrap_offload_ctx;
 
-static NTSTATUS copy_chunk_loop(struct tevent_req *req);
+struct vfswrap_offload_read_state {
+       DATA_BLOB token;
+};
 
-static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *handle,
-                                                 TALLOC_CTX *mem_ctx,
-                                                 struct tevent_context *ev,
-                                                 struct files_struct *src_fsp,
-                                                 off_t src_off,
-                                                 struct files_struct *dest_fsp,
-                                                 off_t dest_off,
-                                                 off_t to_copy,
-                                                 uint32_t flags)
-{
-       struct tevent_req *req;
-       struct vfs_cc_state *state = NULL;
-       size_t num = MIN(to_copy, COPYCHUNK_MAX_TOTAL_LEN);
+static struct tevent_req *vfswrap_offload_read_send(
+       TALLOC_CTX *mem_ctx,
+       struct tevent_context *ev,
+       struct vfs_handle_struct *handle,
+       struct files_struct *fsp,
+       uint32_t fsctl,
+       uint32_t ttl,
+       off_t offset,
+       size_t to_copy)
+{
+       struct tevent_req *req = NULL;
+       struct vfswrap_offload_read_state *state = NULL;
        NTSTATUS status;
 
-       DBG_DEBUG("server side copy chunk of length %" PRIu64 "\n", to_copy);
-
-       req = tevent_req_create(mem_ctx, &state, struct vfs_cc_state);
+       req = tevent_req_create(mem_ctx, &state,
+                               struct vfswrap_offload_read_state);
        if (req == NULL) {
                return NULL;
        }
 
-       if (flags & ~VFS_COPY_CHUNK_FL_MASK_ALL) {
-               tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+       status = vfs_offload_token_ctx_init(fsp->conn->sconn->client,
+                                           &vfswrap_offload_ctx);
+       if (tevent_req_nterror(req, status)) {
                return tevent_req_post(req, ev);
        }
 
-       if (flags & VFS_COPY_CHUNK_FL_MUST_CLONE) {
-               DEBUG(10, ("COW clones not supported by vfs_default\n"));
-               tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+       if (fsctl != FSCTL_SRV_REQUEST_RESUME_KEY) {
+               tevent_req_nterror(req, NT_STATUS_INVALID_DEVICE_REQUEST);
                return tevent_req_post(req, ev);
        }
 
-       *state = (struct vfs_cc_state) {
-               .ev = ev,
-               .src_fsp = src_fsp,
-               .src_off = src_off,
-               .dst_fsp = dest_fsp,
-               .dst_off = dest_off,
-               .to_copy = to_copy,
-               .remaining = to_copy,
-               .flags = flags,
-       };
-       state->buf = talloc_array(state, uint8_t, num);
-       if (tevent_req_nomem(state->buf, req)) {
+       status = vfs_offload_token_create_blob(state, fsp, fsctl,
+                                              &state->token);
+       if (tevent_req_nterror(req, status)) {
                return tevent_req_post(req, ev);
        }
 
-       status = vfs_stat_fsp(src_fsp);
+       status = vfs_offload_token_db_store_fsp(vfswrap_offload_ctx, fsp,
+                                               &state->token);
        if (tevent_req_nterror(req, status)) {
                return tevent_req_post(req, ev);
        }
 
-       if (src_fsp->fsp_name->st.st_ex_size < src_off + num) {
-               /*
-                * [MS-SMB2] 3.3.5.15.6 Handling a Server-Side Data Copy Request
-                *   If the SourceOffset or SourceOffset + Length extends beyond
-                *   the end of file, the server SHOULD<240> treat this as a
-                *   STATUS_END_OF_FILE error.
-                * ...
-                *   <240> Section 3.3.5.15.6: Windows servers will return
-                *   STATUS_INVALID_VIEW_SIZE instead of STATUS_END_OF_FILE.
-                */
-               tevent_req_nterror(req, NT_STATUS_INVALID_VIEW_SIZE);
-               return tevent_req_post(req, ev);
-       }
+       tevent_req_done(req);
+       return tevent_req_post(req, ev);
+}
 
-       if (src_fsp->op == NULL) {
-               tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
-               return tevent_req_post(req, ev);
-       }
+static NTSTATUS vfswrap_offload_read_recv(struct tevent_req *req,
+                                         struct vfs_handle_struct *handle,
+                                         TALLOC_CTX *mem_ctx,
+                                         DATA_BLOB *token)
+{
+       struct vfswrap_offload_read_state *state = tevent_req_data(
+               req, struct vfswrap_offload_read_state);
+       NTSTATUS status;
 
-       if (dest_fsp->op == NULL) {
-               tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
-               return tevent_req_post(req, ev);
+       if (tevent_req_is_nterror(req, &status)) {
+               tevent_req_received(req);
+               return status;
        }
 
-       status = copy_chunk_loop(req);
-       if (!NT_STATUS_IS_OK(status)) {
-               tevent_req_nterror(req, status);
+       token->length = state->token.length;
+       token->data = talloc_move(mem_ctx, &state->token.data);
+
+       tevent_req_received(req);
+       return NT_STATUS_OK;
+}
+
+struct vfswrap_offload_write_state {
+       uint8_t *buf;
+       bool read_lck_locked;
+       bool write_lck_locked;
+       DATA_BLOB *token;
+       struct tevent_context *src_ev;
+       struct files_struct *src_fsp;
+       off_t src_off;
+       struct tevent_context *dst_ev;
+       struct files_struct *dst_fsp;
+       off_t dst_off;
+       off_t to_copy;
+       off_t remaining;
+       size_t next_io_size;
+};
+
+static void vfswrap_offload_write_cleanup(struct tevent_req *req,
+                                         enum tevent_req_state req_state)
+{
+       struct vfswrap_offload_write_state *state = tevent_req_data(
+               req, struct vfswrap_offload_write_state);
+       bool ok;
+
+       if (state->dst_fsp == NULL) {
+               return;
+       }
+
+       ok = change_to_user_by_fsp(state->dst_fsp);
+       SMB_ASSERT(ok);
+       state->dst_fsp = NULL;
+}
+
+static NTSTATUS vfswrap_offload_write_loop(struct tevent_req *req);
+
+static struct tevent_req *vfswrap_offload_write_send(
+       struct vfs_handle_struct *handle,
+       TALLOC_CTX *mem_ctx,
+       struct tevent_context *ev,
+       uint32_t fsctl,
+       DATA_BLOB *token,
+       off_t transfer_offset,
+       struct files_struct *dest_fsp,
+       off_t dest_off,
+       off_t to_copy)
+{
+       struct tevent_req *req;
+       struct vfswrap_offload_write_state *state = NULL;
+       size_t num = MIN(to_copy, COPYCHUNK_MAX_TOTAL_LEN);
+       files_struct *src_fsp = NULL;
+       NTSTATUS status;
+       bool ok;
+
+       req = tevent_req_create(mem_ctx, &state,
+                               struct vfswrap_offload_write_state);
+       if (req == NULL) {
+               return NULL;
+       }
+
+       *state = (struct vfswrap_offload_write_state) {
+               .token = token,
+               .src_off = transfer_offset,
+               .dst_ev = ev,
+               .dst_fsp = dest_fsp,
+               .dst_off = dest_off,
+               .to_copy = to_copy,
+               .remaining = to_copy,
+       };
+
+       tevent_req_set_cleanup_fn(req, vfswrap_offload_write_cleanup);
+
+       switch (fsctl) {
+       case FSCTL_SRV_COPYCHUNK:
+       case FSCTL_SRV_COPYCHUNK_WRITE:
+               break;
+
+       case FSCTL_OFFLOAD_WRITE:
+               tevent_req_nterror(req, NT_STATUS_NOT_IMPLEMENTED);
+               return tevent_req_post(req, ev);
+
+       case FSCTL_DUP_EXTENTS_TO_FILE:
+               DBG_DEBUG("COW clones not supported by vfs_default\n");
+               tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+               return tevent_req_post(req, ev);
+
+       default:
+               tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+               return tevent_req_post(req, ev);
+       }
+
+       /*
+        * From here on we assume a copy-chunk fsctl
+        */
+
+       if (to_copy == 0) {
+               tevent_req_done(req);
+               return tevent_req_post(req, ev);
+       }
+
+       status = vfs_offload_token_db_fetch_fsp(vfswrap_offload_ctx,
+                                               token, &src_fsp);
+       if (tevent_req_nterror(req, status)) {
+               return tevent_req_post(req, ev);
+       }
+
+       DBG_DEBUG("server side copy chunk of length %" PRIu64 "\n", to_copy);
+
+       status = vfs_offload_token_check_handles(fsctl, src_fsp, dest_fsp);
+       if (!NT_STATUS_IS_OK(status)) {
+               tevent_req_nterror(req, status);
+               return tevent_req_post(req, ev);
+       }
+
+       ok = change_to_user_by_fsp(src_fsp);
+       if (!ok) {
+               tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
+               return tevent_req_post(req, ev);
+       }
+
+       state->src_ev = src_fsp->conn->sconn->ev_ctx;
+       state->src_fsp = src_fsp;
+
+       state->buf = talloc_array(state, uint8_t, num);
+       if (tevent_req_nomem(state->buf, req)) {
+               return tevent_req_post(req, ev);
+       }
+
+       status = vfs_stat_fsp(src_fsp);
+       if (tevent_req_nterror(req, status)) {
+               return tevent_req_post(req, ev);
+       }
+
+       if (src_fsp->fsp_name->st.st_ex_size < state->src_off + num) {
+               /*
+                * [MS-SMB2] 3.3.5.15.6 Handling a Server-Side Data Copy Request
+                *   If the SourceOffset or SourceOffset + Length extends beyond
+                *   the end of file, the server SHOULD<240> treat this as a
+                *   STATUS_END_OF_FILE error.
+                * ...
+                *   <240> Section 3.3.5.15.6: Windows servers will return
+                *   STATUS_INVALID_VIEW_SIZE instead of STATUS_END_OF_FILE.
+                */
+               tevent_req_nterror(req, NT_STATUS_INVALID_VIEW_SIZE);
+               return tevent_req_post(req, ev);
+       }
+
+       status = vfswrap_offload_write_loop(req);
+       if (!NT_STATUS_IS_OK(status)) {
+               tevent_req_nterror(req, status);
                return tevent_req_post(req, ev);
        }
 
        return req;
 }
 
-static void vfswrap_copy_chunk_read_done(struct tevent_req *subreq);
+static void vfswrap_offload_write_read_done(struct tevent_req *subreq);
 
-static NTSTATUS copy_chunk_loop(struct tevent_req *req)
+static NTSTATUS vfswrap_offload_write_loop(struct tevent_req *req)
 {
-       struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
+       struct vfswrap_offload_write_state *state = tevent_req_data(
+               req, struct vfswrap_offload_write_state);
        struct tevent_req *subreq = NULL;
+       struct lock_struct read_lck;
        bool ok;
 
+       /*
+        * This is called under the context of state->src_fsp.
+        */
+
        state->next_io_size = MIN(state->remaining, talloc_array_length(state->buf));
 
-       if (!(state->flags & VFS_COPY_CHUNK_FL_IGNORE_LOCKS)) {
-               init_strict_lock_struct(state->src_fsp,
+       init_strict_lock_struct(state->src_fsp,
                                state->src_fsp->op->global->open_persistent_id,
-                                       state->src_off,
-                                       state->next_io_size,
-                                       READ_LOCK,
-                                       &state->read_lck);
-
-               ok = SMB_VFS_STRICT_LOCK(state->src_fsp->conn,
-                                        state->src_fsp,
-                                        &state->read_lck);
-               if (!ok) {
-                       return NT_STATUS_FILE_LOCK_CONFLICT;
-               }
+                               state->src_off,
+                               state->next_io_size,
+                               READ_LOCK,
+                               &read_lck);
+
+       ok = SMB_VFS_STRICT_LOCK_CHECK(state->src_fsp->conn,
+                                state->src_fsp,
+                                &read_lck);
+       if (!ok) {
+               return NT_STATUS_FILE_LOCK_CONFLICT;
        }
 
        subreq = SMB_VFS_PREAD_SEND(state,
-                                   state->src_fsp->conn->sconn->ev_ctx,
+                                   state->src_ev,
                                    state->src_fsp,
                                    state->buf,
                                    state->next_io_size,
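
The hunks above replace the old single copy-chunk state machine with the offload
read/write split: offload read mints a resume-key token for the source handle and
records which fsp it belongs to in a per-client table, and offload write later
resolves the token back to that source fsp and copies in chunks, switching between
the source and destination users with change_to_user_by_fsp(). A stand-alone toy
version of the token round-trip (hypothetical names, not the real vfs_offload_token_*
helpers; only the 24-byte resume-key length is taken from SMB2):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define TOKEN_LEN 24		/* SMB2 resume keys are 24 bytes */
	#define MAX_TOKENS 8

	struct token_entry {
		uint8_t token[TOKEN_LEN];
		int src_fd;		/* stand-in for the source fsp */
		int used;
	};

	static struct token_entry table[MAX_TOKENS];

	/* "offload read": remember which open file a token refers to. */
	static int token_store(const uint8_t *token, int src_fd)
	{
		for (int i = 0; i < MAX_TOKENS; i++) {
			if (!table[i].used) {
				memcpy(table[i].token, token, TOKEN_LEN);
				table[i].src_fd = src_fd;
				table[i].used = 1;
				return 0;
			}
		}
		return -1;
	}

	/* "offload write": resolve the token the client sent back. */
	static int token_fetch(const uint8_t *token)
	{
		for (int i = 0; i < MAX_TOKENS; i++) {
			if (table[i].used &&
			    memcmp(table[i].token, token, TOKEN_LEN) == 0) {
				return table[i].src_fd;
			}
		}
		return -1;
	}

	int main(void)
	{
		uint8_t key[TOKEN_LEN] = { 0x01, 0x02 };	/* pretend resume key */

		token_store(key, 7);
		printf("token resolves to source fd %d\n", token_fetch(key));
		return 0;
	}
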
@@ -1739,29 +1949,24 @@ static NTSTATUS copy_chunk_loop(struct tevent_req *req)
        if (subreq == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
-       tevent_req_set_callback(subreq, vfswrap_copy_chunk_read_done, req);
+       tevent_req_set_callback(subreq, vfswrap_offload_write_read_done, req);
 
        return NT_STATUS_OK;
 }
 
-static void vfswrap_copy_chunk_write_done(struct tevent_req *subreq);
+static void vfswrap_offload_write_write_done(struct tevent_req *subreq);
 
-static void vfswrap_copy_chunk_read_done(struct tevent_req *subreq)
+static void vfswrap_offload_write_read_done(struct tevent_req *subreq)
 {
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
-       struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
+       struct vfswrap_offload_write_state *state = tevent_req_data(
+               req, struct vfswrap_offload_write_state);
        struct vfs_aio_state aio_state;
+       struct lock_struct write_lck;
        ssize_t nread;
        bool ok;
 
-       if (!(state->flags & VFS_COPY_CHUNK_FL_IGNORE_LOCKS)) {
-               SMB_VFS_STRICT_UNLOCK(state->src_fsp->conn,
-                                     state->src_fsp,
-                                     &state->read_lck);
-               ZERO_STRUCT(state->read_lck);
-       }
-
        nread = SMB_VFS_PREAD_RECV(subreq, &aio_state);
        TALLOC_FREE(subreq);
        if (nread == -1) {
@@ -1778,25 +1983,29 @@ static void vfswrap_copy_chunk_read_done(struct tevent_req *subreq)
 
        state->src_off += nread;
 
-       if (!(state->flags & VFS_COPY_CHUNK_FL_IGNORE_LOCKS)) {
-               init_strict_lock_struct(state->dst_fsp,
+       ok = change_to_user_by_fsp(state->dst_fsp);
+       if (!ok) {
+               tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+               return;
+       }
+
+       init_strict_lock_struct(state->dst_fsp,
                                state->dst_fsp->op->global->open_persistent_id,
-                                       state->dst_off,
-                                       state->next_io_size,
-                                       WRITE_LOCK,
-                                       &state->write_lck);
-
-               ok = SMB_VFS_STRICT_LOCK(state->dst_fsp->conn,
-                                        state->dst_fsp,
-                                        &state->write_lck);
-               if (!ok) {
-                       tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
-                       return;
-               }
+                               state->dst_off,
+                               state->next_io_size,
+                               WRITE_LOCK,
+                               &write_lck);
+
+       ok = SMB_VFS_STRICT_LOCK_CHECK(state->dst_fsp->conn,
+                                state->dst_fsp,
+                                &write_lck);
+       if (!ok) {
+               tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
+               return;
        }
 
        subreq = SMB_VFS_PWRITE_SEND(state,
-                                    state->ev,
+                                    state->dst_ev,
                                     state->dst_fsp,
                                     state->buf,
                                     state->next_io_size,
@@ -1805,24 +2014,19 @@ static void vfswrap_copy_chunk_read_done(struct tevent_req *subreq)
                tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
                return;
        }
-       tevent_req_set_callback(subreq, vfswrap_copy_chunk_write_done, req);
+       tevent_req_set_callback(subreq, vfswrap_offload_write_write_done, req);
 }
 
-static void vfswrap_copy_chunk_write_done(struct tevent_req *subreq)
+static void vfswrap_offload_write_write_done(struct tevent_req *subreq)
 {
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
-       struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
+       struct vfswrap_offload_write_state *state = tevent_req_data(
+               req, struct vfswrap_offload_write_state);
        struct vfs_aio_state aio_state;
        ssize_t nwritten;
        NTSTATUS status;
-
-       if (!(state->flags & VFS_COPY_CHUNK_FL_IGNORE_LOCKS)) {
-               SMB_VFS_STRICT_UNLOCK(state->dst_fsp->conn,
-                                     state->dst_fsp,
-                                     &state->write_lck);
-               ZERO_STRUCT(state->write_lck);
-       }
+       bool ok;
 
        nwritten = SMB_VFS_PWRITE_RECV(subreq, &aio_state);
        TALLOC_FREE(subreq);
@@ -1850,7 +2054,13 @@ static void vfswrap_copy_chunk_write_done(struct tevent_req *subreq)
                return;
        }
 
-       status = copy_chunk_loop(req);
+       ok = change_to_user_by_fsp(state->src_fsp);
+       if (!ok) {
+               tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+               return;
+       }
+
+       status = vfswrap_offload_write_loop(req);
        if (!NT_STATUS_IS_OK(status)) {
                tevent_req_nterror(req, status);
                return;
@@ -1859,11 +2069,12 @@ static void vfswrap_copy_chunk_write_done(struct tevent_req *subreq)
        return;
 }
 
-static NTSTATUS vfswrap_copy_chunk_recv(struct vfs_handle_struct *handle,
+static NTSTATUS vfswrap_offload_write_recv(struct vfs_handle_struct *handle,
                                        struct tevent_req *req,
                                        off_t *copied)
 {
-       struct vfs_cc_state *state = tevent_req_data(req, struct vfs_cc_state);
+       struct vfswrap_offload_write_state *state = tevent_req_data(
+               req, struct vfswrap_offload_write_state);
        NTSTATUS status;
 
        if (tevent_req_is_nterror(req, &status)) {
@@ -1980,27 +2191,6 @@ static int vfswrap_chmod(vfs_handle_struct *handle,
        int result;
 
        START_PROFILE(syscall_chmod);
-
-       /*
-        * We need to do this due to the fact that the default POSIX ACL
-        * chmod modifies the ACL *mask* for the group owner, not the
-        * group owner bits directly. JRA.
-        */
-
-
-       {
-               int saved_errno = errno; /* We might get ENOSYS */
-               result = SMB_VFS_CHMOD_ACL(handle->conn,
-                               smb_fname,
-                               mode);
-               if (result == 0) {
-                       END_PROFILE(syscall_chmod);
-                       return result;
-               }
-               /* Error - return the old errno. */
-               errno = saved_errno;
-       }
-
        result = chmod(smb_fname->base_name, mode);
        END_PROFILE(syscall_chmod);
        return result;
@@ -2011,23 +2201,6 @@ static int vfswrap_fchmod(vfs_handle_struct *handle, files_struct *fsp, mode_t m
        int result;
 
        START_PROFILE(syscall_fchmod);
-
-       /*
-        * We need to do this due to the fact that the default POSIX ACL
-        * chmod modifies the ACL *mask* for the group owner, not the
-        * group owner bits directly. JRA.
-        */
-
-       {
-               int saved_errno = errno; /* We might get ENOSYS */
-               if ((result = SMB_VFS_FCHMOD_ACL(fsp, mode)) == 0) {
-                       END_PROFILE(syscall_fchmod);
-                       return result;
-               }
-               /* Error - return the old errno. */
-               errno = saved_errno;
-       }
-
 #if defined(HAVE_FCHMOD)
        result = fchmod(fsp->fh->fd, mode);
 #else
@@ -2080,24 +2253,42 @@ static int vfswrap_lchown(vfs_handle_struct *handle,
        return result;
 }
 
-static int vfswrap_chdir(vfs_handle_struct *handle, const char *path)
+static int vfswrap_chdir(vfs_handle_struct *handle,
+                       const struct smb_filename *smb_fname)
 {
        int result;
 
        START_PROFILE(syscall_chdir);
-       result = chdir(path);
+       result = chdir(smb_fname->base_name);
        END_PROFILE(syscall_chdir);
        return result;
 }
 
-static char *vfswrap_getwd(vfs_handle_struct *handle)
+static struct smb_filename *vfswrap_getwd(vfs_handle_struct *handle,
+                               TALLOC_CTX *ctx)
 {
        char *result;
+       struct smb_filename *smb_fname = NULL;
 
        START_PROFILE(syscall_getwd);
        result = sys_getwd();
        END_PROFILE(syscall_getwd);
-       return result;
+
+       if (result == NULL) {
+               return NULL;
+       }
+       smb_fname = synthetic_smb_fname(ctx,
+                               result,
+                               NULL,
+                               NULL,
+                               0);
+       /*
+        * sys_getwd() *always* returns malloced memory.
+        * We must free here to avoid leaks:
+        * BUG:https://bugzilla.samba.org/show_bug.cgi?id=13372
+        */
+       SAFE_FREE(result);
+       return smb_fname;
 }
 
 /*********************************************************************
@@ -2362,11 +2553,8 @@ static bool vfswrap_lock(vfs_handle_struct *handle, files_struct *fsp, int op, o
 
        START_PROFILE(syscall_fcntl_lock);
 
-       if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn),
-                                               "smbd",
-                                               "force process locks",
-                                               false)) {
-               op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks);
+       if (fsp->use_ofd_locks) {
+               op = map_process_lock_to_ofd_lock(op);
        }
 
        result =  fcntl_lock(fsp->fh->fd, op, offset, count, type);
@@ -2390,11 +2578,8 @@ static bool vfswrap_getlock(vfs_handle_struct *handle, files_struct *fsp, off_t
 
        START_PROFILE(syscall_fcntl_getlock);
 
-       if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn),
-                                               "smbd",
-                                               "force process locks",
-                                               false)) {
-               op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks);
+       if (fsp->use_ofd_locks) {
+               op = map_process_lock_to_ofd_lock(op);
        }
 
        result = fcntl_getlock(fsp->fh->fd, op, poffset, pcount, ptype, ppid);
@@ -2418,12 +2603,14 @@ static int vfswrap_linux_setlease(vfs_handle_struct *handle, files_struct *fsp,
        return result;
 }
 
-static int vfswrap_symlink(vfs_handle_struct *handle, const char *oldpath, const char *newpath)
+static int vfswrap_symlink(vfs_handle_struct *handle,
+                       const char *link_target,
+                       const struct smb_filename *new_smb_fname)
 {
        int result;
 
        START_PROFILE(syscall_symlink);
-       result = symlink(oldpath, newpath);
+       result = symlink(link_target, new_smb_fname->base_name);
        END_PROFILE(syscall_symlink);
        return result;
 }
@@ -2466,14 +2653,21 @@ static int vfswrap_mknod(vfs_handle_struct *handle,
        return result;
 }
 
-static char *vfswrap_realpath(vfs_handle_struct *handle, const char *path)
+static struct smb_filename *vfswrap_realpath(vfs_handle_struct *handle,
+                       TALLOC_CTX *ctx,
+                       const struct smb_filename *smb_fname)
 {
        char *result;
+       struct smb_filename *result_fname = NULL;
 
        START_PROFILE(syscall_realpath);
-       result = sys_realpath(path);
+       result = sys_realpath(smb_fname->base_name);
        END_PROFILE(syscall_realpath);
-       return result;
+       if (result) {
+               result_fname = synthetic_smb_fname(ctx, result, NULL, NULL, 0);
+               SAFE_FREE(result);
+       }
+       return result_fname;
 }
 
 static int vfswrap_chflags(vfs_handle_struct *handle,
@@ -2582,7 +2776,7 @@ static int vfswrap_get_real_filename(struct vfs_handle_struct *handle,
 }
 
 static const char *vfswrap_connectpath(struct vfs_handle_struct *handle,
-                                      const char *fname)
+                                  const struct smb_filename *smb_fname)
 {
        return handle->conn->connectpath;
 }
@@ -2618,24 +2812,14 @@ static bool vfswrap_brl_cancel_windows(struct vfs_handle_struct *handle,
        return brl_lock_cancel_default(br_lck, plock);
 }
 
-static bool vfswrap_strict_lock(struct vfs_handle_struct *handle,
-                               files_struct *fsp,
-                               struct lock_struct *plock)
+static bool vfswrap_strict_lock_check(struct vfs_handle_struct *handle,
+                                     files_struct *fsp,
+                                     struct lock_struct *plock)
 {
        SMB_ASSERT(plock->lock_type == READ_LOCK ||
            plock->lock_type == WRITE_LOCK);
 
-       return strict_lock_default(fsp, plock);
-}
-
-static void vfswrap_strict_unlock(struct vfs_handle_struct *handle,
-                               files_struct *fsp,
-                               struct lock_struct *plock)
-{
-       SMB_ASSERT(plock->lock_type == READ_LOCK ||
-           plock->lock_type == WRITE_LOCK);
-
-       strict_unlock_default(fsp, plock);
+       return strict_lock_check_default(fsp, plock);
 }
 
 /* NT ACL operations. */
@@ -2692,38 +2876,6 @@ static NTSTATUS vfswrap_audit_file(struct vfs_handle_struct *handle,
        return NT_STATUS_OK; /* Nothing to do here ... */
 }
 
-static int vfswrap_chmod_acl(vfs_handle_struct *handle,
-                               const struct smb_filename *smb_fname,
-                               mode_t mode)
-{
-#ifdef HAVE_NO_ACL
-       errno = ENOSYS;
-       return -1;
-#else
-       int result;
-
-       START_PROFILE(chmod_acl);
-       result = chmod_acl(handle->conn, smb_fname, mode);
-       END_PROFILE(chmod_acl);
-       return result;
-#endif
-}
-
-static int vfswrap_fchmod_acl(vfs_handle_struct *handle, files_struct *fsp, mode_t mode)
-{
-#ifdef HAVE_NO_ACL
-       errno = ENOSYS;
-       return -1;
-#else
-       int result;
-
-       START_PROFILE(fchmod_acl);
-       result = fchmod_acl(fsp, mode);
-       END_PROFILE(fchmod_acl);
-       return result;
-#endif
-}
-
 static SMB_ACL_T vfswrap_sys_acl_get_file(vfs_handle_struct *handle,
                                          const struct smb_filename *smb_fname,
                                          SMB_ACL_TYPE_T type,
@@ -2771,6 +2923,325 @@ static ssize_t vfswrap_getxattr(struct vfs_handle_struct *handle,
        return getxattr(smb_fname->base_name, name, value, size);
 }
 
+struct vfswrap_getxattrat_state {
+       struct tevent_context *ev;
+       files_struct *dir_fsp;
+       const struct smb_filename *smb_fname;
+       struct tevent_req *req;
+
+       /*
+        * The following variables are talloced off "state" which is protected
+        * by a destructor and thus are guaranteed to be safe to be used in the
+        * job function in the worker thread.
+        */
+       char *name;
+       const char *xattr_name;
+       uint8_t *xattr_value;
+       struct security_unix_token *token;
+
+       ssize_t xattr_size;
+       struct vfs_aio_state vfs_aio_state;
+       SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
+};
+
+static int vfswrap_getxattrat_state_destructor(
+               struct vfswrap_getxattrat_state *state)
+{
+       return -1;
+}
+
+static void vfswrap_getxattrat_do_sync(struct tevent_req *req);
+static void vfswrap_getxattrat_do_async(void *private_data);
+static void vfswrap_getxattrat_done(struct tevent_req *subreq);
+
+static struct tevent_req *vfswrap_getxattrat_send(
+                       TALLOC_CTX *mem_ctx,
+                       struct tevent_context *ev,
+                       struct vfs_handle_struct *handle,
+                       files_struct *dir_fsp,
+                       const struct smb_filename *smb_fname,
+                       const char *xattr_name,
+                       size_t alloc_hint)
+{
+       struct tevent_req *req = NULL;
+       struct tevent_req *subreq = NULL;
+       struct vfswrap_getxattrat_state *state = NULL;
+       size_t max_threads = 0;
+       bool have_per_thread_cwd = false;
+       bool have_per_thread_creds = false;
+       bool do_async = false;
+
+       req = tevent_req_create(mem_ctx, &state,
+                               struct vfswrap_getxattrat_state);
+       if (req == NULL) {
+               return NULL;
+       }
+       *state = (struct vfswrap_getxattrat_state) {
+               .ev = ev,
+               .dir_fsp = dir_fsp,
+               .smb_fname = smb_fname,
+               .req = req,
+       };
+
+       max_threads = pthreadpool_tevent_max_threads(dir_fsp->conn->sconn->pool);
+       if (max_threads >= 1) {
+               /*
+                * We need a non sync threadpool!
+                */
+               have_per_thread_cwd = per_thread_cwd_supported();
+       }
+#ifdef HAVE_LINUX_THREAD_CREDENTIALS
+       have_per_thread_creds = true;
+#endif
+       if (have_per_thread_cwd && have_per_thread_creds) {
+               do_async = true;
+       }
+
+       SMBPROFILE_BYTES_ASYNC_START(syscall_asys_getxattrat, profile_p,
+                                    state->profile_bytes, 0);
+
+       if (dir_fsp->fh->fd == -1) {
+               DBG_ERR("Need a valid directory fd\n");
+               tevent_req_error(req, EINVAL);
+               return tevent_req_post(req, ev);
+       }
+
+       if (alloc_hint > 0) {
+               state->xattr_value = talloc_zero_array(state,
+                                                      uint8_t,
+                                                      alloc_hint);
+               if (tevent_req_nomem(state->xattr_value, req)) {
+                       return tevent_req_post(req, ev);
+               }
+       }
+
+       if (!do_async) {
+               vfswrap_getxattrat_do_sync(req);
+               return tevent_req_post(req, ev);
+       }
+
+       /*
+        * Now allocate all parameters from a memory context that won't go away
+        * no matter what. These parameters will get used in threads and we
+        * can't reliably cancel threads, so all buffers passed to the threads
+        * must not be freed before all referencing threads terminate.
+        */
+
+       state->name = talloc_strdup(state, smb_fname->base_name);
+       if (tevent_req_nomem(state->name, req)) {
+               return tevent_req_post(req, ev);
+       }
+
+       state->xattr_name = talloc_strdup(state, xattr_name);
+       if (tevent_req_nomem(state->xattr_name, req)) {
+               return tevent_req_post(req, ev);
+       }
+
+       /*
+        * This is a hot codepath so at first glance one might think we should
+        * somehow optimize away the token allocation and do a
+        * talloc_reference() or similar black magic instead. But due to the
+        * talloc_stackframe pool per SMB2 request this should be a simple copy
+        * without a malloc in most cases.
+        */
+       if (geteuid() == sec_initial_uid()) {
+               state->token = root_unix_token(state);
+       } else {
+               state->token = copy_unix_token(
+                                       state,
+                                       dir_fsp->conn->session_info->unix_token);
+       }
+       if (tevent_req_nomem(state->token, req)) {
+               return tevent_req_post(req, ev);
+       }
+
+       SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+
+       subreq = pthreadpool_tevent_job_send(
+                       state,
+                       ev,
+                       dir_fsp->conn->sconn->pool,
+                       vfswrap_getxattrat_do_async,
+                       state);
+       if (tevent_req_nomem(subreq, req)) {
+               return tevent_req_post(req, ev);
+       }
+       tevent_req_set_callback(subreq, vfswrap_getxattrat_done, req);
+
+       talloc_set_destructor(state, vfswrap_getxattrat_state_destructor);
+
+       return req;
+}
+
+static void vfswrap_getxattrat_do_sync(struct tevent_req *req)
+{
+       struct vfswrap_getxattrat_state *state = tevent_req_data(
+               req, struct vfswrap_getxattrat_state);
+       char *path = NULL;
+       char *tofree = NULL;
+       char pathbuf[PATH_MAX+1];
+       ssize_t pathlen;
+       int err;
+
+       pathlen = full_path_tos(state->dir_fsp->fsp_name->base_name,
+                               state->smb_fname->base_name,
+                               pathbuf,
+                               sizeof(pathbuf),
+                               &path,
+                               &tofree);
+       if (pathlen == -1) {
+               tevent_req_error(req, ENOMEM);
+               return;
+       }
+
+       state->xattr_size = getxattr(path,
+                                    state->xattr_name,
+                                    state->xattr_value,
+                                    talloc_array_length(state->xattr_value));
+       err = errno;
+       TALLOC_FREE(tofree);
+       if (state->xattr_size == -1) {
+               tevent_req_error(req, err);
+               return;
+       }
+
+       tevent_req_done(req);
+       return;
+}
+
+static void vfswrap_getxattrat_do_async(void *private_data)
+{
+       struct vfswrap_getxattrat_state *state = talloc_get_type_abort(
+               private_data, struct vfswrap_getxattrat_state);
+       struct timespec start_time;
+       struct timespec end_time;
+       int ret;
+
+       PROFILE_TIMESTAMP(&start_time);
+       SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
+
+       /*
+        * Here we simulate a getxattrat()
+        * call using fchdir();getxattr()
+        */
+
+       per_thread_cwd_activate();
+
+       /* Become the correct credential on this thread. */
+       ret = set_thread_credentials(state->token->uid,
+                                    state->token->gid,
+                                    (size_t)state->token->ngroups,
+                                    state->token->groups);
+       if (ret != 0) {
+               state->xattr_size = -1;
+               state->vfs_aio_state.error = errno;
+               goto end_profile;
+       }
+
+       ret = fchdir(state->dir_fsp->fh->fd);
+       if (ret == -1) {
+               state->xattr_size = -1;
+               state->vfs_aio_state.error = errno;
+               goto end_profile;
+       }
+
+       state->xattr_size = getxattr(state->name,
+                                    state->xattr_name,
+                                    state->xattr_value,
+                                    talloc_array_length(state->xattr_value));
+       if (state->xattr_size == -1) {
+               state->vfs_aio_state.error = errno;
+       }
+
+end_profile:
+       PROFILE_TIMESTAMP(&end_time);
+       state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time);
+       SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+}
+
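The fchdir()+getxattr() emulation above can be exercised in isolation. A minimal standalone sketch, assuming Linux; the directory, file name and xattr name are examples only, not taken from Samba:

#include <fcntl.h>
#include <stdio.h>
#include <sys/xattr.h>
#include <unistd.h>

int main(void)
{
	char value[256];
	ssize_t len;
	int dirfd = open("/tmp", O_RDONLY | O_DIRECTORY);

	if (dirfd == -1 || fchdir(dirfd) == -1) {
		perror("open/fchdir");
		return 1;
	}

	/*
	 * After fchdir() the relative name is resolved under dirfd,
	 * which is what a getxattrat(dirfd, name, ...) syscall would
	 * do if libc provided one.
	 */
	len = getxattr("testfile", "user.comment", value, sizeof(value));
	if (len == -1) {
		perror("getxattr");
		close(dirfd);
		return 1;
	}
	printf("user.comment = %.*s\n", (int)len, value);
	close(dirfd);
	return 0;
}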
+static void vfswrap_getxattrat_done(struct tevent_req *subreq)
+{
+       struct tevent_req *req = tevent_req_callback_data(
+               subreq, struct tevent_req);
+       struct vfswrap_getxattrat_state *state = tevent_req_data(
+               req, struct vfswrap_getxattrat_state);
+       int ret;
+       bool ok;
+
+       /*
+        * Make sure we run as the user again
+        */
+       ok = change_to_user_by_fsp(state->dir_fsp);
+       SMB_ASSERT(ok);
+
+       ret = pthreadpool_tevent_job_recv(subreq);
+       TALLOC_FREE(subreq);
+       SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
+       talloc_set_destructor(state, NULL);
+       if (ret != 0) {
+               if (ret != EAGAIN) {
+                       tevent_req_error(req, ret);
+                       return;
+               }
+               /*
+                * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+                * means the lower level pthreadpool failed to create a new
+                * thread. Fallback to sync processing in that case to allow
+                * some progress for the client.
+                */
+               vfswrap_getxattrat_do_sync(req);
+               return;
+       }
+
+       if (state->xattr_size == -1) {
+               tevent_req_error(req, state->vfs_aio_state.error);
+               return;
+       }
+
+       if (state->xattr_value == NULL) {
+               /*
+                * The caller only wanted the size.
+                */
+               tevent_req_done(req);
+               return;
+       }
+
+       /*
+        * Shrink the buffer to the returned size (this can't fail).
+        * The result is NULL if the size is 0.
+        */
+       state->xattr_value = talloc_realloc(state,
+                                           state->xattr_value,
+                                           uint8_t,
+                                           state->xattr_size);
+
+       tevent_req_done(req);
+}
+
+static ssize_t vfswrap_getxattrat_recv(struct tevent_req *req,
+                                      struct vfs_aio_state *aio_state,
+                                      TALLOC_CTX *mem_ctx,
+                                      uint8_t **xattr_value)
+{
+       struct vfswrap_getxattrat_state *state = tevent_req_data(
+               req, struct vfswrap_getxattrat_state);
+       ssize_t xattr_size;
+
+       if (tevent_req_is_unix_error(req, &aio_state->error)) {
+               tevent_req_received(req);
+               return -1;
+       }
+
+       *aio_state = state->vfs_aio_state;
+       xattr_size = state->xattr_size;
+       if (xattr_value != NULL) {
+               *xattr_value = talloc_move(mem_ctx, &state->xattr_value);
+       }
+
+       tevent_req_received(req);
+       return xattr_size;
+}
+
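A minimal sketch of how a caller might drive this send/recv pair synchronously, assuming an existing struct tevent_context *ev, vfs handle, open directory fsp and smb_fname; the xattr name and alloc_hint below are placeholders:

static ssize_t getxattrat_sync_example(TALLOC_CTX *mem_ctx,
				       struct tevent_context *ev,
				       struct vfs_handle_struct *handle,
				       files_struct *dirfsp,
				       const struct smb_filename *smb_fname,
				       uint8_t **value)
{
	struct vfs_aio_state aio_state = { 0 };
	struct tevent_req *req = NULL;
	ssize_t len;

	/* "user.DOSATTRIB" and 256 are example arguments. */
	req = vfswrap_getxattrat_send(mem_ctx, ev, handle, dirfsp,
				      smb_fname, "user.DOSATTRIB", 256);
	if (req == NULL) {
		return -1;
	}
	/* Pump the event context until the request completes. */
	if (!tevent_req_poll(req, ev)) {
		TALLOC_FREE(req);
		return -1;
	}
	len = vfswrap_getxattrat_recv(req, &aio_state, mem_ctx, value);
	TALLOC_FREE(req);
	/* On success *value was talloc_move()d onto mem_ctx by recv. */
	return len;
}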
 static ssize_t vfswrap_fgetxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, const char *name, void *value, size_t size)
 {
        return fgetxattr(fsp->fh->fd, name, value, size);
@@ -2912,18 +3383,15 @@ static struct vfs_fn_pointers vfs_default_fns = {
        .mkdir_fn = vfswrap_mkdir,
        .rmdir_fn = vfswrap_rmdir,
        .closedir_fn = vfswrap_closedir,
-       .init_search_op_fn = vfswrap_init_search_op,
 
        /* File operations */
 
        .open_fn = vfswrap_open,
        .create_file_fn = vfswrap_create_file,
        .close_fn = vfswrap_close,
-       .read_fn = vfswrap_read,
        .pread_fn = vfswrap_pread,
        .pread_send_fn = vfswrap_pread_send,
        .pread_recv_fn = vfswrap_pread_recv,
-       .write_fn = vfswrap_write,
        .pwrite_fn = vfswrap_pwrite,
        .pwrite_send_fn = vfswrap_pwrite_send,
        .pwrite_recv_fn = vfswrap_pwrite_recv,
@@ -2931,7 +3399,6 @@ static struct vfs_fn_pointers vfs_default_fns = {
        .sendfile_fn = vfswrap_sendfile,
        .recvfile_fn = vfswrap_recvfile,
        .rename_fn = vfswrap_rename,
-       .fsync_fn = vfswrap_fsync,
        .fsync_send_fn = vfswrap_fsync_send,
        .fsync_recv_fn = vfswrap_fsync_recv,
        .stat_fn = vfswrap_stat,
@@ -2966,16 +3433,19 @@ static struct vfs_fn_pointers vfs_default_fns = {
        .brl_lock_windows_fn = vfswrap_brl_lock_windows,
        .brl_unlock_windows_fn = vfswrap_brl_unlock_windows,
        .brl_cancel_windows_fn = vfswrap_brl_cancel_windows,
-       .strict_lock_fn = vfswrap_strict_lock,
-       .strict_unlock_fn = vfswrap_strict_unlock,
+       .strict_lock_check_fn = vfswrap_strict_lock_check,
        .translate_name_fn = vfswrap_translate_name,
        .fsctl_fn = vfswrap_fsctl,
        .set_dos_attributes_fn = vfswrap_set_dos_attributes,
        .fset_dos_attributes_fn = vfswrap_fset_dos_attributes,
        .get_dos_attributes_fn = vfswrap_get_dos_attributes,
+       .get_dos_attributes_send_fn = vfswrap_get_dos_attributes_send,
+       .get_dos_attributes_recv_fn = vfswrap_get_dos_attributes_recv,
        .fget_dos_attributes_fn = vfswrap_fget_dos_attributes,
-       .copy_chunk_send_fn = vfswrap_copy_chunk_send,
-       .copy_chunk_recv_fn = vfswrap_copy_chunk_recv,
+       .offload_read_send_fn = vfswrap_offload_read_send,
+       .offload_read_recv_fn = vfswrap_offload_read_recv,
+       .offload_write_send_fn = vfswrap_offload_write_send,
+       .offload_write_recv_fn = vfswrap_offload_write_recv,
        .get_compression_fn = vfswrap_get_compression,
        .set_compression_fn = vfswrap_set_compression,
 
@@ -2988,9 +3458,6 @@ static struct vfs_fn_pointers vfs_default_fns = {
 
        /* POSIX ACL operations. */
 
-       .chmod_acl_fn = vfswrap_chmod_acl,
-       .fchmod_acl_fn = vfswrap_fchmod_acl,
-
        .sys_acl_get_file_fn = vfswrap_sys_acl_get_file,
        .sys_acl_get_fd_fn = vfswrap_sys_acl_get_fd,
        .sys_acl_blob_get_file_fn = posix_sys_acl_blob_get_file,
@@ -3001,6 +3468,8 @@ static struct vfs_fn_pointers vfs_default_fns = {
 
        /* EA operations. */
        .getxattr_fn = vfswrap_getxattr,
+       .getxattrat_send_fn = vfswrap_getxattrat_send,
+       .getxattrat_recv_fn = vfswrap_getxattrat_recv,
        .fgetxattr_fn = vfswrap_fgetxattr,
        .listxattr_fn = vfswrap_listxattr,
        .flistxattr_fn = vfswrap_flistxattr,
@@ -3018,9 +3487,15 @@ static struct vfs_fn_pointers vfs_default_fns = {
        .durable_reconnect_fn = vfswrap_durable_reconnect,
 };
 
-NTSTATUS vfs_default_init(TALLOC_CTX *);
+static_decl_vfs;
 NTSTATUS vfs_default_init(TALLOC_CTX *ctx)
 {
+       /*
+        * Here we need to implement every call,
+        * as this is the end of the vfs module chain.
+        */
+       smb_vfs_assert_all_fns(&vfs_default_fns, DEFAULT_VFS_MODULE_NAME);
        return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
                                DEFAULT_VFS_MODULE_NAME, &vfs_default_fns);
 }
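
smb_vfs_assert_all_fns() is assumed here to verify that the table leaves no operation unimplemented; conceptually that amounts to a NULL check per member of struct vfs_fn_pointers, roughly like this sketch (field names taken from the table above):

#define VFS_ASSERT_FN(fns, name) SMB_ASSERT((fns)->name != NULL)

static void assert_all_fns_sketch(const struct vfs_fn_pointers *fns)
{
	VFS_ASSERT_FN(fns, open_fn);
	VFS_ASSERT_FN(fns, close_fn);
	VFS_ASSERT_FN(fns, pread_send_fn);
	VFS_ASSERT_FN(fns, pread_recv_fn);
	VFS_ASSERT_FN(fns, getxattrat_send_fn);
	VFS_ASSERT_FN(fns, getxattrat_recv_fn);
	/* ... and so on for every member of struct vfs_fn_pointers. */
}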