#include "source3/include/msdfs.h"
#include "librpc/gen_ndr/ndr_dfsblobs.h"
#include "lib/util/tevent_unix.h"
-#include "lib/asys/asys.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/util/sys_rw.h"
#include "lib/pthreadpool/pthreadpool_tevent.h"
+#include "librpc/gen_ndr/ndr_ioctl.h"
+#include "offload_token.h"
#undef DBGC_CLASS
#define DBGC_CLASS DBGC_VFS
/* Disk operations */
-static uint64_t vfswrap_disk_free(vfs_handle_struct *handle, const char *path,
- uint64_t *bsize, uint64_t *dfree,
- uint64_t *dsize)
+static uint64_t vfswrap_disk_free(vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ uint64_t *bsize,
+ uint64_t *dfree,
+ uint64_t *dsize)
{
- if (sys_fsusage(path, dfree, dsize) != 0) {
+ if (sys_fsusage(smb_fname->base_name, dfree, dsize) != 0) {
return (uint64_t)-1;
}
return *dfree / 2;
}
-static int vfswrap_get_quota(struct vfs_handle_struct *handle, const char *path,
- enum SMB_QUOTA_TYPE qtype, unid_t id,
- SMB_DISK_QUOTA *qt)
+static int vfswrap_get_quota(struct vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ enum SMB_QUOTA_TYPE qtype,
+ unid_t id,
+ SMB_DISK_QUOTA *qt)
{
#ifdef HAVE_SYS_QUOTAS
int result;
START_PROFILE(syscall_get_quota);
- result = sys_get_quota(path, qtype, id, qt);
+ result = sys_get_quota(smb_fname->base_name, qtype, id, qt);
END_PROFILE(syscall_get_quota);
return result;
#else
return -1; /* Not implemented. */
}
-static int vfswrap_statvfs(struct vfs_handle_struct *handle, const char *path, vfs_statvfs_struct *statbuf)
+static int vfswrap_statvfs(struct vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ vfs_statvfs_struct *statbuf)
{
- return sys_statvfs(path, statbuf);
+ return sys_statvfs(smb_fname->base_name, statbuf);
}
static uint32_t vfswrap_fs_capabilities(struct vfs_handle_struct *handle,
struct vfs_statvfs_struct statbuf;
int ret;
+ smb_fname_cpath = synthetic_smb_fname(talloc_tos(), conn->connectpath,
+ NULL, NULL, 0);
+ if (smb_fname_cpath == NULL) {
+ return caps;
+ }
+
ZERO_STRUCT(statbuf);
- ret = sys_statvfs(conn->connectpath, &statbuf);
+ ret = SMB_VFS_STATVFS(conn, smb_fname_cpath, &statbuf);
if (ret == 0) {
caps = statbuf.FsCapabilities;
}
/* Work out what timestamp resolution we can
* use when setting a timestamp. */
- smb_fname_cpath = synthetic_smb_fname(talloc_tos(), conn->connectpath,
- NULL, NULL, 0);
- if (smb_fname_cpath == NULL) {
- return caps;
- }
-
ret = SMB_VFS_STAT(conn, smb_fname_cpath);
if (ret == -1) {
TALLOC_FREE(smb_fname_cpath);
/* The following call can change cwd. */
status = get_referred_path(r, pathnamep,
+ handle->conn->sconn->remote_address,
+ handle->conn->sconn->local_address,
!handle->conn->sconn->using_smb2,
junction, &consumedcnt, &self_referral);
if (!NT_STATUS_IS_OK(status)) {
- vfs_ChDir(handle->conn, handle->conn->connectpath);
+ struct smb_filename connectpath_fname = {
+ .base_name = handle->conn->connectpath
+ };
+ vfs_ChDir(handle->conn, &connectpath_fname);
return status;
}
- vfs_ChDir(handle->conn, handle->conn->connectpath);
+ {
+ struct smb_filename connectpath_fname = {
+ .base_name = handle->conn->connectpath
+ };
+ vfs_ChDir(handle->conn, &connectpath_fname);
+ }
if (!self_referral) {
pathnamep[consumedcnt] = '\0';
mode_t mode)
{
int result;
- bool has_dacl = False;
const char *path = smb_fname->base_name;
char *parent = NULL;
if (lp_inherit_acls(SNUM(handle->conn))
&& parent_dirname(talloc_tos(), path, &parent, NULL)
- && (has_dacl = directory_has_default_acl(handle->conn, parent))) {
+ && directory_has_default_acl(handle->conn, parent)) {
mode = (0777 & lp_directory_mask(SNUM(handle->conn)));
}
result = mkdir(path, mode);
- if (result == 0 && !has_dacl) {
- /*
- * We need to do this as the default behavior of POSIX ACLs
- * is to set the mask to be the requested group permission
- * bits, not the group permission bits to be the requested
- * group permission bits. This is not what we want, as it will
- * mess up any inherited ACL bits that were set. JRA.
- */
- int saved_errno = errno; /* We may get ENOSYS */
- if ((SMB_VFS_CHMOD_ACL(handle->conn, smb_fname, mode) == -1) &&
- (errno == ENOSYS)) {
- errno = saved_errno;
- }
- }
-
END_PROFILE(syscall_mkdir);
return result;
}
return result;
}
-static void vfswrap_init_search_op(vfs_handle_struct *handle,
- DIR *dirp)
-{
- /* Default behavior is a NOOP */
-}
-
/* File operations */
static int vfswrap_open(vfs_handle_struct *handle,
return result;
}
-static ssize_t vfswrap_read(vfs_handle_struct *handle, files_struct *fsp, void *data, size_t n)
-{
- ssize_t result;
-
- START_PROFILE_BYTES(syscall_read, n);
- result = sys_read(fsp->fh->fd, data, n);
- END_PROFILE_BYTES(syscall_read);
- return result;
-}
-
static ssize_t vfswrap_pread(vfs_handle_struct *handle, files_struct *fsp, void *data,
size_t n, off_t offset)
{
if (result == -1 && errno == ESPIPE) {
/* Maintain the fiction that pipes can be seeked (sought?) on. */
- result = SMB_VFS_READ(fsp, data, n);
+ result = sys_read(fsp->fh->fd, data, n);
fsp->fh->pos = 0;
}
#else /* HAVE_PREAD */
- off_t curr;
- int lerrno;
-
- curr = SMB_VFS_LSEEK(fsp, 0, SEEK_CUR);
- if (curr == -1 && errno == ESPIPE) {
- /* Maintain the fiction that pipes can be seeked (sought?) on. */
- result = SMB_VFS_READ(fsp, data, n);
- fsp->fh->pos = 0;
- return result;
- }
-
- if (SMB_VFS_LSEEK(fsp, offset, SEEK_SET) == -1) {
- return -1;
- }
-
- errno = 0;
- result = SMB_VFS_READ(fsp, data, n);
- lerrno = errno;
-
- SMB_VFS_LSEEK(fsp, curr, SEEK_SET);
- errno = lerrno;
-
+ errno = ENOSYS;
+ result = -1;
#endif /* HAVE_PREAD */
return result;
}
-static ssize_t vfswrap_write(vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n)
-{
- ssize_t result;
-
- START_PROFILE_BYTES(syscall_write, n);
- result = sys_write(fsp->fh->fd, data, n);
- END_PROFILE_BYTES(syscall_write);
- return result;
-}
-
static ssize_t vfswrap_pwrite(vfs_handle_struct *handle, files_struct *fsp, const void *data,
size_t n, off_t offset)
{
if (result == -1 && errno == ESPIPE) {
/* Maintain the fiction that pipes can be sought on. */
- result = SMB_VFS_WRITE(fsp, data, n);
+ result = sys_write(fsp->fh->fd, data, n);
}
#else /* HAVE_PWRITE */
- off_t curr;
- int lerrno;
-
- curr = SMB_VFS_LSEEK(fsp, 0, SEEK_CUR);
- if (curr == -1) {
- return -1;
- }
-
- if (SMB_VFS_LSEEK(fsp, offset, SEEK_SET) == -1) {
- return -1;
- }
-
- result = SMB_VFS_WRITE(fsp, data, n);
- lerrno = errno;
-
- SMB_VFS_LSEEK(fsp, curr, SEEK_SET);
- errno = lerrno;
-
+ errno = ENOSYS;
+ result = -1;
#endif /* HAVE_PWRITE */
return result;
}
-static void vfswrap_asys_finished(struct tevent_context *ev,
- struct tevent_fd *fde,
- uint16_t flags, void *p);
-
-static bool vfswrap_init_asys_ctx(struct smbd_server_connection *conn)
-{
- struct asys_context *ctx;
- struct tevent_fd *fde;
- int ret;
- int fd;
-
- if (conn->asys_ctx != NULL) {
- return true;
- }
-
- ret = asys_context_init(&ctx, lp_aio_max_threads());
- if (ret != 0) {
- DEBUG(1, ("asys_context_init failed: %s\n", strerror(ret)));
- return false;
- }
-
- fd = asys_signalfd(ctx);
-
- ret = set_blocking(fd, false);
- if (ret != 0) {
- DBG_WARNING("set_blocking failed: %s\n", strerror(errno));
- goto fail;
- }
-
- fde = tevent_add_fd(conn->ev_ctx, conn, fd, TEVENT_FD_READ,
- vfswrap_asys_finished, ctx);
- if (fde == NULL) {
- DEBUG(1, ("tevent_add_fd failed\n"));
- goto fail;
- }
-
- conn->asys_ctx = ctx;
- conn->asys_fde = fde;
- return true;
-
-fail:
- asys_context_destroy(ctx);
- return false;
-}
-
-static int vfswrap_init_pool(struct smbd_server_connection *conn)
-{
- int ret;
-
- if (conn->pool != NULL) {
- return 0;
- }
-
- ret = pthreadpool_tevent_init(conn, lp_aio_max_threads(),
- &conn->pool);
- return ret;
-}
-
-
-struct vfswrap_asys_state {
- struct asys_context *asys_ctx;
- struct tevent_req *req;
- ssize_t ret;
- struct vfs_aio_state vfs_aio_state;
- SMBPROFILE_BASIC_ASYNC_STATE(profile_basic);
- SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
-};
-
-static int vfswrap_asys_state_destructor(struct vfswrap_asys_state *s)
-{
- asys_cancel(s->asys_ctx, s->req);
- return 0;
-}
-
struct vfswrap_pread_state {
ssize_t ret;
- int err;
int fd;
void *buf;
size_t count;
static void vfs_pread_do(void *private_data);
static void vfs_pread_done(struct tevent_req *subreq);
+static int vfs_pread_state_destructor(struct vfswrap_pread_state *state);
static struct tevent_req *vfswrap_pread_send(struct vfs_handle_struct *handle,
TALLOC_CTX *mem_ctx,
{
struct tevent_req *req, *subreq;
struct vfswrap_pread_state *state;
- int ret;
req = tevent_req_create(mem_ctx, &state, struct vfswrap_pread_state);
if (req == NULL) {
return NULL;
}
- ret = vfswrap_init_pool(handle->conn->sconn);
- if (tevent_req_error(req, ret)) {
- return tevent_req_post(req, ev);
- }
-
state->ret = -1;
state->fd = fsp->fh->fd;
state->buf = data;
}
tevent_req_set_callback(subreq, vfs_pread_done, req);
+ talloc_set_destructor(state, vfs_pread_state_destructor);
+
return req;
}
state->offset);
} while ((state->ret == -1) && (errno == EINTR));
- state->err = errno;
+ if (state->ret == -1) {
+ state->vfs_aio_state.error = errno;
+ }
PROFILE_TIMESTAMP(&end_time);
SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
}
+/*
+ * talloc destructor for an in-flight async pread: returning -1 makes
+ * talloc refuse the free, protecting the state (and its buffer) from
+ * being released while the pthreadpool worker thread may still be
+ * writing into it. vfs_pread_done() clears this destructor before
+ * completing the request.
+ */
+static int vfs_pread_state_destructor(struct vfswrap_pread_state *state)
+{
+	return -1;
+}
+
static void vfs_pread_done(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
-#ifdef WITH_PROFILE
struct vfswrap_pread_state *state = tevent_req_data(
req, struct vfswrap_pread_state);
-#endif
int ret;
ret = pthreadpool_tevent_job_recv(subreq);
TALLOC_FREE(subreq);
SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
- if (tevent_req_error(req, ret)) {
- return;
+ talloc_set_destructor(state, NULL);
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+		 * thread. Fall back to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfs_pread_do(state);
}
tevent_req_done(req);
return state->ret;
}
+/*
+ * Per-request state for the threadpool-backed async pwrite.
+ * Filled in by vfswrap_pwrite_send(), the I/O fields are consumed by
+ * vfs_pwrite_do() on a worker thread and the result is handed back to
+ * the caller via vfswrap_pwrite_recv().
+ */
+struct vfswrap_pwrite_state {
+	ssize_t ret;		/* pwrite(2) return value, -1 on error */
+	int fd;			/* file descriptor to write to */
+	const void *buf;	/* caller-owned source buffer */
+	size_t count;		/* number of bytes to write */
+	off_t offset;		/* file offset to write at */
+
+	struct vfs_aio_state vfs_aio_state;	/* errno + duration for recv */
+	SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
+};
+
+static void vfs_pwrite_do(void *private_data);
+static void vfs_pwrite_done(struct tevent_req *subreq);
+static int vfs_pwrite_state_destructor(struct vfswrap_pwrite_state *state);
+
static struct tevent_req *vfswrap_pwrite_send(struct vfs_handle_struct *handle,
TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
const void *data,
size_t n, off_t offset)
{
- struct tevent_req *req;
- struct vfswrap_asys_state *state;
- int ret;
+ struct tevent_req *req, *subreq;
+ struct vfswrap_pwrite_state *state;
- req = tevent_req_create(mem_ctx, &state, struct vfswrap_asys_state);
+ req = tevent_req_create(mem_ctx, &state, struct vfswrap_pwrite_state);
if (req == NULL) {
return NULL;
}
- if (!vfswrap_init_asys_ctx(handle->conn->sconn)) {
- tevent_req_oom(req);
- return tevent_req_post(req, ev);
- }
- state->asys_ctx = handle->conn->sconn->asys_ctx;
- state->req = req;
+
+ state->ret = -1;
+ state->fd = fsp->fh->fd;
+ state->buf = data;
+ state->count = n;
+ state->offset = offset;
SMBPROFILE_BYTES_ASYNC_START(syscall_asys_pwrite, profile_p,
state->profile_bytes, n);
- ret = asys_pwrite(state->asys_ctx, fsp->fh->fd, data, n, offset, req);
- if (ret != 0) {
- tevent_req_error(req, ret);
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+
+ subreq = pthreadpool_tevent_job_send(
+ state, ev, handle->conn->sconn->pool,
+ vfs_pwrite_do, state);
+ if (tevent_req_nomem(subreq, req)) {
return tevent_req_post(req, ev);
}
- talloc_set_destructor(state, vfswrap_asys_state_destructor);
+ tevent_req_set_callback(subreq, vfs_pwrite_done, req);
+
+ talloc_set_destructor(state, vfs_pwrite_state_destructor);
return req;
}
+/*
+ * Worker part of the async pwrite, normally run on a pthreadpool
+ * thread (also called directly as a sync fallback from
+ * vfs_pwrite_done()). Performs pwrite(2), retrying on EINTR, and
+ * records errno and the elapsed time in state->vfs_aio_state.
+ */
+static void vfs_pwrite_do(void *private_data)
+{
+	struct vfswrap_pwrite_state *state = talloc_get_type_abort(
+		private_data, struct vfswrap_pwrite_state);
+	struct timespec start_time;
+	struct timespec end_time;
+
+	SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
+
+	PROFILE_TIMESTAMP(&start_time);
+
+	/* Retry the syscall as long as it is interrupted by a signal. */
+	do {
+		state->ret = pwrite(state->fd, state->buf, state->count,
+				    state->offset);
+	} while ((state->ret == -1) && (errno == EINTR));
+
+	if (state->ret == -1) {
+		state->vfs_aio_state.error = errno;
+	}
+
+	PROFILE_TIMESTAMP(&end_time);
+
+	state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time);
+
+	SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+}
+
+/*
+ * talloc destructor for an in-flight async pwrite: returning -1 makes
+ * talloc refuse the free while the worker thread may still reference
+ * the state. vfs_pwrite_done() clears this destructor before
+ * completing the request.
+ */
+static int vfs_pwrite_state_destructor(struct vfswrap_pwrite_state *state)
+{
+	return -1;
+}
+
+/*
+ * Completion callback for the pthreadpool pwrite job. Clears the
+ * "refuse free" destructor, maps job-level errors to the tevent_req
+ * and falls back to synchronous I/O if the pool could not spawn a
+ * thread (EAGAIN).
+ */
+static void vfs_pwrite_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct vfswrap_pwrite_state *state = tevent_req_data(
+		req, struct vfswrap_pwrite_state);
+	int ret;
+
+	ret = pthreadpool_tevent_job_recv(subreq);
+	TALLOC_FREE(subreq);
+	SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
+	/* The worker is finished, the state may be freed again. */
+	talloc_set_destructor(state, NULL);
+	if (ret != 0) {
+		if (ret != EAGAIN) {
+			tevent_req_error(req, ret);
+			return;
+		}
+		/*
+		 * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+		 * means the lower level pthreadpool failed to create a new
+		 * thread. Fall back to sync processing in that case to allow
+		 * some progress for the client.
+		 */
+		vfs_pwrite_do(state);
+	}
+
+	tevent_req_done(req);
+}
+
+/*
+ * Receive the result of an async pwrite: the number of bytes written,
+ * or -1 with the error code in vfs_aio_state->error. Also hands the
+ * I/O duration back to the caller via *vfs_aio_state.
+ */
+static ssize_t vfswrap_pwrite_recv(struct tevent_req *req,
+				   struct vfs_aio_state *vfs_aio_state)
+{
+	struct vfswrap_pwrite_state *state = tevent_req_data(
+		req, struct vfswrap_pwrite_state);
+
+	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+		return -1;
+	}
+
+	*vfs_aio_state = state->vfs_aio_state;
+	return state->ret;
+}
+
+/*
+ * Per-request state for the threadpool-backed async fsync, set up by
+ * vfswrap_fsync_send() and consumed by vfs_fsync_do() on a worker
+ * thread.
+ */
+struct vfswrap_fsync_state {
+	ssize_t ret;		/* fsync(2) return value, -1 on error */
+	int fd;			/* file descriptor to sync */
+
+	struct vfs_aio_state vfs_aio_state;	/* errno + duration for recv */
+	SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
+};
+
+static void vfs_fsync_do(void *private_data);
+static void vfs_fsync_done(struct tevent_req *subreq);
+static int vfs_fsync_state_destructor(struct vfswrap_fsync_state *state);
+
static struct tevent_req *vfswrap_fsync_send(struct vfs_handle_struct *handle,
TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
struct files_struct *fsp)
{
- struct tevent_req *req;
- struct vfswrap_asys_state *state;
- int ret;
+ struct tevent_req *req, *subreq;
+ struct vfswrap_fsync_state *state;
- req = tevent_req_create(mem_ctx, &state, struct vfswrap_asys_state);
+ req = tevent_req_create(mem_ctx, &state, struct vfswrap_fsync_state);
if (req == NULL) {
return NULL;
}
- if (!vfswrap_init_asys_ctx(handle->conn->sconn)) {
- tevent_req_oom(req);
- return tevent_req_post(req, ev);
- }
- state->asys_ctx = handle->conn->sconn->asys_ctx;
- state->req = req;
- SMBPROFILE_BASIC_ASYNC_START(syscall_asys_fsync, profile_p,
- state->profile_basic);
- ret = asys_fsync(state->asys_ctx, fsp->fh->fd, req);
- if (ret != 0) {
- tevent_req_error(req, ret);
+ state->ret = -1;
+ state->fd = fsp->fh->fd;
+
+ SMBPROFILE_BYTES_ASYNC_START(syscall_asys_fsync, profile_p,
+ state->profile_bytes, 0);
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+
+ subreq = pthreadpool_tevent_job_send(
+ state, ev, handle->conn->sconn->pool, vfs_fsync_do, state);
+ if (tevent_req_nomem(subreq, req)) {
return tevent_req_post(req, ev);
}
- talloc_set_destructor(state, vfswrap_asys_state_destructor);
+ tevent_req_set_callback(subreq, vfs_fsync_done, req);
+
+ talloc_set_destructor(state, vfs_fsync_state_destructor);
return req;
}
-static void vfswrap_asys_finished(struct tevent_context *ev,
- struct tevent_fd *fde,
- uint16_t flags, void *p)
+static void vfs_fsync_do(void *private_data)
{
- struct asys_context *asys_ctx = (struct asys_context *)p;
- struct asys_result results[get_outstanding_aio_calls()];
- int i, ret;
+ struct vfswrap_fsync_state *state = talloc_get_type_abort(
+ private_data, struct vfswrap_fsync_state);
+ struct timespec start_time;
+ struct timespec end_time;
- if ((flags & TEVENT_FD_READ) == 0) {
- return;
- }
+ SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
- ret = asys_results(asys_ctx, results, get_outstanding_aio_calls());
- if (ret < 0) {
- DEBUG(1, ("asys_results returned %s\n", strerror(-ret)));
- return;
- }
+ PROFILE_TIMESTAMP(&start_time);
- for (i=0; i<ret; i++) {
- struct asys_result *result = &results[i];
- struct tevent_req *req;
- struct vfswrap_asys_state *state;
+ do {
+ state->ret = fsync(state->fd);
+ } while ((state->ret == -1) && (errno == EINTR));
- if ((result->ret == -1) && (result->err == ECANCELED)) {
- continue;
- }
+ if (state->ret == -1) {
+ state->vfs_aio_state.error = errno;
+ }
- req = talloc_get_type_abort(result->private_data,
- struct tevent_req);
- state = tevent_req_data(req, struct vfswrap_asys_state);
+ PROFILE_TIMESTAMP(&end_time);
- talloc_set_destructor(state, NULL);
+ state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time);
- SMBPROFILE_BASIC_ASYNC_END(state->profile_basic);
- SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
- state->ret = result->ret;
- state->vfs_aio_state.error = result->err;
- state->vfs_aio_state.duration = result->duration;
- tevent_req_defer_callback(req, ev);
- tevent_req_done(req);
- }
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
}
-static ssize_t vfswrap_asys_ssize_t_recv(struct tevent_req *req,
- struct vfs_aio_state *vfs_aio_state)
+static int vfs_fsync_state_destructor(struct vfswrap_fsync_state *state)
{
- struct vfswrap_asys_state *state = tevent_req_data(
- req, struct vfswrap_asys_state);
+ return -1;
+}
- if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
- return -1;
+static void vfs_fsync_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ struct vfswrap_fsync_state *state = tevent_req_data(
+ req, struct vfswrap_fsync_state);
+ int ret;
+
+ ret = pthreadpool_tevent_job_recv(subreq);
+ TALLOC_FREE(subreq);
+ SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
+ talloc_set_destructor(state, NULL);
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+		 * thread. Fall back to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfs_fsync_do(state);
}
- *vfs_aio_state = state->vfs_aio_state;
- return state->ret;
+
+ tevent_req_done(req);
}
-static int vfswrap_asys_int_recv(struct tevent_req *req,
- struct vfs_aio_state *vfs_aio_state)
+static int vfswrap_fsync_recv(struct tevent_req *req,
+ struct vfs_aio_state *vfs_aio_state)
{
- struct vfswrap_asys_state *state = tevent_req_data(
- req, struct vfswrap_asys_state);
+ struct vfswrap_fsync_state *state = tevent_req_data(
+ req, struct vfswrap_fsync_state);
if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
return -1;
}
+
*vfs_aio_state = state->vfs_aio_state;
return state->ret;
}
START_PROFILE(syscall_lseek);
- /* Cope with 'stat' file opens. */
- if (fsp->fh->fd != -1)
- result = lseek(fsp->fh->fd, offset, whence);
-
+ result = lseek(fsp->fh->fd, offset, whence);
/*
* We want to maintain the fiction that we can seek
* on a fifo for file system purposes. This allows
return result;
}
-static int vfswrap_fsync(vfs_handle_struct *handle, files_struct *fsp)
-{
-#ifdef HAVE_FSYNC
- int result;
-
- START_PROFILE(syscall_fsync);
- result = fsync(fsp->fh->fd);
- END_PROFILE(syscall_fsync);
- return result;
-#else
- return 0;
-#endif
-}
-
static int vfswrap_stat(vfs_handle_struct *handle,
struct smb_filename *smb_fname)
{
*
* but I have to check that --metze
*/
+ ssize_t ret;
struct dom_sid sid;
+ struct dom_sid_buf buf;
uid_t uid;
size_t sid_len;
/* unknown 4 bytes: this is not the length of the sid :-( */
/*unknown = IVAL(pdata,0);*/
- if (!sid_parse(_in_data + 4, sid_len, &sid)) {
+ ret = sid_parse(_in_data + 4, sid_len, &sid);
+ if (ret == -1) {
return NT_STATUS_INVALID_PARAMETER;
}
- DEBUGADD(10, ("for SID: %s\n", sid_string_dbg(&sid)));
+ DEBUGADD(10, ("for SID: %s\n",
+ dom_sid_str_buf(&sid, &buf)));
if (!sid_to_uid(&sid, &uid)) {
DEBUG(0,("sid_to_uid: failed, sid[%s] sid_len[%lu]\n",
- sid_string_dbg(&sid),
+ dom_sid_str_buf(&sid, &buf),
(unsigned long)sid_len));
uid = (-1);
}
return NT_STATUS_NOT_SUPPORTED;
}
+static bool vfswrap_is_offline(struct vfs_handle_struct *handle,
+ const struct smb_filename *fname,
+ SMB_STRUCT_STAT *sbuf);
+
static NTSTATUS vfswrap_get_dos_attributes(struct vfs_handle_struct *handle,
struct smb_filename *smb_fname,
uint32_t *dosmode)
{
+ bool offline;
+
+ offline = vfswrap_is_offline(handle, smb_fname, &smb_fname->st);
+ if (offline) {
+ *dosmode |= FILE_ATTRIBUTE_OFFLINE;
+ }
+
return get_ea_dos_attribute(handle->conn, smb_fname, dosmode);
}
+/*
+ * State for the async fetch of the DOS attributes xattr
+ * (SAMBA_XATTR_DOS_ATTRIB) relative to an open directory handle.
+ */
+struct vfswrap_get_dos_attributes_state {
+	struct vfs_aio_state aio_state;	/* error/duration from GETXATTRAT */
+	connection_struct *conn;
+	TALLOC_CTX *mem_ctx;
+	struct tevent_context *ev;
+	files_struct *dir_fsp;		/* directory the name is relative to */
+	struct smb_filename *smb_fname;
+	uint32_t dosmode;		/* parsed result for the recv fn */
+	bool as_root;			/* true once we retried as root */
+};
+
+static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq);
+
+/*
+ * Start an async read of the DOS attributes of smb_fname (relative to
+ * dir_fsp) by requesting the SAMBA_XATTR_DOS_ATTRIB xattr via
+ * SMB_VFS_GETXATTRAT_SEND. Completion is handled in
+ * vfswrap_get_dos_attributes_getxattr_done(); collect the result with
+ * vfswrap_get_dos_attributes_recv().
+ */
+static struct tevent_req *vfswrap_get_dos_attributes_send(
+	TALLOC_CTX *mem_ctx,
+	struct tevent_context *ev,
+	struct vfs_handle_struct *handle,
+	files_struct *dir_fsp,
+	struct smb_filename *smb_fname)
+{
+	struct tevent_req *req = NULL;
+	struct tevent_req *subreq = NULL;
+	struct vfswrap_get_dos_attributes_state *state = NULL;
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct vfswrap_get_dos_attributes_state);
+	if (req == NULL) {
+		return NULL;
+	}
+
+	*state = (struct vfswrap_get_dos_attributes_state) {
+		.conn = dir_fsp->conn,
+		.mem_ctx = mem_ctx,
+		.ev = ev,
+		.dir_fsp = dir_fsp,
+		.smb_fname = smb_fname,
+	};
+
+	/* Ask for up to sizeof(fstring) bytes of xattr payload. */
+	subreq = SMB_VFS_GETXATTRAT_SEND(state,
+					 ev,
+					 dir_fsp,
+					 smb_fname,
+					 SAMBA_XATTR_DOS_ATTRIB,
+					 sizeof(fstring));
+	if (tevent_req_nomem(subreq, req)) {
+		return tevent_req_post(req, ev);
+	}
+	tevent_req_set_callback(subreq,
+				vfswrap_get_dos_attributes_getxattr_done,
+				req);
+
+	return req;
+}
+
+/*
+ * Completion handler for the GETXATTRAT subrequest. On
+ * NT_STATUS_ACCESS_DENIED the xattr read is retried exactly once with
+ * root privileges (state->as_root guards against looping); on success
+ * the xattr blob is parsed into state->dosmode.
+ */
+static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req =
+		tevent_req_callback_data(subreq,
+		struct tevent_req);
+	struct vfswrap_get_dos_attributes_state *state =
+		tevent_req_data(req,
+		struct vfswrap_get_dos_attributes_state);
+	ssize_t xattr_size;
+	DATA_BLOB blob = {0};
+	NTSTATUS status;
+
+	xattr_size = SMB_VFS_GETXATTRAT_RECV(subreq,
+					     &state->aio_state,
+					     state,
+					     &blob.data);
+	TALLOC_FREE(subreq);
+	if (xattr_size == -1) {
+		status = map_nt_error_from_unix(state->aio_state.error);
+
+		/* Already retried as root? Then the error is final. */
+		if (state->as_root) {
+			tevent_req_nterror(req, status);
+			return;
+		}
+		if (!NT_STATUS_EQUAL(status, NT_STATUS_ACCESS_DENIED)) {
+			tevent_req_nterror(req, status);
+			return;
+		}
+
+		state->as_root = true;
+
+		/*
+		 * Retry the xattr read with root privileges; only the
+		 * _SEND call runs as root, the completion comes back
+		 * to this same callback.
+		 */
+		become_root();
+		subreq = SMB_VFS_GETXATTRAT_SEND(state,
+						 state->ev,
+						 state->dir_fsp,
+						 state->smb_fname,
+						 SAMBA_XATTR_DOS_ATTRIB,
+						 sizeof(fstring));
+		unbecome_root();
+		if (tevent_req_nomem(subreq, req)) {
+			return;
+		}
+		tevent_req_set_callback(subreq,
+					vfswrap_get_dos_attributes_getxattr_done,
+					req);
+		return;
+	}
+
+	blob.length = xattr_size;
+
+	status = parse_dos_attribute_blob(state->smb_fname,
+					  blob,
+					  &state->dosmode);
+	if (!NT_STATUS_IS_OK(status)) {
+		tevent_req_nterror(req, status);
+		return;
+	}
+
+	tevent_req_done(req);
+	return;
+}
+
+/*
+ * Receive the result of vfswrap_get_dos_attributes_send(): on success
+ * returns NT_STATUS_OK with the parsed DOS mode in *dosmode and the
+ * I/O statistics in *aio_state; on failure returns the error status.
+ */
+static NTSTATUS vfswrap_get_dos_attributes_recv(struct tevent_req *req,
+						struct vfs_aio_state *aio_state,
+						uint32_t *dosmode)
+{
+	struct vfswrap_get_dos_attributes_state *state =
+		tevent_req_data(req,
+		struct vfswrap_get_dos_attributes_state);
+	NTSTATUS status;
+
+	if (tevent_req_is_nterror(req, &status)) {
+		tevent_req_received(req);
+		return status;
+	}
+
+	*aio_state = state->aio_state;
+	*dosmode = state->dosmode;
+	tevent_req_received(req);
+	return NT_STATUS_OK;
+}
+
static NTSTATUS vfswrap_fget_dos_attributes(struct vfs_handle_struct *handle,
struct files_struct *fsp,
uint32_t *dosmode)
{
+ bool offline;
+
+ offline = vfswrap_is_offline(handle, fsp->fsp_name, &fsp->fsp_name->st);
+ if (offline) {
+ *dosmode |= FILE_ATTRIBUTE_OFFLINE;
+ }
+
return get_ea_dos_attribute(handle->conn, fsp->fsp_name, dosmode);
}
return set_ea_dos_attribute(handle->conn, fsp->fsp_name, dosmode);
}
-struct vfs_cc_state {
- off_t copied;
+/* Process-wide token database for server-side copy offload requests. */
+static struct vfs_offload_ctx *vfswrap_offload_ctx;
+
+/* Carries the generated resume-key token from send to recv. */
+struct vfswrap_offload_read_state {
+	DATA_BLOB token;
+};
+
+/*
+ * "Async" offload-read: only FSCTL_SRV_REQUEST_RESUME_KEY is
+ * supported. Creates a copy-chunk resume-key token for fsp, stores it
+ * in the offload token database and completes immediately (the
+ * request is always finished and posted before returning).
+ */
+static struct tevent_req *vfswrap_offload_read_send(
+	TALLOC_CTX *mem_ctx,
+	struct tevent_context *ev,
+	struct vfs_handle_struct *handle,
+	struct files_struct *fsp,
+	uint32_t fsctl,
+	uint32_t ttl,
+	off_t offset,
+	size_t to_copy)
+{
+	struct tevent_req *req = NULL;
+	struct vfswrap_offload_read_state *state = NULL;
+	NTSTATUS status;
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct vfswrap_offload_read_state);
+	if (req == NULL) {
+		return NULL;
+	}
+
+	/* Lazily initializes the per-client token context. */
+	status = vfs_offload_token_ctx_init(fsp->conn->sconn->client,
+					    &vfswrap_offload_ctx);
+	if (tevent_req_nterror(req, status)) {
+		return tevent_req_post(req, ev);
+	}
+
+	if (fsctl != FSCTL_SRV_REQUEST_RESUME_KEY) {
+		tevent_req_nterror(req, NT_STATUS_INVALID_DEVICE_REQUEST);
+		return tevent_req_post(req, ev);
+	}
+
+	status = vfs_offload_token_create_blob(state, fsp, fsctl,
+					       &state->token);
+	if (tevent_req_nterror(req, status)) {
+		return tevent_req_post(req, ev);
+	}
+
+	status = vfs_offload_token_db_store_fsp(vfswrap_offload_ctx, fsp,
+						&state->token);
+	if (tevent_req_nterror(req, status)) {
+		return tevent_req_post(req, ev);
+	}
+
+	tevent_req_done(req);
+	return tevent_req_post(req, ev);
+}
+
+/*
+ * Receive the offload-read result: moves ownership of the generated
+ * resume-key token blob to mem_ctx and hands it back in *token.
+ */
+static NTSTATUS vfswrap_offload_read_recv(struct tevent_req *req,
+					  struct vfs_handle_struct *handle,
+					  TALLOC_CTX *mem_ctx,
+					  DATA_BLOB *token)
+{
+	struct vfswrap_offload_read_state *state = tevent_req_data(
+		req, struct vfswrap_offload_read_state);
+	NTSTATUS status;
+
+	if (tevent_req_is_nterror(req, &status)) {
+		tevent_req_received(req);
+		return status;
+	}
+
+	token->length = state->token.length;
+	/* Transfer blob ownership to the caller's memory context. */
+	token->data = talloc_move(mem_ctx, &state->token.data);
+
+	tevent_req_received(req);
+	return NT_STATUS_OK;
+}
+
+/*
+ * State for a copy-chunk offload-write request: source/destination
+ * file handles with their event contexts, the current offsets, the
+ * total and remaining byte counts and the size of the next chunk I/O.
+ */
+struct vfswrap_offload_write_state {
	uint8_t *buf;
+	bool read_lck_locked;
+	bool write_lck_locked;
+	DATA_BLOB *token;
+	struct tevent_context *src_ev;
+	struct files_struct *src_fsp;
+	off_t src_off;
+	struct tevent_context *dst_ev;
+	struct files_struct *dst_fsp;
+	off_t dst_off;
+	off_t to_copy;
+	off_t remaining;
+	size_t next_io_size;
};
-static struct tevent_req *vfswrap_copy_chunk_send(struct vfs_handle_struct *handle,
- TALLOC_CTX *mem_ctx,
- struct tevent_context *ev,
- struct files_struct *src_fsp,
- off_t src_off,
- struct files_struct *dest_fsp,
- off_t dest_off,
- off_t num)
+/*
+ * tevent_req cleanup function for the offload-write request: if a
+ * destination fsp is still recorded, switch back to its user context
+ * (asserting success) and drop the reference. req_state is required
+ * by the cleanup-fn signature but not needed here.
+ */
+static void vfswrap_offload_write_cleanup(struct tevent_req *req,
+					  enum tevent_req_state req_state)
+{
+	struct vfswrap_offload_write_state *state = tevent_req_data(
+		req, struct vfswrap_offload_write_state);
+	bool ok;
+
+	if (state->dst_fsp == NULL) {
+		return;
+	}
+
+	ok = change_to_user_by_fsp(state->dst_fsp);
+	SMB_ASSERT(ok);
+	state->dst_fsp = NULL;
+}
+
+static NTSTATUS vfswrap_offload_write_loop(struct tevent_req *req);
+
+/*
+ * Start a server-side copy (FSCTL_SRV_COPYCHUNK[_WRITE]): validate the
+ * fsctl and offsets, resolve the source fsp from the offload token,
+ * then kick off the async read/write loop.  Other offload fsctls are
+ * rejected (FSCTL_OFFLOAD_WRITE not implemented, COW clones not
+ * supported by vfs_default).
+ */
+static struct tevent_req *vfswrap_offload_write_send(
+ struct vfs_handle_struct *handle,
+ TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ uint32_t fsctl,
+ DATA_BLOB *token,
+ off_t transfer_offset,
+ struct files_struct *dest_fsp,
+ off_t dest_off,
+ off_t to_copy)
{
struct tevent_req *req;
- struct vfs_cc_state *vfs_cc_state;
+ struct vfswrap_offload_write_state *state = NULL;
+ /* off_t is signed! */
+ off_t max_offset = INT64_MAX - to_copy;
+ size_t num = MIN(to_copy, COPYCHUNK_MAX_TOTAL_LEN);
+ files_struct *src_fsp = NULL;
NTSTATUS status;
+ bool ok;
- DEBUG(10, ("performing server side copy chunk of length %lu\n",
- (unsigned long)num));
-
- req = tevent_req_create(mem_ctx, &vfs_cc_state, struct vfs_cc_state);
+ req = tevent_req_create(mem_ctx, &state,
+ struct vfswrap_offload_write_state);
if (req == NULL) {
return NULL;
}
- vfs_cc_state->buf = talloc_array(vfs_cc_state, uint8_t,
- MIN(num, 8*1024*1024));
- if (tevent_req_nomem(vfs_cc_state->buf, req)) {
+ *state = (struct vfswrap_offload_write_state) {
+ .token = token,
+ .src_off = transfer_offset,
+ .dst_ev = ev,
+ .dst_fsp = dest_fsp,
+ .dst_off = dest_off,
+ .to_copy = to_copy,
+ .remaining = to_copy,
+ };
+
+ tevent_req_set_cleanup_fn(req, vfswrap_offload_write_cleanup);
+
+ switch (fsctl) {
+ case FSCTL_SRV_COPYCHUNK:
+ case FSCTL_SRV_COPYCHUNK_WRITE:
+ break;
+
+ case FSCTL_OFFLOAD_WRITE:
+ tevent_req_nterror(req, NT_STATUS_NOT_IMPLEMENTED);
+ return tevent_req_post(req, ev);
+
+ case FSCTL_DUP_EXTENTS_TO_FILE:
+ DBG_DEBUG("COW clones not supported by vfs_default\n");
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+
+ default:
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return tevent_req_post(req, ev);
+ }
+
+ /*
+ * From here on we assume a copy-chunk fsctl
+ */
+
+ if (to_copy == 0) {
+ tevent_req_done(req);
+ return tevent_req_post(req, ev);
+ }
+
+ if (state->src_off > max_offset) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->src_off < 0) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->dst_off > max_offset) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->dst_off < 0) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+
+ /* Look up the source file handle from the copy-chunk token. */
+ status = vfs_offload_token_db_fetch_fsp(vfswrap_offload_ctx,
+ token, &src_fsp);
+ if (tevent_req_nterror(req, status)) {
+ return tevent_req_post(req, ev);
+ }
+
+ DBG_DEBUG("server side copy chunk of length %" PRIu64 "\n", to_copy);
+
+ status = vfs_offload_token_check_handles(fsctl, src_fsp, dest_fsp);
+ if (!NT_STATUS_IS_OK(status)) {
+ tevent_req_nterror(req, status);
+ return tevent_req_post(req, ev);
+ }
+
+ /* The read side of the loop runs as the source fsp's user. */
+ ok = change_to_user_by_fsp(src_fsp);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
return tevent_req_post(req, ev);
}
+ state->src_ev = src_fsp->conn->sconn->ev_ctx;
+ state->src_fsp = src_fsp;
+
status = vfs_stat_fsp(src_fsp);
if (tevent_req_nterror(req, status)) {
return tevent_req_post(req, ev);
}
- if (src_fsp->fsp_name->st.st_ex_size < src_off + num) {
+ if (src_fsp->fsp_name->st.st_ex_size < state->src_off + to_copy) {
/*
* [MS-SMB2] 3.3.5.15.6 Handling a Server-Side Data Copy Request
* If the SourceOffset or SourceOffset + Length extends beyond
return tevent_req_post(req, ev);
}
- /* could use 2.6.33+ sendfile here to do this in kernel */
- while (vfs_cc_state->copied < num) {
- ssize_t ret;
- struct lock_struct lck;
- int saved_errno;
+ state->buf = talloc_array(state, uint8_t, num);
+ if (tevent_req_nomem(state->buf, req)) {
+ return tevent_req_post(req, ev);
+ }
- off_t this_num = MIN(talloc_array_length(vfs_cc_state->buf),
- num - vfs_cc_state->copied);
+ status = vfswrap_offload_write_loop(req);
+ if (!NT_STATUS_IS_OK(status)) {
+ tevent_req_nterror(req, status);
+ return tevent_req_post(req, ev);
+ }
- if (src_fsp->op == NULL) {
- tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
- return tevent_req_post(req, ev);
- }
- init_strict_lock_struct(src_fsp,
- src_fsp->op->global->open_persistent_id,
- src_off,
- this_num,
- READ_LOCK,
- &lck);
-
- if (!SMB_VFS_STRICT_LOCK(src_fsp->conn, src_fsp, &lck)) {
- tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
- return tevent_req_post(req, ev);
- }
+ return req;
+}
- ret = SMB_VFS_PREAD(src_fsp, vfs_cc_state->buf,
- this_num, src_off);
- if (ret == -1) {
- saved_errno = errno;
- }
+static void vfswrap_offload_write_read_done(struct tevent_req *subreq);
- SMB_VFS_STRICT_UNLOCK(src_fsp->conn, src_fsp, &lck);
+/*
+ * One iteration of the copy loop: size the next chunk, check byte-range
+ * locks on the source and issue the async pread.  Must be entered while
+ * impersonating state->src_fsp's user.
+ */
+static NTSTATUS vfswrap_offload_write_loop(struct tevent_req *req)
+{
+ struct vfswrap_offload_write_state *state = tevent_req_data(
+ req, struct vfswrap_offload_write_state);
+ struct tevent_req *subreq = NULL;
+ struct lock_struct read_lck;
+ bool ok;
- if (ret == -1) {
- errno = saved_errno;
- tevent_req_nterror(req, map_nt_error_from_unix(errno));
- return tevent_req_post(req, ev);
- }
- if (ret != this_num) {
- /* zero tolerance for short reads */
- tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
- return tevent_req_post(req, ev);
- }
+ /*
+ * This is called under the context of state->src_fsp.
+ */
+
+ state->next_io_size = MIN(state->remaining, talloc_array_length(state->buf));
+
+ init_strict_lock_struct(state->src_fsp,
+ state->src_fsp->op->global->open_persistent_id,
+ state->src_off,
+ state->next_io_size,
+ READ_LOCK,
+ &read_lck);
+
+ ok = SMB_VFS_STRICT_LOCK_CHECK(state->src_fsp->conn,
+ state->src_fsp,
+ &read_lck);
+ if (!ok) {
+ return NT_STATUS_FILE_LOCK_CONFLICT;
+ }
+
+ subreq = SMB_VFS_PREAD_SEND(state,
+ state->src_ev,
+ state->src_fsp,
+ state->buf,
+ state->next_io_size,
+ state->src_off);
+ if (subreq == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
+ tevent_req_set_callback(subreq, vfswrap_offload_write_read_done, req);
+
+ return NT_STATUS_OK;
+}
+
+static void vfswrap_offload_write_write_done(struct tevent_req *subreq);
+
+/*
+ * pread completed: verify a full-length read, switch to the destination
+ * fsp's user, check write locks and issue the async pwrite of the chunk
+ * just read.  Short reads are treated as hard errors.
+ */
+static void vfswrap_offload_write_read_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ struct vfswrap_offload_write_state *state = tevent_req_data(
+ req, struct vfswrap_offload_write_state);
+ struct vfs_aio_state aio_state;
+ struct lock_struct write_lck;
+ ssize_t nread;
+ bool ok;
+
+ nread = SMB_VFS_PREAD_RECV(subreq, &aio_state);
+ TALLOC_FREE(subreq);
+ if (nread == -1) {
+ DBG_ERR("read failed: %s\n", strerror(aio_state.error));
+ tevent_req_nterror(req, map_nt_error_from_unix(aio_state.error));
+ return;
+ }
+ if (nread != state->next_io_size) {
+ DBG_ERR("Short read, only %zd of %zu\n",
+ nread, state->next_io_size);
+ tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
+ return;
+ }
+
+ state->src_off += nread;
+
+ /* The write side runs as the destination fsp's user. */
+ ok = change_to_user_by_fsp(state->dst_fsp);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return;
+ }
- src_off += ret;
+ init_strict_lock_struct(state->dst_fsp,
+ state->dst_fsp->op->global->open_persistent_id,
+ state->dst_off,
+ state->next_io_size,
+ WRITE_LOCK,
+ &write_lck);
- if (dest_fsp->op == NULL) {
- tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
- return tevent_req_post(req, ev);
- }
+ ok = SMB_VFS_STRICT_LOCK_CHECK(state->dst_fsp->conn,
+ state->dst_fsp,
+ &write_lck);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
+ return;
+ }
- init_strict_lock_struct(dest_fsp,
- dest_fsp->op->global->open_persistent_id,
- dest_off,
- this_num,
- WRITE_LOCK,
- &lck);
+ subreq = SMB_VFS_PWRITE_SEND(state,
+ state->dst_ev,
+ state->dst_fsp,
+ state->buf,
+ state->next_io_size,
+ state->dst_off);
+ if (subreq == NULL) {
+ tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
+ return;
+ }
+ tevent_req_set_callback(subreq, vfswrap_offload_write_write_done, req);
+}
- if (!SMB_VFS_STRICT_LOCK(dest_fsp->conn, dest_fsp, &lck)) {
- tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
- return tevent_req_post(req, ev);
- }
+/*
+ * pwrite completed: verify a full-length write, account for the bytes
+ * copied, and either finish the request or switch back to the source
+ * fsp's user and start the next loop iteration.
+ */
+static void vfswrap_offload_write_write_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ struct vfswrap_offload_write_state *state = tevent_req_data(
+ req, struct vfswrap_offload_write_state);
+ struct vfs_aio_state aio_state;
+ ssize_t nwritten;
+ NTSTATUS status;
+ bool ok;
- ret = SMB_VFS_PWRITE(dest_fsp, vfs_cc_state->buf,
- this_num, dest_off);
- if (ret == -1) {
- saved_errno = errno;
- }
+ nwritten = SMB_VFS_PWRITE_RECV(subreq, &aio_state);
+ TALLOC_FREE(subreq);
+ if (nwritten == -1) {
+ DBG_ERR("write failed: %s\n", strerror(aio_state.error));
+ tevent_req_nterror(req, map_nt_error_from_unix(aio_state.error));
+ return;
+ }
+ if (nwritten != state->next_io_size) {
+ DBG_ERR("Short write, only %zd of %zu\n", nwritten, state->next_io_size);
+ tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
+ return;
+ }
- SMB_VFS_STRICT_UNLOCK(src_fsp->conn, src_fsp, &lck);
+ state->dst_off += nwritten;
- if (ret == -1) {
- errno = saved_errno;
- tevent_req_nterror(req, map_nt_error_from_unix(errno));
- return tevent_req_post(req, ev);
- }
- if (ret != this_num) {
- /* zero tolerance for short writes */
- tevent_req_nterror(req, NT_STATUS_IO_DEVICE_ERROR);
- return tevent_req_post(req, ev);
- }
- dest_off += ret;
+ if (state->remaining < nwritten) {
+ /* Paranoia check */
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return;
+ }
+ state->remaining -= nwritten;
+ if (state->remaining == 0) {
+ tevent_req_done(req);
+ return;
+ }
- vfs_cc_state->copied += this_num;
+ /* More to copy: re-impersonate the source user for the next read. */
+ ok = change_to_user_by_fsp(state->src_fsp);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return;
+ }
- tevent_req_done(req);
- return tevent_req_post(req, ev);
+ status = vfswrap_offload_write_loop(req);
+ if (!NT_STATUS_IS_OK(status)) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ return;
}
-static NTSTATUS vfswrap_copy_chunk_recv(struct vfs_handle_struct *handle,
+static NTSTATUS vfswrap_offload_write_recv(struct vfs_handle_struct *handle,
struct tevent_req *req,
off_t *copied)
{
- struct vfs_cc_state *vfs_cc_state = tevent_req_data(req,
- struct vfs_cc_state);
+ struct vfswrap_offload_write_state *state = tevent_req_data(
+ req, struct vfswrap_offload_write_state);
NTSTATUS status;
if (tevent_req_is_nterror(req, &status)) {
- DEBUG(2, ("server side copy chunk failed: %s\n",
- nt_errstr(status)));
+ DBG_DEBUG("copy chunk failed: %s\n", nt_errstr(status));
*copied = 0;
tevent_req_received(req);
return status;
}
- *copied = vfs_cc_state->copied;
- DEBUG(10, ("server side copy chunk copied %lu\n",
- (unsigned long)*copied));
+ *copied = state->to_copy;
+ DBG_DEBUG("copy chunk copied %lu\n", (unsigned long)*copied);
tevent_req_received(req);
return NT_STATUS_OK;
int result;
START_PROFILE(syscall_chmod);
-
- /*
- * We need to do this due to the fact that the default POSIX ACL
- * chmod modifies the ACL *mask* for the group owner, not the
- * group owner bits directly. JRA.
- */
-
-
- {
- int saved_errno = errno; /* We might get ENOSYS */
- result = SMB_VFS_CHMOD_ACL(handle->conn,
- smb_fname,
- mode);
- if (result == 0) {
- END_PROFILE(syscall_chmod);
- return result;
- }
- /* Error - return the old errno. */
- errno = saved_errno;
- }
-
result = chmod(smb_fname->base_name, mode);
END_PROFILE(syscall_chmod);
return result;
int result;
START_PROFILE(syscall_fchmod);
-
- /*
- * We need to do this due to the fact that the default POSIX ACL
- * chmod modifies the ACL *mask* for the group owner, not the
- * group owner bits directly. JRA.
- */
-
- {
- int saved_errno = errno; /* We might get ENOSYS */
- if ((result = SMB_VFS_FCHMOD_ACL(fsp, mode)) == 0) {
- END_PROFILE(syscall_fchmod);
- return result;
- }
- /* Error - return the old errno. */
- errno = saved_errno;
- }
-
#if defined(HAVE_FCHMOD)
result = fchmod(fsp->fh->fd, mode);
#else
return result;
}
-static int vfswrap_chdir(vfs_handle_struct *handle, const char *path)
+/* chdir(2) wrapper: change directory to smb_fname->base_name. */
+static int vfswrap_chdir(vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname)
{
int result;
START_PROFILE(syscall_chdir);
- result = chdir(path);
+ result = chdir(smb_fname->base_name);
END_PROFILE(syscall_chdir);
return result;
}
-static char *vfswrap_getwd(vfs_handle_struct *handle)
+/*
+ * Return the current working directory as a talloced struct
+ * smb_filename on ctx, or NULL on error (sys_getwd() failure or
+ * allocation failure).
+ */
+static struct smb_filename *vfswrap_getwd(vfs_handle_struct *handle,
+ TALLOC_CTX *ctx)
{
char *result;
+ struct smb_filename *smb_fname = NULL;
START_PROFILE(syscall_getwd);
result = sys_getwd();
END_PROFILE(syscall_getwd);
- return result;
+
+ if (result == NULL) {
+ return NULL;
+ }
+ smb_fname = synthetic_smb_fname(ctx,
+ result,
+ NULL,
+ NULL,
+ 0);
+ /*
+ * sys_getwd() *always* returns malloced memory.
+ * We must free here to avoid leaks:
+ * BUG:https://bugzilla.samba.org/show_bug.cgi?id=13372
+ */
+ SAFE_FREE(result);
+ return smb_fname;
}
/*********************************************************************
START_PROFILE(syscall_fcntl_lock);
- if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn),
- "smbd",
- "force process locks",
- false)) {
- op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks);
+ if (fsp->use_ofd_locks) {
+ op = map_process_lock_to_ofd_lock(op);
}
result = fcntl_lock(fsp->fh->fd, op, offset, count, type);
START_PROFILE(syscall_fcntl_getlock);
- if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn),
- "smbd",
- "force process locks",
- false)) {
- op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks);
+ if (fsp->use_ofd_locks) {
+ op = map_process_lock_to_ofd_lock(op);
}
result = fcntl_getlock(fsp->fh->fd, op, poffset, pcount, ptype, ppid);
return result;
}
-static int vfswrap_symlink(vfs_handle_struct *handle, const char *oldpath, const char *newpath)
+/* symlink(2) wrapper: create new_smb_fname pointing at link_target. */
+static int vfswrap_symlink(vfs_handle_struct *handle,
+ const char *link_target,
+ const struct smb_filename *new_smb_fname)
{
int result;
START_PROFILE(syscall_symlink);
- result = symlink(oldpath, newpath);
+ result = symlink(link_target, new_smb_fname->base_name);
END_PROFILE(syscall_symlink);
return result;
}
-static int vfswrap_readlink(vfs_handle_struct *handle, const char *path, char *buf, size_t bufsiz)
+/* readlink(2) wrapper; note readlink() does not NUL-terminate buf. */
+static int vfswrap_readlink(vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ char *buf,
+ size_t bufsiz)
{
int result;
START_PROFILE(syscall_readlink);
- result = readlink(path, buf, bufsiz);
+ result = readlink(smb_fname->base_name, buf, bufsiz);
END_PROFILE(syscall_readlink);
return result;
}
-static int vfswrap_link(vfs_handle_struct *handle, const char *oldpath, const char *newpath)
+/* link(2) wrapper: hard-link new_smb_fname to old_smb_fname. */
+static int vfswrap_link(vfs_handle_struct *handle,
+ const struct smb_filename *old_smb_fname,
+ const struct smb_filename *new_smb_fname)
{
int result;
START_PROFILE(syscall_link);
- result = link(oldpath, newpath);
+ result = link(old_smb_fname->base_name, new_smb_fname->base_name);
END_PROFILE(syscall_link);
return result;
}
-static int vfswrap_mknod(vfs_handle_struct *handle, const char *pathname, mode_t mode, SMB_DEV_T dev)
+/* mknod(2) wrapper via the sys_mknod() portability shim. */
+static int vfswrap_mknod(vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ mode_t mode,
+ SMB_DEV_T dev)
{
int result;
START_PROFILE(syscall_mknod);
- result = sys_mknod(pathname, mode, dev);
+ result = sys_mknod(smb_fname->base_name, mode, dev);
END_PROFILE(syscall_mknod);
return result;
}
-static char *vfswrap_realpath(vfs_handle_struct *handle, const char *path)
+/*
+ * realpath(3) wrapper: resolve smb_fname->base_name and return the
+ * canonical path as a talloced struct smb_filename on ctx, or NULL
+ * on failure.  The malloced sys_realpath() result is freed here.
+ */
+static struct smb_filename *vfswrap_realpath(vfs_handle_struct *handle,
+ TALLOC_CTX *ctx,
+ const struct smb_filename *smb_fname)
{
char *result;
+ struct smb_filename *result_fname = NULL;
START_PROFILE(syscall_realpath);
- result = sys_realpath(path);
+ result = sys_realpath(smb_fname->base_name);
END_PROFILE(syscall_realpath);
- return result;
+ if (result) {
+ result_fname = synthetic_smb_fname(ctx, result, NULL, NULL, 0);
+ SAFE_FREE(result);
+ }
+ return result_fname;
}
-static int vfswrap_chflags(vfs_handle_struct *handle, const char *path,
- unsigned int flags)
+static int vfswrap_chflags(vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ unsigned int flags)
{
#ifdef HAVE_CHFLAGS
- return chflags(path, flags);
+ return chflags(smb_fname->base_name, flags);
#else
errno = ENOSYS;
return -1;
}
+/* Default connectpath: the share root, regardless of smb_fname. */
static const char *vfswrap_connectpath(struct vfs_handle_struct *handle,
- const char *fname)
+ const struct smb_filename *smb_fname)
{
return handle->conn->connectpath;
}
return brl_lock_cancel_default(br_lck, plock);
}
-static bool vfswrap_strict_lock(struct vfs_handle_struct *handle,
- files_struct *fsp,
- struct lock_struct *plock)
-{
- SMB_ASSERT(plock->lock_type == READ_LOCK ||
- plock->lock_type == WRITE_LOCK);
-
- return strict_lock_default(fsp, plock);
-}
-
-static void vfswrap_strict_unlock(struct vfs_handle_struct *handle,
- files_struct *fsp,
- struct lock_struct *plock)
+/*
+ * Check (without taking) whether a byte range is free of conflicting
+ * locks.  Replaces the old strict_lock/strict_unlock pair.
+ */
+static bool vfswrap_strict_lock_check(struct vfs_handle_struct *handle,
+ files_struct *fsp,
+ struct lock_struct *plock)
{
SMB_ASSERT(plock->lock_type == READ_LOCK ||
plock->lock_type == WRITE_LOCK);
- strict_unlock_default(fsp, plock);
+ return strict_lock_check_default(fsp, plock);
}
/* NT ACL operations. */
return NT_STATUS_OK; /* Nothing to do here ... */
}
-static int vfswrap_chmod_acl(vfs_handle_struct *handle,
- const struct smb_filename *smb_fname,
- mode_t mode)
-{
-#ifdef HAVE_NO_ACL
- errno = ENOSYS;
- return -1;
-#else
- int result;
-
- START_PROFILE(chmod_acl);
- result = chmod_acl(handle->conn, smb_fname->base_name, mode);
- END_PROFILE(chmod_acl);
- return result;
-#endif
-}
-
-static int vfswrap_fchmod_acl(vfs_handle_struct *handle, files_struct *fsp, mode_t mode)
-{
-#ifdef HAVE_NO_ACL
- errno = ENOSYS;
- return -1;
-#else
- int result;
-
- START_PROFILE(fchmod_acl);
- result = fchmod_acl(fsp, mode);
- END_PROFILE(fchmod_acl);
- return result;
-#endif
-}
-
+/* POSIX ACL fetch by path; thin shim over sys_acl_get_file(). */
static SMB_ACL_T vfswrap_sys_acl_get_file(vfs_handle_struct *handle,
- const char *path_p,
+ const struct smb_filename *smb_fname,
SMB_ACL_TYPE_T type,
TALLOC_CTX *mem_ctx)
{
- return sys_acl_get_file(handle, path_p, type, mem_ctx);
+ return sys_acl_get_file(handle, smb_fname, type, mem_ctx);
}
static SMB_ACL_T vfswrap_sys_acl_get_fd(vfs_handle_struct *handle,
return sys_acl_get_fd(handle, fsp, mem_ctx);
}
-static int vfswrap_sys_acl_set_file(vfs_handle_struct *handle, const char *name, SMB_ACL_TYPE_T acltype, SMB_ACL_T theacl)
+/* POSIX ACL set by path; thin shim over sys_acl_set_file(). */
+static int vfswrap_sys_acl_set_file(vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ SMB_ACL_TYPE_T acltype,
+ SMB_ACL_T theacl)
{
- return sys_acl_set_file(handle, name, acltype, theacl);
+ return sys_acl_set_file(handle, smb_fname, acltype, theacl);
}
static int vfswrap_sys_acl_set_fd(vfs_handle_struct *handle, files_struct *fsp, SMB_ACL_T theacl)
return sys_acl_set_fd(handle, fsp, theacl);
}
-static int vfswrap_sys_acl_delete_def_file(vfs_handle_struct *handle, const char *path)
+/* Delete a directory's default POSIX ACL. */
+static int vfswrap_sys_acl_delete_def_file(vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname)
{
- return sys_acl_delete_def_file(handle, path);
+ return sys_acl_delete_def_file(handle, smb_fname);
}
/****************************************************************
Extended attribute operations.
*****************************************************************/
-static ssize_t vfswrap_getxattr(struct vfs_handle_struct *handle,const char *path, const char *name, void *value, size_t size)
+/* getxattr(2) wrapper: fetch extended attribute "name" by path. */
+static ssize_t vfswrap_getxattr(struct vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ const char *name,
+ void *value,
+ size_t size)
+{
+ return getxattr(smb_fname->base_name, name, value, size);
+}
+
+/*
+ * State for the async getxattrat implementation.  Set up on the main
+ * thread in _send(); the talloced members below are also read/written
+ * by the worker thread in vfswrap_getxattrat_do_async().
+ */
+struct vfswrap_getxattrat_state {
+ struct tevent_context *ev;
+ files_struct *dir_fsp;
+ const struct smb_filename *smb_fname;
+ struct tevent_req *req;
+
+ /*
+ * The following variables are talloced off "state" which is protected
+ * by a destructor and thus are guaranteed to be safe to be used in the
+ * job function in the worker thread.
+ */
+ char *name;
+ const char *xattr_name;
+ uint8_t *xattr_value;
+ struct security_unix_token *token;
+
+ ssize_t xattr_size;
+ struct vfs_aio_state vfs_aio_state;
+ SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
+};
+
+/*
+ * Returning -1 from a talloc destructor blocks talloc_free(), so the
+ * state (and the buffers handed to the worker thread) cannot be freed
+ * while a thread may still be using them.  The destructor is removed
+ * in vfswrap_getxattrat_done() once the job has finished.
+ */
+static int vfswrap_getxattrat_state_destructor(
+ struct vfswrap_getxattrat_state *state)
+{
+ return -1;
+}
+
+static void vfswrap_getxattrat_do_sync(struct tevent_req *req);
+static void vfswrap_getxattrat_do_async(void *private_data);
+static void vfswrap_getxattrat_done(struct tevent_req *subreq);
+
+/*
+ * Async getxattr relative to an open directory handle.  Runs the
+ * fchdir()+getxattr() emulation on a pthreadpool thread when per-thread
+ * cwd and per-thread credentials are available; otherwise falls back to
+ * synchronous processing.  alloc_hint == 0 means the caller only wants
+ * the attribute size.
+ */
+static struct tevent_req *vfswrap_getxattrat_send(
+ TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct vfs_handle_struct *handle,
+ files_struct *dir_fsp,
+ const struct smb_filename *smb_fname,
+ const char *xattr_name,
+ size_t alloc_hint)
+{
+ struct tevent_req *req = NULL;
+ struct tevent_req *subreq = NULL;
+ struct vfswrap_getxattrat_state *state = NULL;
+ size_t max_threads = 0;
+ bool have_per_thread_cwd = false;
+ bool have_per_thread_creds = false;
+ bool do_async = false;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct vfswrap_getxattrat_state);
+ if (req == NULL) {
+ return NULL;
+ }
+ *state = (struct vfswrap_getxattrat_state) {
+ .ev = ev,
+ .dir_fsp = dir_fsp,
+ .smb_fname = smb_fname,
+ .req = req,
+ };
+
+ max_threads = pthreadpool_tevent_max_threads(dir_fsp->conn->sconn->pool);
+ if (max_threads >= 1) {
+ /*
+ * We need a non sync threadpool!
+ */
+ have_per_thread_cwd = per_thread_cwd_supported();
+ }
+#ifdef HAVE_LINUX_THREAD_CREDENTIALS
+ have_per_thread_creds = true;
+#endif
+ if (have_per_thread_cwd && have_per_thread_creds) {
+ do_async = true;
+ }
+
+ SMBPROFILE_BYTES_ASYNC_START(syscall_asys_getxattrat, profile_p,
+ state->profile_bytes, 0);
+
+ if (dir_fsp->fh->fd == -1) {
+ DBG_ERR("Need a valid directory fd\n");
+ tevent_req_error(req, EINVAL);
+ return tevent_req_post(req, ev);
+ }
+
+ if (alloc_hint > 0) {
+ state->xattr_value = talloc_zero_array(state,
+ uint8_t,
+ alloc_hint);
+ if (tevent_req_nomem(state->xattr_value, req)) {
+ return tevent_req_post(req, ev);
+ }
+ }
+
+ if (!do_async) {
+ vfswrap_getxattrat_do_sync(req);
+ return tevent_req_post(req, ev);
+ }
+
+ /*
+ * Now allocate all parameters from a memory context that won't go away
+ * no matter what. These paremeters will get used in threads and we
+ * can't reliably cancel threads, so all buffers passed to the threads
+ * must not be freed before all referencing threads terminate.
+ */
+
+ state->name = talloc_strdup(state, smb_fname->base_name);
+ if (tevent_req_nomem(state->name, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ state->xattr_name = talloc_strdup(state, xattr_name);
+ if (tevent_req_nomem(state->xattr_name, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ /*
+ * This is a hot codepath so at first glance one might think we should
+ * somehow optimize away the token allocation and do a
+ * talloc_reference() or similar black magic instead. But due to the
+ * talloc_stackframe pool per SMB2 request this should be a simple copy
+ * without a malloc in most cases.
+ */
+ if (geteuid() == sec_initial_uid()) {
+ state->token = root_unix_token(state);
+ } else {
+ state->token = copy_unix_token(
+ state,
+ dir_fsp->conn->session_info->unix_token);
+ }
+ if (tevent_req_nomem(state->token, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+
+ subreq = pthreadpool_tevent_job_send(
+ state,
+ ev,
+ dir_fsp->conn->sconn->pool,
+ vfswrap_getxattrat_do_async,
+ state);
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq, vfswrap_getxattrat_done, req);
+
+ /* Protect "state" from talloc_free() while the thread runs. */
+ talloc_set_destructor(state, vfswrap_getxattrat_state_destructor);
+
+ return req;
+}
+
+/*
+ * Synchronous fallback for getxattrat: build the full path below the
+ * directory handle and call getxattr() on it directly.  Used when no
+ * usable threadpool exists or thread creation failed (EAGAIN fallback
+ * from vfswrap_getxattrat_done()).
+ */
+static void vfswrap_getxattrat_do_sync(struct tevent_req *req)
+{
+ struct vfswrap_getxattrat_state *state = tevent_req_data(
+ req, struct vfswrap_getxattrat_state);
+ char *path = NULL;
+ char *tofree = NULL;
+ char pathbuf[PATH_MAX+1];
+ ssize_t pathlen;
+ int err;
+
+ pathlen = full_path_tos(state->dir_fsp->fsp_name->base_name,
+ state->smb_fname->base_name,
+ pathbuf,
+ sizeof(pathbuf),
+ &path,
+ &tofree);
+ if (pathlen == -1) {
+ tevent_req_error(req, ENOMEM);
+ return;
+ }
+
+ state->xattr_size = getxattr(path,
+ state->xattr_name,
+ state->xattr_value,
+ talloc_array_length(state->xattr_value));
+ /* Save errno before TALLOC_FREE() can clobber it. */
+ err = errno;
+ TALLOC_FREE(tofree);
+ if (state->xattr_size == -1) {
+ tevent_req_error(req, err);
+ return;
+ }
+
+ tevent_req_done(req);
+ return;
+}
+
+/*
+ * Worker-thread job: emulate getxattrat() by activating a per-thread
+ * cwd, assuming the requesting user's credentials, fchdir()ing into the
+ * directory handle and calling getxattr() on the relative name.  Result
+ * and errno are stored in "state" for vfswrap_getxattrat_done().
+ */
+static void vfswrap_getxattrat_do_async(void *private_data)
+{
+ struct vfswrap_getxattrat_state *state = talloc_get_type_abort(
+ private_data, struct vfswrap_getxattrat_state);
+ struct timespec start_time;
+ struct timespec end_time;
+ int ret;
+
+ PROFILE_TIMESTAMP(&start_time);
+ SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
+
+ /*
+ * Here we simulate a getxattrat()
+ * call using fchdir();getxattr()
+ */
+
+ per_thread_cwd_activate();
+
+ /* Become the correct credential on this thread. */
+ ret = set_thread_credentials(state->token->uid,
+ state->token->gid,
+ (size_t)state->token->ngroups,
+ state->token->groups);
+ if (ret != 0) {
+ state->xattr_size = -1;
+ state->vfs_aio_state.error = errno;
+ goto end_profile;
+ }
+
+ ret = fchdir(state->dir_fsp->fh->fd);
+ if (ret == -1) {
+ state->xattr_size = -1;
+ state->vfs_aio_state.error = errno;
+ goto end_profile;
+ }
+
+ state->xattr_size = getxattr(state->name,
+ state->xattr_name,
+ state->xattr_value,
+ talloc_array_length(state->xattr_value));
+ if (state->xattr_size == -1) {
+ state->vfs_aio_state.error = errno;
+ }
+
+end_profile:
+ PROFILE_TIMESTAMP(&end_time);
+ state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time);
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+}
+
+/*
+ * Threadpool job finished: re-impersonate the user on the main thread,
+ * clear the free-protection destructor, fall back to sync processing on
+ * EAGAIN (thread creation failed), and shrink the value buffer to the
+ * actual attribute size.
+ */
+static void vfswrap_getxattrat_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ struct vfswrap_getxattrat_state *state = tevent_req_data(
+ req, struct vfswrap_getxattrat_state);
+ int ret;
+ bool ok;
+
+ /*
+ * Make sure we run as the user again
+ */
+ ok = change_to_user_by_fsp(state->dir_fsp);
+ SMB_ASSERT(ok);
+
+ ret = pthreadpool_tevent_job_recv(subreq);
+ TALLOC_FREE(subreq);
+ SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
+ /* Thread is done; state may be freed again. */
+ talloc_set_destructor(state, NULL);
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+ * thread. Fallback to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfswrap_getxattrat_do_sync(req);
+ return;
+ }
+
+ if (state->xattr_size == -1) {
+ tevent_req_error(req, state->vfs_aio_state.error);
+ return;
+ }
+
+ if (state->xattr_value == NULL) {
+ /*
+ * The caller only wanted the size.
+ */
+ tevent_req_done(req);
+ return;
+ }
+
+ /*
+ * shrink the buffer to the returned size.
+ * (can't fail). It means NULL if size is 0.
+ */
+ state->xattr_value = talloc_realloc(state,
+ state->xattr_value,
+ uint8_t,
+ state->xattr_size);
+
+ tevent_req_done(req);
+}
+
+/*
+ * Receive side of getxattrat: returns the attribute size (or -1 with
+ * aio_state->error set) and, if requested, moves the value buffer onto
+ * mem_ctx.
+ */
+static ssize_t vfswrap_getxattrat_recv(struct tevent_req *req,
+ struct vfs_aio_state *aio_state,
+ TALLOC_CTX *mem_ctx,
+ uint8_t **xattr_value)
{
- return getxattr(path, name, value, size);
+ struct vfswrap_getxattrat_state *state = tevent_req_data(
+ req, struct vfswrap_getxattrat_state);
+ ssize_t xattr_size;
+
+ if (tevent_req_is_unix_error(req, &aio_state->error)) {
+ tevent_req_received(req);
+ return -1;
+ }
+
+ *aio_state = state->vfs_aio_state;
+ xattr_size = state->xattr_size;
+ if (xattr_value != NULL) {
+ *xattr_value = talloc_move(mem_ctx, &state->xattr_value);
+ }
+
+ tevent_req_received(req);
+ return xattr_size;
}
static ssize_t vfswrap_fgetxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, const char *name, void *value, size_t size)
return fgetxattr(fsp->fh->fd, name, value, size);
}
-static ssize_t vfswrap_listxattr(struct vfs_handle_struct *handle, const char *path, char *list, size_t size)
+/* listxattr(2) wrapper: enumerate extended attributes by path. */
+static ssize_t vfswrap_listxattr(struct vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ char *list,
+ size_t size)
{
- return listxattr(path, list, size);
+ return listxattr(smb_fname->base_name, list, size);
}
static ssize_t vfswrap_flistxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, char *list, size_t size)
return flistxattr(fsp->fh->fd, list, size);
}
-static int vfswrap_removexattr(struct vfs_handle_struct *handle, const char *path, const char *name)
+/* removexattr(2) wrapper: delete extended attribute "name" by path. */
+static int vfswrap_removexattr(struct vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ const char *name)
{
- return removexattr(path, name);
+ return removexattr(smb_fname->base_name, name);
}
static int vfswrap_fremovexattr(struct vfs_handle_struct *handle, struct files_struct *fsp, const char *name)
return fremovexattr(fsp->fh->fd, name);
}
-static int vfswrap_setxattr(struct vfs_handle_struct *handle, const char *path, const char *name, const void *value, size_t size, int flags)
+/* setxattr(2) wrapper: set extended attribute "name" by path. */
+static int vfswrap_setxattr(struct vfs_handle_struct *handle,
+ const struct smb_filename *smb_fname,
+ const char *name,
+ const void *value,
+ size_t size,
+ int flags)
{
- return setxattr(path, name, value, size, flags);
+ return setxattr(smb_fname->base_name, name, value, size, flags);
}
static int vfswrap_fsetxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, const char *name, const void *value, size_t size, int flags)
return offline;
}
-static int vfswrap_set_offline(struct vfs_handle_struct *handle,
- const struct smb_filename *fname)
-{
- /* We don't know how to set offline bit by default, needs to be overriden in the vfs modules */
-#if defined(ENOTSUP)
- errno = ENOTSUP;
-#endif
- return -1;
-}
-
static NTSTATUS vfswrap_durable_cookie(struct vfs_handle_struct *handle,
struct files_struct *fsp,
TALLOC_CTX *mem_ctx,
.mkdir_fn = vfswrap_mkdir,
.rmdir_fn = vfswrap_rmdir,
.closedir_fn = vfswrap_closedir,
- .init_search_op_fn = vfswrap_init_search_op,
/* File operations */
.open_fn = vfswrap_open,
.create_file_fn = vfswrap_create_file,
.close_fn = vfswrap_close,
- .read_fn = vfswrap_read,
.pread_fn = vfswrap_pread,
.pread_send_fn = vfswrap_pread_send,
.pread_recv_fn = vfswrap_pread_recv,
- .write_fn = vfswrap_write,
.pwrite_fn = vfswrap_pwrite,
.pwrite_send_fn = vfswrap_pwrite_send,
- .pwrite_recv_fn = vfswrap_asys_ssize_t_recv,
+ .pwrite_recv_fn = vfswrap_pwrite_recv,
.lseek_fn = vfswrap_lseek,
.sendfile_fn = vfswrap_sendfile,
.recvfile_fn = vfswrap_recvfile,
.rename_fn = vfswrap_rename,
- .fsync_fn = vfswrap_fsync,
.fsync_send_fn = vfswrap_fsync_send,
- .fsync_recv_fn = vfswrap_asys_int_recv,
+ .fsync_recv_fn = vfswrap_fsync_recv,
.stat_fn = vfswrap_stat,
.fstat_fn = vfswrap_fstat,
.lstat_fn = vfswrap_lstat,
.brl_lock_windows_fn = vfswrap_brl_lock_windows,
.brl_unlock_windows_fn = vfswrap_brl_unlock_windows,
.brl_cancel_windows_fn = vfswrap_brl_cancel_windows,
- .strict_lock_fn = vfswrap_strict_lock,
- .strict_unlock_fn = vfswrap_strict_unlock,
+ .strict_lock_check_fn = vfswrap_strict_lock_check,
.translate_name_fn = vfswrap_translate_name,
.fsctl_fn = vfswrap_fsctl,
.set_dos_attributes_fn = vfswrap_set_dos_attributes,
.fset_dos_attributes_fn = vfswrap_fset_dos_attributes,
.get_dos_attributes_fn = vfswrap_get_dos_attributes,
+ .get_dos_attributes_send_fn = vfswrap_get_dos_attributes_send,
+ .get_dos_attributes_recv_fn = vfswrap_get_dos_attributes_recv,
.fget_dos_attributes_fn = vfswrap_fget_dos_attributes,
- .copy_chunk_send_fn = vfswrap_copy_chunk_send,
- .copy_chunk_recv_fn = vfswrap_copy_chunk_recv,
+ .offload_read_send_fn = vfswrap_offload_read_send,
+ .offload_read_recv_fn = vfswrap_offload_read_recv,
+ .offload_write_send_fn = vfswrap_offload_write_send,
+ .offload_write_recv_fn = vfswrap_offload_write_recv,
.get_compression_fn = vfswrap_get_compression,
.set_compression_fn = vfswrap_set_compression,
/* POSIX ACL operations. */
- .chmod_acl_fn = vfswrap_chmod_acl,
- .fchmod_acl_fn = vfswrap_fchmod_acl,
-
.sys_acl_get_file_fn = vfswrap_sys_acl_get_file,
.sys_acl_get_fd_fn = vfswrap_sys_acl_get_fd,
.sys_acl_blob_get_file_fn = posix_sys_acl_blob_get_file,
/* EA operations. */
.getxattr_fn = vfswrap_getxattr,
+ .getxattrat_send_fn = vfswrap_getxattrat_send,
+ .getxattrat_recv_fn = vfswrap_getxattrat_recv,
.fgetxattr_fn = vfswrap_fgetxattr,
.listxattr_fn = vfswrap_listxattr,
.flistxattr_fn = vfswrap_flistxattr,
/* aio operations */
.aio_force_fn = vfswrap_aio_force,
- /* offline operations */
- .is_offline_fn = vfswrap_is_offline,
- .set_offline_fn = vfswrap_set_offline,
-
/* durable handle operations */
.durable_cookie_fn = vfswrap_durable_cookie,
.durable_disconnect_fn = vfswrap_durable_disconnect,
.durable_reconnect_fn = vfswrap_durable_reconnect,
};
-NTSTATUS vfs_default_init(void);
-NTSTATUS vfs_default_init(void)
+static_decl_vfs;
+NTSTATUS vfs_default_init(TALLOC_CTX *ctx)
{
+ /*
+ * Here we need to implement every call!
+ *
+ * As this is the end of the vfs module chain.
+ */
+ smb_vfs_assert_all_fns(&vfs_default_fns, DEFAULT_VFS_MODULE_NAME);
return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
DEFAULT_VFS_MODULE_NAME, &vfs_default_fns);
}