struct vfs_statvfs_struct statbuf;
int ret;
+ smb_fname_cpath = synthetic_smb_fname(talloc_tos(), conn->connectpath,
+ NULL, NULL, 0);
+ if (smb_fname_cpath == NULL) {
+ return caps;
+ }
+
ZERO_STRUCT(statbuf);
- ret = sys_statvfs(conn->connectpath, &statbuf);
+ ret = SMB_VFS_STATVFS(conn, smb_fname_cpath, &statbuf);
if (ret == 0) {
caps = statbuf.FsCapabilities;
}
/* Work out what timestamp resolution we can
* use when setting a timestamp. */
- smb_fname_cpath = synthetic_smb_fname(talloc_tos(), conn->connectpath,
- NULL, NULL, 0);
- if (smb_fname_cpath == NULL) {
- return caps;
- }
-
ret = SMB_VFS_STAT(conn, smb_fname_cpath);
if (ret == -1) {
TALLOC_FREE(smb_fname_cpath);
mode_t mode)
{
int result;
- bool has_dacl = False;
const char *path = smb_fname->base_name;
char *parent = NULL;
if (lp_inherit_acls(SNUM(handle->conn))
&& parent_dirname(talloc_tos(), path, &parent, NULL)
- && (has_dacl = directory_has_default_acl(handle->conn, parent))) {
+ && directory_has_default_acl(handle->conn, parent)) {
mode = (0777 & lp_directory_mask(SNUM(handle->conn)));
}
result = mkdir(path, mode);
- if (result == 0 && !has_dacl) {
- /*
- * We need to do this as the default behavior of POSIX ACLs
- * is to set the mask to be the requested group permission
- * bits, not the group permission bits to be the requested
- * group permission bits. This is not what we want, as it will
- * mess up any inherited ACL bits that were set. JRA.
- */
- int saved_errno = errno; /* We may get ENOSYS */
- if ((SMB_VFS_CHMOD_ACL(handle->conn, smb_fname, mode) == -1) &&
- (errno == ENOSYS)) {
- errno = saved_errno;
- }
- }
-
END_PROFILE(syscall_mkdir);
return result;
}
return result;
}
-static ssize_t vfswrap_read(vfs_handle_struct *handle, files_struct *fsp, void *data, size_t n)
-{
- ssize_t result;
-
- START_PROFILE_BYTES(syscall_read, n);
- result = sys_read(fsp->fh->fd, data, n);
- END_PROFILE_BYTES(syscall_read);
- return result;
-}
-
static ssize_t vfswrap_pread(vfs_handle_struct *handle, files_struct *fsp, void *data,
size_t n, off_t offset)
{
if (result == -1 && errno == ESPIPE) {
/* Maintain the fiction that pipes can be seeked (sought?) on. */
- result = SMB_VFS_READ(fsp, data, n);
+ result = sys_read(fsp->fh->fd, data, n);
fsp->fh->pos = 0;
}
#else /* HAVE_PREAD */
- off_t curr;
- int lerrno;
-
- curr = SMB_VFS_LSEEK(fsp, 0, SEEK_CUR);
- if (curr == -1 && errno == ESPIPE) {
- /* Maintain the fiction that pipes can be seeked (sought?) on. */
- result = SMB_VFS_READ(fsp, data, n);
- fsp->fh->pos = 0;
- return result;
- }
-
- if (SMB_VFS_LSEEK(fsp, offset, SEEK_SET) == -1) {
- return -1;
- }
-
- errno = 0;
- result = SMB_VFS_READ(fsp, data, n);
- lerrno = errno;
-
- SMB_VFS_LSEEK(fsp, curr, SEEK_SET);
- errno = lerrno;
-
+ errno = ENOSYS;
+ result = -1;
#endif /* HAVE_PREAD */
return result;
}
-static ssize_t vfswrap_write(vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n)
-{
- ssize_t result;
-
- START_PROFILE_BYTES(syscall_write, n);
- result = sys_write(fsp->fh->fd, data, n);
- END_PROFILE_BYTES(syscall_write);
- return result;
-}
-
static ssize_t vfswrap_pwrite(vfs_handle_struct *handle, files_struct *fsp, const void *data,
size_t n, off_t offset)
{
if (result == -1 && errno == ESPIPE) {
/* Maintain the fiction that pipes can be sought on. */
- result = SMB_VFS_WRITE(fsp, data, n);
+ result = sys_write(fsp->fh->fd, data, n);
}
#else /* HAVE_PWRITE */
- off_t curr;
- int lerrno;
-
- curr = SMB_VFS_LSEEK(fsp, 0, SEEK_CUR);
- if (curr == -1) {
- return -1;
- }
-
- if (SMB_VFS_LSEEK(fsp, offset, SEEK_SET) == -1) {
- return -1;
- }
-
- result = SMB_VFS_WRITE(fsp, data, n);
- lerrno = errno;
-
- SMB_VFS_LSEEK(fsp, curr, SEEK_SET);
- errno = lerrno;
-
+ errno = ENOSYS;
+ result = -1;
#endif /* HAVE_PWRITE */
return result;
}
-static int vfswrap_init_pool(struct smbd_server_connection *conn)
-{
- int ret;
-
- if (conn->pool != NULL) {
- return 0;
- }
-
- ret = pthreadpool_tevent_init(conn, lp_aio_max_threads(),
- &conn->pool);
- return ret;
-}
-
struct vfswrap_pread_state {
ssize_t ret;
int fd;
{
struct tevent_req *req, *subreq;
struct vfswrap_pread_state *state;
- int ret;
req = tevent_req_create(mem_ctx, &state, struct vfswrap_pread_state);
if (req == NULL) {
return NULL;
}
- ret = vfswrap_init_pool(handle->conn->sconn);
- if (tevent_req_error(req, ret)) {
- return tevent_req_post(req, ev);
- }
-
state->ret = -1;
state->fd = fsp->fh->fd;
state->buf = data;
TALLOC_FREE(subreq);
SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
talloc_set_destructor(state, NULL);
- if (tevent_req_error(req, ret)) {
- return;
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+ * thread. Fallback to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfs_pread_do(state);
}
tevent_req_done(req);
{
struct tevent_req *req, *subreq;
struct vfswrap_pwrite_state *state;
- int ret;
req = tevent_req_create(mem_ctx, &state, struct vfswrap_pwrite_state);
if (req == NULL) {
return NULL;
}
- ret = vfswrap_init_pool(handle->conn->sconn);
- if (tevent_req_error(req, ret)) {
- return tevent_req_post(req, ev);
- }
-
state->ret = -1;
state->fd = fsp->fh->fd;
state->buf = data;
TALLOC_FREE(subreq);
SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
talloc_set_destructor(state, NULL);
- if (tevent_req_error(req, ret)) {
- return;
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+ * thread. Fallback to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfs_pwrite_do(state);
}
tevent_req_done(req);
int fd;
struct vfs_aio_state vfs_aio_state;
- SMBPROFILE_BASIC_ASYNC_STATE(profile_basic);
+ SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
};
static void vfs_fsync_do(void *private_data);
{
struct tevent_req *req, *subreq;
struct vfswrap_fsync_state *state;
- int ret;
req = tevent_req_create(mem_ctx, &state, struct vfswrap_fsync_state);
if (req == NULL) {
return NULL;
}
- ret = vfswrap_init_pool(handle->conn->sconn);
- if (tevent_req_error(req, ret)) {
- return tevent_req_post(req, ev);
- }
-
state->ret = -1;
state->fd = fsp->fh->fd;
- SMBPROFILE_BASIC_ASYNC_START(syscall_asys_fsync, profile_p,
- state->profile_basic);
+ SMBPROFILE_BYTES_ASYNC_START(syscall_asys_fsync, profile_p,
+ state->profile_bytes, 0);
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
subreq = pthreadpool_tevent_job_send(
state, ev, handle->conn->sconn->pool, vfs_fsync_do, state);
struct timespec start_time;
struct timespec end_time;
+ SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
+
PROFILE_TIMESTAMP(&start_time);
do {
PROFILE_TIMESTAMP(&end_time);
state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time);
+
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
}
static int vfs_fsync_state_destructor(struct vfswrap_fsync_state *state)
ret = pthreadpool_tevent_job_recv(subreq);
TALLOC_FREE(subreq);
- SMBPROFILE_BASIC_ASYNC_END(state->profile_basic);
+ SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
talloc_set_destructor(state, NULL);
- if (tevent_req_error(req, ret)) {
- return;
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+ * thread. Fallback to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfs_fsync_do(state);
}
tevent_req_done(req);
START_PROFILE(syscall_lseek);
- /* Cope with 'stat' file opens. */
- if (fsp->fh->fd != -1)
- result = lseek(fsp->fh->fd, offset, whence);
-
+ result = lseek(fsp->fh->fd, offset, whence);
/*
* We want to maintain the fiction that we can seek
* on a fifo for file system purposes. This allows
return result;
}
-static int vfswrap_fsync(vfs_handle_struct *handle, files_struct *fsp)
-{
-#ifdef HAVE_FSYNC
- int result;
-
- START_PROFILE(syscall_fsync);
- result = fsync(fsp->fh->fd);
- END_PROFILE(syscall_fsync);
- return result;
-#else
- return 0;
-#endif
-}
-
static int vfswrap_stat(vfs_handle_struct *handle,
struct smb_filename *smb_fname)
{
*
* but I have to check that --metze
*/
+ ssize_t ret;
struct dom_sid sid;
+ struct dom_sid_buf buf;
uid_t uid;
size_t sid_len;
/* unknown 4 bytes: this is not the length of the sid :-( */
/*unknown = IVAL(pdata,0);*/
- if (!sid_parse(_in_data + 4, sid_len, &sid)) {
+ ret = sid_parse(_in_data + 4, sid_len, &sid);
+ if (ret == -1) {
return NT_STATUS_INVALID_PARAMETER;
}
- DEBUGADD(10, ("for SID: %s\n", sid_string_dbg(&sid)));
+ DEBUGADD(10, ("for SID: %s\n",
+ dom_sid_str_buf(&sid, &buf)));
if (!sid_to_uid(&sid, &uid)) {
DEBUG(0,("sid_to_uid: failed, sid[%s] sid_len[%lu]\n",
- sid_string_dbg(&sid),
+ dom_sid_str_buf(&sid, &buf),
(unsigned long)sid_len));
uid = (-1);
}
return get_ea_dos_attribute(handle->conn, smb_fname, dosmode);
}
+/*
+ * State for the async get_dos_attributes request: holds the parameters
+ * needed to (re-)issue SMB_VFS_GETXATTRAT_SEND plus the parsed result.
+ */
+struct vfswrap_get_dos_attributes_state {
+ struct vfs_aio_state aio_state;
+ connection_struct *conn;
+ TALLOC_CTX *mem_ctx;
+ struct tevent_context *ev;
+ files_struct *dir_fsp;
+ struct smb_filename *smb_fname;
+ /* DOS attribute bits decoded from the xattr blob */
+ uint32_t dosmode;
+ /* true once the getxattr was retried as root after ACCESS_DENIED */
+ bool as_root;
+};
+
+static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq);
+
+/*
+ * Start an async fetch of the DOS attributes xattr
+ * (SAMBA_XATTR_DOS_ATTRIB) for smb_fname, relative to dir_fsp.
+ * Completion is handled in vfswrap_get_dos_attributes_getxattr_done().
+ */
+static struct tevent_req *vfswrap_get_dos_attributes_send(
+ TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct vfs_handle_struct *handle,
+ files_struct *dir_fsp,
+ struct smb_filename *smb_fname)
+{
+ struct tevent_req *req = NULL;
+ struct tevent_req *subreq = NULL;
+ struct vfswrap_get_dos_attributes_state *state = NULL;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct vfswrap_get_dos_attributes_state);
+ if (req == NULL) {
+ return NULL;
+ }
+
+ *state = (struct vfswrap_get_dos_attributes_state) {
+ .conn = dir_fsp->conn,
+ .mem_ctx = mem_ctx,
+ .ev = ev,
+ .dir_fsp = dir_fsp,
+ .smb_fname = smb_fname,
+ };
+
+ /* sizeof(fstring) is the alloc hint for the expected blob size */
+ subreq = SMB_VFS_GETXATTRAT_SEND(state,
+ ev,
+ dir_fsp,
+ smb_fname,
+ SAMBA_XATTR_DOS_ATTRIB,
+ sizeof(fstring));
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq,
+ vfswrap_get_dos_attributes_getxattr_done,
+ req);
+
+ return req;
+}
+
+/*
+ * Completion callback for the GETXATTRAT subrequest. On ACCESS_DENIED
+ * the getxattr is retried exactly once as root (state->as_root guards
+ * against looping); on success the blob is parsed into state->dosmode.
+ */
+static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req =
+ tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct vfswrap_get_dos_attributes_state *state =
+ tevent_req_data(req,
+ struct vfswrap_get_dos_attributes_state);
+ ssize_t xattr_size;
+ DATA_BLOB blob = {0};
+ NTSTATUS status;
+
+ xattr_size = SMB_VFS_GETXATTRAT_RECV(subreq,
+ &state->aio_state,
+ state,
+ &blob.data);
+ TALLOC_FREE(subreq);
+ if (xattr_size == -1) {
+ status = map_nt_error_from_unix(state->aio_state.error);
+
+ /* already retried as root: give up with the real error */
+ if (state->as_root) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+ if (!NT_STATUS_EQUAL(status, NT_STATUS_ACCESS_DENIED)) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ state->as_root = true;
+
+ /* only the _SEND needs root; drop privileges right after */
+ become_root();
+ subreq = SMB_VFS_GETXATTRAT_SEND(state,
+ state->ev,
+ state->dir_fsp,
+ state->smb_fname,
+ SAMBA_XATTR_DOS_ATTRIB,
+ sizeof(fstring));
+ unbecome_root();
+ if (tevent_req_nomem(subreq, req)) {
+ return;
+ }
+ tevent_req_set_callback(subreq,
+ vfswrap_get_dos_attributes_getxattr_done,
+ req);
+ return;
+ }
+
+ blob.length = xattr_size;
+
+ status = parse_dos_attribute_blob(state->smb_fname,
+ blob,
+ &state->dosmode);
+ if (!NT_STATUS_IS_OK(status)) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ tevent_req_done(req);
+ return;
+}
+
+/*
+ * Receive the result of vfswrap_get_dos_attributes_send(): returns the
+ * request status and, on success, copies out the aio state and dosmode.
+ */
+static NTSTATUS vfswrap_get_dos_attributes_recv(struct tevent_req *req,
+ struct vfs_aio_state *aio_state,
+ uint32_t *dosmode)
+{
+ struct vfswrap_get_dos_attributes_state *state =
+ tevent_req_data(req,
+ struct vfswrap_get_dos_attributes_state);
+ NTSTATUS status;
+
+ if (tevent_req_is_nterror(req, &status)) {
+ tevent_req_received(req);
+ return status;
+ }
+
+ *aio_state = state->aio_state;
+ *dosmode = state->dosmode;
+ tevent_req_received(req);
+ return NT_STATUS_OK;
+}
+
static NTSTATUS vfswrap_fget_dos_attributes(struct vfs_handle_struct *handle,
struct files_struct *fsp,
uint32_t *dosmode)
}
struct vfswrap_offload_write_state {
- struct tevent_context *ev;
uint8_t *buf;
bool read_lck_locked;
bool write_lck_locked;
DATA_BLOB *token;
+ struct tevent_context *src_ev;
struct files_struct *src_fsp;
off_t src_off;
+ struct tevent_context *dst_ev;
struct files_struct *dst_fsp;
off_t dst_off;
off_t to_copy;
size_t next_io_size;
};
+/*
+ * Cleanup function for the offload-write request: the copy loop may
+ * finish (or fail) while impersonating the source fsp's user, so switch
+ * back to the destination fsp's user before the request is torn down.
+ * dst_fsp is cleared so this runs at most once.
+ */
+static void vfswrap_offload_write_cleanup(struct tevent_req *req,
+ enum tevent_req_state req_state)
+{
+ struct vfswrap_offload_write_state *state = tevent_req_data(
+ req, struct vfswrap_offload_write_state);
+ bool ok;
+
+ if (state->dst_fsp == NULL) {
+ return;
+ }
+
+ ok = change_to_user_by_fsp(state->dst_fsp);
+ SMB_ASSERT(ok);
+ state->dst_fsp = NULL;
+}
+
static NTSTATUS vfswrap_offload_write_loop(struct tevent_req *req);
static struct tevent_req *vfswrap_offload_write_send(
{
struct tevent_req *req;
struct vfswrap_offload_write_state *state = NULL;
+ /* off_t is signed! */
+ off_t max_offset = INT64_MAX - to_copy;
size_t num = MIN(to_copy, COPYCHUNK_MAX_TOTAL_LEN);
files_struct *src_fsp = NULL;
NTSTATUS status;
+ bool ok;
req = tevent_req_create(mem_ctx, &state,
struct vfswrap_offload_write_state);
}
*state = (struct vfswrap_offload_write_state) {
- .ev = ev,
.token = token,
.src_off = transfer_offset,
+ .dst_ev = ev,
.dst_fsp = dest_fsp,
.dst_off = dest_off,
.to_copy = to_copy,
.remaining = to_copy,
};
+ tevent_req_set_cleanup_fn(req, vfswrap_offload_write_cleanup);
+
switch (fsctl) {
case FSCTL_SRV_COPYCHUNK:
case FSCTL_SRV_COPYCHUNK_WRITE:
return tevent_req_post(req, ev);
}
+ if (state->src_off > max_offset) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->src_off < 0) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->dst_off > max_offset) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->dst_off < 0) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+
status = vfs_offload_token_db_fetch_fsp(vfswrap_offload_ctx,
token, &src_fsp);
if (tevent_req_nterror(req, status)) {
return tevent_req_post(req, ev);
}
- state->src_fsp = src_fsp;
DBG_DEBUG("server side copy chunk of length %" PRIu64 "\n", to_copy);
return tevent_req_post(req, ev);
}
- state->buf = talloc_array(state, uint8_t, num);
- if (tevent_req_nomem(state->buf, req)) {
+ ok = change_to_user_by_fsp(src_fsp);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
return tevent_req_post(req, ev);
}
+ state->src_ev = src_fsp->conn->sconn->ev_ctx;
+ state->src_fsp = src_fsp;
+
status = vfs_stat_fsp(src_fsp);
if (tevent_req_nterror(req, status)) {
return tevent_req_post(req, ev);
}
- if (src_fsp->fsp_name->st.st_ex_size < state->src_off + num) {
+ if (src_fsp->fsp_name->st.st_ex_size < state->src_off + to_copy) {
/*
* [MS-SMB2] 3.3.5.15.6 Handling a Server-Side Data Copy Request
* If the SourceOffset or SourceOffset + Length extends beyond
return tevent_req_post(req, ev);
}
- if (src_fsp->op == NULL) {
- tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
- return tevent_req_post(req, ev);
- }
-
- if (dest_fsp->op == NULL) {
- tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ state->buf = talloc_array(state, uint8_t, num);
+ if (tevent_req_nomem(state->buf, req)) {
return tevent_req_post(req, ev);
}
struct lock_struct read_lck;
bool ok;
+ /*
+ * This is called under the context of state->src_fsp.
+ */
+
state->next_io_size = MIN(state->remaining, talloc_array_length(state->buf));
init_strict_lock_struct(state->src_fsp,
}
subreq = SMB_VFS_PREAD_SEND(state,
- state->src_fsp->conn->sconn->ev_ctx,
+ state->src_ev,
state->src_fsp,
state->buf,
state->next_io_size,
nread = SMB_VFS_PREAD_RECV(subreq, &aio_state);
TALLOC_FREE(subreq);
if (nread == -1) {
- DBG_ERR("read failed: %s\n", strerror(errno));
+ DBG_ERR("read failed: %s\n", strerror(aio_state.error));
tevent_req_nterror(req, map_nt_error_from_unix(aio_state.error));
return;
}
state->src_off += nread;
+ ok = change_to_user_by_fsp(state->dst_fsp);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return;
+ }
+
init_strict_lock_struct(state->dst_fsp,
state->dst_fsp->op->global->open_persistent_id,
state->dst_off,
}
subreq = SMB_VFS_PWRITE_SEND(state,
- state->ev,
+ state->dst_ev,
state->dst_fsp,
state->buf,
state->next_io_size,
struct vfs_aio_state aio_state;
ssize_t nwritten;
NTSTATUS status;
+ bool ok;
nwritten = SMB_VFS_PWRITE_RECV(subreq, &aio_state);
TALLOC_FREE(subreq);
if (nwritten == -1) {
- DBG_ERR("write failed: %s\n", strerror(errno));
+ DBG_ERR("write failed: %s\n", strerror(aio_state.error));
tevent_req_nterror(req, map_nt_error_from_unix(aio_state.error));
return;
}
return;
}
+ ok = change_to_user_by_fsp(state->src_fsp);
+ if (!ok) {
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return;
+ }
+
status = vfswrap_offload_write_loop(req);
if (!NT_STATUS_IS_OK(status)) {
tevent_req_nterror(req, status);
int result;
START_PROFILE(syscall_chmod);
-
- /*
- * We need to do this due to the fact that the default POSIX ACL
- * chmod modifies the ACL *mask* for the group owner, not the
- * group owner bits directly. JRA.
- */
-
-
- {
- int saved_errno = errno; /* We might get ENOSYS */
- result = SMB_VFS_CHMOD_ACL(handle->conn,
- smb_fname,
- mode);
- if (result == 0) {
- END_PROFILE(syscall_chmod);
- return result;
- }
- /* Error - return the old errno. */
- errno = saved_errno;
- }
-
result = chmod(smb_fname->base_name, mode);
END_PROFILE(syscall_chmod);
return result;
int result;
START_PROFILE(syscall_fchmod);
-
- /*
- * We need to do this due to the fact that the default POSIX ACL
- * chmod modifies the ACL *mask* for the group owner, not the
- * group owner bits directly. JRA.
- */
-
- {
- int saved_errno = errno; /* We might get ENOSYS */
- if ((result = SMB_VFS_FCHMOD_ACL(fsp, mode)) == 0) {
- END_PROFILE(syscall_fchmod);
- return result;
- }
- /* Error - return the old errno. */
- errno = saved_errno;
- }
-
#if defined(HAVE_FCHMOD)
result = fchmod(fsp->fh->fd, mode);
#else
NULL,
NULL,
0);
- if (smb_fname == NULL) {
- SAFE_FREE(result);
- }
+ /*
+ * sys_getwd() *always* returns malloced memory.
+ * We must free here to avoid leaks:
+ * BUG:https://bugzilla.samba.org/show_bug.cgi?id=13372
+ */
+ SAFE_FREE(result);
return smb_fname;
}
START_PROFILE(syscall_fcntl_lock);
- if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn),
- "smbd",
- "force process locks",
- false)) {
- op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks);
+ if (fsp->use_ofd_locks) {
+ op = map_process_lock_to_ofd_lock(op);
}
result = fcntl_lock(fsp->fh->fd, op, offset, count, type);
START_PROFILE(syscall_fcntl_getlock);
- if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn),
- "smbd",
- "force process locks",
- false)) {
- op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks);
+ if (fsp->use_ofd_locks) {
+ op = map_process_lock_to_ofd_lock(op);
}
result = fcntl_getlock(fsp->fh->fd, op, poffset, pcount, ptype, ppid);
return NT_STATUS_OK; /* Nothing to do here ... */
}
-static int vfswrap_chmod_acl(vfs_handle_struct *handle,
- const struct smb_filename *smb_fname,
- mode_t mode)
-{
-#ifdef HAVE_NO_ACL
- errno = ENOSYS;
- return -1;
-#else
- int result;
-
- START_PROFILE(chmod_acl);
- result = chmod_acl(handle->conn, smb_fname, mode);
- END_PROFILE(chmod_acl);
- return result;
-#endif
-}
-
-static int vfswrap_fchmod_acl(vfs_handle_struct *handle, files_struct *fsp, mode_t mode)
-{
-#ifdef HAVE_NO_ACL
- errno = ENOSYS;
- return -1;
-#else
- int result;
-
- START_PROFILE(fchmod_acl);
- result = fchmod_acl(fsp, mode);
- END_PROFILE(fchmod_acl);
- return result;
-#endif
-}
-
static SMB_ACL_T vfswrap_sys_acl_get_file(vfs_handle_struct *handle,
const struct smb_filename *smb_fname,
SMB_ACL_TYPE_T type,
return getxattr(smb_fname->base_name, name, value, size);
}
+/*
+ * State for the threaded getxattrat simulation. Split into request
+ * bookkeeping (ev/dir_fsp/smb_fname/req) and buffers handed to the
+ * worker thread, which must outlive the thread (see destructor note).
+ */
+struct vfswrap_getxattrat_state {
+ struct tevent_context *ev;
+ files_struct *dir_fsp;
+ const struct smb_filename *smb_fname;
+ struct tevent_req *req;
+
+ /*
+ * The following variables are talloced off "state" which is protected
+ * by a destructor and thus are guaranteed to be safe to be used in the
+ * job function in the worker thread.
+ */
+ char *name;
+ const char *xattr_name;
+ uint8_t *xattr_value;
+ struct security_unix_token *token;
+
+ ssize_t xattr_size;
+ struct vfs_aio_state vfs_aio_state;
+ SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
+};
+
+/*
+ * Returning -1 makes talloc refuse to free the state while the worker
+ * thread may still be referencing it; the completion callback resets
+ * the destructor (talloc_set_destructor(state, NULL)) once the job has
+ * been received and the state is safe to free.
+ */
+static int vfswrap_getxattrat_state_destructor(
+ struct vfswrap_getxattrat_state *state)
+{
+ return -1;
+}
+
+static void vfswrap_getxattrat_do_sync(struct tevent_req *req);
+static void vfswrap_getxattrat_do_async(void *private_data);
+static void vfswrap_getxattrat_done(struct tevent_req *subreq);
+
+/*
+ * Start a getxattrat: fetch xattr_name of smb_fname relative to
+ * dir_fsp. Runs the getxattr in a pthreadpool worker when per-thread
+ * cwd and per-thread credentials are available; otherwise falls back
+ * to synchronous processing in vfswrap_getxattrat_do_sync().
+ * alloc_hint sizes the value buffer (0 means "size query only").
+ */
+static struct tevent_req *vfswrap_getxattrat_send(
+ TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct vfs_handle_struct *handle,
+ files_struct *dir_fsp,
+ const struct smb_filename *smb_fname,
+ const char *xattr_name,
+ size_t alloc_hint)
+{
+ struct tevent_req *req = NULL;
+ struct tevent_req *subreq = NULL;
+ struct vfswrap_getxattrat_state *state = NULL;
+ size_t max_threads = 0;
+ bool have_per_thread_cwd = false;
+ bool have_per_thread_creds = false;
+ bool do_async = false;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct vfswrap_getxattrat_state);
+ if (req == NULL) {
+ return NULL;
+ }
+ *state = (struct vfswrap_getxattrat_state) {
+ .ev = ev,
+ .dir_fsp = dir_fsp,
+ .smb_fname = smb_fname,
+ .req = req,
+ };
+
+ max_threads = pthreadpool_tevent_max_threads(dir_fsp->conn->sconn->pool);
+ if (max_threads >= 1) {
+ /*
+ * We need a non sync threadpool!
+ */
+ have_per_thread_cwd = per_thread_cwd_supported();
+ }
+#ifdef HAVE_LINUX_THREAD_CREDENTIALS
+ have_per_thread_creds = true;
+#endif
+ if (have_per_thread_cwd && have_per_thread_creds) {
+ do_async = true;
+ }
+
+ SMBPROFILE_BYTES_ASYNC_START(syscall_asys_getxattrat, profile_p,
+ state->profile_bytes, 0);
+
+ /* the async path simulates getxattrat via fchdir(), so an fd is a must */
+ if (dir_fsp->fh->fd == -1) {
+ DBG_ERR("Need a valid directory fd\n");
+ tevent_req_error(req, EINVAL);
+ return tevent_req_post(req, ev);
+ }
+
+ if (alloc_hint > 0) {
+ state->xattr_value = talloc_zero_array(state,
+ uint8_t,
+ alloc_hint);
+ if (tevent_req_nomem(state->xattr_value, req)) {
+ return tevent_req_post(req, ev);
+ }
+ }
+
+ if (!do_async) {
+ vfswrap_getxattrat_do_sync(req);
+ return tevent_req_post(req, ev);
+ }
+
+ /*
+ * Now allocate all parameters from a memory context that won't go away
+ * no matter what. These paremeters will get used in threads and we
+ * can't reliably cancel threads, so all buffers passed to the threads
+ * must not be freed before all referencing threads terminate.
+ */
+
+ state->name = talloc_strdup(state, smb_fname->base_name);
+ if (tevent_req_nomem(state->name, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ state->xattr_name = talloc_strdup(state, xattr_name);
+ if (tevent_req_nomem(state->xattr_name, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ /*
+ * This is a hot codepath so at first glance one might think we should
+ * somehow optimize away the token allocation and do a
+ * talloc_reference() or similar black magic instead. But due to the
+ * talloc_stackframe pool per SMB2 request this should be a simple copy
+ * without a malloc in most cases.
+ */
+ if (geteuid() == sec_initial_uid()) {
+ state->token = root_unix_token(state);
+ } else {
+ state->token = copy_unix_token(
+ state,
+ dir_fsp->conn->session_info->unix_token);
+ }
+ if (tevent_req_nomem(state->token, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+
+ subreq = pthreadpool_tevent_job_send(
+ state,
+ ev,
+ dir_fsp->conn->sconn->pool,
+ vfswrap_getxattrat_do_async,
+ state);
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq, vfswrap_getxattrat_done, req);
+
+ /* protect state from talloc_free while the thread runs; see destructor */
+ talloc_set_destructor(state, vfswrap_getxattrat_state_destructor);
+
+ return req;
+}
+
+/*
+ * Synchronous fallback for getxattrat: build the full path of
+ * smb_fname relative to the directory fsp and call getxattr() directly
+ * in the calling thread. Used when no usable threadpool exists or when
+ * thread creation failed (EAGAIN from pthreadpool_tevent_job_recv()).
+ */
+static void vfswrap_getxattrat_do_sync(struct tevent_req *req)
+{
+ /*
+ * req is a struct tevent_req, so the state must be fetched with
+ * tevent_req_data(); talloc_get_type_abort() on req would abort.
+ */
+ struct vfswrap_getxattrat_state *state = tevent_req_data(
+ req, struct vfswrap_getxattrat_state);
+ char *path = NULL;
+ char *tofree = NULL;
+ char pathbuf[PATH_MAX+1];
+ ssize_t pathlen; /* signed: full_path_tos() returns -1 on failure */
+ int err;
+
+ pathlen = full_path_tos(state->dir_fsp->fsp_name->base_name,
+ state->smb_fname->base_name,
+ pathbuf,
+ sizeof(pathbuf),
+ &path,
+ &tofree);
+ if (pathlen == -1) {
+ tevent_req_error(req, ENOMEM);
+ return;
+ }
+
+ state->xattr_size = getxattr(path,
+ state->xattr_name,
+ state->xattr_value,
+ talloc_array_length(state->xattr_value));
+ /* save errno before TALLOC_FREE() can clobber it */
+ err = errno;
+ TALLOC_FREE(tofree);
+ if (state->xattr_size == -1) {
+ tevent_req_error(req, err);
+ return;
+ }
+
+ tevent_req_done(req);
+ return;
+}
+
+/*
+ * Worker-thread job: simulate getxattrat() by switching this thread's
+ * credentials and cwd to the directory fd, then calling getxattr().
+ * Results/errno go into state; completion is reported by the pool.
+ */
+static void vfswrap_getxattrat_do_async(void *private_data)
+{
+ struct vfswrap_getxattrat_state *state = talloc_get_type_abort(
+ private_data, struct vfswrap_getxattrat_state);
+ struct timespec start_time;
+ struct timespec end_time;
+ int ret;
+
+ PROFILE_TIMESTAMP(&start_time);
+ SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
+
+ /*
+ * Here we simulate a getxattrat()
+ * call using fchdir();getxattr()
+ */
+
+ per_thread_cwd_activate();
+
+ /* Become the correct credential on this thread. */
+ ret = set_thread_credentials(state->token->uid,
+ state->token->gid,
+ (size_t)state->token->ngroups,
+ state->token->groups);
+ if (ret != 0) {
+ state->xattr_size = -1;
+ state->vfs_aio_state.error = errno;
+ goto end_profile;
+ }
+
+ ret = fchdir(state->dir_fsp->fh->fd);
+ if (ret == -1) {
+ state->xattr_size = -1;
+ state->vfs_aio_state.error = errno;
+ goto end_profile;
+ }
+
+ state->xattr_size = getxattr(state->name,
+ state->xattr_name,
+ state->xattr_value,
+ talloc_array_length(state->xattr_value));
+ if (state->xattr_size == -1) {
+ state->vfs_aio_state.error = errno;
+ }
+
+end_profile:
+ PROFILE_TIMESTAMP(&end_time);
+ state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time);
+ SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+}
+
+/*
+ * Main-thread completion of the threadpool job: re-impersonate the
+ * user, collect the job result, fall back to sync processing on EAGAIN
+ * (thread creation failed), and shrink the value buffer to the actual
+ * xattr size.
+ */
+static void vfswrap_getxattrat_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ struct vfswrap_getxattrat_state *state = tevent_req_data(
+ req, struct vfswrap_getxattrat_state);
+ int ret;
+ bool ok;
+
+ /*
+ * Make sure we run as the user again
+ */
+ ok = change_to_user_by_fsp(state->dir_fsp);
+ SMB_ASSERT(ok);
+
+ ret = pthreadpool_tevent_job_recv(subreq);
+ TALLOC_FREE(subreq);
+ SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
+ /* thread is done: allow the state to be freed again */
+ talloc_set_destructor(state, NULL);
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+ * thread. Fallback to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfswrap_getxattrat_do_sync(req);
+ return;
+ }
+
+ if (state->xattr_size == -1) {
+ tevent_req_error(req, state->vfs_aio_state.error);
+ return;
+ }
+
+ if (state->xattr_value == NULL) {
+ /*
+ * The caller only wanted the size.
+ */
+ tevent_req_done(req);
+ return;
+ }
+
+ /*
+ * shrink the buffer to the returned size.
+ * (can't fail). It means NULL if size is 0.
+ */
+ state->xattr_value = talloc_realloc(state,
+ state->xattr_value,
+ uint8_t,
+ state->xattr_size);
+
+ tevent_req_done(req);
+}
+
+/*
+ * Receive the getxattrat result: returns the xattr size (or -1 with
+ * the error in aio_state->error) and, if requested, hands ownership of
+ * the value buffer to mem_ctx via talloc_move().
+ */
+static ssize_t vfswrap_getxattrat_recv(struct tevent_req *req,
+ struct vfs_aio_state *aio_state,
+ TALLOC_CTX *mem_ctx,
+ uint8_t **xattr_value)
+{
+ struct vfswrap_getxattrat_state *state = tevent_req_data(
+ req, struct vfswrap_getxattrat_state);
+ ssize_t xattr_size;
+
+ if (tevent_req_is_unix_error(req, &aio_state->error)) {
+ tevent_req_received(req);
+ return -1;
+ }
+
+ *aio_state = state->vfs_aio_state;
+ xattr_size = state->xattr_size;
+ if (xattr_value != NULL) {
+ *xattr_value = talloc_move(mem_ctx, &state->xattr_value);
+ }
+
+ tevent_req_received(req);
+ return xattr_size;
+}
+
static ssize_t vfswrap_fgetxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, const char *name, void *value, size_t size)
{
return fgetxattr(fsp->fh->fd, name, value, size);
.open_fn = vfswrap_open,
.create_file_fn = vfswrap_create_file,
.close_fn = vfswrap_close,
- .read_fn = vfswrap_read,
.pread_fn = vfswrap_pread,
.pread_send_fn = vfswrap_pread_send,
.pread_recv_fn = vfswrap_pread_recv,
- .write_fn = vfswrap_write,
.pwrite_fn = vfswrap_pwrite,
.pwrite_send_fn = vfswrap_pwrite_send,
.pwrite_recv_fn = vfswrap_pwrite_recv,
.sendfile_fn = vfswrap_sendfile,
.recvfile_fn = vfswrap_recvfile,
.rename_fn = vfswrap_rename,
- .fsync_fn = vfswrap_fsync,
.fsync_send_fn = vfswrap_fsync_send,
.fsync_recv_fn = vfswrap_fsync_recv,
.stat_fn = vfswrap_stat,
.set_dos_attributes_fn = vfswrap_set_dos_attributes,
.fset_dos_attributes_fn = vfswrap_fset_dos_attributes,
.get_dos_attributes_fn = vfswrap_get_dos_attributes,
+ .get_dos_attributes_send_fn = vfswrap_get_dos_attributes_send,
+ .get_dos_attributes_recv_fn = vfswrap_get_dos_attributes_recv,
.fget_dos_attributes_fn = vfswrap_fget_dos_attributes,
.offload_read_send_fn = vfswrap_offload_read_send,
.offload_read_recv_fn = vfswrap_offload_read_recv,
/* POSIX ACL operations. */
- .chmod_acl_fn = vfswrap_chmod_acl,
- .fchmod_acl_fn = vfswrap_fchmod_acl,
-
.sys_acl_get_file_fn = vfswrap_sys_acl_get_file,
.sys_acl_get_fd_fn = vfswrap_sys_acl_get_fd,
.sys_acl_blob_get_file_fn = posix_sys_acl_blob_get_file,
/* EA operations. */
.getxattr_fn = vfswrap_getxattr,
+ .getxattrat_send_fn = vfswrap_getxattrat_send,
+ .getxattrat_recv_fn = vfswrap_getxattrat_recv,
.fgetxattr_fn = vfswrap_fgetxattr,
.listxattr_fn = vfswrap_listxattr,
.flistxattr_fn = vfswrap_flistxattr,
static_decl_vfs;
NTSTATUS vfs_default_init(TALLOC_CTX *ctx)
{
+ /*
+ * Here we need to implement every call!
+ *
+ * As this is the end of the vfs module chain.
+ */
+ /* panics at startup if any required fn pointer is missing */
+ smb_vfs_assert_all_fns(&vfs_default_fns, DEFAULT_VFS_MODULE_NAME);
return smb_register_vfs(SMB_VFS_INTERFACE_VERSION,
DEFAULT_VFS_MODULE_NAME, &vfs_default_fns);
}