TALLOC_FREE(subreq);
SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
talloc_set_destructor(state, NULL);
- if (tevent_req_error(req, ret)) {
- return;
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+ * thread. Fallback to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfs_pread_do(state);
}
tevent_req_done(req);
TALLOC_FREE(subreq);
SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
talloc_set_destructor(state, NULL);
- if (tevent_req_error(req, ret)) {
- return;
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+ * thread. Fallback to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfs_pwrite_do(state);
}
tevent_req_done(req);
TALLOC_FREE(subreq);
SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
talloc_set_destructor(state, NULL);
- if (tevent_req_error(req, ret)) {
- return;
+ if (ret != 0) {
+ if (ret != EAGAIN) {
+ tevent_req_error(req, ret);
+ return;
+ }
+ /*
+ * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+ * means the lower level pthreadpool failed to create a new
+ * thread. Fallback to sync processing in that case to allow
+ * some progress for the client.
+ */
+ vfs_fsync_do(state);
}
tevent_req_done(req);
START_PROFILE(syscall_lseek);
- /* Cope with 'stat' file opens. */
- if (fsp->fh->fd != -1)
- result = lseek(fsp->fh->fd, offset, whence);
-
+ result = lseek(fsp->fh->fd, offset, whence);
/*
* We want to maintain the fiction that we can seek
* on a fifo for file system purposes. This allows
*
* but I have to check that --metze
*/
+ ssize_t ret;
struct dom_sid sid;
+ struct dom_sid_buf buf;
uid_t uid;
size_t sid_len;
/* unknown 4 bytes: this is not the length of the sid :-( */
/*unknown = IVAL(pdata,0);*/
- if (!sid_parse(_in_data + 4, sid_len, &sid)) {
+ ret = sid_parse(_in_data + 4, sid_len, &sid);
+ if (ret == -1) {
return NT_STATUS_INVALID_PARAMETER;
}
- DEBUGADD(10, ("for SID: %s\n", sid_string_dbg(&sid)));
+ DEBUGADD(10, ("for SID: %s\n",
+ dom_sid_str_buf(&sid, &buf)));
if (!sid_to_uid(&sid, &uid)) {
DEBUG(0,("sid_to_uid: failed, sid[%s] sid_len[%lu]\n",
- sid_string_dbg(&sid),
+ dom_sid_str_buf(&sid, &buf),
(unsigned long)sid_len));
uid = (-1);
}
return get_ea_dos_attribute(handle->conn, smb_fname, dosmode);
}
+/*
+ * Async state for SMB_VFS_GET_DOS_ATTRIBUTES_SEND/RECV. Keeps the
+ * original request parameters so the getxattr step can be re-issued
+ * as root when the first attempt fails with ACCESS_DENIED.
+ */
+struct vfswrap_get_dos_attributes_state {
+	struct vfs_aio_state aio_state;	/* error/duration from the xattr call */
+	connection_struct *conn;
+	TALLOC_CTX *mem_ctx;
+	struct tevent_context *ev;
+	files_struct *dir_fsp;		/* directory handle, *AT semantics */
+	struct smb_filename *smb_fname;	/* name relative to dir_fsp */
+	uint32_t dosmode;		/* parsed result, handed out by recv */
+	bool as_root;			/* true once we retried as root */
+};
+
+static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq);
+
+/*
+ * Async SMB_VFS_GET_DOS_ATTRIBUTES implementation: fetch the
+ * SAMBA_XATTR_DOS_ATTRIB EA relative to dir_fsp via the async
+ * getxattrat machinery, then parse it in the completion callback.
+ */
+static struct tevent_req *vfswrap_get_dos_attributes_send(
+			TALLOC_CTX *mem_ctx,
+			struct tevent_context *ev,
+			struct vfs_handle_struct *handle,
+			files_struct *dir_fsp,
+			struct smb_filename *smb_fname)
+{
+	struct tevent_req *req = NULL;
+	struct tevent_req *subreq = NULL;
+	struct vfswrap_get_dos_attributes_state *state = NULL;
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct vfswrap_get_dos_attributes_state);
+	if (req == NULL) {
+		return NULL;
+	}
+
+	*state = (struct vfswrap_get_dos_attributes_state) {
+		.conn = dir_fsp->conn,
+		.mem_ctx = mem_ctx,
+		.ev = ev,
+		.dir_fsp = dir_fsp,
+		.smb_fname = smb_fname,
+	};
+
+	/* sizeof(fstring) is only an allocation hint for the EA value. */
+	subreq = SMB_VFS_GETXATTRAT_SEND(state,
+					 ev,
+					 dir_fsp,
+					 smb_fname,
+					 SAMBA_XATTR_DOS_ATTRIB,
+					 sizeof(fstring));
+	if (tevent_req_nomem(subreq, req)) {
+		return tevent_req_post(req, ev);
+	}
+	tevent_req_set_callback(subreq,
+				vfswrap_get_dos_attributes_getxattr_done,
+				req);
+
+	return req;
+}
+
+/*
+ * Completion of the async getxattrat(): on ACCESS_DENIED retry the
+ * fetch exactly once as root, otherwise parse the EA blob into the
+ * dosmode result.
+ */
+static void vfswrap_get_dos_attributes_getxattr_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req =
+		tevent_req_callback_data(subreq,
+		struct tevent_req);
+	struct vfswrap_get_dos_attributes_state *state =
+		tevent_req_data(req,
+		struct vfswrap_get_dos_attributes_state);
+	ssize_t xattr_size;
+	DATA_BLOB blob = {0};
+	NTSTATUS status;
+
+	xattr_size = SMB_VFS_GETXATTRAT_RECV(subreq,
+					     &state->aio_state,
+					     state,
+					     &blob.data);
+	TALLOC_FREE(subreq);
+	if (xattr_size == -1) {
+		status = map_nt_error_from_unix(state->aio_state.error);
+
+		/* Don't loop: give up if the root retry also failed. */
+		if (state->as_root) {
+			tevent_req_nterror(req, status);
+			return;
+		}
+		if (!NT_STATUS_EQUAL(status, NT_STATUS_ACCESS_DENIED)) {
+			tevent_req_nterror(req, status);
+			return;
+		}
+
+		state->as_root = true;
+
+		/*
+		 * Only the _SEND needs to run as root: the credentials
+		 * are captured at send time (see vfswrap_getxattrat_send).
+		 */
+		become_root();
+		subreq = SMB_VFS_GETXATTRAT_SEND(state,
+						 state->ev,
+						 state->dir_fsp,
+						 state->smb_fname,
+						 SAMBA_XATTR_DOS_ATTRIB,
+						 sizeof(fstring));
+		unbecome_root();
+		if (tevent_req_nomem(subreq, req)) {
+			return;
+		}
+		tevent_req_set_callback(subreq,
+					vfswrap_get_dos_attributes_getxattr_done,
+					req);
+		return;
+	}
+
+	blob.length = xattr_size;
+
+	status = parse_dos_attribute_blob(state->smb_fname,
+					  blob,
+					  &state->dosmode);
+	if (!NT_STATUS_IS_OK(status)) {
+		tevent_req_nterror(req, status);
+		return;
+	}
+
+	tevent_req_done(req);
+	return;
+}
+
+/*
+ * Receive the result of vfswrap_get_dos_attributes_send(): the parsed
+ * dosmode plus the vfs_aio_state (error/duration) of the xattr call.
+ */
+static NTSTATUS vfswrap_get_dos_attributes_recv(struct tevent_req *req,
+						struct vfs_aio_state *aio_state,
+						uint32_t *dosmode)
+{
+	struct vfswrap_get_dos_attributes_state *state =
+		tevent_req_data(req,
+		struct vfswrap_get_dos_attributes_state);
+	NTSTATUS status;
+
+	if (tevent_req_is_nterror(req, &status)) {
+		tevent_req_received(req);
+		return status;
+	}
+
+	*aio_state = state->aio_state;
+	*dosmode = state->dosmode;
+	tevent_req_received(req);
+	return NT_STATUS_OK;
+}
+
static NTSTATUS vfswrap_fget_dos_attributes(struct vfs_handle_struct *handle,
struct files_struct *fsp,
uint32_t *dosmode)
{
struct tevent_req *req;
struct vfswrap_offload_write_state *state = NULL;
+ /* off_t is signed! */
+ off_t max_offset = INT64_MAX - to_copy;
size_t num = MIN(to_copy, COPYCHUNK_MAX_TOTAL_LEN);
files_struct *src_fsp = NULL;
NTSTATUS status;
return tevent_req_post(req, ev);
}
+ if (state->src_off > max_offset) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->src_off < 0) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->dst_off > max_offset) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+ if (state->dst_off < 0) {
+ /*
+ * Protect integer checks below.
+ */
+ tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
+ return tevent_req_post(req, ev);
+ }
+
status = vfs_offload_token_db_fetch_fsp(vfswrap_offload_ctx,
token, &src_fsp);
if (tevent_req_nterror(req, status)) {
return tevent_req_post(req, ev);
}
- state->src_ev = src_fsp->conn->user_ev_ctx;
+ state->src_ev = src_fsp->conn->sconn->ev_ctx;
state->src_fsp = src_fsp;
- state->buf = talloc_array(state, uint8_t, num);
- if (tevent_req_nomem(state->buf, req)) {
- return tevent_req_post(req, ev);
- }
-
status = vfs_stat_fsp(src_fsp);
if (tevent_req_nterror(req, status)) {
return tevent_req_post(req, ev);
}
- if (src_fsp->fsp_name->st.st_ex_size < state->src_off + num) {
+ if (src_fsp->fsp_name->st.st_ex_size < state->src_off + to_copy) {
/*
* [MS-SMB2] 3.3.5.15.6 Handling a Server-Side Data Copy Request
* If the SourceOffset or SourceOffset + Length extends beyond
return tevent_req_post(req, ev);
}
+ state->buf = talloc_array(state, uint8_t, num);
+ if (tevent_req_nomem(state->buf, req)) {
+ return tevent_req_post(req, ev);
+ }
+
status = vfswrap_offload_write_loop(req);
if (!NT_STATUS_IS_OK(status)) {
tevent_req_nterror(req, status);
nread = SMB_VFS_PREAD_RECV(subreq, &aio_state);
TALLOC_FREE(subreq);
if (nread == -1) {
- DBG_ERR("read failed: %s\n", strerror(errno));
+ DBG_ERR("read failed: %s\n", strerror(aio_state.error));
tevent_req_nterror(req, map_nt_error_from_unix(aio_state.error));
return;
}
nwritten = SMB_VFS_PWRITE_RECV(subreq, &aio_state);
TALLOC_FREE(subreq);
if (nwritten == -1) {
- DBG_ERR("write failed: %s\n", strerror(errno));
+ DBG_ERR("write failed: %s\n", strerror(aio_state.error));
tevent_req_nterror(req, map_nt_error_from_unix(aio_state.error));
return;
}
START_PROFILE(syscall_fcntl_lock);
- if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn),
- "smbd",
- "force process locks",
- false)) {
- op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks);
+ if (fsp->use_ofd_locks) {
+ op = map_process_lock_to_ofd_lock(op);
}
result = fcntl_lock(fsp->fh->fd, op, offset, count, type);
START_PROFILE(syscall_fcntl_getlock);
- if (fsp->use_ofd_locks || !lp_parm_bool(SNUM(fsp->conn),
- "smbd",
- "force process locks",
- false)) {
- op = map_process_lock_to_ofd_lock(op, &fsp->use_ofd_locks);
+ if (fsp->use_ofd_locks) {
+ op = map_process_lock_to_ofd_lock(op);
}
result = fcntl_getlock(fsp->fh->fd, op, poffset, pcount, ptype, ppid);
return getxattr(smb_fname->base_name, name, value, size);
}
+/*
+ * State for the threaded getxattrat() simulation: the job runs
+ * fchdir() + getxattr() on a pthreadpool worker thread.
+ */
+struct vfswrap_getxattrat_state {
+	struct tevent_context *ev;
+	files_struct *dir_fsp;
+	const struct smb_filename *smb_fname;
+	struct tevent_req *req;
+
+	/*
+	 * The following variables are talloced off "state" which is protected
+	 * by a destructor and thus are guaranteed to be safe to be used in the
+	 * job function in the worker thread.
+	 */
+	char *name;
+	const char *xattr_name;
+	uint8_t *xattr_value;
+	struct security_unix_token *token;
+
+	ssize_t xattr_size;	/* -1 on error, see vfs_aio_state.error */
+	struct vfs_aio_state vfs_aio_state;
+	SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
+};
+
+/*
+ * Returning -1 denies talloc_free() on the state while a worker
+ * thread may still reference it. The destructor is reset to NULL
+ * in the job completion callback once the thread has finished.
+ */
+static int vfswrap_getxattrat_state_destructor(
+		struct vfswrap_getxattrat_state *state)
+{
+	return -1;
+}
+
+static void vfswrap_getxattrat_do_sync(struct tevent_req *req);
+static void vfswrap_getxattrat_do_async(void *private_data);
+static void vfswrap_getxattrat_done(struct tevent_req *subreq);
+
+/*
+ * Async getxattrat(): run getxattr() relative to dir_fsp on a
+ * pthreadpool worker thread when the pool supports real threads with
+ * per-thread cwd and per-thread credentials; otherwise fall back to
+ * synchronous processing in this thread.
+ */
+static struct tevent_req *vfswrap_getxattrat_send(
+			TALLOC_CTX *mem_ctx,
+			struct tevent_context *ev,
+			struct vfs_handle_struct *handle,
+			files_struct *dir_fsp,
+			const struct smb_filename *smb_fname,
+			const char *xattr_name,
+			size_t alloc_hint)
+{
+	struct tevent_req *req = NULL;
+	struct tevent_req *subreq = NULL;
+	struct vfswrap_getxattrat_state *state = NULL;
+	size_t max_threads = 0;
+	bool have_per_thread_cwd = false;
+	bool have_per_thread_creds = false;
+	bool do_async = false;
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct vfswrap_getxattrat_state);
+	if (req == NULL) {
+		return NULL;
+	}
+	*state = (struct vfswrap_getxattrat_state) {
+		.ev = ev,
+		.dir_fsp = dir_fsp,
+		.smb_fname = smb_fname,
+		.req = req,
+	};
+
+	max_threads = pthreadpool_tevent_max_threads(dir_fsp->conn->sconn->pool);
+	if (max_threads >= 1) {
+		/*
+		 * We need a non sync threadpool!
+		 */
+		have_per_thread_cwd = per_thread_cwd_supported();
+	}
+#ifdef HAVE_LINUX_THREAD_CREDENTIALS
+	have_per_thread_creds = true;
+#endif
+	if (have_per_thread_cwd && have_per_thread_creds) {
+		do_async = true;
+	}
+
+	SMBPROFILE_BYTES_ASYNC_START(syscall_asys_getxattrat, profile_p,
+				     state->profile_bytes, 0);
+
+	/* The fchdir() based emulation needs a real directory fd. */
+	if (dir_fsp->fh->fd == -1) {
+		DBG_ERR("Need a valid directory fd\n");
+		tevent_req_error(req, EINVAL);
+		return tevent_req_post(req, ev);
+	}
+
+	/* alloc_hint == 0 means the caller only wants the EA size. */
+	if (alloc_hint > 0) {
+		state->xattr_value = talloc_zero_array(state,
+						       uint8_t,
+						       alloc_hint);
+		if (tevent_req_nomem(state->xattr_value, req)) {
+			return tevent_req_post(req, ev);
+		}
+	}
+
+	if (!do_async) {
+		vfswrap_getxattrat_do_sync(req);
+		return tevent_req_post(req, ev);
+	}
+
+	/*
+	 * Now allocate all parameters from a memory context that won't go away
+	 * no matter what. These parameters will get used in threads and we
+	 * can't reliably cancel threads, so all buffers passed to the threads
+	 * must not be freed before all referencing threads terminate.
+	 */
+
+	state->name = talloc_strdup(state, smb_fname->base_name);
+	if (tevent_req_nomem(state->name, req)) {
+		return tevent_req_post(req, ev);
+	}
+
+	state->xattr_name = talloc_strdup(state, xattr_name);
+	if (tevent_req_nomem(state->xattr_name, req)) {
+		return tevent_req_post(req, ev);
+	}
+
+	/*
+	 * This is a hot codepath so at first glance one might think we should
+	 * somehow optimize away the token allocation and do a
+	 * talloc_reference() or similar black magic instead. But due to the
+	 * talloc_stackframe pool per SMB2 request this should be a simple copy
+	 * without a malloc in most cases.
+	 */
+	if (geteuid() == sec_initial_uid()) {
+		state->token = root_unix_token(state);
+	} else {
+		state->token = copy_unix_token(
+				state,
+				dir_fsp->conn->session_info->unix_token);
+	}
+	if (tevent_req_nomem(state->token, req)) {
+		return tevent_req_post(req, ev);
+	}
+
+	SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+
+	subreq = pthreadpool_tevent_job_send(
+			state,
+			ev,
+			dir_fsp->conn->sconn->pool,
+			vfswrap_getxattrat_do_async,
+			state);
+	if (tevent_req_nomem(subreq, req)) {
+		return tevent_req_post(req, ev);
+	}
+	tevent_req_set_callback(subreq, vfswrap_getxattrat_done, req);
+
+	/* Deny talloc_free() on state while the worker thread may use it. */
+	talloc_set_destructor(state, vfswrap_getxattrat_state_destructor);
+
+	return req;
+}
+
+/*
+ * Synchronous fallback for the async getxattrat(): build the full
+ * path below dir_fsp and call getxattr() inline, completing the
+ * request immediately. Also used when the threadpool returns EAGAIN.
+ */
+static void vfswrap_getxattrat_do_sync(struct tevent_req *req)
+{
+	/*
+	 * The state hangs off req as tevent_req private data, so it
+	 * must be fetched with tevent_req_data(). Calling
+	 * talloc_get_type_abort() directly on req would abort: req's
+	 * talloc type is "struct tevent_req", not our state struct.
+	 */
+	struct vfswrap_getxattrat_state *state = tevent_req_data(
+		req, struct vfswrap_getxattrat_state);
+	char *path = NULL;
+	char *tofree = NULL;
+	char pathbuf[PATH_MAX+1];
+	ssize_t pathlen;	/* signed: full_path_tos() returns -1 on error */
+	int err;
+
+	pathlen = full_path_tos(state->dir_fsp->fsp_name->base_name,
+				state->smb_fname->base_name,
+				pathbuf,
+				sizeof(pathbuf),
+				&path,
+				&tofree);
+	if (pathlen == -1) {
+		tevent_req_error(req, ENOMEM);
+		return;
+	}
+
+	state->xattr_size = getxattr(path,
+				     state->xattr_name,
+				     state->xattr_value,
+				     talloc_array_length(state->xattr_value));
+	/* Preserve errno across the TALLOC_FREE() below. */
+	err = errno;
+	TALLOC_FREE(tofree);
+	if (state->xattr_size == -1) {
+		tevent_req_error(req, err);
+		return;
+	}
+
+	tevent_req_done(req);
+	return;
+}
+
+/*
+ * Worker-thread job function: emulate getxattrat() by activating the
+ * per-thread cwd, switching to the caller's unix token via per-thread
+ * credentials, then fchdir() into the directory and getxattr().
+ * Errors are reported through state->xattr_size/vfs_aio_state.error.
+ */
+static void vfswrap_getxattrat_do_async(void *private_data)
+{
+	/* private_data really is the state, so the type check is safe here. */
+	struct vfswrap_getxattrat_state *state = talloc_get_type_abort(
+		private_data, struct vfswrap_getxattrat_state);
+	struct timespec start_time;
+	struct timespec end_time;
+	int ret;
+
+	PROFILE_TIMESTAMP(&start_time);
+	SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
+
+	/*
+	 * Here we simulate a getxattrat()
+	 * call using fchdir();getxattr()
+	 */
+
+	per_thread_cwd_activate();
+
+	/* Become the correct credential on this thread. */
+	ret = set_thread_credentials(state->token->uid,
+				     state->token->gid,
+				     (size_t)state->token->ngroups,
+				     state->token->groups);
+	if (ret != 0) {
+		state->xattr_size = -1;
+		state->vfs_aio_state.error = errno;
+		goto end_profile;
+	}
+
+	ret = fchdir(state->dir_fsp->fh->fd);
+	if (ret == -1) {
+		state->xattr_size = -1;
+		state->vfs_aio_state.error = errno;
+		goto end_profile;
+	}
+
+	state->xattr_size = getxattr(state->name,
+				     state->xattr_name,
+				     state->xattr_value,
+				     talloc_array_length(state->xattr_value));
+	if (state->xattr_size == -1) {
+		state->vfs_aio_state.error = errno;
+	}
+
+end_profile:
+	PROFILE_TIMESTAMP(&end_time);
+	state->vfs_aio_state.duration = nsec_time_diff(&end_time, &start_time);
+	SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+}
+
+/*
+ * Completion callback for the threadpool job: re-impersonate the
+ * user, collect the job result, fall back to sync processing on
+ * EAGAIN, and shrink the value buffer to the actual EA size.
+ */
+static void vfswrap_getxattrat_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct vfswrap_getxattrat_state *state = tevent_req_data(
+		req, struct vfswrap_getxattrat_state);
+	int ret;
+	bool ok;
+
+	/*
+	 * Make sure we run as the user again
+	 */
+	ok = change_to_user_by_fsp(state->dir_fsp);
+	SMB_ASSERT(ok);
+
+	ret = pthreadpool_tevent_job_recv(subreq);
+	TALLOC_FREE(subreq);
+	SMBPROFILE_BYTES_ASYNC_END(state->profile_bytes);
+	/* The thread is gone: allow talloc_free() on state again. */
+	talloc_set_destructor(state, NULL);
+	if (ret != 0) {
+		if (ret != EAGAIN) {
+			tevent_req_error(req, ret);
+			return;
+		}
+		/*
+		 * If we get EAGAIN from pthreadpool_tevent_job_recv() this
+		 * means the lower level pthreadpool failed to create a new
+		 * thread. Fallback to sync processing in that case to allow
+		 * some progress for the client.
+		 */
+		vfswrap_getxattrat_do_sync(req);
+		return;
+	}
+
+	if (state->xattr_size == -1) {
+		tevent_req_error(req, state->vfs_aio_state.error);
+		return;
+	}
+
+	if (state->xattr_value == NULL) {
+		/*
+		 * The caller only wanted the size.
+		 */
+		tevent_req_done(req);
+		return;
+	}
+
+	/*
+	 * shrink the buffer to the returned size.
+	 * (can't fail). It means NULL if size is 0.
+	 */
+	state->xattr_value = talloc_realloc(state,
+					    state->xattr_value,
+					    uint8_t,
+					    state->xattr_size);
+
+	tevent_req_done(req);
+}
+
+/*
+ * Receive the async getxattrat() result: returns the EA size (or -1
+ * with aio_state->error set) and, if requested, moves the value
+ * buffer onto the caller's memory context.
+ */
+static ssize_t vfswrap_getxattrat_recv(struct tevent_req *req,
+				       struct vfs_aio_state *aio_state,
+				       TALLOC_CTX *mem_ctx,
+				       uint8_t **xattr_value)
+{
+	struct vfswrap_getxattrat_state *state = tevent_req_data(
+		req, struct vfswrap_getxattrat_state);
+	ssize_t xattr_size;
+
+	if (tevent_req_is_unix_error(req, &aio_state->error)) {
+		tevent_req_received(req);
+		return -1;
+	}
+
+	*aio_state = state->vfs_aio_state;
+	xattr_size = state->xattr_size;
+	if (xattr_value != NULL) {
+		/* Ownership of the value buffer moves to mem_ctx. */
+		*xattr_value = talloc_move(mem_ctx, &state->xattr_value);
+	}
+
+	tevent_req_received(req);
+	return xattr_size;
+}
+
static ssize_t vfswrap_fgetxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, const char *name, void *value, size_t size)
{
return fgetxattr(fsp->fh->fd, name, value, size);
.set_dos_attributes_fn = vfswrap_set_dos_attributes,
.fset_dos_attributes_fn = vfswrap_fset_dos_attributes,
.get_dos_attributes_fn = vfswrap_get_dos_attributes,
+ .get_dos_attributes_send_fn = vfswrap_get_dos_attributes_send,
+ .get_dos_attributes_recv_fn = vfswrap_get_dos_attributes_recv,
.fget_dos_attributes_fn = vfswrap_fget_dos_attributes,
.offload_read_send_fn = vfswrap_offload_read_send,
.offload_read_recv_fn = vfswrap_offload_read_recv,
/* EA operations. */
.getxattr_fn = vfswrap_getxattr,
+ .getxattrat_send_fn = vfswrap_getxattrat_send,
+ .getxattrat_recv_fn = vfswrap_getxattrat_recv,
.fgetxattr_fn = vfswrap_fgetxattr,
.listxattr_fn = vfswrap_listxattr,
.flistxattr_fn = vfswrap_flistxattr,