* opens and just terminates smbd if the async open times out.
*/
struct tevent_timer *te;
+
+ /*
+ * For the samba kernel oplock case we use both a timeout and
+ * a watch on locking.tdb. This way in case it's smbd holding
+ * the kernel oplock we get directly notified for the retry
+ * once the kernel oplock is properly broken. Store the req
+ * here so that it can be timely discarded once the timer
+ * above fires.
+ */
+ struct tevent_req *watch_req;
};
/****************************************************************************
}
/* Read the link target. */
- link_len = SMB_VFS_READLINK(conn,
+ link_len = SMB_VFS_READLINKAT(conn,
+ conn->cwd_fsp,
smb_fname,
link_target,
PATH_MAX - 1);
char *parent_dir = NULL;
struct smb_filename parent_dir_fname = {0};
const char *final_component = NULL;
- bool is_directory = false;
bool ok;
-#ifdef O_DIRECTORY
- if (flags & O_DIRECTORY) {
- is_directory = true;
- }
-#endif
-
- if (is_directory) {
+ if (fsp->is_directory) {
parent_dir = talloc_strdup(talloc_tos(), smb_fname->base_name);
if (parent_dir == NULL) {
saved_errno = errno;
bool file_existed = VALID_STAT(fsp->fsp_name->st);
int curr_flags;
- *file_created = false;
-
if (!(flags & O_CREAT)) {
/*
* We're not creating the file, just pass through.
*/
- return fd_open(conn, fsp, flags, mode);
+ status = fd_open(conn, fsp, flags, mode);
+ *file_created = false;
+ return status;
}
if (flags & O_EXCL) {
* mapped from the ELOOP POSIX error.
*/
- curr_flags = flags;
-
if (file_existed) {
- curr_flags &= ~(O_CREAT);
+ curr_flags = flags & ~(O_CREAT);
retry_status = NT_STATUS_OBJECT_NAME_NOT_FOUND;
} else {
- curr_flags |= O_EXCL;
+ curr_flags = flags | O_EXCL;
retry_status = NT_STATUS_OBJECT_NAME_COLLISION;
}
status = fd_open(conn, fsp, curr_flags, mode);
if (NT_STATUS_IS_OK(status)) {
- if (!file_existed) {
- *file_created = true;
- }
+ *file_created = !file_existed;
return NT_STATUS_OK;
}
- if (!NT_STATUS_EQUAL(status, retry_status)) {
- return status;
- }
+ if (NT_STATUS_EQUAL(status, retry_status)) {
- curr_flags = flags;
+ file_existed = !file_existed;
- /*
- * Keep file_existed up to date for clarity.
- */
- if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND)) {
- file_existed = false;
- curr_flags |= O_EXCL;
- DBG_DEBUG("file %s did not exist. Retry.\n",
- smb_fname_str_dbg(fsp->fsp_name));
- } else {
- file_existed = true;
- curr_flags &= ~(O_CREAT);
- DBG_DEBUG("file %s existed. Retry.\n",
- smb_fname_str_dbg(fsp->fsp_name));
- }
+ DBG_DEBUG("File %s %s. Retry.\n",
+ fsp_str_dbg(fsp),
+ file_existed ? "existed" : "did not exist");
- status = fd_open(conn, fsp, curr_flags, mode);
+ if (file_existed) {
+ curr_flags = flags & ~(O_CREAT);
+ } else {
+ curr_flags = flags | O_EXCL;
+ }
- if (NT_STATUS_IS_OK(status) && (!file_existed)) {
- *file_created = true;
+ status = fd_open(conn, fsp, curr_flags, mode);
}
+ *file_created = (NT_STATUS_IS_OK(status) && !file_existed);
return status;
}
* too. With blocking file descriptors this
* does not happen.
*/
- ret = set_blocking(fsp->fh->fd, true);
+ ret = vfs_set_blocking(fsp, true);
if (ret == -1) {
status = map_nt_error_from_unix(errno);
DBG_WARNING("Could not set fd to blocking: "
fsp->file_id = vfs_file_id_from_sbuf(conn, &smb_fname->st);
fsp->vuid = req ? req->vuid : UID_FIELD_INVALID;
fsp->file_pid = req ? req->smbpid : 0;
- fsp->can_lock = True;
- fsp->can_read = ((access_mask & FILE_READ_DATA) != 0);
- fsp->can_write =
+ fsp->fsp_flags.can_lock = true;
+ fsp->fsp_flags.can_read = ((access_mask & FILE_READ_DATA) != 0);
+ fsp->fsp_flags.can_write =
CAN_WRITE(conn) &&
((access_mask & (FILE_WRITE_DATA | FILE_APPEND_DATA)) != 0);
fsp->print_file = NULL;
fsp->aio_write_behind = True;
}
- fsp->wcp = NULL; /* Write cache pointer. */
-
DEBUG(2,("%s opened file %s read=%s write=%s (numopen=%d)\n",
conn->session_info->unix_info->unix_name,
smb_fname_str_dbg(smb_fname),
- BOOLSTR(fsp->can_read), BOOLSTR(fsp->can_write),
+ BOOLSTR(fsp->fsp_flags.can_read),
+ BOOLSTR(fsp->fsp_flags.can_write),
conn->num_files_open));
errno = 0;
return NT_STATUS_OK;
}
+/*
+ * Check one access-right / share-mode pairing for a conflict, in
+ * both directions: the new opener requests a right (access_mask)
+ * the existing opener does not share (sharemode_mask), or the
+ * existing opener holds a right the new opener does not share.
+ *
+ * Returns true on conflict, false if the two openers can coexist
+ * for this particular right/share pair.
+ */
+static bool mask_conflict(
+	uint32_t new_access,
+	uint32_t existing_access,
+	uint32_t access_mask,
+	uint32_t new_sharemode,
+	uint32_t existing_sharemode,
+	uint32_t sharemode_mask)
+{
+	bool want_access = (new_access & access_mask);
+	bool allow_existing = (existing_sharemode & sharemode_mask);
+	bool have_access = (existing_access & access_mask);
+	bool allow_new = (new_sharemode & sharemode_mask);
+
+	if (want_access && !allow_existing) {
+		DBG_DEBUG("Access request 0x%"PRIx32"/0x%"PRIx32" conflicts "
+			  "with existing sharemode 0x%"PRIx32"/0x%"PRIx32"\n",
+			  new_access,
+			  access_mask,
+			  existing_sharemode,
+			  sharemode_mask);
+		return true;
+	}
+	if (have_access && !allow_new) {
+		DBG_DEBUG("Sharemode request 0x%"PRIx32"/0x%"PRIx32" conflicts "
+			  "with existing access 0x%"PRIx32"/0x%"PRIx32"\n",
+			  new_sharemode,
+			  sharemode_mask,
+			  existing_access,
+			  access_mask);
+		return true;
+	}
+	return false;
+}
+
/****************************************************************************
Check if we can open a file with a share mode.
Returns True if conflict, False if not.
****************************************************************************/
-static bool share_conflict(struct share_mode_entry *entry,
+static bool share_conflict(uint32_t e_access_mask,
+ uint32_t e_share_access,
uint32_t access_mask,
uint32_t share_access)
{
- DEBUG(10,("share_conflict: entry->access_mask = 0x%x, "
- "entry->share_access = 0x%x, "
- "entry->private_options = 0x%x\n",
- (unsigned int)entry->access_mask,
- (unsigned int)entry->share_access,
- (unsigned int)entry->private_options));
-
- if (server_id_is_disconnected(&entry->pid)) {
+ const uint32_t conflicting_access =
+ FILE_WRITE_DATA|
+ FILE_APPEND_DATA|
+ FILE_READ_DATA|
+ FILE_EXECUTE|
+ DELETE_ACCESS;
+ bool conflict;
+
+ DBG_DEBUG("existing access_mask = 0x%"PRIx32", "
+ "existing share access = 0x%"PRIx32", "
+ "access_mask = 0x%"PRIx32", "
+ "share_access = 0x%"PRIx32"\n",
+ e_access_mask,
+ e_share_access,
+ access_mask,
+ share_access);
+
+ if ((e_access_mask & conflicting_access) == 0) {
+ DBG_DEBUG("No conflict due to "
+ "existing access_mask = 0x%"PRIx32"\n",
+ e_access_mask);
return false;
}
-
- DEBUG(10,("share_conflict: access_mask = 0x%x, share_access = 0x%x\n",
- (unsigned int)access_mask, (unsigned int)share_access));
-
- if ((entry->access_mask & (FILE_WRITE_DATA|
- FILE_APPEND_DATA|
- FILE_READ_DATA|
- FILE_EXECUTE|
- DELETE_ACCESS)) == 0) {
- DEBUG(10,("share_conflict: No conflict due to "
- "entry->access_mask = 0x%x\n",
- (unsigned int)entry->access_mask ));
- return False;
- }
-
- if ((access_mask & (FILE_WRITE_DATA|
- FILE_APPEND_DATA|
- FILE_READ_DATA|
- FILE_EXECUTE|
- DELETE_ACCESS)) == 0) {
- DEBUG(10,("share_conflict: No conflict due to "
- "access_mask = 0x%x\n",
- (unsigned int)access_mask ));
- return False;
- }
-
-#if 1 /* JRA TEST - Superdebug. */
-#define CHECK_MASK(num, am, right, sa, share) \
- DEBUG(10,("share_conflict: [%d] am (0x%x) & right (0x%x) = 0x%x\n", \
- (unsigned int)(num), (unsigned int)(am), \
- (unsigned int)(right), (unsigned int)(am)&(right) )); \
- DEBUG(10,("share_conflict: [%d] sa (0x%x) & share (0x%x) = 0x%x\n", \
- (unsigned int)(num), (unsigned int)(sa), \
- (unsigned int)(share), (unsigned int)(sa)&(share) )); \
- if (((am) & (right)) && !((sa) & (share))) { \
- DEBUG(10,("share_conflict: check %d conflict am = 0x%x, right = 0x%x, \
-sa = 0x%x, share = 0x%x\n", (num), (unsigned int)(am), (unsigned int)(right), (unsigned int)(sa), \
- (unsigned int)(share) )); \
- return True; \
- }
-#else
-#define CHECK_MASK(num, am, right, sa, share) \
- if (((am) & (right)) && !((sa) & (share))) { \
- DEBUG(10,("share_conflict: check %d conflict am = 0x%x, right = 0x%x, \
-sa = 0x%x, share = 0x%x\n", (num), (unsigned int)(am), (unsigned int)(right), (unsigned int)(sa), \
- (unsigned int)(share) )); \
- return True; \
+ if ((access_mask & conflicting_access) == 0) {
+ DBG_DEBUG("No conflict due to access_mask = 0x%"PRIx32"\n",
+ access_mask);
+ return false;
}
-#endif
-
- CHECK_MASK(1, entry->access_mask, FILE_WRITE_DATA | FILE_APPEND_DATA,
- share_access, FILE_SHARE_WRITE);
- CHECK_MASK(2, access_mask, FILE_WRITE_DATA | FILE_APPEND_DATA,
- entry->share_access, FILE_SHARE_WRITE);
-
- CHECK_MASK(3, entry->access_mask, FILE_READ_DATA | FILE_EXECUTE,
- share_access, FILE_SHARE_READ);
- CHECK_MASK(4, access_mask, FILE_READ_DATA | FILE_EXECUTE,
- entry->share_access, FILE_SHARE_READ);
- CHECK_MASK(5, entry->access_mask, DELETE_ACCESS,
- share_access, FILE_SHARE_DELETE);
- CHECK_MASK(6, access_mask, DELETE_ACCESS,
- entry->share_access, FILE_SHARE_DELETE);
+ conflict = mask_conflict(
+ access_mask, e_access_mask, FILE_WRITE_DATA | FILE_APPEND_DATA,
+ share_access, e_share_access, FILE_SHARE_WRITE);
+ conflict |= mask_conflict(
+ access_mask, e_access_mask, FILE_READ_DATA | FILE_EXECUTE,
+ share_access, e_share_access, FILE_SHARE_READ);
+ conflict |= mask_conflict(
+ access_mask, e_access_mask, DELETE_ACCESS,
+ share_access, e_share_access, FILE_SHARE_DELETE);
- DEBUG(10,("share_conflict: No conflict.\n"));
- return False;
+ DBG_DEBUG("conflict=%s\n", conflict ? "true" : "false");
+ return conflict;
}
#if defined(DEVELOPER)
-static void validate_my_share_entries(struct smbd_server_connection *sconn,
- const struct file_id id,
- int num,
- struct share_mode_entry *share_entry)
+
+struct validate_my_share_entries_state {
+ struct smbd_server_connection *sconn;
+ struct file_id fid;
+ struct server_id self;
+};
+
+static bool validate_my_share_entries_fn(
+ struct share_mode_entry *e,
+ bool *modified,
+ void *private_data)
{
- struct server_id self = messaging_server_id(sconn->msg_ctx);
+ struct validate_my_share_entries_state *state = private_data;
files_struct *fsp;
- if (!serverid_equal(&self, &share_entry->pid)) {
- return;
+ if (!server_id_equal(&state->self, &e->pid)) {
+ return false;
}
- if (share_entry->op_mid == 0) {
+ if (e->op_mid == 0) {
/* INTERNAL_OPEN_ONLY */
- return;
- }
-
- if (!is_valid_share_mode_entry(share_entry)) {
- return;
+ return false;
}
- fsp = file_find_dif(sconn, id, share_entry->share_file_id);
+ fsp = file_find_dif(state->sconn, state->fid, e->share_file_id);
if (!fsp) {
DBG_ERR("PANIC : %s\n",
- share_mode_str(talloc_tos(), num, &id,
- share_entry));
+ share_mode_str(talloc_tos(), 0, &state->fid, e));
smb_panic("validate_my_share_entries: Cannot match a "
"share entry with an open file\n");
}
- if (((uint16_t)fsp->oplock_type) != share_entry->op_type) {
+ if (((uint16_t)fsp->oplock_type) != e->op_type) {
goto panic;
}
- return;
+ return false;
panic:
{
char *str;
DBG_ERR("validate_my_share_entries: PANIC : %s\n",
- share_mode_str(talloc_tos(), num, &id,
- share_entry));
+ share_mode_str(talloc_tos(), 0, &state->fid, e));
str = talloc_asprintf(talloc_tos(),
"validate_my_share_entries: "
"file %s, oplock_type = 0x%x, op_type = 0x%x\n",
fsp->fsp_name->base_name,
(unsigned int)fsp->oplock_type,
- (unsigned int)share_entry->op_type );
+ (unsigned int)e->op_type);
smb_panic(str);
}
+
+ return false;
}
#endif
((access_mask & ~stat_open_bits) == 0));
}
+/* Result accumulator for has_delete_on_close_fn. */
+struct has_delete_on_close_state {
+	bool ret;
+};
+
+/*
+ * share_mode_forall_entries() callback: look for the first entry
+ * whose owning process is still alive. A non-stale pid means the
+ * delete-on-close disposition is backed by a real opener, so we
+ * record true and stop the walk (returning true ends iteration).
+ */
+static bool has_delete_on_close_fn(
+	struct share_mode_entry *e,
+	bool *modified,
+	void *private_data)
+{
+	struct has_delete_on_close_state *state = private_data;
+	state->ret = !share_entry_stale_pid(e);
+	return state->ret;
+}
+
static bool has_delete_on_close(struct share_mode_lock *lck,
uint32_t name_hash)
{
- struct share_mode_data *d = lck->data;
- uint32_t i;
+ struct has_delete_on_close_state state = { .ret = false };
+ bool ok;
- if (d->num_share_modes == 0) {
+ if (!is_delete_on_close_set(lck, name_hash)) {
return false;
}
- if (!is_delete_on_close_set(lck, name_hash)) {
+
+	ok = share_mode_forall_entries(lck, has_delete_on_close_fn, &state);
+ if (!ok) {
+ DBG_DEBUG("share_mode_forall_entries failed\n");
return false;
}
- for (i=0; i<d->num_share_modes; i++) {
- if (!share_mode_stale_pid(d, i)) {
- return true;
- }
+ return state.ret;
+}
+
+static void share_mode_flags_get(
+ uint16_t flags,
+ uint32_t *access_mask,
+ uint32_t *share_mode,
+ uint32_t *lease_type)
+{
+ if (access_mask != NULL) {
+ *access_mask =
+ ((flags & SHARE_MODE_ACCESS_READ) ?
+ FILE_READ_DATA : 0) |
+ ((flags & SHARE_MODE_ACCESS_WRITE) ?
+ FILE_WRITE_DATA : 0) |
+ ((flags & SHARE_MODE_ACCESS_DELETE) ?
+ DELETE_ACCESS : 0);
+ }
+ if (share_mode != NULL) {
+ *share_mode =
+ ((flags & SHARE_MODE_SHARE_READ) ?
+ FILE_SHARE_READ : 0) |
+ ((flags & SHARE_MODE_SHARE_WRITE) ?
+ FILE_SHARE_WRITE : 0) |
+ ((flags & SHARE_MODE_SHARE_DELETE) ?
+ FILE_SHARE_DELETE : 0);
+ }
+ if (lease_type != NULL) {
+ *lease_type =
+ ((flags & SHARE_MODE_LEASE_READ) ?
+ SMB2_LEASE_READ : 0) |
+ ((flags & SHARE_MODE_LEASE_WRITE) ?
+ SMB2_LEASE_WRITE : 0) |
+ ((flags & SHARE_MODE_LEASE_HANDLE) ?
+ SMB2_LEASE_HANDLE : 0);
}
- return false;
+}
+
+/*
+ * Fold NT access-mask, share-mode and lease-type bits into the
+ * compact 16-bit share_mode_data flags representation.
+ *
+ * Passing UINT32_MAX for any of the three arguments leaves that
+ * group of flag bits untouched, so callers can update the groups
+ * independently. Returns the updated flags value.
+ */
+static uint16_t share_mode_flags_set(
+	uint16_t flags,
+	uint32_t access_mask,
+	uint32_t share_mode,
+	uint32_t lease_type)
+{
+	if (access_mask != UINT32_MAX) {
+		flags &= ~(SHARE_MODE_ACCESS_READ|
+			   SHARE_MODE_ACCESS_WRITE|
+			   SHARE_MODE_ACCESS_DELETE);
+		flags |= (access_mask & (FILE_READ_DATA | FILE_EXECUTE)) ?
+			SHARE_MODE_ACCESS_READ : 0;
+		flags |= (access_mask & (FILE_WRITE_DATA | FILE_APPEND_DATA)) ?
+			SHARE_MODE_ACCESS_WRITE : 0;
+		flags |= (access_mask & (DELETE_ACCESS)) ?
+			SHARE_MODE_ACCESS_DELETE : 0;
+	}
+	if (share_mode != UINT32_MAX) {
+		flags &= ~(SHARE_MODE_SHARE_READ|
+			   SHARE_MODE_SHARE_WRITE|
+			   SHARE_MODE_SHARE_DELETE);
+		flags |= (share_mode & FILE_SHARE_READ) ?
+			SHARE_MODE_SHARE_READ : 0;
+		flags |= (share_mode & FILE_SHARE_WRITE) ?
+			SHARE_MODE_SHARE_WRITE : 0;
+		flags |= (share_mode & FILE_SHARE_DELETE) ?
+			SHARE_MODE_SHARE_DELETE : 0;
+	}
+	if (lease_type != UINT32_MAX) {
+		flags &= ~(SHARE_MODE_LEASE_READ|
+			   SHARE_MODE_LEASE_WRITE|
+			   SHARE_MODE_LEASE_HANDLE);
+		flags |= (lease_type & SMB2_LEASE_READ) ?
+			SHARE_MODE_LEASE_READ : 0;
+		flags |= (lease_type & SMB2_LEASE_WRITE) ?
+			SHARE_MODE_LEASE_WRITE : 0;
+		flags |= (lease_type & SMB2_LEASE_HANDLE) ?
+			SHARE_MODE_LEASE_HANDLE : 0;
+	}
+
+	return flags;
+}
+
+/*
+ * Merge a new opener's access/share/lease values into the existing
+ * compact flags: granted access and lease bits accumulate (OR),
+ * while the effective share mode can only narrow (AND). Returns
+ * the recomputed flags.
+ */
+static uint16_t share_mode_flags_restrict(
+	uint16_t flags,
+	uint32_t access_mask,
+	uint32_t share_mode,
+	uint32_t lease_type)
+{
+	uint32_t existing_access_mask, existing_share_mode;
+	uint32_t existing_lease_type;
+	uint16_t ret;
+
+	share_mode_flags_get(
+		flags,
+		&existing_access_mask,
+		&existing_share_mode,
+		&existing_lease_type);
+
+	existing_access_mask |= access_mask;
+	existing_share_mode &= share_mode;
+	existing_lease_type |= lease_type;
+
+	ret = share_mode_flags_set(
+		flags,
+		existing_access_mask,
+		existing_share_mode,
+		existing_lease_type);
+	return ret;
+}
/****************************************************************************
Returns -1 on error, or number of share modes on success (may be zero).
****************************************************************************/
+/*
+ * Accumulator for open_mode_check_fn: starts from the caller's
+ * baseline and is widened/narrowed per live share mode entry.
+ */
+struct open_mode_check_state {
+	struct file_id fid;
+	uint32_t access_mask;
+	uint32_t share_access;
+	uint32_t lease_type;
+};
+
+/*
+ * share_mode_forall_entries() callback used to recompute the
+ * aggregate access mask (union), share mode (intersection) and
+ * lease type (union) over all live entries. Disconnected entries
+ * are ignored; an entry is only tested for staleness when it
+ * would actually change the accumulated state. Always returns
+ * false so the whole array is walked.
+ */
+static bool open_mode_check_fn(
+	struct share_mode_entry *e,
+	bool *modified,
+	void *private_data)
+{
+	struct open_mode_check_state *state = private_data;
+	bool disconnected, stale;
+	uint32_t access_mask, share_access, lease_type;
+
+	disconnected = server_id_is_disconnected(&e->pid);
+	if (disconnected) {
+		return false;
+	}
+
+	access_mask = state->access_mask | e->access_mask;
+	share_access = state->share_access & e->share_access;
+	lease_type = state->lease_type | get_lease_type(e, state->fid);
+
+	if ((access_mask == state->access_mask) &&
+	    (share_access == state->share_access) &&
+	    (lease_type == state->lease_type)) {
+		/* This entry would not change anything: no need to
+		 * pay for the (expensive) stale-pid check. */
+		return false;
+	}
+
+	stale = share_entry_stale_pid(e);
+	if (stale) {
+		return false;
+	}
+
+	state->access_mask = access_mask;
+	state->share_access = share_access;
+	state->lease_type = lease_type;
+
+	return false;
+}
+
static NTSTATUS open_mode_check(connection_struct *conn,
struct share_mode_lock *lck,
uint32_t access_mask,
uint32_t share_access)
{
- uint32_t i;
+ struct share_mode_data *d = lck->data;
+ struct open_mode_check_state state;
+ uint16_t new_flags;
+ bool ok, conflict, have_share_entries;
if (is_stat_open(access_mask)) {
/* Stat open that doesn't trigger oplock breaks or share mode
*/
#if defined(DEVELOPER)
- for(i = 0; i < lck->data->num_share_modes; i++) {
- validate_my_share_entries(conn->sconn, lck->data->id, i,
- &lck->data->share_modes[i]);
+ {
+ struct validate_my_share_entries_state validate_state = {
+ .sconn = conn->sconn,
+ .fid = d->id,
+ .self = messaging_server_id(conn->sconn->msg_ctx),
+ };
+ ok = share_mode_forall_entries(
+ lck, validate_my_share_entries_fn, &validate_state);
+ SMB_ASSERT(ok);
}
#endif
- /* Now we check the share modes, after any oplock breaks. */
- for(i = 0; i < lck->data->num_share_modes; i++) {
+ have_share_entries = share_mode_have_entries(lck);
+ if (!have_share_entries) {
+ /*
+ * This is a fresh share mode lock where no conflicts
+ * can happen.
+ */
+ return NT_STATUS_OK;
+ }
- if (!is_valid_share_mode_entry(&lck->data->share_modes[i])) {
- continue;
- }
+ share_mode_flags_get(
+ d->flags, &state.access_mask, &state.share_access, NULL);
- /* someone else has a share lock on it, check to see if we can
- * too */
- if (share_conflict(&lck->data->share_modes[i],
- access_mask, share_access)) {
+ conflict = share_conflict(
+ state.access_mask,
+ state.share_access,
+ access_mask,
+ share_access);
+ if (!conflict) {
+ DBG_DEBUG("No conflict due to share_mode_flags access\n");
+ return NT_STATUS_OK;
+ }
- if (share_mode_stale_pid(lck->data, i)) {
- continue;
- }
+ state = (struct open_mode_check_state) {
+ .fid = d->id,
+ .share_access = (FILE_SHARE_READ|
+ FILE_SHARE_WRITE|
+ FILE_SHARE_DELETE),
+ };
- return NT_STATUS_SHARING_VIOLATION;
- }
+ /*
+ * Walk the share mode array to recalculate d->flags
+ */
+
+ ok = share_mode_forall_entries(lck, open_mode_check_fn, &state);
+ if (!ok) {
+ DBG_DEBUG("share_mode_forall_entries failed\n");
+ return NT_STATUS_INTERNAL_ERROR;
}
- return NT_STATUS_OK;
+ new_flags = share_mode_flags_set(
+ 0, state.access_mask, state.share_access, state.lease_type);
+ if (new_flags == d->flags) {
+ /*
+ * We only end up here if we had a sharing violation
+ * from d->flags and have recalculated it.
+ */
+ return NT_STATUS_SHARING_VIOLATION;
+ }
+
+ d->flags = new_flags;
+ d->modified = true;
+
+ conflict = share_conflict(
+ state.access_mask,
+ state.share_access,
+ access_mask,
+ share_access);
+ if (!conflict) {
+ DBG_DEBUG("No conflict due to share_mode_flags access\n");
+ return NT_STATUS_OK;
+ }
+
+ return NT_STATUS_SHARING_VIOLATION;
}
/*
return status;
}
-/*
- * Do internal consistency checks on the share mode for a file.
- */
+struct validate_oplock_types_state {
+ bool valid;
+ bool batch;
+ bool ex_or_batch;
+ bool level2;
+ bool no_oplock;
+ uint32_t num_non_stat_opens;
+};
-static bool validate_oplock_types(struct share_mode_lock *lck)
+static bool validate_oplock_types_fn(
+ struct share_mode_entry *e,
+ bool *modified,
+ void *private_data)
{
- struct share_mode_data *d = lck->data;
- bool batch = false;
- bool ex_or_batch = false;
- bool level2 = false;
- bool no_oplock = false;
- uint32_t num_non_stat_opens = 0;
- uint32_t i;
+ struct validate_oplock_types_state *state = private_data;
- for (i=0; i<d->num_share_modes; i++) {
- struct share_mode_entry *e = &d->share_modes[i];
+ if (e->op_mid == 0) {
+ /* INTERNAL_OPEN_ONLY */
+ return false;
+ }
- if (!is_valid_share_mode_entry(e)) {
- continue;
- }
+ if (e->op_type == NO_OPLOCK && is_stat_open(e->access_mask)) {
+ /*
+ * We ignore stat opens in the table - they always
+ * have NO_OPLOCK and never get or cause breaks. JRA.
+ */
+ return false;
+ }
- if (e->op_mid == 0) {
- /* INTERNAL_OPEN_ONLY */
- continue;
- }
+ state->num_non_stat_opens += 1;
- if (e->op_type == NO_OPLOCK && is_stat_open(e->access_mask)) {
- /* We ignore stat opens in the table - they
- always have NO_OPLOCK and never get or
- cause breaks. JRA. */
- continue;
+ if (BATCH_OPLOCK_TYPE(e->op_type)) {
+ /* batch - can only be one. */
+ if (share_entry_stale_pid(e)) {
+ DBG_DEBUG("Found stale batch oplock\n");
+ return false;
}
-
- num_non_stat_opens += 1;
-
- if (BATCH_OPLOCK_TYPE(e->op_type)) {
- /* batch - can only be one. */
- if (share_mode_stale_pid(d, i)) {
- DEBUG(10, ("Found stale batch oplock\n"));
- continue;
- }
- if (ex_or_batch || batch || level2 || no_oplock) {
- DEBUG(0, ("Bad batch oplock entry %u.",
- (unsigned)i));
- return false;
- }
- batch = true;
+ if (state->ex_or_batch ||
+ state->batch ||
+ state->level2 ||
+ state->no_oplock) {
+ DBG_ERR("Bad batch oplock entry\n");
+ state->valid = false;
+ return true;
}
+ state->batch = true;
+ }
- if (EXCLUSIVE_OPLOCK_TYPE(e->op_type)) {
- if (share_mode_stale_pid(d, i)) {
- DEBUG(10, ("Found stale duplicate oplock\n"));
- continue;
- }
- /* Exclusive or batch - can only be one. */
- if (ex_or_batch || level2 || no_oplock) {
- DEBUG(0, ("Bad exclusive or batch oplock "
- "entry %u.", (unsigned)i));
- return false;
- }
- ex_or_batch = true;
+ if (EXCLUSIVE_OPLOCK_TYPE(e->op_type)) {
+ if (share_entry_stale_pid(e)) {
+ DBG_DEBUG("Found stale duplicate oplock\n");
+ return false;
}
+ /* Exclusive or batch - can only be one. */
+ if (state->ex_or_batch ||
+ state->level2 ||
+ state->no_oplock) {
+ DBG_ERR("Bad exclusive or batch oplock entry\n");
+ state->valid = false;
+ return true;
+ }
+ state->ex_or_batch = true;
+ }
- if (LEVEL_II_OPLOCK_TYPE(e->op_type)) {
- if (batch || ex_or_batch) {
- if (share_mode_stale_pid(d, i)) {
- DEBUG(10, ("Found stale LevelII "
- "oplock\n"));
- continue;
- }
- DEBUG(0, ("Bad levelII oplock entry %u.",
- (unsigned)i));
+ if (LEVEL_II_OPLOCK_TYPE(e->op_type)) {
+ if (state->batch || state->ex_or_batch) {
+ if (share_entry_stale_pid(e)) {
+ DBG_DEBUG("Found stale LevelII oplock\n");
return false;
}
- level2 = true;
+ DBG_DEBUG("Bad levelII oplock entry\n");
+ state->valid = false;
+ return true;
}
+ state->level2 = true;
+ }
- if (e->op_type == NO_OPLOCK) {
- if (batch || ex_or_batch) {
- if (share_mode_stale_pid(d, i)) {
- DEBUG(10, ("Found stale NO_OPLOCK "
- "entry\n"));
- continue;
- }
- DEBUG(0, ("Bad no oplock entry %u.",
- (unsigned)i));
+ if (e->op_type == NO_OPLOCK) {
+ if (state->batch || state->ex_or_batch) {
+ if (share_entry_stale_pid(e)) {
+ DBG_DEBUG("Found stale NO_OPLOCK entry\n");
return false;
}
- no_oplock = true;
+ DBG_ERR("Bad no oplock entry\n");
+ state->valid = false;
+ return true;
}
+ state->no_oplock = true;
}
- remove_stale_share_mode_entries(d);
+ return false;
+}
+
+/*
+ * Do internal consistency checks on the share mode for a file.
+ */
+
+static bool validate_oplock_types(struct share_mode_lock *lck)
+{
+ struct validate_oplock_types_state state = { .valid = true };
+ bool ok;
+
+ ok = share_mode_forall_entries(lck, validate_oplock_types_fn, &state);
+ if (!ok) {
+ DBG_DEBUG("share_mode_forall_entries failed\n");
+ return false;
+ }
+ if (!state.valid) {
+ DBG_DEBUG("Got invalid oplock configuration\n");
+ return false;
+ }
- if ((batch || ex_or_batch) && (num_non_stat_opens != 1)) {
- DEBUG(1, ("got batch (%d) or ex (%d) non-exclusively (%d)\n",
- (int)batch, (int)ex_or_batch,
- (int)d->num_share_modes));
+ if ((state.batch || state.ex_or_batch) &&
+ (state.num_non_stat_opens != 1)) {
+ DBG_WARNING("got batch (%d) or ex (%d) non-exclusively "
+ "(%"PRIu32")\n",
+ (int)state.batch,
+ (int)state.ex_or_batch,
+ state.num_non_stat_opens);
return false;
}
return true;
}
-static bool delay_for_oplock(files_struct *fsp,
- int oplock_request,
- const struct smb2_lease *lease,
- struct share_mode_lock *lck,
- bool have_sharing_violation,
- uint32_t create_disposition,
- bool first_open_attempt)
+static bool is_same_lease(const files_struct *fsp,
+ const struct share_mode_entry *e,
+ const struct smb2_lease *lease)
{
- struct share_mode_data *d = lck->data;
- uint32_t i;
- bool delay = false;
- bool will_overwrite;
- const uint32_t delay_mask = have_sharing_violation ?
- SMB2_LEASE_HANDLE : SMB2_LEASE_WRITE;
-
- if ((oplock_request & INTERNAL_OPEN_ONLY) ||
- is_stat_open(fsp->access_mask)) {
+ if (e->op_type != LEASE_OPLOCK) {
return false;
}
-
- switch (create_disposition) {
- case FILE_SUPERSEDE:
- case FILE_OVERWRITE:
- case FILE_OVERWRITE_IF:
- will_overwrite = true;
- break;
- default:
- will_overwrite = false;
- break;
+ if (lease == NULL) {
+ return false;
}
- for (i=0; i<d->num_share_modes; i++) {
- struct share_mode_entry *e = &d->share_modes[i];
- bool e_is_lease = (e->op_type == LEASE_OPLOCK);
- uint32_t e_lease_type = get_lease_type(d, e);
- uint32_t break_to;
- bool lease_is_breaking = false;
-
- if (e_is_lease) {
- NTSTATUS status;
-
- if (lease != NULL) {
- bool our_lease = smb2_lease_equal(
- fsp_client_guid(fsp),
- &lease->lease_key,
- &e->client_guid,
- &e->lease_key);
- if (our_lease) {
- DBG_DEBUG("Ignoring our own lease\n");
- continue;
- }
- }
-
- status = leases_db_get(
+ return smb2_lease_equal(fsp_client_guid(fsp),
+ &lease->lease_key,
&e->client_guid,
- &e->lease_key,
- &fsp->file_id,
- NULL, /* current_state */
- &lease_is_breaking,
- NULL, /* breaking_to_requested */
- NULL, /* breaking_to_required */
- NULL, /* lease_version */
- NULL); /* epoch */
- SMB_ASSERT(NT_STATUS_IS_OK(status));
- }
-
- break_to = e_lease_type & ~delay_mask;
-
- if (will_overwrite) {
- break_to &= ~(SMB2_LEASE_HANDLE|SMB2_LEASE_READ);
- }
-
- DEBUG(10, ("entry %u: e_lease_type %u, will_overwrite: %u\n",
- (unsigned)i, (unsigned)e_lease_type,
- (unsigned)will_overwrite));
-
- if ((e_lease_type & ~break_to) == 0) {
- if (lease_is_breaking) {
- delay = true;
- }
- continue;
- }
-
- if (share_mode_stale_pid(d, i)) {
- continue;
- }
-
- if (will_overwrite) {
- /*
- * If we break anyway break to NONE directly.
- * Otherwise vfs_set_filelen() will trigger the
- * break.
- */
- break_to &= ~(SMB2_LEASE_READ|SMB2_LEASE_WRITE);
- }
-
- if (!e_is_lease) {
- /*
- * Oplocks only support breaking to R or NONE.
- */
- break_to &= ~(SMB2_LEASE_HANDLE|SMB2_LEASE_WRITE);
- }
-
- DEBUG(10, ("breaking from %d to %d\n",
- (int)e_lease_type, (int)break_to));
- send_break_message(fsp->conn->sconn->msg_ctx, &fsp->file_id,
- e, break_to);
- if (e_lease_type & delay_mask) {
- delay = true;
- }
- if (lease_is_breaking && !first_open_attempt) {
- delay = true;
- }
- }
-
- return delay;
+ &e->lease_key);
}
static bool file_has_brlocks(files_struct *fsp)
return status;
}
-static bool is_same_lease(const files_struct *fsp,
- const struct share_mode_data *d,
- const struct share_mode_entry *e,
- const struct smb2_lease *lease)
-{
- if (e->op_type != LEASE_OPLOCK) {
- return false;
- }
- if (lease == NULL) {
- return false;
- }
-
- return smb2_lease_equal(fsp_client_guid(fsp),
- &lease->lease_key,
- &e->client_guid,
- &e->lease_key);
-}
-
static int map_lease_type_to_oplock(uint32_t lease_type)
{
int result = NO_OPLOCK;
return result;
}
-static NTSTATUS grant_fsp_oplock_type(struct smb_request *req,
- struct files_struct *fsp,
- struct share_mode_lock *lck,
- int oplock_request,
- struct smb2_lease *lease,
- uint32_t share_access,
- uint32_t access_mask)
+struct delay_for_oplock_state {
+ struct files_struct *fsp;
+ const struct smb2_lease *lease;
+ bool will_overwrite;
+ uint32_t delay_mask;
+ bool first_open_attempt;
+ bool got_handle_lease;
+ bool got_oplock;
+ bool have_other_lease;
+ bool delay;
+};
+
+static bool delay_for_oplock_fn(
+ struct share_mode_entry *e,
+ bool *modified,
+ void *private_data)
{
- struct share_mode_data *d = lck->data;
- bool got_handle_lease = false;
- bool got_oplock = false;
- uint32_t i;
+ struct delay_for_oplock_state *state = private_data;
+ struct files_struct *fsp = state->fsp;
+ const struct smb2_lease *lease = state->lease;
+ bool e_is_lease = (e->op_type == LEASE_OPLOCK);
+ uint32_t e_lease_type = get_lease_type(e, fsp->file_id);
+ uint32_t break_to;
+ bool lease_is_breaking = false;
+
+ if (e_is_lease) {
+ NTSTATUS status;
+
+ if (lease != NULL) {
+ bool our_lease = is_same_lease(fsp, e, lease);
+ if (our_lease) {
+ DBG_DEBUG("Ignoring our own lease\n");
+ return false;
+ }
+ }
+
+ status = leases_db_get(
+ &e->client_guid,
+ &e->lease_key,
+ &fsp->file_id,
+ NULL, /* current_state */
+ &lease_is_breaking,
+ NULL, /* breaking_to_requested */
+ NULL, /* breaking_to_required */
+ NULL, /* lease_version */
+ NULL); /* epoch */
+ SMB_ASSERT(NT_STATUS_IS_OK(status));
+ }
+
+ if (!state->got_handle_lease &&
+ ((e_lease_type & SMB2_LEASE_HANDLE) != 0) &&
+ !share_entry_stale_pid(e)) {
+ state->got_handle_lease = true;
+ }
+
+ if (!state->got_oplock &&
+ (e->op_type != LEASE_OPLOCK) &&
+ !share_entry_stale_pid(e)) {
+ state->got_oplock = true;
+ }
+
+ if (!state->have_other_lease &&
+ !is_same_lease(fsp, e, lease) &&
+ !share_entry_stale_pid(e)) {
+ state->have_other_lease = true;
+ }
+
+ break_to = e_lease_type & ~state->delay_mask;
+
+ if (state->will_overwrite) {
+ break_to &= ~(SMB2_LEASE_HANDLE|SMB2_LEASE_READ);
+ }
+
+ DBG_DEBUG("e_lease_type %u, will_overwrite: %u\n",
+ (unsigned)e_lease_type,
+ (unsigned)state->will_overwrite);
+
+ if ((e_lease_type & ~break_to) == 0) {
+ if (lease_is_breaking) {
+ state->delay = true;
+ }
+ return false;
+ }
+
+ if (share_entry_stale_pid(e)) {
+ return false;
+ }
+
+ if (state->will_overwrite) {
+ /*
+ * If we break anyway break to NONE directly.
+ * Otherwise vfs_set_filelen() will trigger the
+ * break.
+ */
+ break_to &= ~(SMB2_LEASE_READ|SMB2_LEASE_WRITE);
+ }
+
+ if (!e_is_lease) {
+ /*
+ * Oplocks only support breaking to R or NONE.
+ */
+ break_to &= ~(SMB2_LEASE_HANDLE|SMB2_LEASE_WRITE);
+ }
+
+ DBG_DEBUG("breaking from %d to %d\n",
+ (int)e_lease_type,
+ (int)break_to);
+ send_break_message(
+ fsp->conn->sconn->msg_ctx, &fsp->file_id, e, break_to);
+ if (e_lease_type & state->delay_mask) {
+ state->delay = true;
+ }
+ if (lease_is_breaking && !state->first_open_attempt) {
+ state->delay = true;
+ }
+
+ return false;
+}
+
+static NTSTATUS delay_for_oplock(files_struct *fsp,
+ int oplock_request,
+ const struct smb2_lease *lease,
+ struct share_mode_lock *lck,
+ bool have_sharing_violation,
+ uint32_t create_disposition,
+ bool first_open_attempt)
+{
+ struct delay_for_oplock_state state = {
+ .fsp = fsp,
+ .lease = lease,
+ .first_open_attempt = first_open_attempt,
+ };
uint32_t granted;
- bool ok;
NTSTATUS status;
+ bool ok;
+
+ if (is_stat_open(fsp->access_mask)) {
+ goto grant;
+ }
+
+ state.delay_mask = have_sharing_violation ?
+ SMB2_LEASE_HANDLE : SMB2_LEASE_WRITE;
+
+ switch (create_disposition) {
+ case FILE_SUPERSEDE:
+ case FILE_OVERWRITE:
+ case FILE_OVERWRITE_IF:
+ state.will_overwrite = true;
+ break;
+ default:
+ state.will_overwrite = false;
+ break;
+ }
+
+ ok = share_mode_forall_entries(lck, delay_for_oplock_fn, &state);
+ if (!ok) {
+ return NT_STATUS_INTERNAL_ERROR;
+ }
+
+ if (state.delay) {
+ return NT_STATUS_RETRY;
+ }
- if (oplock_request & INTERNAL_OPEN_ONLY) {
- /* No oplocks on internal open. */
- oplock_request = NO_OPLOCK;
- DEBUG(10,("grant_fsp_oplock_type: oplock type 0x%x on file %s\n",
- fsp->oplock_type, fsp_str_dbg(fsp)));
+grant:
+ if (have_sharing_violation) {
+ return NT_STATUS_SHARING_VIOLATION;
}
if (oplock_request == LEASE_OPLOCK) {
}
if (lp_locking(fsp->conn->params) && file_has_brlocks(fsp)) {
- DEBUG(10,("grant_fsp_oplock_type: file %s has byte range locks\n",
- fsp_str_dbg(fsp)));
+ DBG_DEBUG("file %s has byte range locks\n",
+ fsp_str_dbg(fsp));
granted &= ~SMB2_LEASE_READ;
}
- for (i=0; i<d->num_share_modes; i++) {
- struct share_mode_entry *e = &d->share_modes[i];
- uint32_t e_lease_type;
-
- e_lease_type = get_lease_type(d, e);
-
- if ((granted & SMB2_LEASE_WRITE) &&
- !is_same_lease(fsp, d, e, lease) &&
- !share_mode_stale_pid(d, i)) {
- /*
- * Can grant only one writer
- */
- granted &= ~SMB2_LEASE_WRITE;
- }
-
- if ((e_lease_type & SMB2_LEASE_HANDLE) && !got_handle_lease &&
- !share_mode_stale_pid(d, i)) {
- got_handle_lease = true;
- }
-
- if ((e->op_type != LEASE_OPLOCK) && !got_oplock &&
- !share_mode_stale_pid(d, i)) {
- got_oplock = true;
- }
+ if (state.have_other_lease) {
+ /*
+ * Can grant only one writer
+ */
+ granted &= ~SMB2_LEASE_WRITE;
}
if ((granted & SMB2_LEASE_READ) && !(granted & SMB2_LEASE_WRITE)) {
}
if (oplock_request == LEASE_OPLOCK) {
- if (got_oplock) {
+ if (state.got_oplock) {
granted &= ~SMB2_LEASE_HANDLE;
}
return status;
}
- *lease = fsp->lease->lease;
- DEBUG(10, ("lease_state=%d\n", lease->lease_state));
+ DBG_DEBUG("lease_state=%d\n", fsp->lease->lease.lease_state);
} else {
- if (got_handle_lease) {
+ if (state.got_handle_lease) {
granted = SMB2_LEASE_NONE;
}
}
}
- ok = set_share_mode(
- lck,
- fsp,
- get_current_uid(fsp->conn),
- req ? req->mid : 0,
- fsp->oplock_type,
- share_access,
- access_mask);
- if (!ok) {
- if (fsp->oplock_type == LEASE_OPLOCK) {
- status = remove_lease_if_stale(
- lck->data,
- fsp_client_guid(fsp),
- &fsp->lease->lease.lease_key);
- if (!NT_STATUS_IS_OK(status)) {
- DBG_WARNING("remove_lease_if_stale "
- "failed: %s\n",
- nt_errstr(status));
- }
- }
- return NT_STATUS_NO_MEMORY;
+ if ((granted & SMB2_LEASE_READ) &&
+ ((lck->data->flags & SHARE_MODE_LEASE_READ) == 0)) {
+ lck->data->flags |= SHARE_MODE_LEASE_READ;
+ lck->data->modified = true;
}
- if (granted & SMB2_LEASE_READ) {
- lck->data->flags |= SHARE_MODE_HAS_READ_LEASE;
+ DBG_DEBUG("oplock type 0x%x on file %s\n",
+ fsp->oplock_type, fsp_str_dbg(fsp));
+
+ return NT_STATUS_OK;
+}
+
+static NTSTATUS handle_share_mode_lease(
+ files_struct *fsp,
+ struct share_mode_lock *lck,
+ uint32_t create_disposition,
+ uint32_t access_mask,
+ uint32_t share_access,
+ int oplock_request,
+ const struct smb2_lease *lease,
+ bool first_open_attempt)
+{
+ bool sharing_violation = false;
+ NTSTATUS status;
+
+ status = open_mode_check(
+ fsp->conn, lck, access_mask, share_access);
+ if (NT_STATUS_EQUAL(status, NT_STATUS_SHARING_VIOLATION)) {
+ sharing_violation = true;
+ status = NT_STATUS_OK; /* handled later */
+ }
+
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
+
+ if (oplock_request == INTERNAL_OPEN_ONLY) {
+ if (sharing_violation) {
+ DBG_DEBUG("Sharing violation for internal open\n");
+ return NT_STATUS_SHARING_VIOLATION;
+ }
+
+ /*
+ * Internal opens never do oplocks or leases. We don't
+ * need to go through delay_for_oplock().
+ */
+ fsp->oplock_type = NO_OPLOCK;
+
+ return NT_STATUS_OK;
}
- DEBUG(10,("grant_fsp_oplock_type: oplock type 0x%x on file %s\n",
- fsp->oplock_type, fsp_str_dbg(fsp)));
+ status = delay_for_oplock(
+ fsp,
+ oplock_request,
+ lease,
+ lck,
+ sharing_violation,
+ create_disposition,
+ first_open_attempt);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
return NT_STATUS_OK;
}
static void defer_open(struct share_mode_lock *lck,
struct timeval timeout,
struct smb_request *req,
- bool delayed_for_oplocks,
struct file_id id)
{
struct deferred_open_record *open_rec = NULL;
struct defer_open_state *watch_state;
struct tevent_req *watch_req;
struct timeval_buf tvbuf1, tvbuf2;
+ struct file_id_buf fbuf;
bool ok;
abs_timeout = timeval_sum(&req->request_time, &timeout);
DBG_DEBUG("request time [%s] timeout [%s] mid [%" PRIu64 "] "
- "delayed_for_oplocks [%s] file_id [%s]\n",
+ "file_id [%s]\n",
timeval_str_buf(&req->request_time, false, true, &tvbuf1),
timeval_str_buf(&abs_timeout, false, true, &tvbuf2),
req->mid,
- delayed_for_oplocks ? "yes" : "no",
- file_id_string_tos(&id));
+ file_id_str_buf(id, &fbuf));
open_rec = talloc_zero(NULL, struct deferred_open_record);
if (open_rec == NULL) {
DBG_DEBUG("defering mid %" PRIu64 "\n", req->mid);
- watch_req = dbwrap_watched_watch_send(watch_state,
- req->sconn->ev_ctx,
- lck->data->record,
- (struct server_id){0});
+ watch_req = share_mode_watch_send(
+ watch_state,
+ req->sconn->ev_ctx,
+ lck->data->id,
+ (struct server_id){0});
if (watch_req == NULL) {
exit_server("Could not watch share mode record");
}
NTSTATUS status;
bool ret;
- status = dbwrap_watched_watch_recv(req, NULL, NULL);
+ status = share_mode_watch_recv(req, NULL, NULL);
TALLOC_FREE(req);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(5, ("dbwrap_watched_watch_recv returned %s\n",
* Actually attempt the kernel oplock polling open.
*/
-static void kernel_oplock_poll_open_timer(struct tevent_context *ev,
- struct tevent_timer *te,
- struct timeval current_time,
- void *private_data)
+static void poll_open_fn(struct tevent_context *ev,
+ struct tevent_timer *te,
+ struct timeval current_time,
+ void *private_data)
{
struct deferred_open_record *open_rec = talloc_get_type_abort(
private_data, struct deferred_open_record);
bool ok;
+ TALLOC_FREE(open_rec->watch_req);
+
ok = schedule_deferred_open_message_smb(
open_rec->xconn, open_rec->mid);
if (!ok) {
exit_server("schedule_deferred_open_message_smb failed");
}
- DBG_DEBUG("kernel_oplock_poll_open_timer fired. Retrying open !\n");
+ DBG_DEBUG("timer fired. Retrying open !\n");
}
+static void poll_open_done(struct tevent_req *subreq);
+
/**
* Reschedule an open for 1 second from now, if not timed out.
**/
-static void setup_kernel_oplock_poll_open(struct smb_request *req,
- struct file_id id)
+static bool setup_poll_open(
+ struct smb_request *req,
+ struct share_mode_lock *lck,
+ struct file_id id,
+ struct timeval max_timeout,
+ struct timeval interval)
{
-
bool ok;
struct deferred_open_record *open_rec = NULL;
- /* Maximum wait time. */
- struct timeval timeout = timeval_set(OPLOCK_BREAK_TIMEOUT*2, 0);
+ struct timeval endtime, next_interval;
+ struct file_id_buf ftmp;
- if (request_timed_out(req, timeout)) {
- return;
+ if (request_timed_out(req, max_timeout)) {
+ return false;
}
open_rec = talloc_zero(NULL, struct deferred_open_record);
if (open_rec == NULL) {
DBG_WARNING("talloc failed\n");
- return;
+ return false;
}
open_rec->xconn = req->xconn;
open_rec->mid = req->mid;
- open_rec->te = tevent_add_timer(req->sconn->ev_ctx,
- open_rec,
- timeval_current_ofs(1, 0),
- kernel_oplock_poll_open_timer,
- open_rec);
+ /*
+ * Make sure open_rec->te does not come later than the
+ * request's maximum endtime.
+ */
+
+ endtime = timeval_sum(&req->request_time, &max_timeout);
+ next_interval = timeval_current_ofs(interval.tv_sec, interval.tv_usec);
+ next_interval = timeval_min(&endtime, &next_interval);
+
+ open_rec->te = tevent_add_timer(
+ req->sconn->ev_ctx,
+ open_rec,
+ next_interval,
+ poll_open_fn,
+ open_rec);
if (open_rec->te == NULL) {
DBG_WARNING("tevent_add_timer failed\n");
TALLOC_FREE(open_rec);
- return;
+ return false;
+ }
+
+ if (lck != NULL) {
+ open_rec->watch_req = share_mode_watch_send(
+ open_rec,
+ req->sconn->ev_ctx,
+ lck->data->id,
+ (struct server_id) {0});
+ if (open_rec->watch_req == NULL) {
+ DBG_WARNING("share_mode_watch_send failed\n");
+ TALLOC_FREE(open_rec);
+ return false;
+ }
+ tevent_req_set_callback(
+ open_rec->watch_req, poll_open_done, open_rec);
+ }
+
+ ok = push_deferred_open_message_smb(req, max_timeout, id, open_rec);
+ if (!ok) {
+ DBG_WARNING("push_deferred_open_message_smb failed\n");
+ TALLOC_FREE(open_rec);
+ return false;
+ }
+
+ DBG_DEBUG("poll request time [%s] mid [%" PRIu64 "] file_id [%s]\n",
+ timeval_string(talloc_tos(), &req->request_time, false),
+ req->mid,
+ file_id_str_buf(id, &ftmp));
+
+ return true;
+}
+
+static void poll_open_done(struct tevent_req *subreq)
+{
+ struct deferred_open_record *open_rec = tevent_req_callback_data(
+ subreq, struct deferred_open_record);
+ NTSTATUS status;
+ bool ok;
+
+ status = share_mode_watch_recv(subreq, NULL, NULL);
+ TALLOC_FREE(subreq);
+	DBG_DEBUG("share_mode_watch_recv returned %s\n",
+		  nt_errstr(status));
+
+ ok = schedule_deferred_open_message_smb(
+ open_rec->xconn, open_rec->mid);
+ if (!ok) {
+ exit_server("schedule_deferred_open_message_smb failed");
}
+}
- ok = push_deferred_open_message_smb(req, timeout, id, open_rec);
- if (!ok) {
- DBG_WARNING("push_deferred_open_message_smb failed\n");
- TALLOC_FREE(open_rec);
- return;
+bool defer_smb1_sharing_violation(struct smb_request *req)
+{
+ bool ok;
+ int timeout_usecs;
+
+ if (!lp_defer_sharing_violations()) {
+ return false;
}
- DBG_DEBUG("poll request time [%s] mid [%" PRIu64 "] file_id [%s]\n",
- timeval_string(talloc_tos(), &req->request_time, false),
- req->mid,
- file_id_string_tos(&id));
+ /*
+ * Try every 200msec up to (by default) one second. To be
+ * precise, according to behaviour note <247> in [MS-CIFS],
+ * the server tries 5 times. But up to one second should be
+ * close enough.
+ */
+
+ timeout_usecs = lp_parm_int(
+ SNUM(req->conn),
+ "smbd",
+ "sharedelay",
+ SHARING_VIOLATION_USEC_WAIT);
+
+ ok = setup_poll_open(
+ req,
+ NULL,
+ (struct file_id) {0},
+ (struct timeval) { .tv_usec = timeout_usecs },
+ (struct timeval) { .tv_usec = 200000 });
+ return ok;
}
/****************************************************************************
return;
}
- defer_open(lck, timeout, req, true, id);
+ defer_open(lck, timeout, req, id);
}
/****************************************************************************
uint32_t create_options, /* options such as delete on close. */
uint32_t new_dos_attributes, /* attributes used for new file. */
int oplock_request, /* internal Samba oplock codes. */
- struct smb2_lease *lease,
+ const struct smb2_lease *lease,
/* Information (FILE_EXISTS etc.) */
uint32_t private_flags, /* Samba specific flags. */
int *pinfo,
SMB_STRUCT_STAT saved_stat = smb_fname->st;
struct timespec old_write_time;
struct file_id id;
+ bool setup_poll = false;
+ bool ok;
if (conn->printer) {
/*
if (req) {
struct deferred_open_record *open_rec;
- struct timeval request_time;
- if (get_deferred_open_message_state(req,
- &request_time,
- &open_rec)) {
- /* Remember the absolute time of the original
- request with this mid. We'll use it later to
- see if this has timed out. */
-
- req->request_time = request_time;
+ if (get_deferred_open_message_state(req, NULL, &open_rec)) {
/* If it was an async create retry, the file
didn't exist. */
open_access_mask |= FILE_WRITE_DATA; /* This will cause oplock breaks. */
}
+ if (file_existed) {
+ /*
+ * stat opens on existing files don't get oplocks.
+ * They can get leases.
+ *
+ * Note that we check for stat open on the *open_access_mask*,
+ * i.e. the access mask we actually used to do the open,
+ * not the one the client asked for (which is in
+ * fsp->access_mask). This is due to the fact that
+ * FILE_OVERWRITE and FILE_OVERWRITE_IF add in O_TRUNC,
+ * which adds FILE_WRITE_DATA to open_access_mask.
+ */
+ if (is_stat_open(open_access_mask) && lease == NULL) {
+ oplock_request = NO_OPLOCK;
+ }
+ }
+
DEBUG(10, ("open_file_ntcreate: fname=%s, after mapping "
"access_mask=0x%x\n", smb_fname_str_dbg(smb_fname),
access_mask));
return NT_STATUS_ACCESS_DENIED;
}
- fsp->file_id = vfs_file_id_from_sbuf(conn, &smb_fname->st);
+ if (VALID_STAT(smb_fname->st)) {
+ /*
+ * Only try and create a file id before open
+ * for an existing file. For a file being created
+ * this won't do anything useful until the file
+ * exists and has a valid stat struct.
+ */
+ fsp->file_id = vfs_file_id_from_sbuf(conn, &smb_fname->st);
+ }
fsp->fh->private_options = private_flags;
fsp->access_mask = open_access_mask; /* We change this to the
* requested access_mask after
open_access_mask, &new_file_created);
if (NT_STATUS_EQUAL(fsp_open, NT_STATUS_NETWORK_BUSY)) {
- bool delay;
-
+ if (file_existed && S_ISFIFO(fsp->fsp_name->st.st_ex_mode)) {
+ DEBUG(10, ("FIFO busy\n"));
+ return NT_STATUS_NETWORK_BUSY;
+ }
+ if (req == NULL) {
+ DEBUG(10, ("Internal open busy\n"));
+ return NT_STATUS_NETWORK_BUSY;
+ }
/*
* This handles the kernel oplock case:
*
* "Samba locking.tdb oplocks" are handled below after acquiring
* the sharemode lock with get_share_mode_lock().
*/
- if (file_existed && S_ISFIFO(fsp->fsp_name->st.st_ex_mode)) {
- DEBUG(10, ("FIFO busy\n"));
- return NT_STATUS_NETWORK_BUSY;
- }
- if (req == NULL) {
- DEBUG(10, ("Internal open busy\n"));
- return NT_STATUS_NETWORK_BUSY;
- }
+ setup_poll = true;
+ }
+
+ if (NT_STATUS_EQUAL(fsp_open, NT_STATUS_RETRY)) {
+ /*
+ * EINTR from the open(2) syscall. Just setup a retry
+ * in a bit. We can't use the sys_write() tight retry
+ * loop here, as we might have to actually deal with
+ * lease-break signals to avoid a deadlock.
+ */
+ setup_poll = true;
+ }
+ if (setup_poll) {
/*
* From here on we assume this is an oplock break triggered
*/
lck = get_existing_share_mode_lock(talloc_tos(), fsp->file_id);
- if (lck == NULL) {
- /*
- * No oplock from Samba around. Set up a poll every 1
- * second to retry a non-blocking open until the time
- * expires.
- */
- setup_kernel_oplock_poll_open(req, fsp->file_id);
- DBG_DEBUG("No Samba oplock around after EWOULDBLOCK. "
- "Retrying with poll\n");
- return NT_STATUS_SHARING_VIOLATION;
- }
- if (!validate_oplock_types(lck)) {
+ if ((lck != NULL) && !validate_oplock_types(lck)) {
smb_panic("validate_oplock_types failed");
}
- delay = delay_for_oplock(fsp, 0, lease, lck, false,
- create_disposition,
- first_open_attempt);
- if (delay) {
- schedule_defer_open(lck, fsp->file_id, req);
- TALLOC_FREE(lck);
- DEBUG(10, ("Sent oplock break request to kernel "
- "oplock holder\n"));
- return NT_STATUS_SHARING_VIOLATION;
- }
-
/*
- * No oplock from Samba around. Set up a poll every 1
- * second to retry a non-blocking open until the time
- * expires.
+	 * Retry once a second. If there's a share_mode_lock
+	 * around, also watch it: if it is smbd holding the
+	 * kernel oplock, the watch tells us quickly once the
+	 * oplock got removed.
*/
- setup_kernel_oplock_poll_open(req, fsp->file_id);
+
+ setup_poll_open(
+ req,
+ lck,
+ fsp->file_id,
+ timeval_set(OPLOCK_BREAK_TIMEOUT*2, 0),
+ timeval_set(1, 0));
TALLOC_FREE(lck);
- DBG_DEBUG("No Samba oplock around after EWOULDBLOCK. "
- "Retrying with poll\n");
+
return NT_STATUS_SHARING_VIOLATION;
}
if (!NT_STATUS_IS_OK(fsp_open)) {
- if (NT_STATUS_EQUAL(fsp_open, NT_STATUS_RETRY)) {
+ bool wait_for_aio = NT_STATUS_EQUAL(
+ fsp_open, NT_STATUS_MORE_PROCESSING_REQUIRED);
+ if (wait_for_aio) {
schedule_async_open(req);
}
return fsp_open;
return NT_STATUS_DELETE_PENDING;
}
- status = open_mode_check(conn, lck,
- access_mask, share_access);
-
- if (NT_STATUS_EQUAL(status, NT_STATUS_SHARING_VIOLATION) ||
- (lck->data->num_share_modes > 0)) {
- /*
- * This comes from ancient times out of open_mode_check. I
- * have no clue whether this is still necessary. I can't think
- * of a case where this would actually matter further down in
- * this function. I leave it here for further investigation
- * :-)
- */
- file_existed = true;
- }
+ status = handle_share_mode_lease(
+ fsp,
+ lck,
+ create_disposition,
+ access_mask,
+ share_access,
+ oplock_request,
+ lease,
+ first_open_attempt);
- if (req != NULL) {
- /*
- * Handle oplocks, deferring the request if delay_for_oplock()
- * triggered a break message and we have to wait for the break
- * response.
- */
- bool delay;
- bool sharing_violation = NT_STATUS_EQUAL(
- status, NT_STATUS_SHARING_VIOLATION);
-
- delay = delay_for_oplock(fsp, oplock_request, lease, lck,
- sharing_violation,
- create_disposition,
- first_open_attempt);
- if (delay) {
- schedule_defer_open(lck, fsp->file_id, req);
- TALLOC_FREE(lck);
- fd_close(fsp);
- return NT_STATUS_SHARING_VIOLATION;
- }
+ if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
+ schedule_defer_open(lck, fsp->file_id, req);
+ TALLOC_FREE(lck);
+ fd_close(fsp);
+ return NT_STATUS_SHARING_VIOLATION;
}
if (!NT_STATUS_IS_OK(status)) {
+ TALLOC_FREE(lck);
+ fd_close(fsp);
+ return status;
+ }
- SMB_ASSERT(NT_STATUS_EQUAL(status, NT_STATUS_SHARING_VIOLATION));
+ {
+ struct share_mode_data *d = lck->data;
+ uint16_t new_flags = share_mode_flags_restrict(
+ d->flags, access_mask, share_access, UINT32_MAX);
- /*
- * If we're returning a share violation, ensure we
- * cope with the braindead 1 second delay (SMB1 only).
- */
+ if (new_flags != d->flags) {
+ d->flags = new_flags;
+ d->modified = true;
+ }
+ }
- if (!(oplock_request & INTERNAL_OPEN_ONLY) &&
- !conn->sconn->using_smb2 &&
- lp_defer_sharing_violations()) {
- struct timeval timeout;
- int timeout_usecs;
-
- /* this is a hack to speed up torture tests
- in 'make test' */
- timeout_usecs = lp_parm_int(SNUM(conn),
- "smbd","sharedelay",
- SHARING_VIOLATION_USEC_WAIT);
-
- /* This is a relative time, added to the absolute
- request_time value to get the absolute timeout time.
- Note that if this is the second or greater time we enter
- this codepath for this particular request mid then
- request_time is left as the absolute time of the *first*
- time this request mid was processed. This is what allows
- the request to eventually time out. */
-
- timeout = timeval_set(0, timeout_usecs);
-
- if (!request_timed_out(req, timeout)) {
- defer_open(lck, timeout, req, false, id);
+ ok = set_share_mode(
+ lck,
+ fsp,
+ get_current_uid(fsp->conn),
+ req ? req->mid : 0,
+ fsp->oplock_type,
+ share_access,
+ access_mask);
+ if (!ok) {
+ if (fsp->oplock_type == LEASE_OPLOCK) {
+ status = remove_lease_if_stale(
+ lck,
+ fsp_client_guid(fsp),
+ &fsp->lease->lease.lease_key);
+ if (!NT_STATUS_IS_OK(status)) {
+ DBG_WARNING("remove_lease_if_stale "
+ "failed: %s\n",
+ nt_errstr(status));
}
}
-
- TALLOC_FREE(lck);
- fd_close(fsp);
-
- return NT_STATUS_SHARING_VIOLATION;
+ return NT_STATUS_NO_MEMORY;
}
/* Should we atomically (to the client at least) truncate ? */
if ((!new_file_created) &&
(flags2 & O_TRUNC) &&
- (!S_ISFIFO(fsp->fsp_name->st.st_ex_mode))) {
+ (S_ISREG(fsp->fsp_name->st.st_ex_mode))) {
int ret;
ret = SMB_VFS_FTRUNCATE(fsp, 0);
if (ret != 0) {
status = map_nt_error_from_unix(errno);
+ del_share_mode(lck, fsp);
TALLOC_FREE(lck);
fd_close(fsp);
return status;
!is_ntfs_stream_smb_fname(smb_fname)) {
status = delete_all_streams(conn, smb_fname);
if (!NT_STATUS_IS_OK(status)) {
+ del_share_mode(lck, fsp);
TALLOC_FREE(lck);
fd_close(fsp);
return status;
ret_flock = SMB_VFS_KERNEL_FLOCK(fsp, share_access, access_mask);
if(ret_flock == -1 ){
+ del_share_mode(lck, fsp);
TALLOC_FREE(lck);
fd_close(fsp);
return NT_STATUS_SHARING_VIOLATION;
}
- fsp->kernel_share_modes_taken = true;
+ fsp->fsp_flags.kernel_share_modes_taken = true;
}
/*
fsp->access_mask = access_mask | FILE_READ_ATTRIBUTES;
}
- if (file_existed) {
- /*
- * stat opens on existing files don't get oplocks.
- * They can get leases.
- *
- * Note that we check for stat open on the *open_access_mask*,
- * i.e. the access mask we actually used to do the open,
- * not the one the client asked for (which is in
- * fsp->access_mask). This is due to the fact that
- * FILE_OVERWRITE and FILE_OVERWRITE_IF add in O_TRUNC,
- * which adds FILE_WRITE_DATA to open_access_mask.
- */
- if (is_stat_open(open_access_mask) && lease == NULL) {
- oplock_request = NO_OPLOCK;
- }
- }
-
if (new_file_created) {
info = FILE_WAS_CREATED;
} else {
*pinfo = info;
}
- /*
- * Setup the oplock info in both the shared memory and
- * file structs.
- */
- status = grant_fsp_oplock_type(
- req,
- fsp,
- lck,
- oplock_request,
- lease,
- share_access,
- fsp->access_mask);
- if (!NT_STATUS_IS_OK(status)) {
- TALLOC_FREE(lck);
- fd_close(fsp);
- return status;
- }
-
/* Handle strange delete on close create semantics. */
if (create_options & FILE_DELETE_ON_CLOSE) {
if (!new_file_created) {
fsp->initial_delete_on_close = True;
}
- if (info == FILE_WAS_CREATED) {
+ /*
+ * If we created a file and it's not a stream, this is the point where
+	 * we set the itime (aka invented time) that gets stored in the DOS
+ * attribute xattr. The value is going to be either what the filesystem
+ * provided or a copy of the creation date.
+ *
+ * Either way, we turn the itime into a File-ID, unless the filesystem
+ * provided one (unlikely).
+ */
+ if (info == FILE_WAS_CREATED && !is_named_stream(smb_fname)) {
smb_fname->st.st_ex_iflags &= ~ST_EX_IFLAG_CALCULATED_ITIME;
+
+ if (lp_store_dos_attributes(SNUM(conn)) &&
+ smb_fname->st.st_ex_iflags & ST_EX_IFLAG_CALCULATED_FILE_ID)
+ {
+ uint64_t file_id;
+
+ file_id = make_file_id_from_itime(&smb_fname->st);
+ update_stat_ex_file_id(&smb_fname->st, file_id);
+ }
}
if (info != FILE_WAS_OPENED) {
/* Overwritten files should be initially set as archive */
if ((info == FILE_WAS_OVERWRITTEN && lp_map_archive(SNUM(conn))) ||
lp_store_dos_attributes(SNUM(conn))) {
+ (void)dos_mode(conn, smb_fname);
if (!posix_open) {
if (file_set_dosmode(conn, smb_fname,
new_dos_attributes | FILE_ATTRIBUTE_ARCHIVE,
/* POSIX opens are sparse by default. */
fsp->is_sparse = true;
} else {
- fsp->is_sparse = (file_existed &&
- (existing_dos_attributes & FILE_ATTRIBUTE_SPARSE));
+ fsp->is_sparse =
+ (existing_dos_attributes & FILE_ATTRIBUTE_SPARSE);
}
/*
*/
struct timespec write_time = get_share_mode_write_time(lck);
- if (!null_timespec(write_time)) {
+ if (!is_omit_timespec(&write_time)) {
update_stat_ex_mtime(&fsp->fsp_name->st, write_time);
}
}
struct smb_filename *smb_dname,
uint32_t file_attributes)
{
+ const struct loadparm_substitution *lp_sub =
+ loadparm_s3_global_substitution();
mode_t mode;
char *parent_dir = NULL;
NTSTATUS status;
bool posix_open = false;
bool need_re_stat = false;
uint32_t access_mask = SEC_DIR_ADD_SUBDIR;
+ int ret;
if (!CAN_WRITE(conn) || (access_mask & ~(conn->share_access))) {
DEBUG(5,("mkdir_internal: failing share access "
- "%s\n", lp_servicename(talloc_tos(), SNUM(conn))));
+ "%s\n", lp_servicename(talloc_tos(), lp_sub, SNUM(conn))));
return NT_STATUS_ACCESS_DENIED;
}
return status;
}
- if (SMB_VFS_MKDIR(conn, smb_dname, mode) != 0) {
+ ret = SMB_VFS_MKDIRAT(conn,
+ conn->cwd_fsp,
+ smb_dname,
+ mode);
+ if (ret != 0) {
return map_nt_error_from_unix(errno);
}
smb_dname->st.st_ex_iflags &= ~ST_EX_IFLAG_CALCULATED_ITIME;
if (lp_store_dos_attributes(SNUM(conn))) {
+ if (smb_dname->st.st_ex_iflags & ST_EX_IFLAG_CALCULATED_FILE_ID)
+ {
+ uint64_t file_id;
+
+ file_id = make_file_id_from_itime(&smb_dname->st);
+ update_stat_ex_file_id(&smb_dname->st, file_id);
+ }
+
if (!posix_open) {
file_set_dosmode(conn, smb_dname,
file_attributes | FILE_ATTRIBUTE_DIRECTORY,
files_struct **result)
{
files_struct *fsp = NULL;
- bool dir_existed = VALID_STAT(smb_dname->st) ? True : False;
+ bool dir_existed = VALID_STAT(smb_dname->st);
struct share_mode_lock *lck = NULL;
NTSTATUS status;
struct timespec mtimespec;
file_attributes |= FILE_ATTRIBUTE_DIRECTORY;
}
- DEBUG(5,("open_directory: opening directory %s, access_mask = 0x%x, "
- "share_access = 0x%x create_options = 0x%x, "
- "create_disposition = 0x%x, file_attributes = 0x%x\n",
+ DBG_INFO("opening directory %s, access_mask = 0x%"PRIx32", "
+ "share_access = 0x%"PRIx32" create_options = 0x%"PRIx32", "
+ "create_disposition = 0x%"PRIx32", "
+ "file_attributes = 0x%"PRIx32"\n",
smb_fname_str_dbg(smb_dname),
- (unsigned int)access_mask,
- (unsigned int)share_access,
- (unsigned int)create_options,
- (unsigned int)create_disposition,
- (unsigned int)file_attributes));
+ access_mask,
+ share_access,
+ create_options,
+ create_disposition,
+ file_attributes);
status = smbd_calculate_access_mask(conn, smb_dname, false,
access_mask, &access_mask);
fsp->file_id = vfs_file_id_from_sbuf(conn, &smb_dname->st);
fsp->vuid = req ? req->vuid : UID_FIELD_INVALID;
fsp->file_pid = req ? req->smbpid : 0;
- fsp->can_lock = False;
- fsp->can_read = False;
- fsp->can_write = False;
+ fsp->fsp_flags.can_lock = false;
+ fsp->fsp_flags.can_read = false;
+ fsp->fsp_flags.can_write = false;
fsp->fh->private_options = 0;
/*
/* Don't store old timestamps for directory
handles in the internal database. We don't
update them in there if new objects
- are creaded in the directory. Currently
+ are created in the directory. Currently
we only update timestamps on file writes.
See bug #9870.
*/
- ZERO_STRUCT(mtimespec);
+ mtimespec = make_omit_timespec();
#ifdef O_DIRECTORY
status = fd_open(conn, fsp, O_RDONLY|O_DIRECTORY, 0);
return status;
}
+ {
+ struct share_mode_data *d = lck->data;
+ uint16_t new_flags = share_mode_flags_restrict(
+ d->flags, access_mask, share_access, UINT32_MAX);
+
+ if (new_flags != d->flags) {
+ d->flags = new_flags;
+ d->modified = true;
+ }
+ }
+
ok = set_share_mode(
lck,
fsp,
*/
struct timespec write_time = get_share_mode_write_time(lck);
- if (!null_timespec(write_time)) {
+ if (!is_omit_timespec(&write_time)) {
update_stat_ex_mtime(&fsp->fsp_name->st, write_time);
}
}
return;
}
+struct lease_match_break_state {
+ struct messaging_context *msg_ctx;
+ const struct smb2_lease_key *lease_key;
+ struct file_id id;
+
+ bool found_lease;
+ uint16_t version;
+ uint16_t epoch;
+};
+
+static bool lease_match_break_fn(
+ struct share_mode_entry *e,
+ void *private_data)
+{
+ struct lease_match_break_state *state = private_data;
+ bool stale, equal;
+ uint32_t e_lease_type;
+ NTSTATUS status;
+
+ stale = share_entry_stale_pid(e);
+ if (stale) {
+ return false;
+ }
+
+ equal = smb2_lease_key_equal(&e->lease_key, state->lease_key);
+ if (!equal) {
+ return false;
+ }
+
+ status = leases_db_get(
+ &e->client_guid,
+ &e->lease_key,
+ &state->id,
+ NULL, /* current_state */
+ NULL, /* breaking */
+ NULL, /* breaking_to_requested */
+ NULL, /* breaking_to_required */
+ &state->version, /* lease_version */
+ &state->epoch); /* epoch */
+ if (NT_STATUS_IS_OK(status)) {
+ state->found_lease = true;
+ } else {
+ DBG_WARNING("Could not find version/epoch: %s\n",
+ nt_errstr(status));
+ }
+
+ e_lease_type = get_lease_type(e, state->id);
+ if (e_lease_type == SMB2_LEASE_NONE) {
+ return false;
+ }
+ send_break_message(state->msg_ctx, &state->id, e, SMB2_LEASE_NONE);
+
+ /*
+ * Windows 7 and 8 lease clients are broken in that they will
+ * not respond to lease break requests whilst waiting for an
+ * outstanding open request on that lease handle on the same
+ * TCP connection, due to holding an internal inode lock.
+ *
+ * This means we can't reschedule ourselves here, but must
+ * return from the create.
+ *
+ * Work around:
+ *
+ * Send the breaks and then return SMB2_LEASE_NONE in the
+ * lease handle to cause them to acknowledge the lease
+ * break. Consultation with Microsoft engineering confirmed
+ * this approach is safe.
+ */
+
+ return false;
+}
+
static NTSTATUS lease_match(connection_struct *conn,
struct smb_request *req,
- struct smb2_lease_key *lease_key,
+ const struct smb2_lease_key *lease_key,
const char *servicepath,
const struct smb_filename *fname,
uint16_t *p_version,
/* We have to break all existing leases. */
for (i = 0; i < state.num_file_ids; i++) {
+ struct lease_match_break_state break_state = {
+ .msg_ctx = conn->sconn->msg_ctx,
+ .lease_key = lease_key,
+ };
struct share_mode_lock *lck;
- struct share_mode_data *d;
- struct share_mode_entry *lease_entry = NULL;
- uint32_t j;
+ bool ok;
if (file_id_equal(&state.ids[i], &state.id)) {
/* Don't need to break our own file. */
continue;
}
- lck = get_existing_share_mode_lock(talloc_tos(), state.ids[i]);
+ break_state.id = state.ids[i];
+
+ lck = get_existing_share_mode_lock(
+ talloc_tos(), break_state.id);
if (lck == NULL) {
/* Race condition - file already closed. */
continue;
}
- d = lck->data;
- for (j=0; j<d->num_share_modes; j++) {
- struct share_mode_entry *e = &d->share_modes[j];
- uint32_t e_lease_type = get_lease_type(d, e);
-
- if (share_mode_stale_pid(d, j)) {
- continue;
- }
-
- if (e->op_type == LEASE_OPLOCK) {
- if (!smb2_lease_key_equal(&e->lease_key,
- lease_key)) {
- continue;
- }
- lease_entry = e;
- }
-
- if (e_lease_type == SMB2_LEASE_NONE) {
- continue;
- }
-
- send_break_message(conn->sconn->msg_ctx, &d->id, e,
- SMB2_LEASE_NONE);
-
- /*
- * Windows 7 and 8 lease clients
- * are broken in that they will not
- * respond to lease break requests
- * whilst waiting for an outstanding
- * open request on that lease handle
- * on the same TCP connection, due
- * to holding an internal inode lock.
- *
- * This means we can't reschedule
- * ourselves here, but must return
- * from the create.
- *
- * Work around:
- *
- * Send the breaks and then return
- * SMB2_LEASE_NONE in the lease handle
- * to cause them to acknowledge the
- * lease break. Consultation with
- * Microsoft engineering confirmed
- * this approach is safe.
- */
-
- }
- if (lease_entry != NULL) {
- status = leases_db_get(
- &lease_entry->client_guid,
- &lease_entry->lease_key,
- &d->id,
- NULL, /* current_state */
- NULL, /* breaking */
- NULL, /* breaking_to_requested */
- NULL, /* breaking_to_required */
- p_version, /* lease_version */
- p_epoch); /* epoch */
- if (!NT_STATUS_IS_OK(status)) {
- DBG_WARNING("Could not find version/epoch: "
- "%s\n",
- nt_errstr(status));
- }
+ ok = share_mode_forall_leases(
+ lck, lease_match_break_fn, &break_state);
+ if (!ok) {
+ DBG_DEBUG("share_mode_forall_leases failed\n");
+ continue;
}
TALLOC_FREE(lck);
+
+ if (break_state.found_lease) {
+ *p_version = break_state.version;
+ *p_epoch = break_state.epoch;
+ }
}
/*
* Ensure we don't grant anything more so we
uint32_t create_options,
uint32_t file_attributes,
uint32_t oplock_request,
- struct smb2_lease *lease,
+ const struct smb2_lease *lease,
uint64_t allocation_size,
uint32_t private_flags,
struct security_descriptor *sd,
files_struct **result,
int *pinfo)
{
+ struct smb2_lease none_lease;
int info = FILE_WAS_OPENED;
files_struct *base_fsp = NULL;
files_struct *fsp = NULL;
&epoch);
if (NT_STATUS_EQUAL(status, NT_STATUS_OPLOCK_NOT_GRANTED)) {
/* Dynamic share file. No leases and update epoch... */
- lease->lease_state = SMB2_LEASE_NONE;
- lease->lease_epoch = epoch;
- lease->lease_version = version;
+ none_lease = *lease;
+ none_lease.lease_state = SMB2_LEASE_NONE;
+ none_lease.lease_epoch = epoch;
+ none_lease.lease_version = version;
+ lease = &none_lease;
} else if (!NT_STATUS_IS_OK(status)) {
goto fail;
}
uint32_t create_options,
uint32_t file_attributes,
uint32_t oplock_request,
- struct smb2_lease *lease,
+ const struct smb2_lease *lease,
uint64_t allocation_size,
uint32_t private_flags,
struct security_descriptor *sd,
files_struct *fsp = NULL;
NTSTATUS status;
bool stream_name = false;
+ struct smb2_create_blob *posx = NULL;
DBG_DEBUG("create_file: access_mask = 0x%x "
"file_attributes = 0x%x, share_access = 0x%x, "
(unsigned int)root_dir_fid,
ea_list, sd, smb_fname_str_dbg(smb_fname));
+ if (req != NULL) {
+ /*
+ * Remember the absolute time of the original request
+ * with this mid. We'll use it later to see if this
+ * has timed out.
+ */
+ get_deferred_open_message_state(req, &req->request_time, NULL);
+ }
+
/*
* Calculate the filename from the root_dir_if if necessary.
*/
}
}
+ posx = smb2_create_blob_find(
+ in_context_blobs, SMB2_CREATE_TAG_POSIX);
+ if (posx != NULL) {
+ uint32_t wire_mode_bits = 0;
+ mode_t mode_bits = 0;
+ SMB_STRUCT_STAT sbuf = { 0 };
+ enum perm_type ptype =
+ (create_options & FILE_DIRECTORY_FILE) ?
+ PERM_NEW_DIR : PERM_NEW_FILE;
+
+ if (posx->data.length != 4) {
+ status = NT_STATUS_INVALID_PARAMETER;
+ goto fail;
+ }
+
+ wire_mode_bits = IVAL(posx->data.data, 0);
+ status = unix_perms_from_wire(
+ conn, &sbuf, wire_mode_bits, ptype, &mode_bits);
+ if (!NT_STATUS_IS_OK(status)) {
+ goto fail;
+ }
+ /*
+ * Remove type info from mode, leaving only the
+ * permissions and setuid/gid bits.
+ */
+ mode_bits &= ~S_IFMT;
+
+ file_attributes = (FILE_FLAG_POSIX_SEMANTICS | mode_bits);
+ }
+
status = create_file_unixpath(
conn, req, smb_fname, access_mask, share_access,
create_disposition, create_options, file_attributes,