static NTSTATUS open_mode_check(connection_struct *conn,
struct share_mode_lock *lck,
uint32 access_mask,
- uint32 share_access,
- bool *file_existed)
+ uint32 share_access)
{
int i;
continue;
}
- *file_existed = true;
-
return NT_STATUS_SHARING_VIOLATION;
}
}
- if (lck->data->num_share_modes != 0) {
- *file_existed = true;
- }
-
return NT_STATUS_OK;
}
* our client.
*/
-static NTSTATUS send_break_message(files_struct *fsp,
- struct share_mode_entry *exclusive,
- uint64_t mid,
- int oplock_request)
+static NTSTATUS send_break_message(struct messaging_context *msg_ctx,
+ const struct share_mode_entry *exclusive,
+ uint16_t break_to)
{
NTSTATUS status;
char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE];
DEBUG(10, ("Sending break request to PID %s\n",
procid_str_static(&exclusive->pid)));
- exclusive->op_mid = mid;
/* Create the message. */
share_mode_entry_to_message(msg, exclusive);
- status = messaging_send_buf(fsp->conn->sconn->msg_ctx, exclusive->pid,
+ /* Overload entry->op_type */
+ SSVAL(msg,OP_BREAK_MSG_OP_TYPE_OFFSET, break_to);
+
+ status = messaging_send_buf(msg_ctx, exclusive->pid,
MSG_SMB_BREAK_REQUEST,
(uint8 *)msg, sizeof(msg));
if (!NT_STATUS_IS_OK(status)) {
}
/*
- * Return share_mode_entry pointers for :
- * 1). Batch oplock entry.
- * 2). Batch or exclusive oplock entry (may be identical to #1).
- * bool have_level2_oplock
- * bool have_no_oplock.
* Do internal consistency checks on the share mode for a file.
*/
-static void find_oplock_types(files_struct *fsp,
- int oplock_request,
- const struct share_mode_lock *lck,
- struct share_mode_entry **pp_batch,
- struct share_mode_entry **pp_ex_or_batch,
- bool *got_level2,
- bool *got_no_oplock)
+static bool validate_oplock_types(struct share_mode_lock *lck)
{
struct share_mode_data *d = lck->data;
- int i;
-
- *pp_batch = NULL;
- *pp_ex_or_batch = NULL;
- *got_level2 = false;
- *got_no_oplock = false;
-
- /* Ignore stat or internal opens, as is done in
- delay_for_batch_oplocks() and
- delay_for_exclusive_oplocks().
- */
- if ((oplock_request & INTERNAL_OPEN_ONLY) || is_stat_open(fsp->access_mask)) {
- return;
- }
+ bool batch = false;
+ bool ex_or_batch = false;
+ bool level2 = false;
+ bool no_oplock = false;
+ uint32_t num_non_stat_opens = 0;
+ uint32_t i;
for (i=0; i<d->num_share_modes; i++) {
struct share_mode_entry *e = &d->share_modes[i];
continue;
}
+ num_non_stat_opens += 1;
+
if (BATCH_OPLOCK_TYPE(e->op_type)) {
/* batch - can only be one. */
if (share_mode_stale_pid(d, i)) {
DEBUG(10, ("Found stale batch oplock\n"));
continue;
}
- if (*pp_ex_or_batch || *pp_batch || *got_level2 || *got_no_oplock) {
- smb_panic("Bad batch oplock entry.");
+ if (ex_or_batch || batch || level2 || no_oplock) {
+ DEBUG(0, ("Bad batch oplock entry %u.",
+ (unsigned)i));
+ return false;
}
- *pp_batch = e;
+ batch = true;
}
if (EXCLUSIVE_OPLOCK_TYPE(e->op_type)) {
continue;
}
/* Exclusive or batch - can only be one. */
- if (*pp_ex_or_batch || *got_level2 || *got_no_oplock) {
- smb_panic("Bad exclusive or batch oplock entry.");
+ if (ex_or_batch || level2 || no_oplock) {
+ DEBUG(0, ("Bad exclusive or batch oplock "
+ "entry %u.", (unsigned)i));
+ return false;
}
- *pp_ex_or_batch = e;
+ ex_or_batch = true;
}
if (LEVEL_II_OPLOCK_TYPE(e->op_type)) {
- if (*pp_batch || *pp_ex_or_batch) {
+ if (batch || ex_or_batch) {
if (share_mode_stale_pid(d, i)) {
DEBUG(10, ("Found stale LevelII "
"oplock\n"));
continue;
}
- smb_panic("Bad levelII oplock entry.");
+ DEBUG(0, ("Bad levelII oplock entry %u.",
+ (unsigned)i));
+ return false;
}
- *got_level2 = true;
+ level2 = true;
}
if (e->op_type == NO_OPLOCK) {
- if (*pp_batch || *pp_ex_or_batch) {
+ if (batch || ex_or_batch) {
if (share_mode_stale_pid(d, i)) {
DEBUG(10, ("Found stale NO_OPLOCK "
"entry\n"));
continue;
}
- smb_panic("Bad no oplock entry.");
+ DEBUG(0, ("Bad no oplock entry %u.",
+ (unsigned)i));
+ return false;
}
- *got_no_oplock = true;
+ no_oplock = true;
}
}
+
+ remove_stale_share_mode_entries(d);
+
+ if ((batch || ex_or_batch) && (num_non_stat_opens != 1)) {
+ DEBUG(1, ("got batch (%d) or ex (%d) non-exclusively (%d)\n",
+ (int)batch, (int)ex_or_batch,
+ (int)d->num_share_modes));
+ return false;
+ }
+
+ return true;
}
-static bool delay_for_batch_oplocks(files_struct *fsp,
- uint64_t mid,
- int oplock_request,
- struct share_mode_entry *batch_entry)
+static bool delay_for_oplock(files_struct *fsp,
+ int oplock_request,
+ struct share_mode_lock *lck,
+ bool have_sharing_violation,
+ uint32_t create_disposition)
{
+ struct share_mode_data *d = lck->data;
+ struct share_mode_entry *entry;
+ uint32_t num_non_stat_opens = 0;
+ uint32_t i;
+ uint16_t break_to;
+
if ((oplock_request & INTERNAL_OPEN_ONLY) || is_stat_open(fsp->access_mask)) {
return false;
}
- if (batch_entry == NULL) {
+ for (i=0; i<d->num_share_modes; i++) {
+ struct share_mode_entry *e = &d->share_modes[i];
+ if (e->op_type == NO_OPLOCK && is_stat_open(e->access_mask)) {
+ continue;
+ }
+ num_non_stat_opens += 1;
+
+ /*
+ * We found a non-stat open, which in the exclusive/batch
+ * case will be inspected further down.
+ */
+ entry = e;
+ }
+ if (num_non_stat_opens == 0) {
+ /*
+ * No non-stat opens around, so nothing to wait for
+ */
+ return false;
+ }
+ if (num_non_stat_opens != 1) {
+ /*
+ * More than one open around. There can't be any exclusive or
+ * batch left, this is all level2.
+ */
return false;
}
- if (server_id_is_disconnected(&batch_entry->pid)) {
+ if (server_id_is_disconnected(&entry->pid)) {
/*
* TODO: clean up.
* This could be achieved by sending a break message
return false;
}
- /* Found a batch oplock */
- send_break_message(fsp, batch_entry, mid, oplock_request);
- return true;
-}
+ switch (create_disposition) {
+ case FILE_SUPERSEDE:
+ case FILE_OVERWRITE_IF:
+ break_to = NO_OPLOCK;
+ break;
+ default:
+ break_to = LEVEL_II_OPLOCK;
+ break;
+ }
-static bool delay_for_exclusive_oplocks(files_struct *fsp,
- uint64_t mid,
- int oplock_request,
- struct share_mode_entry *ex_entry)
-{
- if ((oplock_request & INTERNAL_OPEN_ONLY) || is_stat_open(fsp->access_mask)) {
- return false;
+ if (have_sharing_violation && (entry->op_type & BATCH_OPLOCK)) {
+ if (share_mode_stale_pid(d, 0)) {
+ return false;
+ }
+ send_break_message(fsp->conn->sconn->msg_ctx, entry, break_to);
+ return true;
}
- if (ex_entry == NULL) {
+ if (have_sharing_violation) {
+ /*
+ * Non-batch exclusive is not broken if we have a sharing
+ * violation
+ */
return false;
}
-
- if (server_id_is_disconnected(&ex_entry->pid)) {
+ if (!EXCLUSIVE_OPLOCK_TYPE(entry->op_type)) {
/*
- * since only durable handles can get disconnected,
- * and we can only get durable handles with batch oplocks,
- * this should actually never be reached...
+ * No break for NO_OPLOCK or LEVEL2_OPLOCK oplocks
*/
return false;
}
+ if (share_mode_stale_pid(d, 0)) {
+ return false;
+ }
- send_break_message(fsp, ex_entry, mid, oplock_request);
+ send_break_message(fsp->conn->sconn->msg_ctx, entry, break_to);
return true;
}
}
static void grant_fsp_oplock_type(files_struct *fsp,
- int oplock_request,
- bool got_level2_oplock,
- bool got_a_none_oplock)
+ struct share_mode_lock *lck,
+ int oplock_request)
{
bool allow_level2 = (global_client_caps & CAP_LEVEL_II_OPLOCKS) &&
lp_level2_oplocks(SNUM(fsp->conn));
+ bool got_level2_oplock, got_a_none_oplock;
+ uint32_t i;
/* Start by granting what the client asked for,
but ensure no SAMBA_PRIVATE bits can be set. */
return;
}
+ got_level2_oplock = false;
+ got_a_none_oplock = false;
+
+ for (i=0; i<lck->data->num_share_modes; i++) {
+ int op_type = lck->data->share_modes[i].op_type;
+
+ if (LEVEL_II_OPLOCK_TYPE(op_type)) {
+ got_level2_oplock = true;
+ }
+ if (op_type == NO_OPLOCK) {
+ got_a_none_oplock = true;
+ }
+ }
+
/*
* Match what was requested (fsp->oplock_type) with
* what was found in the existing share modes.
NTSTATUS status;
char *parent_dir;
SMB_STRUCT_STAT saved_stat = smb_fname->st;
- struct share_mode_entry *batch_entry = NULL;
- struct share_mode_entry *exclusive_entry = NULL;
- bool got_level2_oplock = false;
- bool got_a_none_oplock = false;
struct timespec old_write_time;
struct file_id id;
return NT_STATUS_SHARING_VIOLATION;
}
- find_oplock_types(fsp, 0, lck, &batch_entry, &exclusive_entry,
- &got_level2_oplock, &got_a_none_oplock);
+ if (!validate_oplock_types(lck)) {
+ smb_panic("validate_oplock_types failed");
+ }
- if (delay_for_batch_oplocks(fsp, req->mid, 0, batch_entry) ||
- delay_for_exclusive_oplocks(fsp, req->mid, 0,
- exclusive_entry)) {
+ if (delay_for_oplock(fsp, 0, lck, false, create_disposition)) {
schedule_defer_open(lck, request_time, req);
TALLOC_FREE(lck);
DEBUG(10, ("Sent oplock break request to kernel "
}
/* Get the types we need to examine. */
- find_oplock_types(fsp,
- oplock_request,
- lck,
- &batch_entry,
- &exclusive_entry,
- &got_level2_oplock,
- &got_a_none_oplock);
+ if (!validate_oplock_types(lck)) {
+ smb_panic("validate_oplock_types failed");
+ }
if (has_delete_on_close(lck, fsp->name_hash)) {
TALLOC_FREE(lck);
return NT_STATUS_DELETE_PENDING;
}
- /* First pass - send break only on batch oplocks. */
+ status = open_mode_check(conn, lck,
+ access_mask, share_access);
+
+ if (NT_STATUS_EQUAL(status, NT_STATUS_SHARING_VIOLATION) ||
+ (lck->data->num_share_modes > 0)) {
+ /*
+ * This comes from ancient times out of open_mode_check. I
+ * have no clue whether this is still necessary. I can't think
+ * of a case where this would actually matter further down in
+ * this function. I leave it here for further investigation
+ * :-)
+ */
+ file_existed = true;
+ }
+
if ((req != NULL) &&
- delay_for_batch_oplocks(fsp,
- req->mid,
- oplock_request,
- batch_entry)) {
+ delay_for_oplock(
+ fsp, oplock_request, lck,
+ NT_STATUS_EQUAL(status, NT_STATUS_SHARING_VIOLATION),
+ create_disposition)) {
schedule_defer_open(lck, request_time, req);
TALLOC_FREE(lck);
fd_close(fsp);
return NT_STATUS_SHARING_VIOLATION;
}
- status = open_mode_check(conn, lck,
- access_mask, share_access,
- &file_existed);
-
- if (NT_STATUS_IS_OK(status)) {
- /* We might be going to allow this open. Check oplock
- * status again. */
- /* Second pass - send break for both batch or
- * exclusive oplocks. */
- if ((req != NULL) &&
- delay_for_exclusive_oplocks(
- fsp,
- req->mid,
- oplock_request,
- exclusive_entry)) {
- schedule_defer_open(lck, request_time, req);
- TALLOC_FREE(lck);
- fd_close(fsp);
- return NT_STATUS_SHARING_VIOLATION;
- }
- }
-
if (!NT_STATUS_IS_OK(status)) {
uint32 can_access_mask;
bool can_access = True;
return status;
}
- grant_fsp_oplock_type(fsp,
- oplock_request,
- got_level2_oplock,
- got_a_none_oplock);
+ grant_fsp_oplock_type(fsp, lck, oplock_request);
/*
* We have the share entry *locked*.....
}
status = open_mode_check(conn, lck,
- access_mask, share_access,
- &dir_existed);
+ access_mask, share_access);
if (!NT_STATUS_IS_OK(status)) {
TALLOC_FREE(lck);