Copyright (C) Andrew Tridgell 1992-2000
Copyright (C) Jeremy Allison 1992-2000
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
used. This allows us to provide the same semantics as NT */
#include "includes.h"
+#include "system/filesys.h"
+#include "locking/proto.h"
+#include "smbd/globals.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_open.h"
+#include "serverid.h"
+#include "messages.h"
+#include "util_tdb.h"
#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
Debug info at level 10 for lock struct.
****************************************************************************/
-static void print_lock_struct(unsigned int i, struct lock_struct *pls)
+static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
- DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
+ DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
i,
- (unsigned int)pls->context.smbpid,
+ (unsigned long long)pls->context.smblctx,
(unsigned int)pls->context.tid,
- (unsigned int)procid_to_pid(&pls->context.pid) ));
-
- DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
+ server_id_str(talloc_tos(), &pls->context.pid) ));
+
+ DEBUG(10,("start = %.0f, size = %.0f, fnum = %llu, %s %s\n",
(double)pls->start,
(double)pls->size,
- pls->fnum,
+ (unsigned long long)pls->fnum,
lock_type_name(pls->lock_type),
lock_flav_name(pls->lock_flav) ));
}
bool brl_same_context(const struct lock_context *ctx1,
const struct lock_context *ctx2)
{
- return (procid_equal(&ctx1->pid, &ctx2->pid) &&
- (ctx1->smbpid == ctx2->smbpid) &&
+ return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
+ (ctx1->smblctx == ctx2->smblctx) &&
(ctx1->tid == ctx2->tid));
}
static bool brl_overlap(const struct lock_struct *lck1,
const struct lock_struct *lck2)
{
- /* this extra check is not redundent - it copes with locks
+ /* XXX Remove for Win7 compatibility. */
+ /* this extra check is not redundant - it copes with locks
that go beyond the end of 64 bit file space */
if (lck1->size != 0 &&
lck1->start == lck2->start &&
return False;
}
- if (brl_same_context(&lck1->context, &lck2->context) &&
- lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
+ /* A READ lock can stack on top of a WRITE lock if they have the same
+ * context & fnum. */
+ if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
+ brl_same_context(&lck1->context, &lck2->context) &&
+ lck1->fnum == lck2->fnum) {
return False;
}
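+	/* e.g. a handle already holding a WRITE lock on a range may take a
+	 * READ lock on that same range from the same context without a
+	 * conflict being reported. */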
lck2->start >= (lck1->start + lck1->size)) {
return False;
}
-
+
return True;
}
#endif
app depends on this ?
****************************************************************************/
-static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
+NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
{
if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
/* amazing the little things you learn with a test
return NT_STATUS_FILE_LOCK_CONFLICT;
}
- if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
+ if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
lock->context.tid == fsp->last_lock_failure.context.tid &&
lock->fnum == fsp->last_lock_failure.fnum &&
lock->start == fsp->last_lock_failure.start) {
void brl_init(bool read_only)
{
+ int tdb_flags;
+
if (brlock_db) {
return;
}
+
+ tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;
+
+ if (!lp_clustering()) {
+ /*
+ * We can't use the SEQNUM trick to cache brlock
+ * entries in the clustering case because ctdb seqnum
+ * propagation has a delay.
+ */
+ tdb_flags |= TDB_SEQNUM;
+ }
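+	/*
+	 * With TDB_SEQNUM set, brl_get_locks_readonly() can keep a cached
+	 * copy of the lock record on the fsp and revalidate it cheaply by
+	 * comparing sequence numbers instead of re-fetching the record.
+	 */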
+
brlock_db = db_open(NULL, lock_path("brlock.tdb"),
- lp_open_files_db_hash_size(),
- TDB_DEFAULT | TDB_CLEAR_IF_FIRST,
- read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
+ lp_open_files_db_hash_size(), tdb_flags,
+ read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
+ DBWRAP_LOCK_ORDER_2);
if (!brlock_db) {
DEBUG(0,("Failed to open byte range locking database %s\n",
lock_path("brlock.tdb")));
Lock a range of bytes - Windows lock semantics.
****************************************************************************/
-static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
- struct lock_struct *plock, bool blocking_lock)
+NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
+ struct lock_struct *plock, bool blocking_lock)
{
unsigned int i;
files_struct *fsp = br_lck->fsp;
struct lock_struct *locks = br_lck->lock_data;
+ NTSTATUS status;
+
+ SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);
+
+ if ((plock->start + plock->size - 1 < plock->start) &&
+ plock->size != 0) {
+ return NT_STATUS_INVALID_LOCK_RANGE;
+ }
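+	/*
+	 * Example: start = 0xFFFFFFFFFFFFFFFE with size = 4 wraps, as
+	 * start + size - 1 overflows to 1 (< start) and is rejected. A
+	 * zero-length lock would otherwise trip the same arithmetic,
+	 * hence the explicit size != 0 exemption.
+	 */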
for (i=0; i < br_lck->num_locks; i++) {
/* Do any Windows or POSIX locks conflict ? */
if (brl_conflict(&locks[i], plock)) {
/* Remember who blocked us. */
- plock->context.smbpid = locks[i].context.smbpid;
+ plock->context.smblctx = locks[i].context.smblctx;
return brl_lock_failed(fsp,plock,blocking_lock);
}
#if ZERO_ZERO
#endif
}
+ if (!IS_PENDING_LOCK(plock->lock_type)) {
+ contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
+ }
+
/* We can get the Windows lock, now see if it needs to
be mapped into a lower level POSIX one, and if so can
we get it ? */
&errno_ret)) {
/* We don't know who blocked us. */
- plock->context.smbpid = 0xFFFFFFFF;
+ plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;
if (errno_ret == EACCES || errno_ret == EAGAIN) {
- return NT_STATUS_FILE_LOCK_CONFLICT;
+ status = NT_STATUS_FILE_LOCK_CONFLICT;
+ goto fail;
} else {
- return map_nt_error_from_unix(errno);
+ status = map_nt_error_from_unix(errno);
+ goto fail;
}
}
}
/* no conflicts - add it to the list of locks */
locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
if (!locks) {
- return NT_STATUS_NO_MEMORY;
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
}
memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
br_lck->modified = True;
return NT_STATUS_OK;
+ fail:
+ if (!IS_PENDING_LOCK(plock->lock_type)) {
+ contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
+ }
+ return status;
}
/****************************************************************************
Cope with POSIX range splits and merges.
****************************************************************************/
-static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Output array. */
- const struct lock_struct *ex, /* existing lock. */
- const struct lock_struct *plock, /* proposed lock. */
- bool *lock_was_added)
+static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Output array. */
+ struct lock_struct *ex, /* existing lock. */
+ struct lock_struct *plock) /* proposed lock. */
{
bool lock_types_differ = (ex->lock_type != plock->lock_type);
/* Did we overlap ? */
/*********************************************
- +---------+
- | ex |
- +---------+
- +-------+
- | plock |
- +-------+
+ +---------+
+ | ex |
+ +---------+
+ +-------+
+ | plock |
+ +-------+
OR....
- +---------+
- | ex |
- +---------+
+ +---------+
+ | ex |
+ +---------+
**********************************************/
if ( (ex->start > (plock->start + plock->size)) ||
- (plock->start > (ex->start + ex->size))) {
+ (plock->start > (ex->start + ex->size))) {
+
/* No overlap with this lock - copy existing. */
+
memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
return 1;
}
+---------------------------+
| plock | -> replace with plock.
+---------------------------+
+OR
+ +---------------+
+ | ex |
+ +---------------+
+ +---------------------------+
+ | plock | -> replace with plock.
+ +---------------------------+
+
**********************************************/
if ( (ex->start >= plock->start) &&
- (ex->start + ex->size <= plock->start + plock->size) ) {
- memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
- *lock_was_added = True;
- return 1;
+ (ex->start + ex->size <= plock->start + plock->size) ) {
+
+ /* Replace - discard existing lock. */
+
+ return 0;
+ }
+
+/*********************************************
+Adjacent after.
+ +-------+
+ | ex |
+ +-------+
+ +---------------+
+ | plock |
+ +---------------+
+
+BECOMES....
+ +---------------+-------+
+ | plock | ex | - different lock types.
+ +---------------+-------+
+OR.... (merge)
+ +-----------------------+
+ | plock | - same lock type.
+ +-----------------------+
+**********************************************/
+
+ if (plock->start + plock->size == ex->start) {
+
+ /* If the lock types are the same, we merge, if different, we
+ add the remainder of the old lock. */
+
+ if (lock_types_differ) {
+ /* Add existing. */
+ memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
+ return 1;
+ } else {
+ /* Merge - adjust incoming lock as we may have more
+ * merging to come. */
+ plock->size += ex->size;
+ return 0;
+ }
}
/*********************************************
+Adjacent before.
+ +-------+
+ | ex |
+ +-------+
+ +---------------+
+ | plock |
+ +---------------+
+BECOMES....
+ +-------+---------------+
+ | ex | plock | - different lock types
+ +-------+---------------+
+
+OR.... (merge)
+ +-----------------------+
+ | plock | - same lock type.
+ +-----------------------+
+
+**********************************************/
+
+ if (ex->start + ex->size == plock->start) {
+
+ /* If the lock types are the same, we merge, if different, we
+ add the existing lock. */
+
+ if (lock_types_differ) {
+ memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
+ return 1;
+ } else {
+ /* Merge - adjust incoming lock as we may have more
+ * merging to come. */
+ plock->start = ex->start;
+ plock->size += ex->size;
+ return 0;
+ }
+ }
+
+/*********************************************
+Overlap after.
+-----------------------+
| ex |
+-----------------------+
+---------------+
| plock |
+---------------+
-OR....
- +-------+
- | ex |
- +-------+
+OR
+ +----------------+
+ | ex |
+ +----------------+
+---------------+
| plock |
+---------------+
+---------------+-------+
OR.... (merge)
+-----------------------+
- | ex | - same lock type.
+ | plock | - same lock type.
+-----------------------+
**********************************************/
if ( (ex->start >= plock->start) &&
- (ex->start <= plock->start + plock->size) &&
- (ex->start + ex->size > plock->start + plock->size) ) {
-
- *lock_was_added = True;
+ (ex->start <= plock->start + plock->size) &&
+ (ex->start + ex->size > plock->start + plock->size) ) {
/* If the lock types are the same, we merge, if different, we
- add the new lock before the old. */
+ add the remainder of the old lock. */
if (lock_types_differ) {
- /* Add new. */
- memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
- memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
+ /* Add remaining existing. */
+ memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
/* Adjust existing start and size. */
- lck_arr[1].start = plock->start + plock->size;
- lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
- return 2;
- } else {
- /* Merge. */
- memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
- /* Set new start and size. */
- lck_arr[0].start = plock->start;
- lck_arr[0].size = (ex->start + ex->size) - plock->start;
+ lck_arr[0].start = plock->start + plock->size;
+ lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
return 1;
+ } else {
+ /* Merge - adjust incoming lock as we may have more
+ * merging to come. */
+ plock->size += (ex->start + ex->size) - (plock->start + plock->size);
+ return 0;
}
}
/*********************************************
- +-----------------------+
- | ex |
- +-----------------------+
- +---------------+
- | plock |
- +---------------+
-OR....
- +-------+
- | ex |
- +-------+
- +---------------+
- | plock |
- +---------------+
+Overlap before.
+ +-----------------------+
+ | ex |
+ +-----------------------+
+ +---------------+
+ | plock |
+ +---------------+
+OR
+ +-------------+
+ | ex |
+ +-------------+
+ +---------------+
+ | plock |
+ +---------------+
+
BECOMES....
- +-------+---------------+
- | ex | plock | - different lock types
- +-------+---------------+
+ +-------+---------------+
+ | ex | plock | - different lock types
+ +-------+---------------+
OR.... (merge)
- +-----------------------+
- | ex | - same lock type.
- +-----------------------+
+ +-----------------------+
+ | plock | - same lock type.
+ +-----------------------+
**********************************************/
(ex->start + ex->size >= plock->start) &&
(ex->start + ex->size <= plock->start + plock->size) ) {
- *lock_was_added = True;
-
/* If the lock types are the same, we merge, if different, we
- add the new lock after the old. */
+ add the truncated old lock. */
if (lock_types_differ) {
memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
- memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
/* Adjust existing size. */
lck_arr[0].size = plock->start - ex->start;
- return 2;
- } else {
- /* Merge. */
- memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
- /* Adjust existing size. */
- lck_arr[0].size = (plock->start + plock->size) - ex->start;
return 1;
+ } else {
+ /* Merge - adjust incoming lock as we may have more
+ * merging to come. MUST ADJUST plock SIZE FIRST ! */
+ plock->size += (plock->start - ex->start);
+ plock->start = ex->start;
+ return 0;
}
}
/*********************************************
+Complete overlap.
+---------------------------+
| ex |
+---------------------------+
+-------+---------+---------+
OR
+---------------------------+
- | ex | - same lock type.
+ | plock | - same lock type.
+---------------------------+
**********************************************/
if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
- *lock_was_added = True;
if (lock_types_differ) {
/* We have to split ex into two locks here. */
memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
- memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
- memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));
+ memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
/* Adjust first existing size. */
lck_arr[0].size = plock->start - ex->start;
/* Adjust second existing start and size. */
- lck_arr[2].start = plock->start + plock->size;
- lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
- return 3;
+ lck_arr[1].start = plock->start + plock->size;
+ lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
+ return 2;
} else {
- /* Just eat plock. */
- memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
- return 1;
+ /* Just eat the existing locks, merge them into plock. */
+ plock->start = ex->start;
+ plock->size = ex->size;
+ return 0;
}
}
struct byte_range_lock *br_lck,
struct lock_struct *plock)
{
- unsigned int i, count;
+ unsigned int i, count, posix_count;
struct lock_struct *locks = br_lck->lock_data;
struct lock_struct *tp;
- bool lock_was_added = False;
bool signal_pending_read = False;
+ bool break_oplocks = false;
+ NTSTATUS status;
/* No zero-zero locks for POSIX. */
if (plock->start == 0 && plock->size == 0) {
}
/* Don't allow 64-bit lock wrap. */
- if (plock->start + plock->size < plock->start ||
- plock->start + plock->size < plock->size) {
+ if (plock->start + plock->size - 1 < plock->start) {
return NT_STATUS_INVALID_PARAMETER;
}
if (!tp) {
return NT_STATUS_NO_MEMORY;
}
-
- count = 0;
+
+ count = posix_count = 0;
+
for (i=0; i < br_lck->num_locks; i++) {
struct lock_struct *curr_lock = &locks[i];
/* No games with error messages. */
SAFE_FREE(tp);
/* Remember who blocked us. */
- plock->context.smbpid = curr_lock->context.smbpid;
+ plock->context.smblctx = curr_lock->context.smblctx;
return NT_STATUS_FILE_LOCK_CONFLICT;
}
/* Just copy the Windows lock into the new array. */
memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
count++;
} else {
+ unsigned int tmp_count = 0;
+
/* POSIX conflict semantics are different. */
if (brl_conflict_posix(curr_lock, plock)) {
/* Can't block ourselves with POSIX locks. */
/* No games with error messages. */
SAFE_FREE(tp);
/* Remember who blocked us. */
- plock->context.smbpid = curr_lock->context.smbpid;
+ plock->context.smblctx = curr_lock->context.smblctx;
return NT_STATUS_FILE_LOCK_CONFLICT;
}
/* Work out overlaps. */
- count += brlock_posix_split_merge(&tp[count], curr_lock, plock, &lock_was_added);
+ tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
+ posix_count += tmp_count;
+ count += tmp_count;
}
}
- if (!lock_was_added) {
- memcpy(&tp[count], plock, sizeof(struct lock_struct));
- count++;
+ /*
+	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
+	 * are not symmetric with POSIX semantics, we cannot guarantee that
+	 * contend_level2_oplocks_begin/end will be paired one-for-one as
+	 * they are with Windows semantics. Therefore we only
+ * call contend_level2_oplocks_begin if this is the first POSIX brl on
+ * the file.
+ */
+ break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
+ posix_count == 0);
+ if (break_oplocks) {
+ contend_level2_oplocks_begin(br_lck->fsp,
+ LEVEL2_CONTEND_POSIX_BRL);
+ }
+
+ /* Try and add the lock in order, sorted by lock start. */
+ for (i=0; i < count; i++) {
+ struct lock_struct *curr_lock = &tp[i];
+
+ if (curr_lock->start <= plock->start) {
+ continue;
+		}
+		break;
+	}
+
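+	/* Make room at index i (the first entry starting beyond plock) and
+	 * insert plock there, keeping the array sorted by start offset. */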
+ if (i < count) {
+ memmove(&tp[i+1], &tp[i],
+ (count - i)*sizeof(struct lock_struct));
}
+ memcpy(&tp[i], plock, sizeof(struct lock_struct));
+ count++;
/* We can get the POSIX lock, now see if it needs to
be mapped into a lower level POSIX one, and if so can
&errno_ret)) {
/* We don't know who blocked us. */
- plock->context.smbpid = 0xFFFFFFFF;
+ plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;
if (errno_ret == EACCES || errno_ret == EAGAIN) {
SAFE_FREE(tp);
- return NT_STATUS_FILE_LOCK_CONFLICT;
+ status = NT_STATUS_FILE_LOCK_CONFLICT;
+ goto fail;
} else {
SAFE_FREE(tp);
- return map_nt_error_from_unix(errno);
+ status = map_nt_error_from_unix(errno);
+ goto fail;
}
}
}
- /* Realloc so we don't leak entries per lock call. */
- tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
- if (!tp) {
- return NT_STATUS_NO_MEMORY;
+ /* If we didn't use all the allocated size,
+ * Realloc so we don't leak entries per lock call. */
+ if (count < br_lck->num_locks + 2) {
+ tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
+ if (!tp) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
}
+
br_lck->num_locks = count;
SAFE_FREE(br_lck->lock_data);
br_lck->lock_data = tp;
}
return NT_STATUS_OK;
+ fail:
+ if (break_oplocks) {
+ contend_level2_oplocks_end(br_lck->fsp,
+ LEVEL2_CONTEND_POSIX_BRL);
+ }
+ return status;
+}
+
+NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
+ struct byte_range_lock *br_lck,
+ struct lock_struct *plock,
+ bool blocking_lock,
+ struct blocking_lock_record *blr)
+{
+ VFS_FIND(brl_lock_windows);
+ return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
+ blocking_lock, blr);
}
/****************************************************************************
NTSTATUS brl_lock(struct messaging_context *msg_ctx,
struct byte_range_lock *br_lck,
- uint32 smbpid,
+ uint64_t smblctx,
struct server_id pid,
br_off start,
br_off size,
enum brl_type lock_type,
enum brl_flavour lock_flav,
bool blocking_lock,
- uint32 *psmbpid)
+ uint64_t *psmblctx,
+ struct blocking_lock_record *blr)
{
NTSTATUS ret;
struct lock_struct lock;
}
#endif
- lock.context.smbpid = smbpid;
+#ifdef DEVELOPER
+ /* Quieten valgrind on test. */
+ memset(&lock, '\0', sizeof(lock));
+#endif
+
+ lock.context.smblctx = smblctx;
lock.context.pid = pid;
lock.context.tid = br_lck->fsp->conn->cnum;
lock.start = start;
lock.lock_flav = lock_flav;
if (lock_flav == WINDOWS_LOCK) {
- ret = brl_lock_windows(br_lck, &lock, blocking_lock);
+ ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
+ &lock, blocking_lock, blr);
} else {
ret = brl_lock_posix(msg_ctx, br_lck, &lock);
}
#if ZERO_ZERO
/* sort the lock list */
- qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
+ TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif
/* If we're returning an error, return who blocked us. */
- if (!NT_STATUS_IS_OK(ret) && psmbpid) {
- *psmbpid = lock.context.smbpid;
+ if (!NT_STATUS_IS_OK(ret) && psmblctx) {
+ *psmblctx = lock.context.smblctx;
}
return ret;
}
Unlock a range of bytes - Windows semantics.
****************************************************************************/
-static bool brl_unlock_windows(struct messaging_context *msg_ctx,
+bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
struct byte_range_lock *br_lck,
const struct lock_struct *plock)
{
struct lock_struct *locks = br_lck->lock_data;
enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */
+ SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);
+
#if ZERO_ZERO
/* Delete write locks by preference... The lock list
is sorted in the zero zero case. */
for (i = 0; i < br_lck->num_locks; i++) {
struct lock_struct *lock = &locks[i];
+ if (IS_PENDING_LOCK(lock->lock_type)) {
+ continue;
+ }
+
/* Only remove our own locks that match in start, size, and flavour. */
if (brl_same_context(&lock->context, &plock->context) &&
lock->fnum == plock->fnum &&
}
}
+ contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
return True;
}
static bool brl_unlock_posix(struct messaging_context *msg_ctx,
struct byte_range_lock *br_lck,
- const struct lock_struct *plock)
+ struct lock_struct *plock)
{
unsigned int i, j, count;
struct lock_struct *tp;
count = 0;
for (i = 0; i < br_lck->num_locks; i++) {
struct lock_struct *lock = &locks[i];
- struct lock_struct tmp_lock[3];
- bool lock_was_added = False;
unsigned int tmp_count;
/* Only remove our own locks - ignore fnum. */
continue;
}
- /* Work out overlaps. */
- tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);
-
- if (tmp_count == 1) {
- /* Ether the locks didn't overlap, or the unlock completely
- overlapped this lock. If it didn't overlap, then there's
- no change in the locks. */
- if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
- SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
- /* No change in this lock. */
- memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
- count++;
- } else {
- SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
- overlap_found = True;
- }
- continue;
- } else if (tmp_count == 2) {
- /* The unlock overlapped an existing lock. Copy the truncated
- lock into the lock array. */
- if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
- SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
- SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
- memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
- if (tmp_lock[0].size != locks[i].size) {
- overlap_found = True;
- }
- } else {
- SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
- SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
- memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
- if (tmp_lock[1].start != locks[i].start) {
- overlap_found = True;
- }
+ if (lock->lock_flav == WINDOWS_LOCK) {
+ /* Do any Windows flavour locks conflict ? */
+ if (brl_conflict(lock, plock)) {
+ SAFE_FREE(tp);
+ return false;
}
+ /* Just copy the Windows lock into the new array. */
+ memcpy(&tp[count], lock, sizeof(struct lock_struct));
count++;
continue;
- } else {
- /* tmp_count == 3 - (we split a lock range in two). */
- SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
- SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
- SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);
+ }
+
+ /* Work out overlaps. */
+ tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);
+
+ if (tmp_count == 0) {
+ /* plock overlapped the existing lock completely,
+ or replaced it. Don't copy the existing lock. */
+ overlap_found = true;
+ } else if (tmp_count == 1) {
+			/* Either no overlap (simple copy of existing lock),
+			 * or an overlap that adjusted the existing lock. */
+ /* If the lock changed size, we had an overlap. */
+ if (tp[count].size != lock->size) {
+ overlap_found = true;
+ }
+ count += tmp_count;
+ } else if (tmp_count == 2) {
+ /* We split a lock range in two. */
+ overlap_found = true;
+ count += tmp_count;
- memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
- count++;
- memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
- count++;
- overlap_found = True;
/* Optimisation... */
/* We know we're finished here as we can't overlap any
more POSIX locks. Copy the rest of the lock array. */
+
if (i < br_lck->num_locks - 1) {
- memcpy(&tp[count], &locks[i+1],
+ memcpy(&tp[count], &locks[i+1],
sizeof(*locks)*((br_lck->num_locks-1) - i));
count += ((br_lck->num_locks-1) - i);
}
break;
}
+
}
if (!overlap_found) {
tp = NULL;
}
+ contend_level2_oplocks_end(br_lck->fsp,
+ LEVEL2_CONTEND_POSIX_BRL);
+
br_lck->num_locks = count;
SAFE_FREE(br_lck->lock_data);
locks = tp;
return True;
}
+bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
+ struct messaging_context *msg_ctx,
+ struct byte_range_lock *br_lck,
+ const struct lock_struct *plock)
+{
+ VFS_FIND(brl_unlock_windows);
+ return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
+ plock);
+}
+
/****************************************************************************
Unlock a range of bytes.
****************************************************************************/
bool brl_unlock(struct messaging_context *msg_ctx,
struct byte_range_lock *br_lck,
- uint32 smbpid,
+ uint64_t smblctx,
struct server_id pid,
br_off start,
br_off size,
{
struct lock_struct lock;
- lock.context.smbpid = smbpid;
+ lock.context.smblctx = smblctx;
lock.context.pid = pid;
lock.context.tid = br_lck->fsp->conn->cnum;
lock.start = start;
lock.lock_flav = lock_flav;
if (lock_flav == WINDOWS_LOCK) {
- return brl_unlock_windows(msg_ctx, br_lck, &lock);
+ return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
+ br_lck, &lock);
} else {
return brl_unlock_posix(msg_ctx, br_lck, &lock);
}
****************************************************************************/
bool brl_locktest(struct byte_range_lock *br_lck,
- uint32 smbpid,
+ uint64_t smblctx,
struct server_id pid,
br_off start,
br_off size,
const struct lock_struct *locks = br_lck->lock_data;
files_struct *fsp = br_lck->fsp;
- lock.context.smbpid = smbpid;
+ lock.context.smblctx = smblctx;
lock.context.pid = pid;
lock.context.tid = br_lck->fsp->conn->cnum;
lock.start = start;
if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);
- DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
+ DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for %s file %s\n",
(double)start, (double)size, ret ? "locked" : "unlocked",
- fsp->fnum, fsp->fsp_name ));
+ fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));
/* We need to return the inverse of is_posix_locked. */
ret = !ret;
****************************************************************************/
NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
- uint32 *psmbpid,
+ uint64_t *psmblctx,
struct server_id pid,
br_off *pstart,
br_off *psize,
const struct lock_struct *locks = br_lck->lock_data;
files_struct *fsp = br_lck->fsp;
- lock.context.smbpid = *psmbpid;
+ lock.context.smblctx = *psmblctx;
lock.context.pid = pid;
lock.context.tid = br_lck->fsp->conn->cnum;
lock.start = *pstart;
}
if (conflict) {
- *psmbpid = exlock->context.smbpid;
+ *psmblctx = exlock->context.smblctx;
*pstart = exlock->start;
*psize = exlock->size;
*plock_type = exlock->lock_type;
if(lp_posix_locking(fsp->conn->params)) {
bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);
- DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
+ DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for %s file %s\n",
(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
- fsp->fnum, fsp->fsp_name ));
+ fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));
if (ret) {
- /* Hmmm. No clue what to set smbpid to - use -1. */
- *psmbpid = 0xFFFF;
+ /* Hmmm. No clue what to set smblctx to - use -1. */
+ *psmblctx = 0xFFFFFFFFFFFFFFFFLL;
return NT_STATUS_LOCK_NOT_GRANTED;
}
}
return NT_STATUS_OK;
}
+
+bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
+ struct byte_range_lock *br_lck,
+ struct lock_struct *plock,
+ struct blocking_lock_record *blr)
+{
+ VFS_FIND(brl_cancel_windows);
+ return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock, blr);
+}
+
/****************************************************************************
Remove a particular pending lock.
****************************************************************************/
-
bool brl_lock_cancel(struct byte_range_lock *br_lck,
- uint32 smbpid,
+ uint64_t smblctx,
struct server_id pid,
br_off start,
br_off size,
- enum brl_flavour lock_flav)
+ enum brl_flavour lock_flav,
+ struct blocking_lock_record *blr)
+{
+ bool ret;
+ struct lock_struct lock;
+
+ lock.context.smblctx = smblctx;
+ lock.context.pid = pid;
+ lock.context.tid = br_lck->fsp->conn->cnum;
+ lock.start = start;
+ lock.size = size;
+ lock.fnum = br_lck->fsp->fnum;
+ lock.lock_flav = lock_flav;
+ /* lock.lock_type doesn't matter */
+
+ if (lock_flav == WINDOWS_LOCK) {
+ ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
+ &lock, blr);
+ } else {
+ ret = brl_lock_cancel_default(br_lck, &lock);
+ }
+
+ return ret;
+}
+
+bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
+ struct lock_struct *plock)
{
unsigned int i;
struct lock_struct *locks = br_lck->lock_data;
- struct lock_context context;
- context.smbpid = smbpid;
- context.pid = pid;
- context.tid = br_lck->fsp->conn->cnum;
+ SMB_ASSERT(plock);
for (i = 0; i < br_lck->num_locks; i++) {
struct lock_struct *lock = &locks[i];
/* For pending locks we *always* care about the fnum. */
- if (brl_same_context(&lock->context, &context) &&
- lock->fnum == br_lck->fsp->fnum &&
+ if (brl_same_context(&lock->context, &plock->context) &&
+ lock->fnum == plock->fnum &&
IS_PENDING_LOCK(lock->lock_type) &&
- lock->lock_flav == lock_flav &&
- lock->start == start &&
- lock->size == size) {
+ lock->lock_flav == plock->lock_flav &&
+ lock->start == plock->start &&
+ lock->size == plock->size) {
break;
}
}
struct byte_range_lock *br_lck)
{
files_struct *fsp = br_lck->fsp;
- uint16 tid = fsp->conn->cnum;
- int fnum = fsp->fnum;
- unsigned int i, j, dcount=0;
- int num_deleted_windows_locks = 0;
+ uint32_t tid = fsp->conn->cnum;
+ uint64_t fnum = fsp->fnum;
+ unsigned int i;
struct lock_struct *locks = br_lck->lock_data;
- struct server_id pid = procid_self();
- bool unlock_individually = False;
+ struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
+ struct lock_struct *locks_copy;
+ unsigned int num_locks_copy;
+
+ /* Copy the current lock array. */
+ if (br_lck->num_locks) {
+ locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
+ if (!locks_copy) {
+ smb_panic("brl_close_fnum: talloc failed");
+ }
+ } else {
+ locks_copy = NULL;
+ }
- if(lp_posix_locking(fsp->conn->params)) {
+ num_locks_copy = br_lck->num_locks;
- /* Check if there are any Windows locks associated with this dev/ino
- pair that are not this fnum. If so we need to call unlock on each
- one in order to release the system POSIX locks correctly. */
+ for (i=0; i < num_locks_copy; i++) {
+ struct lock_struct *lock = &locks_copy[i];
- for (i=0; i < br_lck->num_locks; i++) {
- struct lock_struct *lock = &locks[i];
+ if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) &&
+ (lock->fnum == fnum)) {
+ brl_unlock(msg_ctx,
+ br_lck,
+ lock->context.smblctx,
+ pid,
+ lock->start,
+ lock->size,
+ lock->lock_flav);
+ }
+ }
+}
- if (!procid_equal(&lock->context.pid, &pid)) {
- continue;
- }
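+/****************************************************************************
+ Mark all brl entries of this (durable) handle as disconnected so they can
+ be reclaimed later by brl_reconnect_disconnected().
+****************************************************************************/
+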
+bool brl_mark_disconnected(struct files_struct *fsp)
+{
+ uint32_t tid = fsp->conn->cnum;
+ uint64_t smblctx = fsp->op->global->open_persistent_id;
+ uint64_t fnum = fsp->fnum;
+ unsigned int i;
+ struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
+ struct byte_range_lock *br_lck = NULL;
- if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
- continue; /* Ignore pending. */
- }
+ if (!fsp->op->global->durable) {
+ return false;
+ }
- if (lock->context.tid != tid || lock->fnum != fnum) {
- unlock_individually = True;
- break;
- }
- }
+ if (fsp->current_lock_count == 0) {
+ return true;
+ }
- if (unlock_individually) {
- struct lock_struct *locks_copy;
- unsigned int num_locks_copy;
-
- /* Copy the current lock array. */
- if (br_lck->num_locks) {
- locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
- if (!locks_copy) {
- smb_panic("brl_close_fnum: talloc failed");
- }
- } else {
- locks_copy = NULL;
- }
+ br_lck = brl_get_locks(talloc_tos(), fsp);
+ if (br_lck == NULL) {
+ return false;
+ }
- num_locks_copy = br_lck->num_locks;
+ for (i=0; i < br_lck->num_locks; i++) {
+ struct lock_struct *lock = &br_lck->lock_data[i];
- for (i=0; i < num_locks_copy; i++) {
- struct lock_struct *lock = &locks_copy[i];
+ /*
+ * as this is a durable handle, we only expect locks
+ * of the current file handle!
+ */
- if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
- (lock->fnum == fnum)) {
- brl_unlock(msg_ctx,
- br_lck,
- lock->context.smbpid,
- pid,
- lock->start,
- lock->size,
- lock->lock_flav);
- }
- }
- return;
+ if (lock->context.smblctx != smblctx) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ if (lock->context.tid != tid) {
+ TALLOC_FREE(br_lck);
+ return false;
}
+
+ if (!serverid_equal(&lock->context.pid, &self)) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ if (lock->fnum != fnum) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ server_id_set_disconnected(&lock->context.pid);
+ lock->context.tid = TID_FIELD_INVALID;
+ lock->fnum = FNUM_FIELD_INVALID;
+ }
+
+ br_lck->modified = true;
+ TALLOC_FREE(br_lck);
+ return true;
+}
+
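+/****************************************************************************
+ Reactivate the disconnected brl entries of a durable handle on reconnect by
+ rewriting their ownership to the current server_id, tid and fnum.
+****************************************************************************/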
+bool brl_reconnect_disconnected(struct files_struct *fsp)
+{
+ uint32_t tid = fsp->conn->cnum;
+ uint64_t smblctx = fsp->op->global->open_persistent_id;
+ uint64_t fnum = fsp->fnum;
+ unsigned int i;
+ struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
+ struct byte_range_lock *br_lck = NULL;
+
+ if (!fsp->op->global->durable) {
+ return false;
}
- /* We can bulk delete - any POSIX locks will be removed when the fd closes. */
+ /*
+ * When reconnecting, we do not want to validate the brlock entries
+ * and thereby remove our own (disconnected) entries but reactivate
+ * them instead.
+ */
+ fsp->lockdb_clean = true;
- /* Remove any existing locks for this fnum (or any fnum if they're POSIX). */
+ br_lck = brl_get_locks(talloc_tos(), fsp);
+ if (br_lck == NULL) {
+ return false;
+ }
+
+ if (br_lck->num_locks == 0) {
+ TALLOC_FREE(br_lck);
+ return true;
+ }
for (i=0; i < br_lck->num_locks; i++) {
- struct lock_struct *lock = &locks[i];
- bool del_this_lock = False;
-
- if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
- if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
- del_this_lock = True;
- num_deleted_windows_locks++;
- } else if (lock->lock_flav == POSIX_LOCK) {
- del_this_lock = True;
- }
- }
+ struct lock_struct *lock = &br_lck->lock_data[i];
- if (del_this_lock) {
- /* Send unlock messages to any pending waiters that overlap. */
- for (j=0; j < br_lck->num_locks; j++) {
- struct lock_struct *pend_lock = &locks[j];
+ /*
+ * as this is a durable handle we only expect locks
+ * of the current file handle!
+ */
- /* Ignore our own or non-pending locks. */
- if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
- continue;
- }
+ if (lock->context.smblctx != smblctx) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
- /* Optimisation - don't send to this fnum as we're
- closing it. */
- if (pend_lock->context.tid == tid &&
- procid_equal(&pend_lock->context.pid, &pid) &&
- pend_lock->fnum == fnum) {
- continue;
- }
+ if (lock->context.tid != TID_FIELD_INVALID) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
- /* We could send specific lock info here... */
- if (brl_pending_overlap(lock, pend_lock)) {
- messaging_send(msg_ctx, pend_lock->context.pid,
- MSG_SMB_UNLOCK, &data_blob_null);
- }
- }
+ if (!server_id_is_disconnected(&lock->context.pid)) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
- /* found it - delete it */
- if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
- memmove(&locks[i], &locks[i+1],
- sizeof(*locks)*((br_lck->num_locks-1) - i));
- }
- br_lck->num_locks--;
- br_lck->modified = True;
- i--;
- dcount++;
+ if (lock->fnum != FNUM_FIELD_INVALID) {
+ TALLOC_FREE(br_lck);
+ return false;
}
- }
- if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
- /* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
- reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
+ lock->context.pid = self;
+ lock->context.tid = tid;
+ lock->fnum = fnum;
}
+
+ fsp->current_lock_count = br_lck->num_locks;
+ br_lck->modified = true;
+ TALLOC_FREE(br_lck);
+ return true;
}
/****************************************************************************
Ensure this set of lock entries is valid.
****************************************************************************/
-
-static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
+static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks,
+ bool keep_disconnected)
{
unsigned int i;
unsigned int num_valid_entries = 0;
struct lock_struct *locks = *pplocks;
+ TALLOC_CTX *frame = talloc_stackframe();
+ struct server_id *ids;
+ bool *exists;
+
+ ids = talloc_array(frame, struct server_id, *pnum_entries);
+ if (ids == NULL) {
+ DEBUG(0, ("validate_lock_entries: "
+ "talloc_array(struct server_id, %u) failed\n",
+ *pnum_entries));
+ talloc_free(frame);
+ return false;
+ }
+
+ exists = talloc_array(frame, bool, *pnum_entries);
+ if (exists == NULL) {
+ DEBUG(0, ("validate_lock_entries: "
+ "talloc_array(bool, %u) failed\n",
+ *pnum_entries));
+ talloc_free(frame);
+ return false;
+ }
for (i = 0; i < *pnum_entries; i++) {
- struct lock_struct *lock_data = &locks[i];
- if (!process_exists(lock_data->context.pid)) {
- /* This process no longer exists - mark this
- entry as invalid by zeroing it. */
- ZERO_STRUCTP(lock_data);
- } else {
+ ids[i] = locks[i].context.pid;
+ }
+
+ if (!serverids_exist(ids, *pnum_entries, exists)) {
+ DEBUG(3, ("validate_lock_entries: serverids_exists failed\n"));
+ talloc_free(frame);
+ return false;
+ }
+
+ for (i = 0; i < *pnum_entries; i++) {
+ if (exists[i]) {
+ num_valid_entries++;
+ continue;
+ }
+
+ if (keep_disconnected &&
+ server_id_is_disconnected(&ids[i]))
+ {
num_valid_entries++;
+ continue;
}
+
+ /* This process no longer exists - mark this
+ entry as invalid by zeroing it. */
+ ZERO_STRUCTP(&locks[i]);
}
+ TALLOC_FREE(frame);
if (num_valid_entries != *pnum_entries) {
struct lock_struct *new_lock_data = NULL;
num_valid_entries = 0;
for (i = 0; i < *pnum_entries; i++) {
struct lock_struct *lock_data = &locks[i];
- if (lock_data->context.smbpid &&
+ if (lock_data->context.smblctx &&
lock_data->context.tid) {
/* Valid (nonzero) entry - copy it. */
memcpy(&new_lock_data[num_valid_entries],
on each lock.
****************************************************************************/
-static int traverse_fn(struct db_record *rec, void *state)
+static int brl_traverse_fn(struct db_record *rec, void *state)
{
struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
struct lock_struct *locks;
unsigned int i;
unsigned int num_locks = 0;
unsigned int orig_num_locks = 0;
+ TDB_DATA dbkey;
+ TDB_DATA value;
+
+ dbkey = dbwrap_record_get_key(rec);
+ value = dbwrap_record_get_value(rec);
/* In a traverse function we must make a copy of
dbuf before modifying it. */
- locks = (struct lock_struct *)memdup(rec->value.dptr,
- rec->value.dsize);
+ locks = (struct lock_struct *)memdup(value.dptr, value.dsize);
if (!locks) {
return -1; /* Terminate traversal. */
}
- key = (struct file_id *)rec->key.dptr;
- orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);
+ key = (struct file_id *)dbkey.dptr;
+ orig_num_locks = num_locks = value.dsize/sizeof(*locks);
/* Ensure the lock db is clean of entries from invalid processes. */
- if (!validate_lock_entries(&num_locks, &locks)) {
+ if (!validate_lock_entries(&num_locks, &locks, true)) {
SAFE_FREE(locks);
return -1; /* Terminate traversal */
}
TDB_DATA data;
data.dptr = (uint8_t *)locks;
data.dsize = num_locks*sizeof(struct lock_struct);
- rec->store(rec, data, TDB_REPLACE);
+ dbwrap_record_store(rec, data, TDB_REPLACE);
} else {
- rec->delete_rec(rec);
+ dbwrap_record_delete(rec);
}
}
void *private_data)
{
struct brl_forall_cb cb;
+ NTSTATUS status;
+ int count = 0;
if (!brlock_db) {
return 0;
}
cb.fn = fn;
cb.private_data = private_data;
- return brlock_db->traverse(brlock_db, traverse_fn, &cb);
+ status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);
+
+ if (!NT_STATUS_IS_OK(status)) {
+ return -1;
+ } else {
+ return count;
+ }
}
/*******************************************************************
Unlock the record.
********************************************************************/
-static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
+static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
- TDB_DATA key;
-
- key.dptr = (uint8 *)&br_lck->key;
- key.dsize = sizeof(struct file_id);
-
if (br_lck->read_only) {
SMB_ASSERT(!br_lck->modified);
}
if (br_lck->num_locks == 0) {
/* No locks - delete this entry. */
- NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
+ NTSTATUS status = dbwrap_record_delete(br_lck->record);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0, ("delete_rec returned %s\n",
nt_errstr(status)));
data.dptr = (uint8 *)br_lck->lock_data;
data.dsize = br_lck->num_locks * sizeof(struct lock_struct);
- status = br_lck->record->store(br_lck->record, data,
- TDB_REPLACE);
+ status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0, ("store returned %s\n", nt_errstr(status)));
smb_panic("Could not store byte range mode entry");
done:
- SAFE_FREE(br_lck->lock_data);
+ br_lck->read_only = true;
+ br_lck->modified = false;
+
TALLOC_FREE(br_lck->record);
+}
+
+static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
+{
+ byte_range_lock_flush(br_lck);
+ SAFE_FREE(br_lck->lock_data);
return 0;
}
files_struct *fsp, bool read_only)
{
TDB_DATA key, data;
- struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
+ struct byte_range_lock *br_lck = talloc(mem_ctx, struct byte_range_lock);
+ bool do_read_only = read_only;
if (br_lck == NULL) {
return NULL;
br_lck->fsp = fsp;
br_lck->num_locks = 0;
br_lck->modified = False;
- memset(&br_lck->key, '\0', sizeof(struct file_id));
br_lck->key = fsp->file_id;
key.dptr = (uint8 *)&br_lck->key;
if (!fsp->lockdb_clean) {
/* We must be read/write to clean
the dead entries. */
- read_only = False;
+ do_read_only = false;
}
- if (read_only) {
- if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
+ if (do_read_only) {
+ NTSTATUS status;
+ status = dbwrap_fetch(brlock_db, br_lck, key, &data);
+ if (!NT_STATUS_IS_OK(status)) {
DEBUG(3, ("Could not fetch byte range lock record\n"));
TALLOC_FREE(br_lck);
return NULL;
}
br_lck->record = NULL;
- }
- else {
- br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);
+ } else {
+ br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);
if (br_lck->record == NULL) {
DEBUG(3, ("Could not lock byte range lock entry\n"));
return NULL;
}
- data = br_lck->record->value;
+ data = dbwrap_record_get_value(br_lck->record);
}
- br_lck->read_only = read_only;
+ br_lck->read_only = do_read_only;
br_lck->lock_data = NULL;
talloc_set_destructor(br_lck, byte_range_lock_destructor);
memcpy(br_lck->lock_data, data.dptr, data.dsize);
}
-
+
if (!fsp->lockdb_clean) {
int orig_num_locks = br_lck->num_locks;
- /* This is the first time we've accessed this. */
- /* Go through and ensure all entries exist - remove any that don't. */
- /* Makes the lockdb self cleaning at low cost. */
+ /*
+ * This is the first time we access the byte range lock
+ * record with this fsp. Go through and ensure all entries
+ * are valid - remove any that don't.
+ * This makes the lockdb self cleaning at low cost.
+ *
+ * Note: Disconnected entries belong to disconnected
+ * durable handles. So at this point, we have a new
+ * handle on the file and the disconnected durable has
+ * already been closed (we are not a durable reconnect).
+ * So we need to clean the disconnected brl entry.
+ */
if (!validate_lock_entries(&br_lck->num_locks,
- &br_lck->lock_data)) {
+ &br_lck->lock_data, false)) {
SAFE_FREE(br_lck->lock_data);
TALLOC_FREE(br_lck);
return NULL;
print_lock_struct(i, &locks[i]);
}
}
+
+ if (do_read_only != read_only) {
+ /*
+ * this stores the record and gets rid of
+ * the write lock that is needed for a cleanup
+ */
+ byte_range_lock_flush(br_lck);
+ }
+
return br_lck;
}
return brl_get_locks_internal(mem_ctx, fsp, False);
}
-struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
- files_struct *fsp)
+struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
- return brl_get_locks_internal(mem_ctx, fsp, True);
+ struct byte_range_lock *br_lock;
+
+ if (lp_clustering()) {
+ return brl_get_locks_internal(talloc_tos(), fsp, true);
+ }
+
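+	/*
+	 * If the brlock.tdb sequence number is unchanged since we cached
+	 * fsp->brlock_rec, no brl record anywhere has been modified, so
+	 * the cached copy is still valid.
+	 */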
+ if ((fsp->brlock_rec != NULL)
+ && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
+ return fsp->brlock_rec;
+ }
+
+ TALLOC_FREE(fsp->brlock_rec);
+
+ br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
+ if (br_lock == NULL) {
+ return NULL;
+ }
+ fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);
+
+ fsp->brlock_rec = talloc_move(fsp, &br_lock);
+
+ return fsp->brlock_rec;
}
struct brl_revalidate_state {
static int compare_procids(const void *p1, const void *p2)
{
- const struct server_id *i1 = (struct server_id *)p1;
- const struct server_id *i2 = (struct server_id *)p2;
+ const struct server_id *i1 = (const struct server_id *)p1;
+ const struct server_id *i2 = (const struct server_id *)p2;
if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
* array, then qsort that array and only send to non-dupes.
*/
-static void brl_revalidate(struct messaging_context *msg_ctx,
- void *private_data,
- uint32_t msg_type,
- struct server_id server_id,
- DATA_BLOB *data)
+void brl_revalidate(struct messaging_context *msg_ctx,
+ void *private_data,
+ uint32_t msg_type,
+ struct server_id server_id,
+ DATA_BLOB *data)
{
struct brl_revalidate_state *state;
uint32 i;
struct server_id last_pid;
- if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
+ if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
DEBUG(0, ("talloc failed\n"));
return;
}
goto done;
}
- qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
- compare_procids);
+ TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);
ZERO_STRUCT(last_pid);
for (i=0; i<state->num_pids; i++) {
- if (procid_equal(&last_pid, &state->pids[i])) {
+ if (serverid_equal(&last_pid, &state->pids[i])) {
/*
* We've seen that one already
*/
return;
}
-void brl_register_msgs(struct messaging_context *msg_ctx)
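+/****************************************************************************
+ Remove any disconnected brl entries belonging to a given persistent open,
+ but only if every entry on the file is disconnected and owned by that open.
+****************************************************************************/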
+bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
- messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
- brl_revalidate);
+ bool ret = false;
+ TALLOC_CTX *frame = talloc_stackframe();
+ TDB_DATA key, val;
+ struct db_record *rec;
+ struct lock_struct *lock;
+ unsigned n, num;
+ NTSTATUS status;
+
+ key = make_tdb_data((void*)&fid, sizeof(fid));
+
+ rec = dbwrap_fetch_locked(brlock_db, frame, key);
+ if (rec == NULL) {
+ DEBUG(5, ("brl_cleanup_disconnected: failed to fetch record "
+ "for file %s\n", file_id_string(frame, &fid)));
+ goto done;
+ }
+
+ val = dbwrap_record_get_value(rec);
+ lock = (struct lock_struct*)val.dptr;
+ num = val.dsize / sizeof(struct lock_struct);
+ if (lock == NULL) {
+ DEBUG(10, ("brl_cleanup_disconnected: no byte range locks for "
+ "file %s\n", file_id_string(frame, &fid)));
+ ret = true;
+ goto done;
+ }
+
+ for (n=0; n<num; n++) {
+ struct lock_context *ctx = &lock[n].context;
+
+ if (!server_id_is_disconnected(&ctx->pid)) {
+ DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
+ "%s used by server %s, do not cleanup\n",
+ file_id_string(frame, &fid),
+ server_id_str(frame, &ctx->pid)));
+ goto done;
+ }
+
+ if (ctx->smblctx != open_persistent_id) {
+ DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
+ "%s expected smblctx %llu but found %llu"
+ ", do not cleanup\n",
+ file_id_string(frame, &fid),
+ (unsigned long long)open_persistent_id,
+ (unsigned long long)ctx->smblctx));
+ goto done;
+ }
+ }
+
+ status = dbwrap_record_delete(rec);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(5, ("brl_cleanup_disconnected: failed to delete record "
+ "for file %s from %s, open %llu: %s\n",
+ file_id_string(frame, &fid), dbwrap_name(brlock_db),
+ (unsigned long long)open_persistent_id,
+ nt_errstr(status)));
+ goto done;
+ }
+
+ DEBUG(10, ("brl_cleanup_disconnected: "
+ "file %s cleaned up %u entries from open %llu\n",
+ file_id_string(frame, &fid), num,
+ (unsigned long long)open_persistent_id));
+
+ ret = true;
+done:
+ talloc_free(frame);
+ return ret;
}