This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
+ the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
/* The open brlock.tdb database. */
-static TDB_CONTEXT *tdb;
+static struct db_context *brlock_db;
/****************************************************************************
Debug info at level 10 for lock struct.
const struct lock_struct *lck2)
{
/* Ignore PENDING locks. */
- if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
+ if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
return False;
/* Read locks never conflict. */
#endif
/* Ignore PENDING locks. */
- if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
+ if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
return False;
/* Read locks never conflict. */
static BOOL brl_conflict1(const struct lock_struct *lck1,
const struct lock_struct *lck2)
{
- if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
+ if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
return False;
if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
- if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
+ if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
return False;
if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
}
/****************************************************************************
- Amazingly enough, w2k3 "remembers" whether the last lock failure
+ Check if an unlock overlaps a pending lock.
+****************************************************************************/
+
+static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
+{
+	/* Case 1: the (un)locked range starts at or before the pending lock
+	   and extends into it. */
+	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
+		return True;
+	/* Case 2: the (un)locked range starts inside the pending lock.
+	   NOTE(review): the '<=' on the end boundary also counts a range that
+	   merely touches pend_lock's end as overlapping - presumably a
+	   deliberately conservative choice so waiters are never missed;
+	   confirm against upstream before tightening. */
+	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
+		return True;
+	return False;
+}
+
+/****************************************************************************
+ Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
is the same as this one and changes its error code. I wonder if any
app depends on this ?
****************************************************************************/
-static NTSTATUS brl_lock_failed(const struct lock_struct *lock)
+static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, BOOL blocking_lock)
{
- static struct lock_struct last_lock_failure;
-
- if (brl_same_context(&lock->context, &last_lock_failure.context) &&
- lock->fnum == last_lock_failure.fnum &&
- lock->start == last_lock_failure.start &&
- lock->size == last_lock_failure.size) {
- return NT_STATUS_FILE_LOCK_CONFLICT;
- }
- last_lock_failure = *lock;
- if (lock->start >= 0xEF000000 &&
- (lock->start >> 63) == 0) {
+ if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
/* amazing the little things you learn with a test
suite. Locks beyond this offset (as a 64 bit
number!) always generate the conflict error code,
unless the top bit is set */
+ if (!blocking_lock) {
+ fsp->last_lock_failure = *lock;
+ }
+ return NT_STATUS_FILE_LOCK_CONFLICT;
+ }
+
+ if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
+ lock->context.tid == fsp->last_lock_failure.context.tid &&
+ lock->fnum == fsp->last_lock_failure.fnum &&
+ lock->start == fsp->last_lock_failure.start) {
return NT_STATUS_FILE_LOCK_CONFLICT;
}
+
+ if (!blocking_lock) {
+ fsp->last_lock_failure = *lock;
+ }
return NT_STATUS_LOCK_NOT_GRANTED;
}
void brl_init(int read_only)
{
- if (tdb) {
+ if (brlock_db) {
return;
}
- tdb = tdb_open_log(lock_path("brlock.tdb"),
- lp_open_files_db_hash_size(),
- TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
- read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
- if (!tdb) {
+ brlock_db = db_open(NULL, lock_path("brlock.tdb"),
+ lp_open_files_db_hash_size(),
+ TDB_DEFAULT
+ |TDB_VOLATILE
+ |(read_only?0x0:TDB_CLEAR_IF_FIRST),
+ read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
+ if (!brlock_db) {
DEBUG(0,("Failed to open byte range locking database %s\n",
lock_path("brlock.tdb")));
return;
void brl_shutdown(int read_only)
{
- if (!tdb) {
+ if (!brlock_db) {
return;
}
- tdb_close(tdb);
+ TALLOC_FREE(brlock_db);
}
#if ZERO_ZERO
****************************************************************************/
static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
- const struct lock_struct *plock,
- BOOL *my_lock_ctx)
+ struct lock_struct *plock, BOOL blocking_lock)
{
unsigned int i;
files_struct *fsp = br_lck->fsp;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
+ struct lock_struct *locks = br_lck->lock_data;
for (i=0; i < br_lck->num_locks; i++) {
/* Do any Windows or POSIX locks conflict ? */
if (brl_conflict(&locks[i], plock)) {
- NTSTATUS status = brl_lock_failed(plock);;
- /* Did we block ourselves ? */
- if (brl_same_context(&locks[i].context, &plock->context)) {
- *my_lock_ctx = True;
- }
- return status;
+ /* Remember who blocked us. */
+ plock->context.smbpid = locks[i].context.smbpid;
+ return brl_lock_failed(fsp,plock,blocking_lock);
}
#if ZERO_ZERO
if (plock->start == 0 && plock->size == 0 &&
be mapped into a lower level POSIX one, and if so can
we get it ? */
- if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(fsp->conn))) {
+ if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
int errno_ret;
if (!set_posix_lock_windows_flavour(fsp,
plock->start,
locks,
br_lck->num_locks,
&errno_ret)) {
+
+ /* We don't know who blocked us. */
+ plock->context.smbpid = 0xFFFFFFFF;
+
if (errno_ret == EACCES || errno_ret == EAGAIN) {
return NT_STATUS_FILE_LOCK_CONFLICT;
} else {
memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
br_lck->num_locks += 1;
- br_lck->lock_data = (void *)locks;
+ br_lck->lock_data = locks;
br_lck->modified = True;
return NT_STATUS_OK;
}
/* Never get here. */
- smb_panic("brlock_posix_split_merge\n");
+ smb_panic("brlock_posix_split_merge");
/* Notreached. */
- abort();
+
/* Keep some compilers happy. */
return 0;
}
We must cope with range splits and merges.
****************************************************************************/
-static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
- const struct lock_struct *plock,
- BOOL *my_lock_ctx)
+static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
+ struct byte_range_lock *br_lck,
+ struct lock_struct *plock)
{
unsigned int i, count;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
+ struct lock_struct *locks = br_lck->lock_data;
struct lock_struct *tp;
BOOL lock_was_added = False;
+ BOOL signal_pending_read = False;
/* No zero-zero locks for POSIX. */
if (plock->start == 0 && plock->size == 0) {
count = 0;
for (i=0; i < br_lck->num_locks; i++) {
- if (locks[i].lock_flav == WINDOWS_LOCK) {
+ struct lock_struct *curr_lock = &locks[i];
+
+ /* If we have a pending read lock, a lock downgrade should
+ trigger a lock re-evaluation. */
+ if (curr_lock->lock_type == PENDING_READ_LOCK &&
+ brl_pending_overlap(plock, curr_lock)) {
+ signal_pending_read = True;
+ }
+
+ if (curr_lock->lock_flav == WINDOWS_LOCK) {
/* Do any Windows flavour locks conflict ? */
- if (brl_conflict(&locks[i], plock)) {
- /* Did we block ourselves ? */
- if (brl_same_context(&locks[i].context, &plock->context)) {
- *my_lock_ctx = True;
- }
+ if (brl_conflict(curr_lock, plock)) {
/* No games with error messages. */
SAFE_FREE(tp);
+ /* Remember who blocked us. */
+ plock->context.smbpid = curr_lock->context.smbpid;
return NT_STATUS_FILE_LOCK_CONFLICT;
}
/* Just copy the Windows lock into the new array. */
- memcpy(&tp[count], &locks[i], sizeof(struct lock_struct));
+ memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
count++;
} else {
/* POSIX conflict semantics are different. */
- if (brl_conflict_posix(&locks[i], plock)) {
+ if (brl_conflict_posix(curr_lock, plock)) {
/* Can't block ourselves with POSIX locks. */
/* No games with error messages. */
SAFE_FREE(tp);
+ /* Remember who blocked us. */
+ plock->context.smbpid = curr_lock->context.smbpid;
return NT_STATUS_FILE_LOCK_CONFLICT;
}
/* Work out overlaps. */
- count += brlock_posix_split_merge(&tp[count], &locks[i], plock, &lock_was_added);
+ count += brlock_posix_split_merge(&tp[count], curr_lock, plock, &lock_was_added);
}
}
be mapped into a lower level POSIX one, and if so can
we get it ? */
- if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(br_lck->fsp->conn))) {
+ if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
int errno_ret;
/* The lower layer just needs to attempt to
plock->size,
plock->lock_type,
&errno_ret)) {
+
+ /* We don't know who blocked us. */
+ plock->context.smbpid = 0xFFFFFFFF;
+
if (errno_ret == EACCES || errno_ret == EAGAIN) {
SAFE_FREE(tp);
return NT_STATUS_FILE_LOCK_CONFLICT;
return NT_STATUS_NO_MEMORY;
}
br_lck->num_locks = count;
- br_lck->lock_data = (void *)tp;
+ SAFE_FREE(br_lck->lock_data);
+ br_lck->lock_data = tp;
+ locks = tp;
br_lck->modified = True;
+
+ /* A successful downgrade from write to read lock can trigger a lock
+ re-evalutation where waiting readers can now proceed. */
+
+ if (signal_pending_read) {
+ /* Send unlock messages to any pending read waiters that overlap. */
+ for (i=0; i < br_lck->num_locks; i++) {
+ struct lock_struct *pend_lock = &locks[i];
+
+ /* Ignore non-pending locks. */
+ if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
+ continue;
+ }
+
+ if (pend_lock->lock_type == PENDING_READ_LOCK &&
+ brl_pending_overlap(plock, pend_lock)) {
+ DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
+ procid_str_static(&pend_lock->context.pid )));
+
+ messaging_send(msg_ctx, pend_lock->context.pid,
+ MSG_SMB_UNLOCK, &data_blob_null);
+ }
+ }
+ }
+
return NT_STATUS_OK;
}
Lock a range of bytes.
****************************************************************************/
-NTSTATUS brl_lock(struct byte_range_lock *br_lck,
+NTSTATUS brl_lock(struct messaging_context *msg_ctx,
+ struct byte_range_lock *br_lck,
uint32 smbpid,
- struct process_id pid,
+ struct server_id pid,
br_off start,
br_off size,
enum brl_type lock_type,
enum brl_flavour lock_flav,
- BOOL *my_lock_ctx)
+ BOOL blocking_lock,
+ uint32 *psmbpid)
{
NTSTATUS ret;
struct lock_struct lock;
- *my_lock_ctx = False;
-
#if !ZERO_ZERO
if (start == 0 && size == 0) {
DEBUG(0,("client sent 0/0 lock - please report this\n"));
lock.lock_flav = lock_flav;
if (lock_flav == WINDOWS_LOCK) {
- ret = brl_lock_windows(br_lck, &lock, my_lock_ctx);
+ ret = brl_lock_windows(br_lck, &lock, blocking_lock);
} else {
- ret = brl_lock_posix(br_lck, &lock, my_lock_ctx);
+ ret = brl_lock_posix(msg_ctx, br_lck, &lock);
}
#if ZERO_ZERO
qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif
+ /* If we're returning an error, return who blocked us. */
+ if (!NT_STATUS_IS_OK(ret) && psmbpid) {
+ *psmbpid = lock.context.smbpid;
+ }
return ret;
}
-/****************************************************************************
- Check if an unlock overlaps a pending lock.
-****************************************************************************/
-
-static BOOL brl_pending_overlap(struct lock_struct *lock, struct lock_struct *pend_lock)
-{
- if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
- return True;
- if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
- return True;
- return False;
-}
-
/****************************************************************************
Unlock a range of bytes - Windows semantics.
****************************************************************************/
-static BOOL brl_unlock_windows(struct byte_range_lock *br_lck, const struct lock_struct *plock)
+static BOOL brl_unlock_windows(struct messaging_context *msg_ctx,
+ struct byte_range_lock *br_lck,
+ const struct lock_struct *plock)
{
unsigned int i, j;
- struct lock_struct *lock = NULL;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
+ struct lock_struct *locks = br_lck->lock_data;
enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */
#if ZERO_ZERO
is sorted in the zero zero case. */
for (i = 0; i < br_lck->num_locks; i++) {
- lock = &locks[i];
+ struct lock_struct *lock = &locks[i];
if (lock->lock_type == WRITE_LOCK &&
brl_same_context(&lock->context, &plock->context) &&
#endif
for (i = 0; i < br_lck->num_locks; i++) {
- lock = &locks[i];
+ struct lock_struct *lock = &locks[i];
/* Only remove our own locks that match in start, size, and flavour. */
if (brl_same_context(&lock->context, &plock->context) &&
br_lck->modified = True;
/* Unlock the underlying POSIX regions. */
- if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
+ if(lp_posix_locking(br_lck->fsp->conn->params)) {
release_posix_lock_windows_flavour(br_lck->fsp,
plock->start,
plock->size,
struct lock_struct *pend_lock = &locks[j];
/* Ignore non-pending locks. */
- if (pend_lock->lock_type != PENDING_LOCK) {
+ if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
continue;
}
/* We could send specific lock info here... */
- if (brl_pending_overlap(lock, pend_lock)) {
+ if (brl_pending_overlap(plock, pend_lock)) {
DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
procid_str_static(&pend_lock->context.pid )));
- become_root();
- message_send_pid(pend_lock->context.pid,
- MSG_SMB_UNLOCK,
- NULL, 0, True);
- unbecome_root();
+ messaging_send(msg_ctx, pend_lock->context.pid,
+ MSG_SMB_UNLOCK, &data_blob_null);
}
}
Unlock a range of bytes - POSIX semantics.
****************************************************************************/
-static BOOL brl_unlock_posix(struct byte_range_lock *br_lck, const struct lock_struct *plock)
+static BOOL brl_unlock_posix(struct messaging_context *msg_ctx,
+ struct byte_range_lock *br_lck,
+ const struct lock_struct *plock)
{
unsigned int i, j, count;
- struct lock_struct *lock = NULL;
struct lock_struct *tp;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
+ struct lock_struct *locks = br_lck->lock_data;
BOOL overlap_found = False;
/* No zero-zero locks for POSIX. */
count = 0;
for (i = 0; i < br_lck->num_locks; i++) {
+ struct lock_struct *lock = &locks[i];
struct lock_struct tmp_lock[3];
BOOL lock_was_added = False;
unsigned int tmp_count;
- lock = &locks[i];
-
/* Only remove our own locks - ignore fnum. */
- if (lock->lock_type == PENDING_LOCK ||
+ if (IS_PENDING_LOCK(lock->lock_type) ||
!brl_same_context(&lock->context, &plock->context)) {
memcpy(&tp[count], lock, sizeof(struct lock_struct));
count++;
}
/* Unlock any POSIX regions. */
- if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
+ if(lp_posix_locking(br_lck->fsp->conn->params)) {
release_posix_lock_posix_flavour(br_lck->fsp,
plock->start,
plock->size,
}
br_lck->num_locks = count;
- br_lck->lock_data = (void *)tp;
+ SAFE_FREE(br_lck->lock_data);
+ locks = tp;
+ br_lck->lock_data = tp;
br_lck->modified = True;
/* Send unlock messages to any pending waiters that overlap. */
- locks = tp;
for (j=0; j < br_lck->num_locks; j++) {
struct lock_struct *pend_lock = &locks[j];
/* Ignore non-pending locks. */
- if (pend_lock->lock_type != PENDING_LOCK) {
+ if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
continue;
}
/* We could send specific lock info here... */
- if (brl_pending_overlap(lock, pend_lock)) {
+ if (brl_pending_overlap(plock, pend_lock)) {
DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
procid_str_static(&pend_lock->context.pid )));
- become_root();
- message_send_pid(pend_lock->context.pid,
- MSG_SMB_UNLOCK,
- NULL, 0, True);
- unbecome_root();
+ messaging_send(msg_ctx, pend_lock->context.pid,
+ MSG_SMB_UNLOCK, &data_blob_null);
}
}
Unlock a range of bytes.
****************************************************************************/
-BOOL brl_unlock(struct byte_range_lock *br_lck,
+BOOL brl_unlock(struct messaging_context *msg_ctx,
+ struct byte_range_lock *br_lck,
uint32 smbpid,
- struct process_id pid,
+ struct server_id pid,
br_off start,
br_off size,
enum brl_flavour lock_flav)
lock.lock_flav = lock_flav;
if (lock_flav == WINDOWS_LOCK) {
- return brl_unlock_windows(br_lck, &lock);
+ return brl_unlock_windows(msg_ctx, br_lck, &lock);
} else {
- return brl_unlock_posix(br_lck, &lock);
+ return brl_unlock_posix(msg_ctx, br_lck, &lock);
}
}
BOOL brl_locktest(struct byte_range_lock *br_lck,
uint32 smbpid,
- struct process_id pid,
+ struct server_id pid,
br_off start,
br_off size,
enum brl_type lock_type,
BOOL ret = True;
unsigned int i;
struct lock_struct lock;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
+ const struct lock_struct *locks = br_lck->lock_data;
files_struct *fsp = br_lck->fsp;
lock.context.smbpid = smbpid;
* This only conflicts with Windows locks, not POSIX locks.
*/
- if(lp_posix_locking(fsp->conn->cnum) && (lock_flav == WINDOWS_LOCK)) {
+ if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);
DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
uint32 *psmbpid,
- struct process_id pid,
+ struct server_id pid,
br_off *pstart,
br_off *psize,
enum brl_type *plock_type,
{
unsigned int i;
struct lock_struct lock;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
+ const struct lock_struct *locks = br_lck->lock_data;
files_struct *fsp = br_lck->fsp;
lock.context.smbpid = *psmbpid;
/* Make sure existing locks don't conflict */
for (i=0; i < br_lck->num_locks; i++) {
- struct lock_struct *exlock = &locks[i];
+ const struct lock_struct *exlock = &locks[i];
BOOL conflict = False;
if (exlock->lock_flav == WINDOWS_LOCK) {
* see if there is a POSIX lock from a UNIX or NFS process.
*/
- if(lp_posix_locking(fsp->conn->cnum)) {
+ if(lp_posix_locking(fsp->conn->params)) {
BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);
DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
return NT_STATUS_OK;
}
-
/****************************************************************************
Remove a particular pending lock.
****************************************************************************/
-BOOL brl_remove_pending_lock(struct byte_range_lock *br_lck,
+BOOL brl_lock_cancel(struct byte_range_lock *br_lck,
uint32 smbpid,
- struct process_id pid,
+ struct server_id pid,
br_off start,
br_off size,
enum brl_flavour lock_flav)
{
unsigned int i;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
+ struct lock_struct *locks = br_lck->lock_data;
struct lock_context context;
context.smbpid = smbpid;
/* For pending locks we *always* care about the fnum. */
if (brl_same_context(&lock->context, &context) &&
lock->fnum == br_lck->fsp->fnum &&
- lock->lock_type == PENDING_LOCK &&
+ IS_PENDING_LOCK(lock->lock_type) &&
lock->lock_flav == lock_flav &&
lock->start == start &&
lock->size == size) {
fd and so we should not immediately close the fd.
****************************************************************************/
-void brl_close_fnum(struct byte_range_lock *br_lck)
+void brl_close_fnum(struct messaging_context *msg_ctx,
+ struct byte_range_lock *br_lck)
{
files_struct *fsp = br_lck->fsp;
uint16 tid = fsp->conn->cnum;
int fnum = fsp->fnum;
unsigned int i, j, dcount=0;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
- struct process_id pid = procid_self();
+ int num_deleted_windows_locks = 0;
+ struct lock_struct *locks = br_lck->lock_data;
+ struct server_id pid = procid_self();
BOOL unlock_individually = False;
- if(lp_posix_locking(fsp->conn->cnum) && !lp_posix_cifsu_locktype()) {
+ if(lp_posix_locking(fsp->conn->params)) {
/* Check if there are any Windows locks associated with this dev/ino
pair that are not this fnum. If so we need to call unlock on each
if (unlock_individually) {
struct lock_struct *locks_copy;
+ unsigned int num_locks_copy;
/* Copy the current lock array. */
- locks_copy = TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
- if (!locks_copy) {
- DEBUG(0,("brl_close_fnum: talloc fail.\n"));
+ if (br_lck->num_locks) {
+ locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
+ if (!locks_copy) {
+ smb_panic("brl_close_fnum: talloc failed");
+ }
+ } else {
+ locks_copy = NULL;
}
- for (i=0; i < br_lck->num_locks; i++) {
+ num_locks_copy = br_lck->num_locks;
+
+ for (i=0; i < num_locks_copy; i++) {
struct lock_struct *lock = &locks_copy[i];
if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
(lock->fnum == fnum)) {
- brl_unlock(br_lck,
+ brl_unlock(msg_ctx,
+ br_lck,
lock->context.smbpid,
pid,
lock->start,
/* We can bulk delete - any POSIX locks will be removed when the fd closes. */
- /* Zero any lock reference count on this dev/ino pair. */
- zero_windows_lock_ref_count(fsp);
-
/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */
for (i=0; i < br_lck->num_locks; i++) {
if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
del_this_lock = True;
+ num_deleted_windows_locks++;
} else if (lock->lock_flav == POSIX_LOCK) {
del_this_lock = True;
}
struct lock_struct *pend_lock = &locks[j];
/* Ignore our own or non-pending locks. */
- if (pend_lock->lock_type != PENDING_LOCK) {
+ if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
continue;
}
/* We could send specific lock info here... */
if (brl_pending_overlap(lock, pend_lock)) {
- become_root();
- message_send_pid(pend_lock->context.pid,
- MSG_SMB_UNLOCK,
- NULL, 0, True);
- unbecome_root();
+ messaging_send(msg_ctx, pend_lock->context.pid,
+ MSG_SMB_UNLOCK, &data_blob_null);
}
}
dcount++;
}
}
+
+ if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
+ /* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
+ reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
+ }
}
/****************************************************************************
return True;
}
+/* Carries the user callback and its opaque context through the
+   db_context traverse API, which only passes a single void *state. */
+struct brl_forall_cb {
+	void (*fn)(struct file_id id, struct server_id pid,
+		   enum brl_type lock_type,
+		   enum brl_flavour lock_flav,
+		   br_off start, br_off size,
+		   void *private_data);
+	void *private_data;
+};
+
/****************************************************************************
Traverse the whole database with this function, calling traverse_callback
on each lock.
****************************************************************************/
-static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
+static int traverse_fn(struct db_record *rec, void *state)
{
+ struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
struct lock_struct *locks;
- struct lock_key *key;
+ struct file_id *key;
unsigned int i;
unsigned int num_locks = 0;
unsigned int orig_num_locks = 0;
- BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;
-
/* In a traverse function we must make a copy of
dbuf before modifying it. */
- locks = (struct lock_struct *)memdup(dbuf.dptr, dbuf.dsize);
+ locks = (struct lock_struct *)memdup(rec->value.dptr,
+ rec->value.dsize);
if (!locks) {
return -1; /* Terminate traversal. */
}
- key = (struct lock_key *)kbuf.dptr;
- orig_num_locks = num_locks = dbuf.dsize/sizeof(*locks);
+ key = (struct file_id *)rec->key.dptr;
+ orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);
/* Ensure the lock db is clean of entries from invalid processes. */
}
if (orig_num_locks != num_locks) {
- dbuf.dptr = (char *)locks;
- dbuf.dsize = num_locks * sizeof(*locks);
-
- if (dbuf.dsize) {
- tdb_store(ttdb, kbuf, dbuf, TDB_REPLACE);
+ if (num_locks) {
+ TDB_DATA data;
+ data.dptr = (uint8_t *)locks;
+ data.dsize = num_locks*sizeof(struct lock_struct);
+ rec->store(rec, data, TDB_REPLACE);
} else {
- tdb_delete(ttdb, kbuf);
+ rec->delete_rec(rec);
}
}
for ( i=0; i<num_locks; i++) {
- traverse_callback(key->device,
- key->inode,
- locks[i].context.pid,
- locks[i].lock_type,
- locks[i].lock_flav,
- locks[i].start,
- locks[i].size);
+ cb->fn(*key,
+ locks[i].context.pid,
+ locks[i].lock_type,
+ locks[i].lock_flav,
+ locks[i].start,
+ locks[i].size,
+ cb->private_data);
}
SAFE_FREE(locks);
Call the specified function on each lock in the database.
********************************************************************/
-int brl_forall(BRLOCK_FN(fn))
+int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
+ enum brl_type lock_type,
+ enum brl_flavour lock_flav,
+ br_off start, br_off size,
+ void *private_data),
+ void *private_data)
{
- if (!tdb) {
+ struct brl_forall_cb cb;
+
+ if (!brlock_db) {
return 0;
}
- return tdb_traverse(tdb, traverse_fn, (void *)fn);
+ cb.fn = fn;
+ cb.private_data = private_data;
+ return brlock_db->traverse(brlock_db, traverse_fn, &cb);
}
/*******************************************************************
Unlock the record.
********************************************************************/
-static int byte_range_lock_destructor(void *p)
+static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
- struct byte_range_lock *br_lck =
- talloc_get_type_abort(p, struct byte_range_lock);
TDB_DATA key;
- key.dptr = (char *)&br_lck->key;
- key.dsize = sizeof(struct lock_key);
+ key.dptr = (uint8 *)&br_lck->key;
+ key.dsize = sizeof(struct file_id);
+
+ if (br_lck->read_only) {
+ SMB_ASSERT(!br_lck->modified);
+ }
if (!br_lck->modified) {
goto done;
if (br_lck->num_locks == 0) {
/* No locks - delete this entry. */
- if (tdb_delete(tdb, key) == -1) {
- smb_panic("Could not delete byte range lock entry\n");
+ NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(0, ("delete_rec returned %s\n",
+ nt_errstr(status)));
+ smb_panic("Could not delete byte range lock entry");
}
} else {
TDB_DATA data;
- data.dptr = (char *)br_lck->lock_data;
+ NTSTATUS status;
+
+ data.dptr = (uint8 *)br_lck->lock_data;
data.dsize = br_lck->num_locks * sizeof(struct lock_struct);
- if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
- smb_panic("Could not store byte range mode entry\n");
+ status = br_lck->record->store(br_lck->record, data,
+ TDB_REPLACE);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(0, ("store returned %s\n", nt_errstr(status)));
+ smb_panic("Could not store byte range mode entry");
}
}
done:
- tdb_chainunlock(tdb, key);
SAFE_FREE(br_lck->lock_data);
+ TALLOC_FREE(br_lck->record);
return 0;
}
TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/
-struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
- files_struct *fsp)
+static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
+ files_struct *fsp, BOOL read_only)
{
- TDB_DATA key;
- TDB_DATA data;
+ TDB_DATA key, data;
struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
if (br_lck == NULL) {
br_lck->fsp = fsp;
br_lck->num_locks = 0;
br_lck->modified = False;
- memset(&br_lck->key, '\0', sizeof(struct lock_key));
- br_lck->key.device = fsp->dev;
- br_lck->key.inode = fsp->inode;
+ memset(&br_lck->key, '\0', sizeof(struct file_id));
+ br_lck->key = fsp->file_id;
- key.dptr = (char *)&br_lck->key;
- key.dsize = sizeof(struct lock_key);
+ key.dptr = (uint8 *)&br_lck->key;
+ key.dsize = sizeof(struct file_id);
- if (tdb_chainlock(tdb, key) != 0) {
- DEBUG(3, ("Could not lock byte range lock entry\n"));
- TALLOC_FREE(br_lck);
- return NULL;
+ if (!fsp->lockdb_clean) {
+ /* We must be read/write to clean
+ the dead entries. */
+ read_only = False;
+ }
+
+ if (read_only) {
+ if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
+ DEBUG(3, ("Could not fetch byte range lock record\n"));
+ TALLOC_FREE(br_lck);
+ return NULL;
+ }
+ br_lck->record = NULL;
+ }
+ else {
+ br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);
+
+ if (br_lck->record == NULL) {
+ DEBUG(3, ("Could not lock byte range lock entry\n"));
+ TALLOC_FREE(br_lck);
+ return NULL;
+ }
+
+ data = br_lck->record->value;
}
+ br_lck->read_only = read_only;
+
talloc_set_destructor(br_lck, byte_range_lock_destructor);
- data = tdb_fetch(tdb, key);
- br_lck->lock_data = (void *)data.dptr;
br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
+ br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct, br_lck->num_locks);
+ if ((br_lck->num_locks != 0) && (br_lck->lock_data == NULL)) {
+ DEBUG(0, ("malloc failed\n"));
+ TALLOC_FREE(br_lck);
+ return NULL;
+ }
+ memcpy(br_lck->lock_data, data.dptr, data.dsize);
+
if (!fsp->lockdb_clean) {
+ int orig_num_locks = br_lck->num_locks;
/* This is the first time we've accessed this. */
/* Go through and ensure all entries exist - remove any that don't. */
/* Makes the lockdb self cleaning at low cost. */
- struct lock_struct *locks =
- (struct lock_struct *)br_lck->lock_data;
-
- if (!validate_lock_entries(&br_lck->num_locks, &locks)) {
+ if (!validate_lock_entries(&br_lck->num_locks,
+ &br_lck->lock_data)) {
SAFE_FREE(br_lck->lock_data);
TALLOC_FREE(br_lck);
return NULL;
}
- /*
- * validate_lock_entries might have changed locks. We can't
- * use a direct pointer here because otherwise gcc warnes
- * about strict aliasing rules being violated.
- */
- br_lck->lock_data = locks;
+ /* Ensure invalid locks are cleaned up in the destructor. */
+ if (orig_num_locks != br_lck->num_locks) {
+ br_lck->modified = True;
+ }
/* Mark the lockdb as "clean" as seen from this open file. */
fsp->lockdb_clean = True;
if (DEBUGLEVEL >= 10) {
unsigned int i;
- struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
- DEBUG(10,("brl_get_locks: %u current locks on dev=%.0f, inode=%.0f\n",
+ struct lock_struct *locks = br_lck->lock_data;
+ DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
br_lck->num_locks,
- (double)fsp->dev, (double)fsp->inode ));
+ file_id_static_string(&fsp->file_id)));
for( i = 0; i < br_lck->num_locks; i++) {
print_lock_struct(i, &locks[i]);
}
}
return br_lck;
}
+
+struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
+ files_struct *fsp)
+{
+ return brl_get_locks_internal(mem_ctx, fsp, False);
+}
+
+struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
+ files_struct *fsp)
+{
+ return brl_get_locks_internal(mem_ctx, fsp, True);
+}
+
+/* Accumulator used by brl_revalidate: grows an array of server_ids of
+   all processes that currently hold a pending byte-range lock. */
+struct brl_revalidate_state {
+	ssize_t array_size;	/* capacity bookkeeping; -1 flags an allocation failure */
+	uint32 num_pids;	/* number of entries collected so far */
+	struct server_id *pids;	/* collected pids; may contain duplicates */
+};
+
+/*
+ * Collect PIDs of all processes with pending entries
+ */
+
+static void brl_revalidate_collect(struct file_id id, struct server_id pid,
+				   enum brl_type lock_type,
+				   enum brl_flavour lock_flav,
+				   br_off start, br_off size,
+				   void *private_data)
+{
+	struct brl_revalidate_state *state =
+		(struct brl_revalidate_state *)private_data;
+
+	/* Only processes that are *waiting* (pending lock entries) need a
+	   retry message; granted locks are ignored. */
+	if (!IS_PENDING_LOCK(lock_type)) {
+		return;
+	}
+
+	/* NOTE(review): add_to_large_array presumably records failure by
+	   setting array_size to -1, which brl_revalidate checks afterwards -
+	   confirm that contract in its definition. */
+	add_to_large_array(state, sizeof(pid), (void *)&pid,
+			   &state->pids, &state->num_pids,
+			   &state->array_size);
+}
+
+/*
+ * qsort callback to sort the processes
+ */
+
+static int compare_procids(const void *p1, const void *p2)
+{
+	const struct server_id *i1 = (const struct server_id *)p1;
+	const struct server_id *i2 = (const struct server_id *)p2;
+
+	/* Order by pid so that duplicate entries end up adjacent and the
+	   caller can skip repeats after qsort. */
+	if (i1->pid < i2->pid) return -1;
+	/* BUG FIX: was "i2->pid > i2->pid" (self-comparison, always false),
+	   so the comparator never returned 1 and did not define a valid
+	   total order for qsort. */
+	if (i1->pid > i2->pid) return 1;
+	return 0;
+}
+
+/*
+ * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
+ * locks so that they retry. Mainly used in the cluster code after a node has
+ * died.
+ *
+ * Done in two steps to avoid double-sends: First we collect all entries in an
+ * array, then qsort that array and only send to non-dupes.
+ */
+
+static void brl_revalidate(struct messaging_context *msg_ctx,
+			   void *private_data,
+			   uint32_t msg_type,
+			   struct server_id server_id,
+			   DATA_BLOB *data)
+{
+	struct brl_revalidate_state *state;
+	uint32 i;
+	struct server_id last_pid;
+
+	if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
+		DEBUG(0, ("talloc failed\n"));
+		return;
+	}
+
+	/* Walk the whole brlock db, collecting the pid of every waiter. */
+	brl_forall(brl_revalidate_collect, state);
+
+	/* array_size == -1 signals an allocation failure inside the
+	   collector callback. */
+	if (state->array_size == -1) {
+		DEBUG(0, ("talloc failed\n"));
+		goto done;
+	}
+
+	if (state->num_pids == 0) {
+		goto done;
+	}
+
+	/* Sort so duplicate pids are adjacent; each process is then
+	   messaged at most once below. */
+	qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
+	      compare_procids);
+
+	/* Zeroed server_id acts as the "no previous pid" sentinel.
+	   NOTE(review): assumes a real server_id is never all-zero. */
+	ZERO_STRUCT(last_pid);
+
+	for (i=0; i<state->num_pids; i++) {
+		if (procid_equal(&last_pid, &state->pids[i])) {
+			/*
+			 * We've seen that one already
+			 */
+			continue;
+		}
+
+		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
+			       &data_blob_null);
+		last_pid = state->pids[i];
+	}
+
+ done:
+	/* Frees state and (presumably) the pids array talloc'ed under it. */
+	TALLOC_FREE(state);
+	return;
+}
+
+/* Register the handler that re-signals all pending byte-range lock
+   waiters when a MSG_SMB_BRL_VALIDATE message arrives (e.g. after a
+   cluster node death). */
+void brl_register_msgs(struct messaging_context *msg_ctx)
+{
+	messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
+			   brl_revalidate);
+}