X-Git-Url: http://git.samba.org/samba.git/?p=nivanova%2Fsamba-autobuild%2F.git;a=blobdiff_plain;f=source3%2Flocking%2Fbrlock.c;h=9a9fd157892f95716e05b30d6320acccebccc0aa;hp=e0cc4eec1e0cd42e1641dc82face5f6c4733e5f9;hb=d5e6a47f064a3923b1e257ab84fa7ccd7c4f89f4;hpb=b1ce226af8b61ad7e3c37860a59c6715012e738b

diff --git a/source3/locking/brlock.c b/source3/locking/brlock.c
index e0cc4eec1e0..9a9fd157892 100644
--- a/source3/locking/brlock.c
+++ b/source3/locking/brlock.c
@@ -5,20 +5,19 @@
 
    Copyright (C) Andrew Tridgell 1992-2000
    Copyright (C) Jeremy Allison 1992-2000
-   
+
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
+   the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.
-   
+
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-   
+
    You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
 
 /* This module implements a tdb based byte range locking service,
@@ -26,6 +25,12 @@
    used. This allows us to provide the same semantics as NT */
 
 #include "includes.h"
+#include "system/filesys.h"
+#include "locking/proto.h"
+#include "smbd/globals.h"
+#include "dbwrap.h"
+#include "serverid.h"
+#include "messages.h"
 
 #undef DBGC_CLASS
 #define DBGC_CLASS DBGC_LOCKING
@@ -42,12 +47,12 @@ static struct db_context *brlock_db;
 
 static void print_lock_struct(unsigned int i, struct lock_struct *pls)
 {
-	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
+	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
 			i,
-			(unsigned int)pls->context.smbpid,
+			(unsigned long long)pls->context.smblctx,
 			(unsigned int)pls->context.tid,
-			(unsigned int)procid_to_pid(&pls->context.pid) ));
-	
+			procid_str(talloc_tos(), &pls->context.pid) ));
+
 	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
 		(double)pls->start,
 		(double)pls->size,
@@ -60,11 +65,11 @@ static void print_lock_struct(unsigned int i, struct lock_struct *pls)
  See if two locking contexts are equal.
 ****************************************************************************/
 
-BOOL brl_same_context(const struct lock_context *ctx1,
+bool brl_same_context(const struct lock_context *ctx1,
 		      const struct lock_context *ctx2)
 {
 	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
-		(ctx1->smbpid == ctx2->smbpid) &&
+		(ctx1->smblctx == ctx2->smblctx) &&
 		(ctx1->tid == ctx2->tid));
 }
 
@@ -72,9 +77,10 @@ BOOL brl_same_context(const struct lock_context *ctx1,
  See if lck1 and lck2 overlap.
 ****************************************************************************/
 
-static BOOL brl_overlap(const struct lock_struct *lck1,
+static bool brl_overlap(const struct lock_struct *lck1,
 			const struct lock_struct *lck2)
 {
+	/* XXX Remove for Win7 compatibility. */
 	/* this extra check is not redundent - it copes with locks
 	   that go beyond the end of 64 bit file space */
 	if (lck1->size != 0 &&
@@ -94,7 +100,7 @@ static BOOL brl_overlap(const struct lock_struct *lck1,
  See if lock2 can be added when lock1 is in place.
****************************************************************************/ -static BOOL brl_conflict(const struct lock_struct *lck1, +static bool brl_conflict(const struct lock_struct *lck1, const struct lock_struct *lck2) { /* Ignore PENDING locks. */ @@ -106,8 +112,11 @@ static BOOL brl_conflict(const struct lock_struct *lck1, return False; } - if (brl_same_context(&lck1->context, &lck2->context) && - lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) { + /* A READ lock can stack on top of a WRITE lock if they have the same + * context & fnum. */ + if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK && + brl_same_context(&lck1->context, &lck2->context) && + lck1->fnum == lck2->fnum) { return False; } @@ -120,7 +129,7 @@ static BOOL brl_conflict(const struct lock_struct *lck1, know already match. ****************************************************************************/ -static BOOL brl_conflict_posix(const struct lock_struct *lck1, +static bool brl_conflict_posix(const struct lock_struct *lck1, const struct lock_struct *lck2) { #if defined(DEVELOPER) @@ -148,7 +157,7 @@ static BOOL brl_conflict_posix(const struct lock_struct *lck1, } #if ZERO_ZERO -static BOOL brl_conflict1(const struct lock_struct *lck1, +static bool brl_conflict1(const struct lock_struct *lck1, const struct lock_struct *lck2) { if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type)) @@ -171,7 +180,7 @@ static BOOL brl_conflict1(const struct lock_struct *lck1, lck2->start >= (lck1->start + lck1->size)) { return False; } - + return True; } #endif @@ -182,7 +191,7 @@ static BOOL brl_conflict1(const struct lock_struct *lck1, This is never used in the POSIX lock case. ****************************************************************************/ -static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2) +static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2) { if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type)) return False; @@ -214,7 +223,7 @@ static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock Check if an unlock overlaps a pending lock. ****************************************************************************/ -static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock) +static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock) { if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start)) return True; @@ -229,7 +238,7 @@ static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct loc app depends on this ? ****************************************************************************/ -static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, BOOL blocking_lock) +NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock) { if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) { /* amazing the little things you learn with a test @@ -259,16 +268,27 @@ static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *loc Open up the brlock.tdb database. 
****************************************************************************/ -void brl_init(int read_only) +void brl_init(bool read_only) { + int tdb_flags; + if (brlock_db) { return; } + + tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH; + + if (!lp_clustering()) { + /* + * We can't use the SEQNUM trick to cache brlock + * entries in the clustering case because ctdb seqnum + * propagation has a delay. + */ + tdb_flags |= TDB_SEQNUM; + } + brlock_db = db_open(NULL, lock_path("brlock.tdb"), - lp_open_files_db_hash_size(), - TDB_DEFAULT - |TDB_VOLATILE - |(read_only?0x0:TDB_CLEAR_IF_FIRST), + lp_open_files_db_hash_size(), tdb_flags, read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 ); if (!brlock_db) { DEBUG(0,("Failed to open byte range locking database %s\n", @@ -281,11 +301,8 @@ void brl_init(int read_only) Close down the brlock.tdb database. ****************************************************************************/ -void brl_shutdown(int read_only) +void brl_shutdown(void) { - if (!brlock_db) { - return; - } TALLOC_FREE(brlock_db); } @@ -311,18 +328,26 @@ static int lock_compare(const struct lock_struct *lck1, Lock a range of bytes - Windows lock semantics. ****************************************************************************/ -static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck, - struct lock_struct *plock, BOOL blocking_lock) +NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck, + struct lock_struct *plock, bool blocking_lock) { unsigned int i; files_struct *fsp = br_lck->fsp; struct lock_struct *locks = br_lck->lock_data; + NTSTATUS status; + + SMB_ASSERT(plock->lock_type != UNLOCK_LOCK); + + if ((plock->start + plock->size - 1 < plock->start) && + plock->size != 0) { + return NT_STATUS_INVALID_LOCK_RANGE; + } for (i=0; i < br_lck->num_locks; i++) { /* Do any Windows or POSIX locks conflict ? */ if (brl_conflict(&locks[i], plock)) { /* Remember who blocked us. */ - plock->context.smbpid = locks[i].context.smbpid; + plock->context.smblctx = locks[i].context.smblctx; return brl_lock_failed(fsp,plock,blocking_lock); } #if ZERO_ZERO @@ -333,6 +358,10 @@ static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck, #endif } + if (!IS_PENDING_LOCK(plock->lock_type)) { + contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL); + } + /* We can get the Windows lock, now see if it needs to be mapped into a lower level POSIX one, and if so can we get it ? */ @@ -349,12 +378,14 @@ static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck, &errno_ret)) { /* We don't know who blocked us. 
*/ - plock->context.smbpid = 0xFFFFFFFF; + plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL; if (errno_ret == EACCES || errno_ret == EAGAIN) { - return NT_STATUS_FILE_LOCK_CONFLICT; + status = NT_STATUS_FILE_LOCK_CONFLICT; + goto fail; } else { - return map_nt_error_from_unix(errno); + status = map_nt_error_from_unix(errno); + goto fail; } } } @@ -362,7 +393,8 @@ static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck, /* no conflicts - add it to the list of locks */ locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks)); if (!locks) { - return NT_STATUS_NO_MEMORY; + status = NT_STATUS_NO_MEMORY; + goto fail; } memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct)); @@ -371,18 +403,22 @@ static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck, br_lck->modified = True; return NT_STATUS_OK; + fail: + if (!IS_PENDING_LOCK(plock->lock_type)) { + contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL); + } + return status; } /**************************************************************************** Cope with POSIX range splits and merges. ****************************************************************************/ -static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Output array. */ - const struct lock_struct *ex, /* existing lock. */ - const struct lock_struct *plock, /* proposed lock. */ - BOOL *lock_was_added) +static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Output array. */ + struct lock_struct *ex, /* existing lock. */ + struct lock_struct *plock) /* proposed lock. */ { - BOOL lock_types_differ = (ex->lock_type != plock->lock_type); + bool lock_types_differ = (ex->lock_type != plock->lock_type); /* We can't merge non-conflicting locks on different context - ignore fnum. */ @@ -397,21 +433,23 @@ static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Ou /* Did we overlap ? */ /********************************************* - +---------+ - | ex | - +---------+ - +-------+ - | plock | - +-------+ + +---------+ + | ex | + +---------+ + +-------+ + | plock | + +-------+ OR.... - +---------+ - | ex | - +---------+ + +---------+ + | ex | + +---------+ **********************************************/ if ( (ex->start > (plock->start + plock->size)) || - (plock->start > (ex->start + ex->size))) { + (plock->start > (ex->start + ex->size))) { + /* No overlap with this lock - copy existing. */ + memcpy(&lck_arr[0], ex, sizeof(struct lock_struct)); return 1; } @@ -423,26 +461,109 @@ OR.... +---------------------------+ | plock | -> replace with plock. +---------------------------+ +OR + +---------------+ + | ex | + +---------------+ + +---------------------------+ + | plock | -> replace with plock. + +---------------------------+ + **********************************************/ if ( (ex->start >= plock->start) && - (ex->start + ex->size <= plock->start + plock->size) ) { - memcpy(&lck_arr[0], plock, sizeof(struct lock_struct)); - *lock_was_added = True; - return 1; + (ex->start + ex->size <= plock->start + plock->size) ) { + + /* Replace - discard existing lock. */ + + return 0; } /********************************************* +Adjacent after. + +-------+ + | ex | + +-------+ + +---------------+ + | plock | + +---------------+ + +BECOMES.... + +---------------+-------+ + | plock | ex | - different lock types. + +---------------+-------+ +OR.... (merge) + +-----------------------+ + | plock | - same lock type. 
+ +-----------------------+ +**********************************************/ + + if (plock->start + plock->size == ex->start) { + + /* If the lock types are the same, we merge, if different, we + add the remainder of the old lock. */ + + if (lock_types_differ) { + /* Add existing. */ + memcpy(&lck_arr[0], ex, sizeof(struct lock_struct)); + return 1; + } else { + /* Merge - adjust incoming lock as we may have more + * merging to come. */ + plock->size += ex->size; + return 0; + } + } + +/********************************************* +Adjacent before. + +-------+ + | ex | + +-------+ + +---------------+ + | plock | + +---------------+ +BECOMES.... + +-------+---------------+ + | ex | plock | - different lock types + +-------+---------------+ + +OR.... (merge) + +-----------------------+ + | plock | - same lock type. + +-----------------------+ + +**********************************************/ + + if (ex->start + ex->size == plock->start) { + + /* If the lock types are the same, we merge, if different, we + add the existing lock. */ + + if (lock_types_differ) { + memcpy(&lck_arr[0], ex, sizeof(struct lock_struct)); + return 1; + } else { + /* Merge - adjust incoming lock as we may have more + * merging to come. */ + plock->start = ex->start; + plock->size += ex->size; + return 0; + } + } + +/********************************************* +Overlap after. +-----------------------+ | ex | +-----------------------+ +---------------+ | plock | +---------------+ -OR.... - +-------+ - | ex | - +-------+ +OR + +----------------+ + | ex | + +----------------+ +---------------+ | plock | +---------------+ @@ -453,60 +574,57 @@ BECOMES.... +---------------+-------+ OR.... (merge) +-----------------------+ - | ex | - same lock type. + | plock | - same lock type. +-----------------------+ **********************************************/ if ( (ex->start >= plock->start) && - (ex->start <= plock->start + plock->size) && - (ex->start + ex->size > plock->start + plock->size) ) { - - *lock_was_added = True; + (ex->start <= plock->start + plock->size) && + (ex->start + ex->size > plock->start + plock->size) ) { /* If the lock types are the same, we merge, if different, we - add the new lock before the old. */ + add the remainder of the old lock. */ if (lock_types_differ) { - /* Add new. */ - memcpy(&lck_arr[0], plock, sizeof(struct lock_struct)); - memcpy(&lck_arr[1], ex, sizeof(struct lock_struct)); + /* Add remaining existing. */ + memcpy(&lck_arr[0], ex, sizeof(struct lock_struct)); /* Adjust existing start and size. */ - lck_arr[1].start = plock->start + plock->size; - lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size); - return 2; - } else { - /* Merge. */ - memcpy(&lck_arr[0], plock, sizeof(struct lock_struct)); - /* Set new start and size. */ - lck_arr[0].start = plock->start; - lck_arr[0].size = (ex->start + ex->size) - plock->start; + lck_arr[0].start = plock->start + plock->size; + lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size); return 1; + } else { + /* Merge - adjust incoming lock as we may have more + * merging to come. */ + plock->size += (ex->start + ex->size) - (plock->start + plock->size); + return 0; } } /********************************************* - +-----------------------+ - | ex | - +-----------------------+ - +---------------+ - | plock | - +---------------+ -OR.... - +-------+ - | ex | - +-------+ - +---------------+ - | plock | - +---------------+ +Overlap before. 
+ +-----------------------+ + | ex | + +-----------------------+ + +---------------+ + | plock | + +---------------+ +OR + +-------------+ + | ex | + +-------------+ + +---------------+ + | plock | + +---------------+ + BECOMES.... - +-------+---------------+ - | ex | plock | - different lock types - +-------+---------------+ + +-------+---------------+ + | ex | plock | - different lock types + +-------+---------------+ OR.... (merge) - +-----------------------+ - | ex | - same lock type. - +-----------------------+ + +-----------------------+ + | plock | - same lock type. + +-----------------------+ **********************************************/ @@ -514,27 +632,25 @@ OR.... (merge) (ex->start + ex->size >= plock->start) && (ex->start + ex->size <= plock->start + plock->size) ) { - *lock_was_added = True; - /* If the lock types are the same, we merge, if different, we - add the new lock after the old. */ + add the truncated old lock. */ if (lock_types_differ) { memcpy(&lck_arr[0], ex, sizeof(struct lock_struct)); - memcpy(&lck_arr[1], plock, sizeof(struct lock_struct)); /* Adjust existing size. */ lck_arr[0].size = plock->start - ex->start; - return 2; - } else { - /* Merge. */ - memcpy(&lck_arr[0], ex, sizeof(struct lock_struct)); - /* Adjust existing size. */ - lck_arr[0].size = (plock->start + plock->size) - ex->start; return 1; + } else { + /* Merge - adjust incoming lock as we may have more + * merging to come. MUST ADJUST plock SIZE FIRST ! */ + plock->size += (plock->start - ex->start); + plock->start = ex->start; + return 0; } } /********************************************* +Complete overlap. +---------------------------+ | ex | +---------------------------+ @@ -547,32 +663,31 @@ BECOMES..... +-------+---------+---------+ OR +---------------------------+ - | ex | - same lock type. + | plock | - same lock type. +---------------------------+ **********************************************/ if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) { - *lock_was_added = True; if (lock_types_differ) { /* We have to split ex into two locks here. */ memcpy(&lck_arr[0], ex, sizeof(struct lock_struct)); - memcpy(&lck_arr[1], plock, sizeof(struct lock_struct)); - memcpy(&lck_arr[2], ex, sizeof(struct lock_struct)); + memcpy(&lck_arr[1], ex, sizeof(struct lock_struct)); /* Adjust first existing size. */ lck_arr[0].size = plock->start - ex->start; /* Adjust second existing start and size. */ - lck_arr[2].start = plock->start + plock->size; - lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size); - return 3; + lck_arr[1].start = plock->start + plock->size; + lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size); + return 2; } else { - /* Just eat plock. */ - memcpy(&lck_arr[0], ex, sizeof(struct lock_struct)); - return 1; + /* Just eat the existing locks, merge them into plock. */ + plock->start = ex->start; + plock->size = ex->size; + return 0; } } @@ -593,11 +708,12 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx, struct byte_range_lock *br_lck, struct lock_struct *plock) { - unsigned int i, count; + unsigned int i, count, posix_count; struct lock_struct *locks = br_lck->lock_data; struct lock_struct *tp; - BOOL lock_was_added = False; - BOOL signal_pending_read = False; + bool signal_pending_read = False; + bool break_oplocks = false; + NTSTATUS status; /* No zero-zero locks for POSIX. 
*/ if (plock->start == 0 && plock->size == 0) { @@ -605,8 +721,7 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx, } /* Don't allow 64-bit lock wrap. */ - if (plock->start + plock->size < plock->start || - plock->start + plock->size < plock->size) { + if (plock->start + plock->size - 1 < plock->start) { return NT_STATUS_INVALID_PARAMETER; } @@ -618,8 +733,9 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx, if (!tp) { return NT_STATUS_NO_MEMORY; } - - count = 0; + + count = posix_count = 0; + for (i=0; i < br_lck->num_locks; i++) { struct lock_struct *curr_lock = &locks[i]; @@ -636,32 +752,62 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx, /* No games with error messages. */ SAFE_FREE(tp); /* Remember who blocked us. */ - plock->context.smbpid = curr_lock->context.smbpid; + plock->context.smblctx = curr_lock->context.smblctx; return NT_STATUS_FILE_LOCK_CONFLICT; } /* Just copy the Windows lock into the new array. */ memcpy(&tp[count], curr_lock, sizeof(struct lock_struct)); count++; } else { + unsigned int tmp_count = 0; + /* POSIX conflict semantics are different. */ if (brl_conflict_posix(curr_lock, plock)) { /* Can't block ourselves with POSIX locks. */ /* No games with error messages. */ SAFE_FREE(tp); /* Remember who blocked us. */ - plock->context.smbpid = curr_lock->context.smbpid; + plock->context.smblctx = curr_lock->context.smblctx; return NT_STATUS_FILE_LOCK_CONFLICT; } /* Work out overlaps. */ - count += brlock_posix_split_merge(&tp[count], curr_lock, plock, &lock_was_added); + tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock); + posix_count += tmp_count; + count += tmp_count; } } - if (!lock_was_added) { - memcpy(&tp[count], plock, sizeof(struct lock_struct)); - count++; + /* + * Break oplocks while we hold a brl. Since lock() and unlock() calls + * are not symetric with POSIX semantics, we cannot guarantee our + * contend_level2_oplocks_begin/end calls will be acquired and + * released one-for-one as with Windows semantics. Therefore we only + * call contend_level2_oplocks_begin if this is the first POSIX brl on + * the file. + */ + break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) && + posix_count == 0); + if (break_oplocks) { + contend_level2_oplocks_begin(br_lck->fsp, + LEVEL2_CONTEND_POSIX_BRL); + } + + /* Try and add the lock in order, sorted by lock start. */ + for (i=0; i < count; i++) { + struct lock_struct *curr_lock = &tp[i]; + + if (curr_lock->start <= plock->start) { + continue; + } + } + + if (i < count) { + memmove(&tp[i+1], &tp[i], + (count - i)*sizeof(struct lock_struct)); } + memcpy(&tp[i], plock, sizeof(struct lock_struct)); + count++; /* We can get the POSIX lock, now see if it needs to be mapped into a lower level POSIX one, and if so can @@ -681,23 +827,30 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx, &errno_ret)) { /* We don't know who blocked us. */ - plock->context.smbpid = 0xFFFFFFFF; + plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL; if (errno_ret == EACCES || errno_ret == EAGAIN) { SAFE_FREE(tp); - return NT_STATUS_FILE_LOCK_CONFLICT; + status = NT_STATUS_FILE_LOCK_CONFLICT; + goto fail; } else { SAFE_FREE(tp); - return map_nt_error_from_unix(errno); + status = map_nt_error_from_unix(errno); + goto fail; } } } - /* Realloc so we don't leak entries per lock call. 
*/ - tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks)); - if (!tp) { - return NT_STATUS_NO_MEMORY; + /* If we didn't use all the allocated size, + * Realloc so we don't leak entries per lock call. */ + if (count < br_lck->num_locks + 2) { + tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks)); + if (!tp) { + status = NT_STATUS_NO_MEMORY; + goto fail; + } } + br_lck->num_locks = count; SAFE_FREE(br_lck->lock_data); br_lck->lock_data = tp; @@ -729,6 +882,23 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx, } return NT_STATUS_OK; + fail: + if (break_oplocks) { + contend_level2_oplocks_end(br_lck->fsp, + LEVEL2_CONTEND_POSIX_BRL); + } + return status; +} + +NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle, + struct byte_range_lock *br_lck, + struct lock_struct *plock, + bool blocking_lock, + struct blocking_lock_record *blr) +{ + VFS_FIND(brl_lock_windows); + return handle->fns->brl_lock_windows(handle, br_lck, plock, + blocking_lock, blr); } /**************************************************************************** @@ -737,14 +907,15 @@ static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx, NTSTATUS brl_lock(struct messaging_context *msg_ctx, struct byte_range_lock *br_lck, - uint32 smbpid, + uint64_t smblctx, struct server_id pid, br_off start, br_off size, enum brl_type lock_type, enum brl_flavour lock_flav, - BOOL blocking_lock, - uint32 *psmbpid) + bool blocking_lock, + uint64_t *psmblctx, + struct blocking_lock_record *blr) { NTSTATUS ret; struct lock_struct lock; @@ -755,7 +926,12 @@ NTSTATUS brl_lock(struct messaging_context *msg_ctx, } #endif - lock.context.smbpid = smbpid; +#ifdef DEVELOPER + /* Quieten valgrind on test. */ + memset(&lock, '\0', sizeof(lock)); +#endif + + lock.context.smblctx = smblctx; lock.context.pid = pid; lock.context.tid = br_lck->fsp->conn->cnum; lock.start = start; @@ -765,19 +941,20 @@ NTSTATUS brl_lock(struct messaging_context *msg_ctx, lock.lock_flav = lock_flav; if (lock_flav == WINDOWS_LOCK) { - ret = brl_lock_windows(br_lck, &lock, blocking_lock); + ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck, + &lock, blocking_lock, blr); } else { ret = brl_lock_posix(msg_ctx, br_lck, &lock); } #if ZERO_ZERO /* sort the lock list */ - qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare); + TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare); #endif /* If we're returning an error, return who blocked us. */ - if (!NT_STATUS_IS_OK(ret) && psmbpid) { - *psmbpid = lock.context.smbpid; + if (!NT_STATUS_IS_OK(ret) && psmblctx) { + *psmblctx = lock.context.smblctx; } return ret; } @@ -786,7 +963,7 @@ NTSTATUS brl_lock(struct messaging_context *msg_ctx, Unlock a range of bytes - Windows semantics. ****************************************************************************/ -static BOOL brl_unlock_windows(struct messaging_context *msg_ctx, +bool brl_unlock_windows_default(struct messaging_context *msg_ctx, struct byte_range_lock *br_lck, const struct lock_struct *plock) { @@ -794,6 +971,8 @@ static BOOL brl_unlock_windows(struct messaging_context *msg_ctx, struct lock_struct *locks = br_lck->lock_data; enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */ + SMB_ASSERT(plock->lock_type == UNLOCK_LOCK); + #if ZERO_ZERO /* Delete write locks by preference... The lock list is sorted in the zero zero case. 
*/ @@ -823,6 +1002,10 @@ static BOOL brl_unlock_windows(struct messaging_context *msg_ctx, for (i = 0; i < br_lck->num_locks; i++) { struct lock_struct *lock = &locks[i]; + if (IS_PENDING_LOCK(lock->lock_type)) { + continue; + } + /* Only remove our own locks that match in start, size, and flavour. */ if (brl_same_context(&lock->context, &plock->context) && lock->fnum == plock->fnum && @@ -882,6 +1065,7 @@ static BOOL brl_unlock_windows(struct messaging_context *msg_ctx, } } + contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL); return True; } @@ -889,14 +1073,14 @@ static BOOL brl_unlock_windows(struct messaging_context *msg_ctx, Unlock a range of bytes - POSIX semantics. ****************************************************************************/ -static BOOL brl_unlock_posix(struct messaging_context *msg_ctx, +static bool brl_unlock_posix(struct messaging_context *msg_ctx, struct byte_range_lock *br_lck, - const struct lock_struct *plock) + struct lock_struct *plock) { unsigned int i, j, count; struct lock_struct *tp; struct lock_struct *locks = br_lck->lock_data; - BOOL overlap_found = False; + bool overlap_found = False; /* No zero-zero locks for POSIX. */ if (plock->start == 0 && plock->size == 0) { @@ -923,8 +1107,6 @@ static BOOL brl_unlock_posix(struct messaging_context *msg_ctx, count = 0; for (i = 0; i < br_lck->num_locks; i++) { struct lock_struct *lock = &locks[i]; - struct lock_struct tmp_lock[3]; - BOOL lock_was_added = False; unsigned int tmp_count; /* Only remove our own locks - ignore fnum. */ @@ -935,64 +1117,50 @@ static BOOL brl_unlock_posix(struct messaging_context *msg_ctx, continue; } - /* Work out overlaps. */ - tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added); - - if (tmp_count == 1) { - /* Ether the locks didn't overlap, or the unlock completely - overlapped this lock. If it didn't overlap, then there's - no change in the locks. */ - if (tmp_lock[0].lock_type != UNLOCK_LOCK) { - SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type); - /* No change in this lock. */ - memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct)); - count++; - } else { - SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK); - overlap_found = True; - } - continue; - } else if (tmp_count == 2) { - /* The unlock overlapped an existing lock. Copy the truncated - lock into the lock array. */ - if (tmp_lock[0].lock_type != UNLOCK_LOCK) { - SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type); - SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK); - memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct)); - if (tmp_lock[0].size != locks[i].size) { - overlap_found = True; - } - } else { - SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK); - SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type); - memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct)); - if (tmp_lock[1].start != locks[i].start) { - overlap_found = True; - } + if (lock->lock_flav == WINDOWS_LOCK) { + /* Do any Windows flavour locks conflict ? */ + if (brl_conflict(lock, plock)) { + SAFE_FREE(tp); + return false; } + /* Just copy the Windows lock into the new array. */ + memcpy(&tp[count], lock, sizeof(struct lock_struct)); count++; continue; - } else { - /* tmp_count == 3 - (we split a lock range in two). */ - SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type); - SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK); - SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type); + } + + /* Work out overlaps. 
*/ + tmp_count = brlock_posix_split_merge(&tp[count], lock, plock); + + if (tmp_count == 0) { + /* plock overlapped the existing lock completely, + or replaced it. Don't copy the existing lock. */ + overlap_found = true; + } else if (tmp_count == 1) { + /* Either no overlap, (simple copy of existing lock) or + * an overlap of an existing lock. */ + /* If the lock changed size, we had an overlap. */ + if (tp[count].size != lock->size) { + overlap_found = true; + } + count += tmp_count; + } else if (tmp_count == 2) { + /* We split a lock range in two. */ + overlap_found = true; + count += tmp_count; - memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct)); - count++; - memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct)); - count++; - overlap_found = True; /* Optimisation... */ /* We know we're finished here as we can't overlap any more POSIX locks. Copy the rest of the lock array. */ + if (i < br_lck->num_locks - 1) { - memcpy(&tp[count], &locks[i+1], + memcpy(&tp[count], &locks[i+1], sizeof(*locks)*((br_lck->num_locks-1) - i)); count += ((br_lck->num_locks-1) - i); } break; } + } if (!overlap_found) { @@ -1025,6 +1193,9 @@ static BOOL brl_unlock_posix(struct messaging_context *msg_ctx, tp = NULL; } + contend_level2_oplocks_end(br_lck->fsp, + LEVEL2_CONTEND_POSIX_BRL); + br_lck->num_locks = count; SAFE_FREE(br_lck->lock_data); locks = tp; @@ -1054,13 +1225,22 @@ static BOOL brl_unlock_posix(struct messaging_context *msg_ctx, return True; } +bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle, + struct messaging_context *msg_ctx, + struct byte_range_lock *br_lck, + const struct lock_struct *plock) +{ + VFS_FIND(brl_unlock_windows); + return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock); +} + /**************************************************************************** Unlock a range of bytes. ****************************************************************************/ -BOOL brl_unlock(struct messaging_context *msg_ctx, +bool brl_unlock(struct messaging_context *msg_ctx, struct byte_range_lock *br_lck, - uint32 smbpid, + uint64_t smblctx, struct server_id pid, br_off start, br_off size, @@ -1068,7 +1248,7 @@ BOOL brl_unlock(struct messaging_context *msg_ctx, { struct lock_struct lock; - lock.context.smbpid = smbpid; + lock.context.smblctx = smblctx; lock.context.pid = pid; lock.context.tid = br_lck->fsp->conn->cnum; lock.start = start; @@ -1078,7 +1258,8 @@ BOOL brl_unlock(struct messaging_context *msg_ctx, lock.lock_flav = lock_flav; if (lock_flav == WINDOWS_LOCK) { - return brl_unlock_windows(msg_ctx, br_lck, &lock); + return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx, + br_lck, &lock); } else { return brl_unlock_posix(msg_ctx, br_lck, &lock); } @@ -1089,21 +1270,21 @@ BOOL brl_unlock(struct messaging_context *msg_ctx, Returns True if the region required is currently unlocked, False if locked. 
****************************************************************************/ -BOOL brl_locktest(struct byte_range_lock *br_lck, - uint32 smbpid, +bool brl_locktest(struct byte_range_lock *br_lck, + uint64_t smblctx, struct server_id pid, br_off start, br_off size, enum brl_type lock_type, enum brl_flavour lock_flav) { - BOOL ret = True; + bool ret = True; unsigned int i; struct lock_struct lock; const struct lock_struct *locks = br_lck->lock_data; files_struct *fsp = br_lck->fsp; - lock.context.smbpid = smbpid; + lock.context.smblctx = smblctx; lock.context.pid = pid; lock.context.tid = br_lck->fsp->conn->cnum; lock.start = start; @@ -1133,7 +1314,7 @@ BOOL brl_locktest(struct byte_range_lock *br_lck, DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n", (double)start, (double)size, ret ? "locked" : "unlocked", - fsp->fnum, fsp->fsp_name )); + fsp->fnum, fsp_str_dbg(fsp))); /* We need to return the inverse of is_posix_locked. */ ret = !ret; @@ -1148,7 +1329,7 @@ BOOL brl_locktest(struct byte_range_lock *br_lck, ****************************************************************************/ NTSTATUS brl_lockquery(struct byte_range_lock *br_lck, - uint32 *psmbpid, + uint64_t *psmblctx, struct server_id pid, br_off *pstart, br_off *psize, @@ -1160,7 +1341,7 @@ NTSTATUS brl_lockquery(struct byte_range_lock *br_lck, const struct lock_struct *locks = br_lck->lock_data; files_struct *fsp = br_lck->fsp; - lock.context.smbpid = *psmbpid; + lock.context.smblctx = *psmblctx; lock.context.pid = pid; lock.context.tid = br_lck->fsp->conn->cnum; lock.start = *pstart; @@ -1172,7 +1353,7 @@ NTSTATUS brl_lockquery(struct byte_range_lock *br_lck, /* Make sure existing locks don't conflict */ for (i=0; i < br_lck->num_locks; i++) { const struct lock_struct *exlock = &locks[i]; - BOOL conflict = False; + bool conflict = False; if (exlock->lock_flav == WINDOWS_LOCK) { conflict = brl_conflict(exlock, &lock); @@ -1181,7 +1362,7 @@ NTSTATUS brl_lockquery(struct byte_range_lock *br_lck, } if (conflict) { - *psmbpid = exlock->context.smbpid; + *psmblctx = exlock->context.smblctx; *pstart = exlock->start; *psize = exlock->size; *plock_type = exlock->lock_type; @@ -1195,15 +1376,15 @@ NTSTATUS brl_lockquery(struct byte_range_lock *br_lck, */ if(lp_posix_locking(fsp->conn->params)) { - BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK); + bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK); DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n", (double)*pstart, (double)*psize, ret ? "locked" : "unlocked", - fsp->fnum, fsp->fsp_name )); + fsp->fnum, fsp_str_dbg(fsp))); if (ret) { - /* Hmmm. No clue what to set smbpid to - use -1. */ - *psmbpid = 0xFFFF; + /* Hmmm. No clue what to set smblctx to - use -1. */ + *psmblctx = 0xFFFFFFFFFFFFFFFFLL; return NT_STATUS_LOCK_NOT_GRANTED; } } @@ -1211,35 +1392,67 @@ NTSTATUS brl_lockquery(struct byte_range_lock *br_lck, return NT_STATUS_OK; } + +bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle, + struct byte_range_lock *br_lck, + struct lock_struct *plock, + struct blocking_lock_record *blr) +{ + VFS_FIND(brl_cancel_windows); + return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr); +} + /**************************************************************************** Remove a particular pending lock. 
****************************************************************************/ - -BOOL brl_lock_cancel(struct byte_range_lock *br_lck, - uint32 smbpid, +bool brl_lock_cancel(struct byte_range_lock *br_lck, + uint64_t smblctx, struct server_id pid, br_off start, br_off size, - enum brl_flavour lock_flav) + enum brl_flavour lock_flav, + struct blocking_lock_record *blr) +{ + bool ret; + struct lock_struct lock; + + lock.context.smblctx = smblctx; + lock.context.pid = pid; + lock.context.tid = br_lck->fsp->conn->cnum; + lock.start = start; + lock.size = size; + lock.fnum = br_lck->fsp->fnum; + lock.lock_flav = lock_flav; + /* lock.lock_type doesn't matter */ + + if (lock_flav == WINDOWS_LOCK) { + ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck, + &lock, blr); + } else { + ret = brl_lock_cancel_default(br_lck, &lock); + } + + return ret; +} + +bool brl_lock_cancel_default(struct byte_range_lock *br_lck, + struct lock_struct *plock) { unsigned int i; struct lock_struct *locks = br_lck->lock_data; - struct lock_context context; - context.smbpid = smbpid; - context.pid = pid; - context.tid = br_lck->fsp->conn->cnum; + SMB_ASSERT(plock); for (i = 0; i < br_lck->num_locks; i++) { struct lock_struct *lock = &locks[i]; /* For pending locks we *always* care about the fnum. */ - if (brl_same_context(&lock->context, &context) && - lock->fnum == br_lck->fsp->fnum && + if (brl_same_context(&lock->context, &plock->context) && + lock->fnum == plock->fnum && IS_PENDING_LOCK(lock->lock_type) && - lock->lock_flav == lock_flav && - lock->start == start && - lock->size == size) { + lock->lock_flav == plock->lock_flav && + lock->start == plock->start && + lock->size == plock->size) { break; } } @@ -1275,8 +1488,9 @@ void brl_close_fnum(struct messaging_context *msg_ctx, unsigned int i, j, dcount=0; int num_deleted_windows_locks = 0; struct lock_struct *locks = br_lck->lock_data; - struct server_id pid = procid_self(); - BOOL unlock_individually = False; + struct server_id pid = sconn_server_id(fsp->conn->sconn); + bool unlock_individually = False; + bool posix_level2_contention_ended = false; if(lp_posix_locking(fsp->conn->params)) { @@ -1324,7 +1538,7 @@ void brl_close_fnum(struct messaging_context *msg_ctx, (lock->fnum == fnum)) { brl_unlock(msg_ctx, br_lck, - lock->context.smbpid, + lock->context.smblctx, pid, lock->start, lock->size, @@ -1341,14 +1555,23 @@ void brl_close_fnum(struct messaging_context *msg_ctx, for (i=0; i < br_lck->num_locks; i++) { struct lock_struct *lock = &locks[i]; - BOOL del_this_lock = False; + bool del_this_lock = False; if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) { if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) { del_this_lock = True; num_deleted_windows_locks++; + contend_level2_oplocks_end(br_lck->fsp, + LEVEL2_CONTEND_WINDOWS_BRL); } else if (lock->lock_flav == POSIX_LOCK) { del_this_lock = True; + + /* Only end level2 contention once for posix */ + if (!posix_level2_contention_ended) { + posix_level2_contention_ended = true; + contend_level2_oplocks_end(br_lck->fsp, + LEVEL2_CONTEND_POSIX_BRL); + } } } @@ -1398,8 +1621,7 @@ void brl_close_fnum(struct messaging_context *msg_ctx, /**************************************************************************** Ensure this set of lock entries is valid. 
****************************************************************************/ - -static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks) +static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks) { unsigned int i; unsigned int num_valid_entries = 0; @@ -1407,7 +1629,7 @@ static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct for (i = 0; i < *pnum_entries; i++) { struct lock_struct *lock_data = &locks[i]; - if (!process_exists(lock_data->context.pid)) { + if (!serverid_exists(&lock_data->context.pid)) { /* This process no longer exists - mark this entry as invalid by zeroing it. */ ZERO_STRUCTP(lock_data); @@ -1429,7 +1651,7 @@ static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct num_valid_entries = 0; for (i = 0; i < *pnum_entries; i++) { struct lock_struct *lock_data = &locks[i]; - if (lock_data->context.smbpid && + if (lock_data->context.smblctx && lock_data->context.tid) { /* Valid (nonzero) entry - copy it. */ memcpy(&new_lock_data[num_valid_entries], @@ -1500,14 +1722,16 @@ static int traverse_fn(struct db_record *rec, void *state) } } - for ( i=0; ifn(*key, - locks[i].context.pid, - locks[i].lock_type, - locks[i].lock_flav, - locks[i].start, - locks[i].size, - cb->private_data); + if (cb->fn) { + for ( i=0; ifn(*key, + locks[i].context.pid, + locks[i].lock_type, + locks[i].lock_flav, + locks[i].start, + locks[i].size, + cb->private_data); + } } SAFE_FREE(locks); @@ -1541,13 +1765,8 @@ int brl_forall(void (*fn)(struct file_id id, struct server_id pid, Unlock the record. ********************************************************************/ -static int byte_range_lock_destructor(struct byte_range_lock *br_lck) +static void byte_range_lock_flush(struct byte_range_lock *br_lck) { - TDB_DATA key; - - key.dptr = (uint8 *)&br_lck->key; - key.dsize = sizeof(struct file_id); - if (br_lck->read_only) { SMB_ASSERT(!br_lck->modified); } @@ -1581,8 +1800,16 @@ static int byte_range_lock_destructor(struct byte_range_lock *br_lck) done: - SAFE_FREE(br_lck->lock_data); + br_lck->read_only = true; + br_lck->modified = false; + TALLOC_FREE(br_lck->record); +} + +static int byte_range_lock_destructor(struct byte_range_lock *br_lck) +{ + byte_range_lock_flush(br_lck); + SAFE_FREE(br_lck->lock_data); return 0; } @@ -1593,10 +1820,11 @@ static int byte_range_lock_destructor(struct byte_range_lock *br_lck) ********************************************************************/ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx, - files_struct *fsp, BOOL read_only) + files_struct *fsp, bool read_only) { TDB_DATA key, data; - struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock); + struct byte_range_lock *br_lck = talloc(mem_ctx, struct byte_range_lock); + bool do_read_only = read_only; if (br_lck == NULL) { return NULL; @@ -1605,7 +1833,6 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx, br_lck->fsp = fsp; br_lck->num_locks = 0; br_lck->modified = False; - memset(&br_lck->key, '\0', sizeof(struct file_id)); br_lck->key = fsp->file_id; key.dptr = (uint8 *)&br_lck->key; @@ -1614,18 +1841,17 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx, if (!fsp->lockdb_clean) { /* We must be read/write to clean the dead entries. 
*/ - read_only = False; + do_read_only = false; } - if (read_only) { + if (do_read_only) { if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) { DEBUG(3, ("Could not fetch byte range lock record\n")); TALLOC_FREE(br_lck); return NULL; } br_lck->record = NULL; - } - else { + } else { br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key); if (br_lck->record == NULL) { @@ -1637,15 +1863,27 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx, data = br_lck->record->value; } - br_lck->read_only = read_only; + br_lck->read_only = do_read_only; + br_lck->lock_data = NULL; talloc_set_destructor(br_lck, byte_range_lock_destructor); br_lck->num_locks = data.dsize / sizeof(struct lock_struct); - br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct, br_lck->num_locks); - memcpy(br_lck->lock_data, data.dptr, data.dsize); - + + if (br_lck->num_locks != 0) { + br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct, + br_lck->num_locks); + if (br_lck->lock_data == NULL) { + DEBUG(0, ("malloc failed\n")); + TALLOC_FREE(br_lck); + return NULL; + } + + memcpy(br_lck->lock_data, data.dptr, data.dsize); + } + if (!fsp->lockdb_clean) { + int orig_num_locks = br_lck->num_locks; /* This is the first time we've accessed this. */ /* Go through and ensure all entries exist - remove any that don't. */ @@ -1658,6 +1896,11 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx, return NULL; } + /* Ensure invalid locks are cleaned up in the destructor. */ + if (orig_num_locks != br_lck->num_locks) { + br_lck->modified = True; + } + /* Mark the lockdb as "clean" as seen from this open file. */ fsp->lockdb_clean = True; } @@ -1667,11 +1910,20 @@ static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx, struct lock_struct *locks = br_lck->lock_data; DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n", br_lck->num_locks, - file_id_static_string(&fsp->file_id))); + file_id_string_tos(&fsp->file_id))); for( i = 0; i < br_lck->num_locks; i++) { print_lock_struct(i, &locks[i]); } } + + if (do_read_only != read_only) { + /* + * this stores the record and gets rid of + * the write lock that is needed for a cleanup + */ + byte_range_lock_flush(br_lck); + } + return br_lck; } @@ -1681,10 +1933,30 @@ struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, return brl_get_locks_internal(mem_ctx, fsp, False); } -struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx, - files_struct *fsp) +struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp) { - return brl_get_locks_internal(mem_ctx, fsp, True); + struct byte_range_lock *br_lock; + + if (lp_clustering()) { + return brl_get_locks_internal(talloc_tos(), fsp, true); + } + + if ((fsp->brlock_rec != NULL) + && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) { + return fsp->brlock_rec; + } + + TALLOC_FREE(fsp->brlock_rec); + + br_lock = brl_get_locks_internal(talloc_tos(), fsp, true); + if (br_lock == NULL) { + return NULL; + } + fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db); + + fsp->brlock_rec = talloc_move(fsp, &br_lock); + + return fsp->brlock_rec; } struct brl_revalidate_state { @@ -1721,8 +1993,8 @@ static void brl_revalidate_collect(struct file_id id, struct server_id pid, static int compare_procids(const void *p1, const void *p2) { - const struct server_id *i1 = (struct server_id *)p1; - const struct server_id *i2 = (struct server_id *)p2; + const struct server_id *i1 = (const struct server_id *)p1; + const struct server_id *i2 = (const 
struct server_id *)p2;
 
 	if (i1->pid < i2->pid) return -1;
 	if (i2->pid > i2->pid) return 1;
@@ -1764,8 +2036,7 @@ static void brl_revalidate(struct messaging_context *msg_ctx,
 		goto done;
 	}
 
-	qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
-	      compare_procids);
+	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);
 
 	ZERO_STRUCT(last_pid);