/*
- Unix SMB/Netbios implementation.
- Version 3.0
+ Unix SMB/CIFS implementation.
Locking functions
- Copyright (C) Jeremy Allison 1992-2000
+ Copyright (C) Jeremy Allison 1992-2006
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
+ the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
Revision History:
*/
#include "includes.h"
-extern int DEBUGLEVEL;
-extern int global_smbpid;
-/*
- * The POSIX locking database handle.
- */
-
-static TDB_CONTEXT *posix_lock_tdb;
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_LOCKING
/*
* The pending close database handle.
*/
-static TDB_CONTEXT *posix_pending_close_tdb;
-
-/*
- * The data in POSIX lock records is an unsorted linear array of these
- * records. It is unnecessary to store the count as tdb provides the
- * size of the record.
- */
-
-struct posix_lock {
- int fd;
- SMB_OFF_T start;
- SMB_OFF_T size;
- int lock_type;
-};
+static struct db_context *posix_pending_close_db;
-/*
- * The data in POSIX pending close records is an unsorted linear array of ints
- * records. It is unnecessary to store the count as tdb provides the
- * size of the record.
- */
+/****************************************************************************
+ First - the functions that deal with the underlying system locks - these
+ functions are used no matter if we're mapping CIFS Windows locks or CIFS
+ POSIX locks onto POSIX.
+****************************************************************************/
-/* The key used in both the POSIX databases. */
+/****************************************************************************
+ Utility function to map a lock type correctly depending on the open
+ mode of a file.
+****************************************************************************/
-struct posix_lock_key {
- SMB_DEV_T device;
- SMB_INO_T inode;
-};
+static int map_posix_lock_type( files_struct *fsp, enum brl_type lock_type)
+{
+ if((lock_type == WRITE_LOCK) && !fsp->can_write) {
+ /*
+ * Many UNIX's cannot get a write lock on a file opened read-only.
+ * Win32 locking semantics allow this.
+ * Do the best we can and attempt a read-only lock.
+ */
+ DEBUG(10,("map_posix_lock_type: Downgrading write lock to read due to read-only file.\n"));
+ return F_RDLCK;
+ }
-/*******************************************************************
- Form a static locking key for a dev/inode pair.
-******************************************************************/
+ /*
+ * This return should be the most normal, as we attempt
+ * to always open files read/write.
+ */
-static TDB_DATA locking_key(SMB_DEV_T dev, SMB_INO_T inode)
-{
- static struct posix_lock_key key;
- TDB_DATA kbuf;
- key.dev = dev;
- key.inode = inode;
- kbuf.dptr = (char *)&key;
- kbuf.dsize = sizeof(key);
- return kbuf;
+ return (lock_type == READ_LOCK) ? F_RDLCK : F_WRLCK;
}
-/*******************************************************************
- Convenience function to get a key from an fsp.
-******************************************************************/
+/****************************************************************************
+ Debugging aid :-).
+****************************************************************************/
-static TDB_DATA locking_key_fsp(files_struct *fsp)
+static const char *posix_lock_type_name(int lock_type)
{
- return locking_key(fsp->dev, fsp->inode);
+ return (lock_type == F_RDLCK) ? "READ" : "WRITE";
}
/****************************************************************************
- Add an fd to the pending close tdb.
+ Check to see if the given unsigned lock range is within the possible POSIX
+ range. Modifies the given args to be in range if possible, just returns
+ False if not.
****************************************************************************/
-static BOOL add_fd_to_close_entry(files_struct *fsp)
+static bool posix_lock_in_range(SMB_OFF_T *offset_out, SMB_OFF_T *count_out,
+ uint64_t u_offset, uint64_t u_count)
{
- struct posix_lock_key = locking_key_fsp(fsp);
- TDB_DATA kbuf, dbuf;
- size_t count = 0;
- int *fd_array = NULL;
-
- dbuf.dptr = NULL;
+ SMB_OFF_T offset = (SMB_OFF_T)u_offset;
+ SMB_OFF_T count = (SMB_OFF_T)u_count;
- tdb_lockchain(posix_pending_close_tdb, kbuf);
- dbuf = tdb_fetch(posix_pending_close_tdb, kbuf);
+ /*
+ * For the type of system we are, attempt to
+ * find the maximum positive lock offset as an SMB_OFF_T.
+ */
- dbuf.dptr = Realloc(dbuf.dptr, dbuf.dsize + sizeof(int));
- if (!dbuf.dptr) {
- DEBUG(0,("add_fd_to_close_entry: Realloc fail !\n"));
- tdb_unlockchain(posix_pending_close_tdb, kbuf);
- return False;
- }
- memcpy(dbuf.dptr + dbuf.dsize, &fsp->fd, sizeof(int));
- dbuf.dsize += sizeof(int);
+#if defined(MAX_POSITIVE_LOCK_OFFSET) /* Some systems have arbitrary limits. */
- if (tdb_store(posix_pending_close_tdb, kbuf, dbuf, TDB_REPLACE) == -1) {
- DEBUG(0,("add_fd_to_close_entry: tdb_store fail !\n"));
- }
+ SMB_OFF_T max_positive_lock_offset = (MAX_POSITIVE_LOCK_OFFSET);
- free(dbuf.dptr);
- tdb_unlockchain(posix_pending_close_tdb, kbuf);
- return True;
-}
+#elif defined(LARGE_SMB_OFF_T) && !defined(HAVE_BROKEN_FCNTL64_LOCKS)
-/****************************************************************************
- Remove all fd entries for a specific dev/inode pair from the tdb.
-****************************************************************************/
+ /*
+ * In this case SMB_OFF_T is 64 bits,
+ * and the underlying system can handle 64 bit signed locks.
+ */
-static void delete_close_entries(files_struct *fsp)
-{
- struct posix_lock_key = locking_key_fsp(fsp);
- TDB_DATA kbuf, dbuf;
+ SMB_OFF_T mask2 = ((SMB_OFF_T)0x4) << (SMB_OFF_T_BITS-4);
+ SMB_OFF_T mask = (mask2<<1);
+ SMB_OFF_T max_positive_lock_offset = ~mask;
- tdb_lockchain(posix_pending_close_tdb, kbuf);
- if (tdb_delete(posix_pending_close_tdb, kbuf) == -1)
- DEBUG(0,("delete_close_entries: tdb_delete fail !\n"));
- tdb_unlockchain(posix_pending_close_tdb, kbuf);
-}
+#else /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */
-/****************************************************************************
- Get the array of POSIX pending close records for an open fsp. Caller must
- free. Returns number of entries.
-****************************************************************************/
+ /*
+ * In this case either SMB_OFF_T is 32 bits,
+ * or the underlying system cannot handle 64 bit signed locks.
+ * All offsets & counts must be 2^31 or less.
+ */
-static size_t get_posix_pending_close_entries(files_struct *fsp, int **entries)
-{
- struct posix_lock_key = locking_key_fsp(fsp);
- TDB_DATA kbuf, dbuf;
- size_t count = 0;
+ SMB_OFF_T max_positive_lock_offset = 0x7FFFFFFF;
- *entries = NULL;
- dbuf.dptr = NULL;
+#endif /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */
- tdb_lockchain(posix_pending_close_tdb, kbuf);
- dbuf = tdb_fetch(posix_pending_close_tdb, kbuf);
+ /*
+ * POSIX locks of length zero mean lock to end-of-file.
+ * Win32 locks of length zero are point probes. Ignore
+ * any Win32 locks of length zero. JRA.
+ */
- if (!dbuf.dptr) {
- tdb_unlockchain(posix_pending_close_tdb, kbuf);
- return 0;
+ if (count == (SMB_OFF_T)0) {
+ DEBUG(10,("posix_lock_in_range: count = 0, ignoring.\n"));
+ return False;
}
- *entries = (int *)dbuf.dptr;
- count = (size_t)(dbuf.dsize / sizeof(int));
-
- tdb_unlockchain(posix_pending_close_tdb, kbuf);
+ /*
+ * If the given offset was > max_positive_lock_offset then we cannot map this at all
+ * ignore this lock.
+ */
- return count;
-}
+ if (u_offset & ~((uint64_t)max_positive_lock_offset)) {
+ DEBUG(10,("posix_lock_in_range: (offset = %.0f) offset > %.0f and we cannot handle this. Ignoring lock.\n",
+ (double)u_offset, (double)((uint64_t)max_positive_lock_offset) ));
+ return False;
+ }
-/****************************************************************************
- Get the array of POSIX locks for an fsp. Caller must free. Returns
- number of entries.
-****************************************************************************/
+ /*
+ * We must truncate the count to less than max_positive_lock_offset.
+ */
-static size_t get_posix_lock_entries(files_struct *fsp, struct posix_lock **entries)
-{
- struct posix_lock_key = locking_key_fsp(fsp);
- TDB_DATA kbuf, dbuf;
- size_t count = 0;
+ if (u_count & ~((uint64_t)max_positive_lock_offset)) {
+ count = max_positive_lock_offset;
+ }
- *entries = NULL;
+ /*
+ * Truncate count to end at max lock offset.
+ */
- dbuf.dptr = NULL;
+ if (offset + count < 0 || offset + count > max_positive_lock_offset) {
+ count = max_positive_lock_offset - offset;
+ }
- tdb_lockchain(posix_lock_tdb, kbuf);
- dbuf = tdb_fetch(posix_lock_tdb, kbuf);
+ /*
+ * If we ate all the count, ignore this lock.
+ */
- if (!dbuf.dptr) {
- tdb_unlockchain(posix_lock_tdb, kbuf);
- return 0;
+ if (count == 0) {
+ DEBUG(10,("posix_lock_in_range: Count = 0. Ignoring lock u_offset = %.0f, u_count = %.0f\n",
+ (double)u_offset, (double)u_count ));
+ return False;
}
- *entries = (struct posix_lock_struct *)dbuf.dptr;
- count = (size_t)(dbuf.dsize / sizeof(struct posix_lock_struct));
+ /*
+ * The mapping was successful.
+ */
+
+ DEBUG(10,("posix_lock_in_range: offset_out = %.0f, count_out = %.0f\n",
+ (double)offset, (double)count ));
- tdb_unlockchain(posix_lock_tdb, kbuf);
+ *offset_out = offset;
+ *count_out = count;
+
+ return True;
+}
- return count;
+bool smb_vfs_call_lock(struct vfs_handle_struct *handle,
+ struct files_struct *fsp, int op, SMB_OFF_T offset,
+ SMB_OFF_T count, int type)
+{
+ VFS_FIND(lock);
+ return handle->fns->lock(handle, fsp, op, offset, count, type);
}
/****************************************************************************
- Deal with pending closes needed by POSIX locking support.
+ Actual function that does POSIX locks. Copes with 64 -> 32 bit cruft and
+ broken NFS implementations.
****************************************************************************/
-int fd_close_posix(struct connection_struct *conn, files_struct *fsp)
+static bool posix_fcntl_lock(files_struct *fsp, int op, SMB_OFF_T offset, SMB_OFF_T count, int type)
{
- int saved_errno = 0;
- int ret;
- size_t count, i;
- struct posix_lock *entries = NULL;
- int *fd_array = NULL;
+ bool ret;
- if (!lp_posix_locking(SNUM(conn))) {
- /*
- * No POSIX to worry about, just close.
- */
- ret = conn->vfs_ops.close(fsp->fd);
- fsp->fd = -1;
- return ret;
- }
+ DEBUG(8,("posix_fcntl_lock %d %d %.0f %.0f %d\n",fsp->fh->fd,op,(double)offset,(double)count,type));
- /*
- * Get the number of outstanding POSIX locks on this dev/inode pair.
- */
+ ret = SMB_VFS_LOCK(fsp, op, offset, count, type);
- count = get_posix_lock_entries(fsp, &entries);
-
- if (count) {
+ if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) {
+
+ DEBUG(0,("posix_fcntl_lock: WARNING: lock request at offset %.0f, length %.0f returned\n",
+ (double)offset,(double)count));
+ DEBUGADD(0,("an %s error. This can happen when using 64 bit lock offsets\n", strerror(errno)));
+ DEBUGADD(0,("on 32 bit NFS mounted file systems.\n"));
/*
- * There are outstanding locks on this dev/inode pair on other fds.
- * Add our fd to the pending close tdb and set fsp->fd to -1.
+ * If the offset is > 0x7FFFFFFF then this will cause problems on
+ * 32 bit NFS mounted filesystems. Just ignore it.
*/
- if (!add_fd_to_close_entry(fsp)) {
- free((char *)entries);
- return False;
+ if (offset & ~((SMB_OFF_T)0x7fffffff)) {
+ DEBUG(0,("Offset greater than 31 bits. Returning success.\n"));
+ return True;
}
- free((char *)entries);
- fsp->fd = -1;
- return 0;
+ if (count & ~((SMB_OFF_T)0x7fffffff)) {
+ /* 32 bit NFS file system, retry with smaller offset */
+ DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n"));
+ errno = 0;
+ count &= 0x7fffffff;
+ ret = SMB_VFS_LOCK(fsp, op, offset, count, type);
+ }
}
- if(entries)
- free((char *)entries);
-
- /*
- * No outstanding POSIX locks. Get the pending close fd's
- * from the tdb and close them all.
- */
-
- count = get_posix_pending_close_entries(fsp, &fd_array)
+ DEBUG(8,("posix_fcntl_lock: Lock call %s\n", ret ? "successful" : "failed"));
+ return ret;
+}
- if (count) {
- DEBUG(10,("fd_close_posix: doing close on %u fd's.\n", (unsigned int)count ));
+bool smb_vfs_call_getlock(struct vfs_handle_struct *handle,
+ struct files_struct *fsp, SMB_OFF_T *poffset,
+ SMB_OFF_T *pcount, int *ptype, pid_t *ppid)
+{
+ VFS_FIND(getlock);
+ return handle->fns->getlock(handle, fsp, poffset, pcount, ptype, ppid);
+}
- for(i = 0; i < count; i++) {
- if (conn->vfs_ops.close(fd_array[i]) == -1) {
- saved_errno = errno;
- }
- }
+/****************************************************************************
+ Actual function that gets POSIX locks. Copes with 64 -> 32 bit cruft and
+ broken NFS implementations.
+****************************************************************************/
- if (fd_array)
- free((char *)fd_array);
+static bool posix_fcntl_getlock(files_struct *fsp, SMB_OFF_T *poffset, SMB_OFF_T *pcount, int *ptype)
+{
+ pid_t pid;
+ bool ret;
- /*
- * Delete all fd's stored in the tdb
- * for this dev/inode pair.
- */
+ DEBUG(8,("posix_fcntl_getlock %d %.0f %.0f %d\n",
+ fsp->fh->fd,(double)*poffset,(double)*pcount,*ptype));
- delete_close_entries(fsp);
- }
+ ret = SMB_VFS_GETLOCK(fsp, poffset, pcount, ptype, &pid);
- if (fd_array)
- free((char *)fd_array);
+ if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) {
- /*
- * Finally close the fd associated with this fsp.
- */
+ DEBUG(0,("posix_fcntl_getlock: WARNING: lock request at offset %.0f, length %.0f returned\n",
+ (double)*poffset,(double)*pcount));
+ DEBUGADD(0,("an %s error. This can happen when using 64 bit lock offsets\n", strerror(errno)));
+ DEBUGADD(0,("on 32 bit NFS mounted file systems.\n"));
- ret = conn->vfs_ops.close(fsp->fd);
+ /*
+ * If the offset is > 0x7FFFFFFF then this will cause problems on
+ * 32 bit NFS mounted filesystems. Just ignore it.
+ */
- if (saved_errno != 0) {
- errno = saved_errno;
- ret = -1;
- }
+ if (*poffset & ~((SMB_OFF_T)0x7fffffff)) {
+ DEBUG(0,("Offset greater than 31 bits. Returning success.\n"));
+ return True;
+ }
- fsp->fd = -1;
+ if (*pcount & ~((SMB_OFF_T)0x7fffffff)) {
+ /* 32 bit NFS file system, retry with smaller offset */
+ DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n"));
+ errno = 0;
+ *pcount &= 0x7fffffff;
+ ret = SMB_VFS_GETLOCK(fsp,poffset,pcount,ptype,&pid);
+ }
+ }
+ DEBUG(8,("posix_fcntl_getlock: Lock query call %s\n", ret ? "successful" : "failed"));
return ret;
}
/****************************************************************************
- Debugging aid :-).
+ POSIX function to see if a file region is locked. Returns True if the
+ region is locked, False otherwise.
****************************************************************************/
-static const char *posix_lock_type_name(int lock_type)
+bool is_posix_locked(files_struct *fsp,
+ uint64_t *pu_offset,
+ uint64_t *pu_count,
+ enum brl_type *plock_type,
+ enum brl_flavour lock_flav)
{
- return (lock_type == F_RDLCK) ? "READ" : "WRITE";
-}
-
-/****************************************************************************
- Add an entry into the POSIX locking tdb.
-****************************************************************************/
+ SMB_OFF_T offset;
+ SMB_OFF_T count;
+ int posix_lock_type = map_posix_lock_type(fsp,*plock_type);
-static BOOL add_posix_lock_entry(files_struct *fsp, SMB_OFF_T start, SMB_OFF_T size, int lock_type)
-{
- struct posix_lock_key = locking_key_fsp(fsp);
- TDB_DATA kbuf, dbuf;
- struct posix_lock pl;
+ DEBUG(10,("is_posix_locked: File %s, offset = %.0f, count = %.0f, "
+ "type = %s\n", fsp_str_dbg(fsp), (double)*pu_offset,
+ (double)*pu_count, posix_lock_type_name(*plock_type)));
/*
- * Now setup the new record.
+ * If the requested lock won't fit in the POSIX range, we will
+ * never set it, so presume it is not locked.
*/
- pl.fd = fsp->fd;
- pl.start = start;
- pl.size = size;
- pl.lock_type = lock_type;
-
- dbuf.dptr = NULL;
-
- tdb_lockchain(posix_lock_tdb, kbuf);
- dbuf = tdb_fetch(posix_lock_tdb, kbuf);
+ if(!posix_lock_in_range(&offset, &count, *pu_offset, *pu_count)) {
+ return False;
+ }
- dbuf.dptr = Realloc(dbuf.dptr, dbuf.dsize + sizeof(*pl));
- if (!dbuf.dptr) {
- DEBUG(0,("add_posix_lock_entry: Realloc fail !\n"));
- goto fail;
+ if (!posix_fcntl_getlock(fsp,&offset,&count,&posix_lock_type)) {
+ return False;
}
- memcpy(dbuf.dptr + dbuf.dsize, rec, sizeof(*pl));
- dbuf.dsize += sizeof(*pl);
+ if (posix_lock_type == F_UNLCK) {
+ return False;
+ }
- if (tdb_store(posix_lock_tdb, kbuf, dbuf, TDB_REPLACE) == -1) {
- DEBUG(0,("add_posix_lock: Failed to add lock entry on file %s\n", fsp->fsp_name));
- goto fail;
+ if (lock_flav == POSIX_LOCK) {
+ /* Only POSIX lock queries need to know the details. */
+ *pu_offset = (uint64_t)offset;
+ *pu_count = (uint64_t)count;
+ *plock_type = (posix_lock_type == F_RDLCK) ? READ_LOCK : WRITE_LOCK;
}
+ return True;
+}
+
+/****************************************************************************
+ Next - the functions that deal with in memory database storing representations
+ of either Windows CIFS locks or POSIX CIFS locks.
+****************************************************************************/
- free(dbuf.dptr);
- tdb_unlockchain(posix_lock_tdb, kbuf);
+/* The key used in the in-memory POSIX databases. */
- DEBUG(10,("add_posix_lock: File %s: type = %s: start=%.0f size=%.0f:dev=%.0f inode=%.0f\n",
- fsp->fsp_name, posix_lock_type_name(lock_type), (double)start, (double)size,
- (double)fsp->dev, (double)fsp->inode ));
+struct lock_ref_count_key {
+ struct file_id id;
+ char r;
+};
- return True;
+/*******************************************************************
+ Form a static locking key for a dev/inode pair for the lock ref count
+******************************************************************/
- fail:
- if (dbuf.dptr)
- free(dbuf.dptr);
- tdb_unlockchain(tdb, kbuf);
- return False;
+static TDB_DATA locking_ref_count_key_fsp(files_struct *fsp,
+ struct lock_ref_count_key *tmp)
+{
+ ZERO_STRUCTP(tmp);
+ tmp->id = fsp->file_id;
+ tmp->r = 'r';
+ return make_tdb_data((uint8_t *)tmp, sizeof(*tmp));
}
-/****************************************************************************
- Delete an entry from the POSIX locking tdb.
-****************************************************************************/
+/*******************************************************************
+ Convenience function to get an fd_array key from an fsp.
+******************************************************************/
-static BOOL delete_posix_lock_entry(files_struct *fsp, SMB_OFF_T start, SMB_OFF_T size)
+static TDB_DATA fd_array_key_fsp(files_struct *fsp)
{
- struct posix_lock_key = locking_key_fsp(fsp);
- TDB_DATA kbuf, dbuf;
- struct posix_lock *locks;
- size_t i, count;
-
- dbuf.dptr = NULL;
+ return make_tdb_data((uint8 *)&fsp->file_id, sizeof(fsp->file_id));
+}
- tdb_lockchain(posix_lock_tdb, kbuf);
- dbuf = tdb_fetch(posix_lock_tdb, kbuf);
+/*******************************************************************
+ Create the in-memory POSIX lock databases.
+********************************************************************/
- if (!dbuf.dptr) {
- DEBUG(10,("delete_posix_lock_entry: tdb_fetch failed !\n"));
- goto fail;
+bool posix_locking_init(bool read_only)
+{
+ if (posix_pending_close_db != NULL) {
+ return true;
}
- /* There are existing locks - find a match. */
- locks = (struct lock_struct *)dbuf.dptr;
- count = (size_t(dbuf.dsize / sizeof(*locks));
-
- for (i=0; i<count; i++) {
- struct posix_lock *pl = &locks[i];
-
- if (pl->fd == fd &&
- pl->start == start &&
- pl->size == size) {
- /* Found it - delete it. */
- if (count == 1) {
- tdb_delete(posix_lock_tdb, kbuf);
- } else {
- if (i < count-1) {
- memmove(&locks[i], &locks[i+1], sizeof(*locks)*((count-1) - i));
- }
- dbuf.dsize -= sizeof(*locks);
- tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
- }
+ posix_pending_close_db = db_open_rbt(NULL);
- free(dbuf.dptr);
- tdb_unlockchain(tdb, kbuf);
- return True;
- }
+ if (posix_pending_close_db == NULL) {
+ DEBUG(0,("Failed to open POSIX pending close database.\n"));
+ return false;
}
- /* We didn't find it. */
+ return true;
+}
+
+/*******************************************************************
+ Delete the in-memory POSIX lock databases.
+********************************************************************/
- fail:
- if (dbuf.dptr)
- free(dbuf.dptr);
- tdb_unlockchain(tdb, kbuf);
- return False;
+bool posix_locking_end(void)
+{
+ /*
+ * Shouldn't we close all fd's here?
+ */
+ TALLOC_FREE(posix_pending_close_db);
+ return true;
}
/****************************************************************************
- Utility function to map a lock type correctly depending on the open
- mode of a file.
+ Next - the functions that deal with storing fd's that have outstanding
+ POSIX locks when closed.
****************************************************************************/
-static int map_posix_lock_type( files_struct *fsp, enum brl_type lock_type)
+/****************************************************************************
+ The records in posix_pending_close_tdb are composed of an array of ints
+ keyed by dev/ino pair.
+ The first int is a reference count of the number of outstanding locks on
+ all open fd's on this dev/ino pair. Any subsequent ints are the fd's that
+ were open on this dev/ino pair that should have been closed, but can't as
+ the lock ref count is non zero.
+****************************************************************************/
+
+/****************************************************************************
+ Keep a reference count of the number of Windows locks open on this dev/ino
+ pair. Creates entry if it doesn't exist.
+****************************************************************************/
+
+static void increment_windows_lock_ref_count(files_struct *fsp)
{
- if((lock_type == WRITE_LOCK) && !fsp->can_write) {
- /*
- * Many UNIX's cannot get a write lock on a file opened read-only.
- * Win32 locking semantics allow this.
- * Do the best we can and attempt a read-only lock.
- */
- DEBUG(10,("map_posix_lock_type: Downgrading write lock to read due to read-only file.\n"));
- return F_RDLCK;
- } else if((lock_type == READ_LOCK) && !fsp->can_read) {
- /*
- * Ditto for read locks on write only files.
- */
- DEBUG(10,("map_posix_lock_type: Changing read lock to write due to write-only file.\n"));
- return F_WRLCK;
+ struct lock_ref_count_key tmp;
+ struct db_record *rec;
+ int lock_ref_count = 0;
+ NTSTATUS status;
+
+ rec = posix_pending_close_db->fetch_locked(
+ posix_pending_close_db, talloc_tos(),
+ locking_ref_count_key_fsp(fsp, &tmp));
+
+ SMB_ASSERT(rec != NULL);
+
+ if (rec->value.dptr != NULL) {
+ SMB_ASSERT(rec->value.dsize == sizeof(lock_ref_count));
+ memcpy(&lock_ref_count, rec->value.dptr,
+ sizeof(lock_ref_count));
}
- /*
- * This return should be the most normal, as we attempt
- * to always open files read/write.
- */
+ lock_ref_count++;
+
+ status = rec->store(rec, make_tdb_data((uint8 *)&lock_ref_count,
+ sizeof(lock_ref_count)), 0);
+
+ SMB_ASSERT(NT_STATUS_IS_OK(status));
+
+ TALLOC_FREE(rec);
- return (lock_type == READ_LOCK) ? F_RDLCK : F_WRLCK;
+ DEBUG(10,("increment_windows_lock_ref_count for file now %s = %d\n",
+ fsp_str_dbg(fsp), lock_ref_count));
}
/****************************************************************************
- Check to see if the given unsigned lock range is within the possible POSIX
- range. Modifies the given args to be in range if possible, just returns
- False if not.
+ Bulk delete - subtract as many locks as we've just deleted.
****************************************************************************/
-static BOOL posix_lock_in_range(SMB_OFF_T *offset_out, SMB_OFF_T *count_out,
- SMB_BIG_UINT u_offset, SMB_BIG_UINT u_count)
+void reduce_windows_lock_ref_count(files_struct *fsp, unsigned int dcount)
{
- SMB_OFF_T offset;
- SMB_OFF_T count;
+ struct lock_ref_count_key tmp;
+ struct db_record *rec;
+ int lock_ref_count = 0;
+ NTSTATUS status;
-#if defined(LARGE_SMB_OFF_T) && !defined(HAVE_BROKEN_FCNTL64_LOCKS)
+ rec = posix_pending_close_db->fetch_locked(
+ posix_pending_close_db, talloc_tos(),
+ locking_ref_count_key_fsp(fsp, &tmp));
- SMB_OFF_T mask2 = ((SMB_OFF_T)0x4) << (SMB_OFF_T_BITS-4);
- SMB_OFF_T mask = (mask2<<1);
- SMB_OFF_T neg_mask = ~mask;
-
- /*
- * In this case SMB_OFF_T is 64 bits,
- * and the underlying system can handle 64 bit signed locks.
- * Cast to signed type.
- */
+ SMB_ASSERT((rec != NULL)
+ && (rec->value.dptr != NULL)
+ && (rec->value.dsize == sizeof(lock_ref_count)));
- offset = (SMB_OFF_T)u_offset;
- count = (SMB_OFF_T)u_count;
+ memcpy(&lock_ref_count, rec->value.dptr, sizeof(lock_ref_count));
- /*
- * Deal with a very common case of count of all ones.
- * (lock entire file).
- */
+ SMB_ASSERT(lock_ref_count > 0);
- if(count == (SMB_OFF_T)-1)
- count &= ~mask;
+ lock_ref_count -= dcount;
- /*
- * POSIX lock ranges cannot be negative.
- * Fail if any combination becomes negative.
- */
+ status = rec->store(rec, make_tdb_data((uint8 *)&lock_ref_count,
+ sizeof(lock_ref_count)), 0);
- if(offset < 0 || count < 0 || (offset + count < 0)) {
- DEBUG(10,("posix_lock_in_range: negative range: offset = %.0f, count = %.0f. Ignoring lock.\n",
- (double)offset, (double)count ));
- return False;
- }
+ SMB_ASSERT(NT_STATUS_IS_OK(status));
- /*
- * In this case SMB_OFF_T is 64 bits, the offset and count
- * fit within the positive range, and the underlying
- * system can handle 64 bit locks. Just return as the
- * cast values are ok.
- */
+ TALLOC_FREE(rec);
-#else /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */
+ DEBUG(10,("reduce_windows_lock_ref_count for file now %s = %d\n",
+ fsp_str_dbg(fsp), lock_ref_count));
+}
- /*
- * In this case either SMB_OFF_T is 32 bits,
- * or the underlying system cannot handle 64 bit signed locks.
- * Either way we have to try and mangle to fit within 31 bits.
- * This is difficult.
- */
+static void decrement_windows_lock_ref_count(files_struct *fsp)
+{
+ reduce_windows_lock_ref_count(fsp, 1);
+}
-#if defined(HAVE_BROKEN_FCNTL64_LOCKS)
+/****************************************************************************
+ Fetch the lock ref count.
+****************************************************************************/
- /*
- * SMB_OFF_T is 64 bits, but we need to use 31 bits due to
- * broken large locking.
- */
+static int get_windows_lock_ref_count(files_struct *fsp)
+{
+ struct lock_ref_count_key tmp;
+ TDB_DATA dbuf;
+ int res;
+ int lock_ref_count = 0;
- /*
- * Deal with a very common case of count of all ones.
- * (lock entire file).
- */
+ res = posix_pending_close_db->fetch(
+ posix_pending_close_db, talloc_tos(),
+ locking_ref_count_key_fsp(fsp, &tmp), &dbuf);
- if(u_count == (SMB_BIG_UINT)-1)
- count = 0x7FFFFFFF;
+ SMB_ASSERT(res == 0);
- if(((u_offset >> 32) & 0xFFFFFFFF) || ((u_count >> 32) & 0xFFFFFFFF)) {
- DEBUG(10,("posix_lock_in_range: top 32 bits not zero. offset = %.0f, count = %.0f. Ignoring lock.\n",
- (double)u_offset, (double)u_count ));
- /* Top 32 bits of offset or count were not zero. */
- return False;
+ if (dbuf.dsize != 0) {
+ SMB_ASSERT(dbuf.dsize == sizeof(lock_ref_count));
+ memcpy(&lock_ref_count, dbuf.dptr, sizeof(lock_ref_count));
+ TALLOC_FREE(dbuf.dptr);
}
- /* Cast from 64 bits unsigned to 64 bits signed. */
- offset = (SMB_OFF_T)u_offset;
- count = (SMB_OFF_T)u_count;
-
- /*
- * Check if we are within the 2^31 range.
- */
-
- {
- int32 low_offset = (int32)offset;
- int32 low_count = (int32)count;
+ DEBUG(10,("get_windows_lock_count for file %s = %d\n",
+ fsp_str_dbg(fsp), lock_ref_count));
- if(low_offset < 0 || low_count < 0 || (low_offset + low_count < 0)) {
- DEBUG(10,("posix_lock_in_range: not within 2^31 range. low_offset = %d, low_count = %d. Ignoring lock.\n",
- low_offset, low_count ));
- return False;
- }
- }
+ return lock_ref_count;
+}
- /*
- * Ok - we can map from a 64 bit number to a 31 bit lock.
- */
+/****************************************************************************
+ Delete a lock_ref_count entry.
+****************************************************************************/
-#else /* HAVE_BROKEN_FCNTL64_LOCKS */
+static void delete_windows_lock_ref_count(files_struct *fsp)
+{
+ struct lock_ref_count_key tmp;
+ struct db_record *rec;
- /*
- * SMB_OFF_T is 32 bits.
- */
+ rec = posix_pending_close_db->fetch_locked(
+ posix_pending_close_db, talloc_tos(),
+ locking_ref_count_key_fsp(fsp, &tmp));
-#if defined(HAVE_LONGLONG)
+ SMB_ASSERT(rec != NULL);
- /*
- * SMB_BIG_UINT is 64 bits, we can do a 32 bit shift.
- */
+ /* Not a bug if it doesn't exist - no locks were ever granted. */
- /*
- * Deal with a very common case of count of all ones.
- * (lock entire file).
- */
+ rec->delete_rec(rec);
+ TALLOC_FREE(rec);
- if(u_count == (SMB_BIG_UINT)-1)
- count = 0x7FFFFFFF;
+ DEBUG(10,("delete_windows_lock_ref_count for file %s\n",
+ fsp_str_dbg(fsp)));
+}
- if(((u_offset >> 32) & 0xFFFFFFFF) || ((u_count >> 32) & 0xFFFFFFFF)) {
- DEBUG(10,("posix_lock_in_range: top 32 bits not zero. u_offset = %.0f, u_count = %.0f. Ignoring lock.\n",
- (double)u_offset, (double)u_count ));
- return False;
- }
+/****************************************************************************
+ Add an fd to the pending close tdb.
+****************************************************************************/
- /* Cast from 64 bits unsigned to 32 bits signed. */
- offset = (SMB_OFF_T)u_offset;
- count = (SMB_OFF_T)u_count;
+static void add_fd_to_close_entry(files_struct *fsp)
+{
+ struct db_record *rec;
+ uint8_t *new_data;
+ NTSTATUS status;
- /*
- * Check if we are within the 2^31 range.
- */
+ rec = posix_pending_close_db->fetch_locked(
+ posix_pending_close_db, talloc_tos(),
+ fd_array_key_fsp(fsp));
- if(offset < 0 || count < 0 || (offset + count < 0)) {
- DEBUG(10,("posix_lock_in_range: not within 2^31 range. offset = %d, count = %d. Ignoring lock.\n",
- (int)offset, (int)count ));
- return False;
- }
+ SMB_ASSERT(rec != NULL);
-#else /* HAVE_LONGLONG */
+ new_data = TALLOC_ARRAY(
+ rec, uint8_t, rec->value.dsize + sizeof(fsp->fh->fd));
- /*
- * SMB_BIG_UINT and SMB_OFF_T are both 32 bits,
- * just cast.
- */
+ SMB_ASSERT(new_data != NULL);
- /*
- * Deal with a very common case of count of all ones.
- * (lock entire file).
- */
+ memcpy(new_data, rec->value.dptr, rec->value.dsize);
+ memcpy(new_data + rec->value.dsize,
+ &fsp->fh->fd, sizeof(fsp->fh->fd));
- if(u_count == (SMB_BIG_UINT)-1)
- count = 0x7FFFFFFF;
+ status = rec->store(
+ rec, make_tdb_data(new_data,
+ rec->value.dsize + sizeof(fsp->fh->fd)), 0);
- /* Cast from 32 bits unsigned to 32 bits signed. */
- offset = (SMB_OFF_T)u_offset;
- count = (SMB_OFF_T)u_count;
+ SMB_ASSERT(NT_STATUS_IS_OK(status));
- /*
- * Check if we are within the 2^31 range.
- */
+ TALLOC_FREE(rec);
- if(offset < 0 || count < 0 || (offset + count < 0)) {
- DEBUG(10,("posix_lock_in_range: not within 2^31 range. offset = %d, count = %d. Ignoring lock.\n",
- (int)offset, (int)count ));
- return False;
- }
+ DEBUG(10,("add_fd_to_close_entry: added fd %d file %s\n",
+ fsp->fh->fd, fsp_str_dbg(fsp)));
+}
-#endif /* HAVE_LONGLONG */
-#endif /* LARGE_SMB_OFF_T */
-#endif /* !LARGE_SMB_OFF_T || HAVE_BROKEN_FCNTL64_LOCKS */
+/****************************************************************************
+ Remove all fd entries for a specific dev/inode pair from the tdb.
+****************************************************************************/
- /*
- * The mapping was successful.
- */
+static void delete_close_entries(files_struct *fsp)
+{
+ struct db_record *rec;
- DEBUG(10,("posix_lock_in_range: offset_out = %.0f, count_out = %.0f\n",
- (double)offset, (double)count ));
+ rec = posix_pending_close_db->fetch_locked(
+ posix_pending_close_db, talloc_tos(),
+ fd_array_key_fsp(fsp));
- *offset_out = offset;
- *count_out = count;
-
- return True;
+ SMB_ASSERT(rec != NULL);
+ rec->delete_rec(rec);
+ TALLOC_FREE(rec);
}
/****************************************************************************
- POSIX function to see if a file region is locked. Returns True if the
- region is locked, False otherwise.
+ Get the array of POSIX pending close records for an open fsp. Returns number
+ of entries.
****************************************************************************/
-BOOL is_posix_locked(files_struct *fsp, SMB_BIG_UINT u_offset, SMB_BIG_UINT u_count, enum brl_type lock_type)
+static size_t get_posix_pending_close_entries(TALLOC_CTX *mem_ctx,
+ files_struct *fsp, int **entries)
{
- SMB_OFF_T offset;
- SMB_OFF_T count;
- int posix_lock_type = map_posix_lock_type(fsp,lock_type);
+ TDB_DATA dbuf;
+ int res;
- DEBUG(10,("is_posix_locked: File %s, offset = %.0f, count = %.0f, type = %s\n",
- fsp->fsp_name, (double)u_offset, (double)u_count, lock_type_name(lock_type) ));
+ res = posix_pending_close_db->fetch(
+ posix_pending_close_db, mem_ctx, fd_array_key_fsp(fsp),
+ &dbuf);
- /*
- * If the requested lock won't fit in the POSIX range, we will
- * never set it, so presume it is not locked.
- */
+ SMB_ASSERT(res == 0);
- if(!posix_lock_in_range(&offset, &count, u_offset, u_count))
- return False;
-
- /*
- * Note that most UNIX's can *test* for a write lock on
- * a read-only fd, just not *set* a write lock on a read-only
- * fd. So we don't need to use map_lock_type here.
- */
+ if (dbuf.dsize == 0) {
+ *entries = NULL;
+ return 0;
+ }
- return fcntl_lock(fsp->fd,SMB_F_GETLK,offset,count,posix_lock_type);
+ *entries = (int *)dbuf.dptr;
+ return (size_t)(dbuf.dsize / sizeof(int));
}
/****************************************************************************
- POSIX function to acquire a lock. Returns True if the
- lock could be granted, False if not.
+ Deal with pending closes needed by POSIX locking support.
+ Note that posix_locking_close_file() is expected to have been called
+ to delete all locks on this fsp before this function is called.
****************************************************************************/
-BOOL set_posix_lock(files_struct *fsp, SMB_BIG_UINT u_offset, SMB_BIG_UINT u_count, enum brl_type lock_type)
+int fd_close_posix(struct files_struct *fsp)
{
- SMB_OFF_T offset;
- SMB_OFF_T count;
- BOOL ret = True;
- int posix_lock_type = map_posix_lock_type(fsp,lock_type);
+ int saved_errno = 0;
+ int ret;
+ int *fd_array = NULL;
+ size_t count, i;
+
+ if (!lp_locking(fsp->conn->params) ||
+ !lp_posix_locking(fsp->conn->params))
+ {
+ /*
+ * No locking or POSIX to worry about or we want POSIX semantics
+ * which will lose all locks on all fd's open on this dev/inode,
+ * just close.
+ */
+ return close(fsp->fh->fd);
+ }
+
+ if (get_windows_lock_ref_count(fsp)) {
+
+ /*
+ * There are outstanding locks on this dev/inode pair on
+ * other fds. Add our fd to the pending close tdb and set
+ * fsp->fh->fd to -1.
+ */
- DEBUG(5,("set_posix_lock: File %s, offset = %.0f, count = %.0f, type = %s\n",
- fsp->fsp_name, (double)u_offset, (double)u_count, lock_type_name(lock_type) ));
+ add_fd_to_close_entry(fsp);
+ return 0;
+ }
/*
- * If the requested lock won't fit in the POSIX range, we will
- * pretend it was successful.
+ * No outstanding locks. Get the pending close fd's
+ * from the tdb and close them all.
*/
- if(!posix_lock_in_range(&offset, &count, u_offset, u_count))
- return True;
+ count = get_posix_pending_close_entries(talloc_tos(), fsp, &fd_array);
+
+ if (count) {
+ DEBUG(10,("fd_close_posix: doing close on %u fd's.\n",
+ (unsigned int)count));
+
+ for(i = 0; i < count; i++) {
+ if (close(fd_array[i]) == -1) {
+ saved_errno = errno;
+ }
+ }
+
+ /*
+ * Delete all fd's stored in the tdb
+ * for this dev/inode pair.
+ */
+
+ delete_close_entries(fsp);
+ }
+
+ TALLOC_FREE(fd_array);
+
+ /* Don't need a lock ref count on this dev/ino anymore. */
+ delete_windows_lock_ref_count(fsp);
/*
- * Note that setting multiple overlapping read locks on different
- * file descriptors will not be held separately by the kernel (POSIX
- * braindamage), but will be merged into one continuous read lock
- * range. We cope with this case in the release_posix_lock code
- * below. JRA.
+ * Finally close the fd associated with this fsp.
*/
- ret = fcntl_lock(fsp->fd,SMB_F_SETLK,offset,count,posix_lock_type);
+ ret = close(fsp->fh->fd);
- if (ret)
- add_posix_lock_entry(fsp,offset,count,posix_lock_type);
+ if (ret == 0 && saved_errno != 0) {
+ errno = saved_errno;
+ ret = -1;
+ }
return ret;
}
+/****************************************************************************
+ Next - the functions that deal with the mapping CIFS Windows locks onto
+ the underlying system POSIX locks.
+****************************************************************************/
+
/*
* Structure used when splitting a lock range
* into a POSIX lock range. Doubly linked list.
*/
-struct unlock_list {
- struct unlock_list *next;
- struct unlock_list *prev;
- SMB_OFF_T start;
- SMB_OFF_T size;
- int fd;
+struct lock_list {
+ struct lock_list *next;
+ struct lock_list *prev;
+ SMB_OFF_T start;
+ SMB_OFF_T size;
};
/****************************************************************************
Create a list of lock ranges that don't overlap a given range. Used in calculating
- POSIX lock unlocks. This is a difficult function that requires ASCII art to
+ POSIX locks and unlocks. This is a difficult function that requires ASCII art to
understand it :-).
****************************************************************************/
-static struct unlock_list *posix_unlock_list(TALLOC_CTX *ctx, struct unlock_list *ulhead, files_struct *fsp)
+static struct lock_list *posix_lock_list(TALLOC_CTX *ctx,
+ struct lock_list *lhead,
+ const struct lock_context *lock_ctx, /* Lock context lhead belongs to. */
+ files_struct *fsp,
+ const struct lock_struct *plocks,
+ int num_locks)
{
- struct lock_key key;
- TDB_DATA kbuf, dbuf;
- struct lock_struct *locks;
- int num_locks, i;
-
- /*
- * Setup the key for this fetch.
- */
- key.device = dev;
- key.inode = ino;
- kbuf.dptr = (char *)&key;
- kbuf.dsize = sizeof(key);
-
- dbuf.dptr = NULL;
-
- tdb_lockchain(tdb, kbuf);
- dbuf = tdb_fetch(tdb, kbuf);
-
- if (!dbuf.dptr) {
- tdb_unlockchain(tdb, kbuf);
- return ulhead;
- }
-
- locks = (struct lock_struct *)dbuf.dptr;
- num_locks = dbuf.dsize / sizeof(*locks);
+ int i;
/*
* Check the current lock list on this dev/inode pair.
* Quit if the list is deleted.
*/
- DEBUG(10,("brl_unlock_list: curr: start=%.0f,size=%.0f\n",
- (double)ulhead->start, (double)ulhead->size ));
+ DEBUG(10,("posix_lock_list: curr: start=%.0f,size=%.0f\n",
+ (double)lhead->start, (double)lhead->size ));
- for (i=0; i<num_locks && ulhead; i++) {
+ for (i=0; i<num_locks && lhead; i++) {
+ const struct lock_struct *lock = &plocks[i];
+ struct lock_list *l_curr;
- struct lock_struct *lock = &locks[i];
- struct unlock_list *ul_curr;
+ /* Ignore all but read/write locks. */
+ if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
+ continue;
+ }
- /* If it's not this process, ignore it. */
- if (lock->context.pid != pid)
+ /* Ignore locks not owned by this process. */
+ if (!procid_equal(&lock->context.pid, &lock_ctx->pid)) {
continue;
+ }
/*
- * Walk the unlock list, checking for overlaps. Note that
- * the unlock list can expand within this loop if the current
+ * Walk the lock list, checking for overlaps. Note that
+ * the lock list can expand within this loop if the current
* range being examined needs to be split.
*/
- for (ul_curr = ulhead; ul_curr;) {
+ for (l_curr = lhead; l_curr;) {
- DEBUG(10,("brl_unlock_list: lock: start=%.0f,size=%.0f:",
- (double)lock->start, (double)lock->size ));
+ DEBUG(10,("posix_lock_list: lock: fnum=%d: start=%.0f,size=%.0f:type=%s", lock->fnum,
+ (double)lock->start, (double)lock->size, posix_lock_type_name(lock->lock_type) ));
- if ( (ul_curr->start >= (lock->start + lock->size)) ||
- (lock->start > (ul_curr->start + ul_curr->size))) {
+ if ( (l_curr->start >= (lock->start + lock->size)) ||
+ (lock->start >= (l_curr->start + l_curr->size))) {
- /* No overlap with this lock - leave this range alone. */
+ /* No overlap with existing lock - leave this range alone. */
/*********************************************
+---------+
- | ul_curr |
+ | l_curr |
+---------+
+-------+
| lock |
+-------+
OR....
+---------+
- | ul_curr |
+ | l_curr |
+---------+
**********************************************/
- DEBUG(10,("no overlap case.\n" ));
+ DEBUG(10,(" no overlap case.\n" ));
- ul_curr = ul_curr->next;
+ l_curr = l_curr->next;
- } else if ( (ul_curr->start >= lock->start) &&
- (ul_curr->start + ul_curr->size <= lock->start + lock->size) ) {
+ } else if ( (l_curr->start >= lock->start) &&
+ (l_curr->start + l_curr->size <= lock->start + lock->size) ) {
/*
- * This unlock is completely overlapped by this existing lock range
- * and thus should have no effect (not be unlocked). Delete it from the list.
+ * This range is completely overlapped by this existing lock range
+ * and thus should have no effect. Delete it from the list.
*/
/*********************************************
+---------+
- | ul_curr |
+ | l_curr |
+---------+
+---------------------------+
| lock |
+---------------------------+
**********************************************/
/* Save the next pointer */
- struct unlock_list *ul_next = ul_curr->next;
+ struct lock_list *ul_next = l_curr->next;
- DEBUG(10,("delete case.\n" ));
+ DEBUG(10,(" delete case.\n" ));
- DLIST_REMOVE(ulhead, ul_curr);
- if(ulhead == NULL)
+ DLIST_REMOVE(lhead, l_curr);
+ if(lhead == NULL) {
break; /* No more list... */
+ }
- ul_curr = ul_next;
+ l_curr = ul_next;
- } else if ( (ul_curr->start >= lock->start) &&
- (ul_curr->start < lock->start + lock->size) &&
- (ul_curr->start + ul_curr->size > lock->start + lock->size) ) {
+ } else if ( (l_curr->start >= lock->start) &&
+ (l_curr->start < lock->start + lock->size) &&
+ (l_curr->start + l_curr->size > lock->start + lock->size) ) {
/*
- * This unlock overlaps the existing lock range at the high end.
+ * This range overlaps the existing lock range at the high end.
* Truncate by moving start to existing range end and reducing size.
*/
/*********************************************
+---------------+
- | ul_curr |
+ | l_curr |
+---------------+
+---------------+
| lock |
+---------------+
BECOMES....
+-------+
- |ul_curr|
+ | l_curr|
+-------+
**********************************************/
- ul_curr->size = (ul_curr->start + ul_curr->size) - (lock->start + lock->size);
- ul_curr->start = lock->start + lock->size;
+ l_curr->size = (l_curr->start + l_curr->size) - (lock->start + lock->size);
+ l_curr->start = lock->start + lock->size;
- DEBUG(10,("truncate high case: start=%.0f,size=%.0f\n",
- (double)ul_curr->start, (double)ul_curr->size ));
+ DEBUG(10,(" truncate high case: start=%.0f,size=%.0f\n",
+ (double)l_curr->start, (double)l_curr->size ));
- ul_curr = ul_curr->next;
+ l_curr = l_curr->next;
- } else if ( (ul_curr->start < lock->start) &&
- (ul_curr->start + ul_curr->size > lock->start) ) {
+ } else if ( (l_curr->start < lock->start) &&
+ (l_curr->start + l_curr->size > lock->start) &&
+ (l_curr->start + l_curr->size <= lock->start + lock->size) ) {
/*
- * This unlock overlaps the existing lock range at the low end.
+ * This range overlaps the existing lock range at the low end.
* Truncate by reducing size.
*/
/*********************************************
+---------------+
- | ul_curr |
+ | l_curr |
+---------------+
+---------------+
| lock |
+---------------+
BECOMES....
+-------+
- |ul_curr|
+ | l_curr|
+-------+
**********************************************/
- ul_curr->size = lock->start - ul_curr->start;
+ l_curr->size = lock->start - l_curr->start;
- DEBUG(10,("truncate low case: start=%.0f,size=%.0f\n",
- (double)ul_curr->start, (double)ul_curr->size ));
+ DEBUG(10,(" truncate low case: start=%.0f,size=%.0f\n",
+ (double)l_curr->start, (double)l_curr->size ));
- ul_curr = ul_curr->next;
+ l_curr = l_curr->next;
- } else if ( (ul_curr->start < lock->start) &&
- (ul_curr->start + ul_curr->size > lock->start + lock->size) ) {
+ } else if ( (l_curr->start < lock->start) &&
+ (l_curr->start + l_curr->size > lock->start + lock->size) ) {
/*
- * Worst case scenario. Unlock request completely overlaps an existing
+ * Worst case scenario. Range completely overlaps an existing
* lock range. Split the request into two, push the new (upper) request
- * into the dlink list, and continue with the entry after ul_new (as we
- * know that ul_new will not overlap with this lock).
+ * into the dlink list, and continue with the entry after l_new (as we
+ * know that l_new will not overlap with this lock).
*/
/*********************************************
+---------------------------+
- | ul_curr |
+ | l_curr |
+---------------------------+
+---------+
| lock |
+---------+
BECOMES.....
+-------+ +---------+
- |ul_curr| |ul_new |
+ | l_curr| | l_new |
+-------+ +---------+
**********************************************/
- struct unlock_list *ul_new = (struct unlock_list *)talloc(ctx,
- sizeof(struct unlock_list));
+ struct lock_list *l_new = TALLOC_P(ctx, struct lock_list);
- if(ul_new == NULL) {
- DEBUG(0,("brl_unlock_list: talloc fail.\n"));
+ if(l_new == NULL) {
+ DEBUG(0,("posix_lock_list: talloc fail.\n"));
return NULL; /* The talloc_destroy takes care of cleanup. */
}
- ZERO_STRUCTP(ul_new);
- ul_new->start = lock->start + lock->size;
- ul_new->size = ul_curr->start + ul_curr->size - ul_new->start;
- ul_new->smbpid = ul_curr->smbpid;
+ ZERO_STRUCTP(l_new);
+ l_new->start = lock->start + lock->size;
+ l_new->size = l_curr->start + l_curr->size - l_new->start;
- /* Add into the dlink list after the ul_curr point - NOT at ulhead. */
- DLIST_ADD(ul_curr, ul_new);
+ /* Truncate the l_curr. */
+ l_curr->size = lock->start - l_curr->start;
- /* Truncate the ul_curr. */
- ul_curr->size = lock->start - ul_curr->start;
+ DEBUG(10,(" split case: curr: start=%.0f,size=%.0f \
+new: start=%.0f,size=%.0f\n", (double)l_curr->start, (double)l_curr->size,
+ (double)l_new->start, (double)l_new->size ));
- DEBUG(10,("split case: curr: start=%.0f,size=%.0f \
-new: start=%.0f,size=%.0f\n", (double)ul_curr->start, (double)ul_curr->size,
- (double)ul_new->start, (double)ul_new->size ));
+ /*
+ * Add into the dlink list after the l_curr point - NOT at lhead.
+ * Note we can't use DLINK_ADD here as this inserts at the head of the given list.
+ */
- ul_curr = ul_new->next;
+ l_new->prev = l_curr;
+ l_new->next = l_curr->next;
+ l_curr->next = l_new;
+
+ /* And move after the link we added. */
+ l_curr = l_new->next;
} else {
/*
* This logic case should never happen. Ensure this is the
* case by forcing an abort.... Remove in production.
*/
+ char *msg = NULL;
- smb_panic("logic flaw in cases...\n");
+ if (asprintf(&msg, "logic flaw in cases: l_curr: start = %.0f, size = %.0f : \
+lock: start = %.0f, size = %.0f", (double)l_curr->start, (double)l_curr->size, (double)lock->start, (double)lock->size ) != -1) {
+ smb_panic(msg);
+ } else {
+ smb_panic("posix_lock_list");
+ }
}
- } /* end for ( ul_curr = ulhead; ul_curr;) */
+ } /* end for ( l_curr = lhead; l_curr;) */
} /* end for (i=0; i<num_locks && ul_head; i++) */
- tdb_unlockchain(tdb, kbuf);
+ return lhead;
+}
+
+/****************************************************************************
+ POSIX function to acquire a lock. Returns True if the
+ lock could be granted, False if not.
+****************************************************************************/
+
+bool set_posix_lock_windows_flavour(files_struct *fsp,
+ uint64_t u_offset,
+ uint64_t u_count,
+ enum brl_type lock_type,
+ const struct lock_context *lock_ctx,
+ const struct lock_struct *plocks,
+ int num_locks,
+ int *errno_ret)
+{
+ SMB_OFF_T offset;
+ SMB_OFF_T count;
+ int posix_lock_type = map_posix_lock_type(fsp,lock_type);
+ bool ret = True;
+ size_t lock_count;
+ TALLOC_CTX *l_ctx = NULL;
+ struct lock_list *llist = NULL;
+ struct lock_list *ll = NULL;
+
+ DEBUG(5,("set_posix_lock_windows_flavour: File %s, offset = %.0f, "
+ "count = %.0f, type = %s\n", fsp_str_dbg(fsp),
+ (double)u_offset, (double)u_count,
+ posix_lock_type_name(lock_type)));
+
+ /*
+ * If the requested lock won't fit in the POSIX range, we will
+ * pretend it was successful.
+ */
+
+ if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
+ increment_windows_lock_ref_count(fsp);
+ return True;
+ }
- if (dbuf.dptr)
- free(dbuf.dptr);
+ /*
+ * Windows is very strange. It allows read locks to be overlayed
+ * (even over a write lock), but leaves the write lock in force until the first
+ * unlock. It also reference counts the locks. This means the following sequence :
+ *
+ * process1 process2
+ * ------------------------------------------------------------------------
+ * WRITE LOCK : start = 2, len = 10
+ * READ LOCK: start =0, len = 10 - FAIL
+ * READ LOCK : start = 0, len = 14
+ * READ LOCK: start =0, len = 10 - FAIL
+ * UNLOCK : start = 2, len = 10
+ * READ LOCK: start =0, len = 10 - OK
+ *
+ * Under POSIX, the same sequence in steps 1 and 2 would not be reference counted, but
+ * would leave a single read lock over the 0-14 region.
+ */
- return ulhead;
+ if ((l_ctx = talloc_init("set_posix_lock")) == NULL) {
+ DEBUG(0,("set_posix_lock_windows_flavour: unable to init talloc context.\n"));
+ return False;
+ }
+
+ if ((ll = TALLOC_P(l_ctx, struct lock_list)) == NULL) {
+ DEBUG(0,("set_posix_lock_windows_flavour: unable to talloc unlock list.\n"));
+ talloc_destroy(l_ctx);
+ return False;
+ }
+
+ /*
+ * Create the initial list entry containing the
+ * lock we want to add.
+ */
+
+ ZERO_STRUCTP(ll);
+ ll->start = offset;
+ ll->size = count;
+
+ DLIST_ADD(llist, ll);
+
+ /*
+ * The following call calculates if there are any
+ * overlapping locks held by this process on
+ * fd's open on the same file and splits this list
+ * into a list of lock ranges that do not overlap with existing
+ * POSIX locks.
+ */
+
+ llist = posix_lock_list(l_ctx,
+ llist,
+ lock_ctx, /* Lock context llist belongs to. */
+ fsp,
+ plocks,
+ num_locks);
+
+ /*
+ * Add the POSIX locks on the list of ranges returned.
+ * As the lock is supposed to be added atomically, we need to
+ * back out all the locks if any one of these calls fail.
+ */
+
+ for (lock_count = 0, ll = llist; ll; ll = ll->next, lock_count++) {
+ offset = ll->start;
+ count = ll->size;
+
+ DEBUG(5,("set_posix_lock_windows_flavour: Real lock: Type = %s: offset = %.0f, count = %.0f\n",
+ posix_lock_type_name(posix_lock_type), (double)offset, (double)count ));
+
+ if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,posix_lock_type)) {
+ *errno_ret = errno;
+ DEBUG(5,("set_posix_lock_windows_flavour: Lock fail !: Type = %s: offset = %.0f, count = %.0f. Errno = %s\n",
+ posix_lock_type_name(posix_lock_type), (double)offset, (double)count, strerror(errno) ));
+ ret = False;
+ break;
+ }
+ }
+
+ if (!ret) {
+
+ /*
+ * Back out all the POSIX locks we have on fail.
+ */
+
+ for (ll = llist; lock_count; ll = ll->next, lock_count--) {
+ offset = ll->start;
+ count = ll->size;
+
+ DEBUG(5,("set_posix_lock_windows_flavour: Backing out locks: Type = %s: offset = %.0f, count = %.0f\n",
+ posix_lock_type_name(posix_lock_type), (double)offset, (double)count ));
+
+ posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_UNLCK);
+ }
+ } else {
+ /* Remember the number of Windows locks we have on this dev/ino pair. */
+ increment_windows_lock_ref_count(fsp);
+ }
+
+ talloc_destroy(l_ctx);
+ return ret;
}
/****************************************************************************
- POSIX function to release a lock given a list. Returns True if the
+ POSIX function to release a lock. Returns True if the
lock could be released, False if not.
****************************************************************************/
-static BOOL release_posix_lock(files_struct *fsp, SMB_BIG_UINT u_offset, SMB_BIG_UINT u_count)
+bool release_posix_lock_windows_flavour(files_struct *fsp,
+ uint64_t u_offset,
+ uint64_t u_count,
+ enum brl_type deleted_lock_type,
+ const struct lock_context *lock_ctx,
+ const struct lock_struct *plocks,
+ int num_locks)
{
SMB_OFF_T offset;
SMB_OFF_T count;
- BOOL ret = True;
+ bool ret = True;
TALLOC_CTX *ul_ctx = NULL;
- struct unlock_list *ulist = NULL;
- struct unlock_list *ul = NULL;
+ struct lock_list *ulist = NULL;
+ struct lock_list *ul = NULL;
- DEBUG(5,("release_posix_lock: File %s, offset = %.0f, count = %.0f\n",
- fsp->fsp_name, (double)offset, (double)count ));
+ DEBUG(5,("release_posix_lock_windows_flavour: File %s, offset = %.0f, "
+ "count = %.0f\n", fsp_str_dbg(fsp),
+ (double)u_offset, (double)u_count));
+
+ /* Remember the number of Windows locks we have on this dev/ino pair. */
+ decrement_windows_lock_ref_count(fsp);
/*
* If the requested lock won't fit in the POSIX range, we will
* pretend it was successful.
*/
- if(!posix_lock_in_range(&offset, &count, u_offset, u_count))
+ if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
return True;
+ }
- if ((ul_ctx = talloc_init()) == NULL) {
- DEBUG(0,("release_posix_lock: unable to init talloc context.\n"));
- return True; /* Not a fatal error. */
+ if ((ul_ctx = talloc_init("release_posix_lock")) == NULL) {
+ DEBUG(0,("release_posix_lock_windows_flavour: unable to init talloc context.\n"));
+ return False;
}
- if ((ul = (struct unlock_list *)talloc(ul_ctx, sizeof(struct unlock_list))) == NULL) {
- DEBUG(0,("release_posix_lock: unable to talloc unlock list.\n"));
+ if ((ul = TALLOC_P(ul_ctx, struct lock_list)) == NULL) {
+ DEBUG(0,("release_posix_lock_windows_flavour: unable to talloc unlock list.\n"));
talloc_destroy(ul_ctx);
- return True; /* Not a fatal error. */
+ return False;
}
/*
ZERO_STRUCTP(ul);
ul->start = offset;
ul->size = count;
- ul->fd = fsp->fd;
DLIST_ADD(ulist, ul);
/*
* The following call calculates if there are any
- * overlapping read locks held by this process on
- * other fd's open on the same file and creates a
- * list of unlock ranges that will allow other
+ * overlapping locks held by this process on
+ * fd's open on the same file and creates a
+ * list of unlock ranges that will allow
* POSIX lock ranges to remain on the file whilst the
* unlocks are performed.
*/
- ulist = posix_unlock_list(ul_ctx, ulist, fsp);
+ ulist = posix_lock_list(ul_ctx,
+ ulist,
+ lock_ctx, /* Lock context ulist belongs to. */
+ fsp,
+ plocks,
+ num_locks);
/*
- * Release the POSIX locks on the list of ranges returned.
+ * If there were any overlapped entries (list is > 1 or size or start have changed),
+ * and the lock_type we just deleted from
+ * the upper layer tdb was a write lock, then before doing the unlock we need to downgrade
+ * the POSIX lock to a read lock. This allows any overlapping read locks
+ * to be atomically maintained.
*/
- for(; ulist; ulist = ulist->next) {
- SMB_OFF_T offset = ulist->start;
- SMB_OFF_T count = ulist->size;
+ if (deleted_lock_type == WRITE_LOCK &&
+ (!ulist || ulist->next != NULL || ulist->start != offset || ulist->size != count)) {
- DEBUG(5,("release_posix_lock: Real unlock: offset = %.0f, count = %.0f\n",
+ DEBUG(5,("release_posix_lock_windows_flavour: downgrading lock to READ: offset = %.0f, count = %.0f\n",
(double)offset, (double)count ));
- if(u_count == 0) {
-
- /*
- * This lock must overlap with an existing read-only lock
- * held by another fd. Don't do any POSIX call.
- */
-
- continue;
+ if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_RDLCK)) {
+ DEBUG(0,("release_posix_lock_windows_flavour: downgrade of lock failed with error %s !\n", strerror(errno) ));
+ talloc_destroy(ul_ctx);
+ return False;
}
+ }
- /*
- * If the requested lock won't fit in the POSIX range, we will
- * pretend it was successful.
- */
+ /*
+ * Release the POSIX locks on the list of ranges returned.
+ */
- if(!posix_lock_in_range(&offset, &count, offset, count))
- continue;
+ for(; ulist; ulist = ulist->next) {
+ offset = ulist->start;
+ count = ulist->size;
- DEBUG(5,("release_posix_lock: Real unlock: offset = %.0f, count = %.0f\n",
+ DEBUG(5,("release_posix_lock_windows_flavour: Real unlock: offset = %.0f, count = %.0f\n",
(double)offset, (double)count ));
- ret = fcntl_lock(fsp->fd,SMB_F_SETLK,offset,count,F_UNLCK);
+ if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_UNLCK)) {
+ ret = False;
+ }
}
- talloc_destroy(ul_ctx);
-
- /*
- * We treat this as one unlock request for POSIX accounting purposes even
- * if it may have been split into multiple smaller POSIX unlock ranges.
- */
-
- delete_posix_lock_entry(fsp->
-
+ talloc_destroy(ul_ctx);
return ret;
}
/****************************************************************************
- Return a lock list associated with an open file.
+ Next - the functions that deal with mapping CIFS POSIX locks onto
+ the underlying system POSIX locks.
****************************************************************************/
-struct unlock_list *brl_getlocklist( TALLOC_CTX *ctx, SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
-{
- struct lock_key key;
- TDB_DATA kbuf, dbuf;
- int i, count;
- struct lock_struct *locks;
- struct unlock_list *ulist = NULL;
+/****************************************************************************
+ POSIX function to acquire a lock. Returns True if the
+ lock could be granted, False if not.
+ As POSIX locks don't stack or conflict (they just overwrite)
+ we can map the requested lock directly onto a system one. We
+ know it doesn't conflict with locks on other contexts as the
+ upper layer would have refused it.
+****************************************************************************/
- key.device = dev;
- key.inode = ino;
- kbuf.dptr = (char *)&key;
- kbuf.dsize = sizeof(key);
+bool set_posix_lock_posix_flavour(files_struct *fsp,
+ uint64_t u_offset,
+ uint64_t u_count,
+ enum brl_type lock_type,
+ int *errno_ret)
+{
+ SMB_OFF_T offset;
+ SMB_OFF_T count;
+ int posix_lock_type = map_posix_lock_type(fsp,lock_type);
- dbuf.dptr = NULL;
+ DEBUG(5,("set_posix_lock_posix_flavour: File %s, offset = %.0f, count "
+ "= %.0f, type = %s\n", fsp_str_dbg(fsp),
+ (double)u_offset, (double)u_count,
+ posix_lock_type_name(lock_type)));
- tdb_lockchain(tdb, kbuf);
- dbuf = tdb_fetch(tdb, kbuf);
+ /*
+ * If the requested lock won't fit in the POSIX range, we will
+ * pretend it was successful.
+ */
- if (!dbuf.dptr) {
- tdb_unlockchain(tdb, kbuf);
- return NULL;
+ if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
+ return True;
}
- /* There are existing locks - allocate an entry for each one. */
- locks = (struct lock_struct *)dbuf.dptr;
- count = dbuf.dsize / sizeof(*locks);
-
- for (i=0; i<count; i++) {
- struct lock_struct *lock = &locks[i];
-
- if (lock->context.tid == tid &&
- lock->context.pid == pid &&
- lock->fnum == fnum) {
-
- struct unlock_list *ul_new = (struct unlock_list *)talloc(ctx,
- sizeof(struct unlock_list));
-
- if(ul_new == NULL) {
- DEBUG(0,("brl_getlocklist: talloc fail.\n"));
- return NULL; /* The talloc_destroy takes care of cleanup. */
- }
-
- ZERO_STRUCTP(ul_new);
- ul_new->start = lock->start;
- ul_new->size = lock->size;
- ul_new->smbpid = lock->context.smbpid;
-
- DLIST_ADD(ulist, ul_new);
- }
+ if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,posix_lock_type)) {
+ *errno_ret = errno;
+ DEBUG(5,("set_posix_lock_posix_flavour: Lock fail !: Type = %s: offset = %.0f, count = %.0f. Errno = %s\n",
+ posix_lock_type_name(posix_lock_type), (double)offset, (double)count, strerror(errno) ));
+ return False;
}
-
- if (dbuf.dptr)
- free(dbuf.dptr);
- tdb_unlockchain(tdb, kbuf);
-
- return ulist;
+ return True;
}
/****************************************************************************
- Remove any locks on this fd. Called from file_close().
+ POSIX function to release a lock. Returns True if the
+ lock could be released, False if not.
+ We are given a complete lock state from the upper layer which is what the lock
+ state should be after the unlock has already been done, so what
+ we do is punch out holes in the unlock range where locks owned by this process
+ have a different lock context.
****************************************************************************/
-void posix_locking_close_file(files_struct *fsp)
+bool release_posix_lock_posix_flavour(files_struct *fsp,
+ uint64_t u_offset,
+ uint64_t u_count,
+ const struct lock_context *lock_ctx,
+ const struct lock_struct *plocks,
+ int num_locks)
{
+ bool ret = True;
+ SMB_OFF_T offset;
+ SMB_OFF_T count;
TALLOC_CTX *ul_ctx = NULL;
- struct unlock_list *ul = NULL;
- int eclass;
- uint32 ecode;
- struct pending_closes *pc;
+ struct lock_list *ulist = NULL;
+ struct lock_list *ul = NULL;
- /*
- * Optimization for the common case where we are the only
- * opener of a file. If all fd entries are our own, we don't
- * need to explicitly release all the locks via the POSIX functions,
- * we can just release all the brl locks, as in the no POSIX locking case.
- */
+ DEBUG(5,("release_posix_lock_posix_flavour: File %s, offset = %.0f, "
+ "count = %.0f\n", fsp_str_dbg(fsp),
+ (double)u_offset, (double)u_count));
- if ((pc = find_pending_close_entry(fsp->dev, fsp->inode)) != NULL) {
+ /*
+ * If the requested lock won't fit in the POSIX range, we will
+ * pretend it was successful.
+ */
- if (pc->fd_array_size == 1 && pc->fd_array[0] == fsp->fd ) {
- /*
- * Just release all the brl locks, no need to release individually.
- */
+ if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
+ return True;
+ }
- brl_close(fsp->dev, fsp->inode, pid, fsp->conn->cnum, fsp->fnum);
- return;
- }
- }
+ if ((ul_ctx = talloc_init("release_posix_lock")) == NULL) {
+ DEBUG(0,("release_posix_lock_posix_flavour: unable to init talloc context.\n"));
+ return False;
+ }
- if ((ul_ctx = talloc_init()) == NULL) {
- DEBUG(0,("locking_close_file: unable to init talloc context.\n"));
- return;
- }
+ if ((ul = TALLOC_P(ul_ctx, struct lock_list)) == NULL) {
+ DEBUG(0,("release_posix_lock_posix_flavour: unable to talloc unlock list.\n"));
+ talloc_destroy(ul_ctx);
+ return False;
+ }
- /*
- * We need to release all POSIX locks we have on this
- * fd. Get all our existing locks from the tdb locking database.
- */
+ /*
+ * Create the initial list entry containing the
+ * lock we want to remove.
+ */
- ul = brl_getlocklist(ul_ctx, fsp->dev, fsp->inode, pid, fsp->conn->cnum, fsp->fnum);
+ ZERO_STRUCTP(ul);
+ ul->start = offset;
+ ul->size = count;
- /*
- * Now unlock all of them. This will remove the brl entry also
- * for each lock. Note we need to make sure the global_smbpid matches
- * the one associated with each lock in case the client plays games
- * with smbpids (like smbtorture does :-).
- */
+ DLIST_ADD(ulist, ul);
- for(; ul; ul = ul->next) {
- global_smbpid = ul->smbpid;
- do_unlock(fsp,fsp->conn,ul->size,ul->start,&eclass,&ecode);
- }
-
- talloc_destroy(ul_ctx);
+ /*
+ * Walk the given array creating a linked list
+ * of unlock requests.
+ */
- } else {
+ ulist = posix_lock_list(ul_ctx,
+ ulist,
+ lock_ctx, /* Lock context ulist belongs to. */
+ fsp,
+ plocks,
+ num_locks);
- /*
- * Just release all the brl locks, no need to release individually.
- */
+ /*
+ * Release the POSIX locks on the list of ranges returned.
+ */
- brl_close(fsp->dev, fsp->inode, pid, fsp->conn->cnum, fsp->fnum);
- }
-}
+ for(; ulist; ulist = ulist->next) {
+ offset = ulist->start;
+ count = ulist->size;
-/*******************************************************************
- Create the in-memory POSIX lock databases.
-********************************************************************/
+ DEBUG(5,("release_posix_lock_posix_flavour: Real unlock: offset = %.0f, count = %.0f\n",
+ (double)offset, (double)count ));
-void posix_lock_init(void)
-{
- if (posix_lock_tdb && posix_pending_close_tdb)
- return;
-
- if (!posix_lock_tdb)
- posix_lock_tdb = tdb_open(NULL, 0, TDB_CLEAR_IF_FIRST,
- O_RDWR|O_CREAT, 0644);
- if (!posix_lock_tdb) {
- DEBUG(0,("Failed to open POSIX byte range locking database.\n"));
- }
- if (!posix_pending_close_tdb)
- posix_pending_close_tdb = tdb_open(NULL, 0, TDB_CLEAR_IF_FIRST,
- O_RDWR|O_CREAT, 0644);
- if (!posix_pending_close_tdb) {
- DEBUG(0,("Failed to open POSIX pending close database.\n"));
- }
+ if (!posix_fcntl_lock(fsp,SMB_F_SETLK,offset,count,F_UNLCK)) {
+ ret = False;
+ }
+ }
+
+ talloc_destroy(ul_ctx);
+ return ret;
}