/* 
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
/* This contains elements that differentiate locks. The smbpid is a
   client supplied pid, and is essentially the locking context for
   this client */

struct lock_context {
        uint16 smbpid;
        uint16 tid;
        struct process_id pid;
};
/* The data in brlock records is an unsorted linear array of these
   records. It is unnecessary to store the count as tdb provides the
   size of the record */

struct lock_struct {
        struct lock_context context;
        br_off start;
        br_off size;
        int fnum;
        enum brl_type lock_type;
        enum brl_flavour lock_flav;
};
/* The open brlock.tdb database. */

static TDB_CONTEXT *tdb;
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
        DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
                        i,
                        (unsigned int)pls->context.smbpid,
                        (unsigned int)pls->context.tid,
                        (unsigned int)procid_to_pid(&pls->context.pid) ));

        DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
                (double)pls->start,
                (double)pls->size,
                pls->fnum,
                lock_type_name(pls->lock_type),
                lock_flav_name(pls->lock_flav) ));
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static BOOL brl_same_context(const struct lock_context *ctx1, 
                             const struct lock_context *ctx2)
{
        return (procid_equal(&ctx1->pid, &ctx2->pid) &&
                (ctx1->smbpid == ctx2->smbpid) &&
                (ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        /* this extra check is not redundant - it copes with locks
           that go beyond the end of 64 bit file space */
        if (lck1->size != 0 &&
            lck1->start == lck2->start &&
            lck1->size == lck2->size) {
                return True;
        }

        if (lck1->start >= (lck2->start+lck2->size) ||
            lck2->start >= (lck1->start+lck1->size)) {
                return False;
        }
        return True;
}
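/*
 * Worked example (an illustrative sketch, kept out of the build with #if 0;
 * the helper name is hypothetical): two ranges overlap when neither starts
 * at or beyond the other's end. The equality check above matters for ranges
 * whose start+size wraps past the end of 64 bit file space, where the
 * end-point comparison alone would give the wrong answer for two identical
 * ranges.
 */
#if 0
static void brl_overlap_example(void)
{
        struct lock_struct a, b;

        ZERO_STRUCT(a);
        ZERO_STRUCT(b);

        a.start = 10;   a.size = 10;    /* covers bytes [10, 20) */
        b.start = 15;   b.size = 10;    /* covers bytes [15, 25) */
        SMB_ASSERT(brl_overlap(&a, &b));

        b.start = 20;                   /* [20, 30) - touches but doesn't overlap */
        SMB_ASSERT(!brl_overlap(&a, &b));
}
#endif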
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(const struct lock_struct *lck1, 
                         const struct lock_struct *lck2)
{
        /* Ignore PENDING locks. */
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        return brl_overlap(lck1, lck2);
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already.
****************************************************************************/

static BOOL brl_conflict_posix(const struct lock_struct *lck1, 
                               const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
        SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
        SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

        /* Ignore PENDING locks. */
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* Locks on the same context can't conflict. Ignore fnum. */
        if (brl_same_context(&lck1->context, &lck2->context)) {
                return False;
        }

        /* One is read, the other write, or the context is different,
           do they overlap ? */
        return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
/* Conflict check for the legacy "zero-zero" lock semantics, where a lock of
   start 0, size 0 means "lock the whole file". Only compiled in when
   ZERO_ZERO is defined. */
static BOOL brl_conflict1(const struct lock_struct *lck1, 
                          const struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
                return True;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) 
                return False;

        /* POSIX flavour locks never conflict here - this is only called
           in the read/write path. */

        if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
                return False;

        /*
         * Incoming WRITE locks conflict with existing READ locks even
         * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
         */

        if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
                if (brl_same_context(&lck1->context, &lck2->context) &&
                                lck1->fnum == lck2->fnum)
                        return False;
        }

        return brl_overlap(lck1, lck2);
}
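/*
 * Illustrative sketch (hypothetical helper, kept out of the build with
 * #if 0): the LOCKTEST7 rule above means the own-lock exemption is
 * asymmetric - an incoming WRITE conflicts with our own existing READ on
 * the same fnum, but an incoming READ over our own WRITE does not.
 */
#if 0
static void brl_conflict_other_example(void)
{
        struct lock_struct existing, incoming;

        ZERO_STRUCT(existing);

        existing.lock_type = READ_LOCK;
        existing.lock_flav = WINDOWS_LOCK;
        existing.start = 0;
        existing.size = 10;

        incoming = existing;            /* same context and fnum */
        incoming.lock_type = WRITE_LOCK;

        /* Our own READ lock doesn't exempt our own incoming WRITE... */
        SMB_ASSERT(brl_conflict_other(&existing, &incoming));

        /* ...but an incoming READ over our own WRITE lock is allowed. */
        existing.lock_type = WRITE_LOCK;
        incoming.lock_type = READ_LOCK;
        SMB_ASSERT(!brl_conflict_other(&existing, &incoming));
}
#endif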
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(const struct lock_struct *lock)
{
        static struct lock_struct last_lock_failure;

        if (brl_same_context(&lock->context, &last_lock_failure.context) &&
                        lock->fnum == last_lock_failure.fnum &&
                        lock->start == last_lock_failure.start &&
                        lock->size == last_lock_failure.size) {
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }
        last_lock_failure = *lock;
        if (lock->start >= 0xEF000000 &&
                        (lock->start >> 63) == 0) {
                /* amazing the little things you learn with a test
                   suite. Locks beyond this offset (as a 64 bit
                   number!) always generate the conflict error code,
                   unless the top bit is set */
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }
        return NT_STATUS_LOCK_NOT_GRANTED;
}
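/*
 * Illustrative sketch (hypothetical helper, kept out of the build with
 * #if 0; assumes lock->start is below the 0xEF000000 special-case offset):
 * the first failing request for a given context/fnum/range returns
 * NT_STATUS_LOCK_NOT_GRANTED; an identical retry then returns
 * NT_STATUS_FILE_LOCK_CONFLICT, mimicking the observed w2k3 behaviour.
 */
#if 0
static void brl_lock_failed_example(const struct lock_struct *lock)
{
        NTSTATUS first = brl_lock_failed(lock);
        NTSTATUS again = brl_lock_failed(lock);

        SMB_ASSERT(NT_STATUS_EQUAL(first, NT_STATUS_LOCK_NOT_GRANTED));
        SMB_ASSERT(NT_STATUS_EQUAL(again, NT_STATUS_FILE_LOCK_CONFLICT));
}
#endif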
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
        if (tdb) {
                return;
        }
        tdb = tdb_open_log(lock_path("brlock.tdb"),
                        lp_open_files_db_hash_size(),
                        TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
                        read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
        if (!tdb) {
                DEBUG(0,("Failed to open byte range locking database %s\n",
                        lock_path("brlock.tdb")));
                return;
        }
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
        if (!tdb) {
                return;
        }

        tdb_close(tdb);
}
#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1, 
                        const struct lock_struct *lck2)
{
        if (lck1->start != lck2->start) {
                return (lck1->start - lck2->start);
        }
        if (lck2->size != lck1->size) {
                return ((int)lck1->size - (int)lck2->size);
        }
        return 0;
}
#endif
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
                        const struct lock_struct *plock,
                        BOOL *my_lock_ctx)
{
        unsigned int i;
        files_struct *fsp = br_lck->fsp;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

        for (i=0; i < br_lck->num_locks; i++) {
                /* Do any Windows or POSIX locks conflict ? */
                if (brl_conflict(&locks[i], plock)) {
                        NTSTATUS status = brl_lock_failed(plock);
                        /* Did we block ourselves ? */
                        if (brl_same_context(&locks[i].context, &plock->context)) {
                                *my_lock_ctx = True;
                        }
                        return status;
                }
#if ZERO_ZERO
                if (plock->start == 0 && plock->size == 0 &&
                                locks[i].size == 0) {
                        break;
                }
#endif
        }

        /* We can get the Windows lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? We tell the lower lock layer about the
           lock type so it can cope with the difference between
           Windows "stacking" locks and POSIX "flat" ones. */

        if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(fsp->conn))) {
                if (!set_posix_lock(fsp, plock->start, plock->size, plock->lock_type, WINDOWS_LOCK)) {
                        if (errno == EACCES || errno == EAGAIN) {
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        return map_nt_error_from_unix(errno);
                }
        }

        /* no conflicts - add it to the list of locks */
        locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
        if (!locks) {
                return NT_STATUS_NO_MEMORY;
        }

        memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
        br_lck->num_locks += 1;
        br_lck->lock_data = (void *)locks;
        br_lck->modified = True;

        return NT_STATUS_OK;
}
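/*
 * Illustrative sketch (hypothetical helper, kept out of the build with
 * #if 0): Windows locks "stack". Two identical READ locks from the same
 * context both succeed and produce two entries in the array, each needing
 * its own unlock. Assumes lock is a READ_LOCK/WINDOWS_LOCK we already hold
 * ourselves and that both calls return NT_STATUS_OK.
 */
#if 0
static void brl_stacking_example(struct byte_range_lock *br_lck,
                        const struct lock_struct *lock)
{
        BOOL my_ctx = False;
        unsigned int before = br_lck->num_locks;

        brl_lock_windows(br_lck, lock, &my_ctx);
        brl_lock_windows(br_lck, lock, &my_ctx);

        SMB_ASSERT(br_lck->num_locks == before + 2);
}
#endif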
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,
                                             const struct lock_struct *ex,
                                             const struct lock_struct *plock,
                                             BOOL *lock_was_added)
{
        BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

        /* We can't merge non-conflicting locks on different contexts - ignore fnum. */

        if (!brl_same_context(&ex->context, &plock->context)) {
                /* Just copy. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

        /* We now know we have the same context. */
        /* Did we overlap ? */

/*********************************************
                                             +---------+
                                             | ex      |
                                             +---------+
                              +-------+
                              | plock |
                              +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

        if ( (ex->start >= (plock->start + plock->size)) ||
                        (plock->start >= (ex->start + ex->size))) {
                /* No overlap with this lock - copy existing. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }
/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {
                memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                *lock_was_added = True;
                return 1;
        }
/*********************************************
                +-----------------------+
                |          ex           |
                +-----------------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | ex                    | - same lock type.
        +-----------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                        (ex->start < plock->start + plock->size) &&
                        (ex->start + ex->size > plock->start + plock->size) ) {

                *lock_was_added = True;

                /* If the lock types are the same, we merge, if different, we
                   add the new lock before the old. */

                if (lock_types_differ) {
                        /* Add new. */
                        memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
                        /* Adjust existing start and size. */
                        lck_arr[1].start = plock->start + plock->size;
                        lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 2;
                } else {
                        /* Merge. */
                        memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                        /* Set new start and size. */
                        lck_arr[0].start = plock->start;
                        lck_arr[0].size = (ex->start + ex->size) - plock->start;
                        return 1;
                }
        }
/*********************************************
   +-----------------------+
   |          ex           |
   +-----------------------+
                   +---------------+
                   |   plock       |
                   +---------------+

BECOMES....
   +-------+---------------+
   | ex    | plock         | - different lock types.
   +-------+---------------+
OR.... (merge)
   +-----------------------+
   | ex                    | - same lock type.
   +-----------------------+
**********************************************/

        if ( (ex->start < plock->start) &&
                        (ex->start + ex->size > plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {

                *lock_was_added = True;

                /* If the lock types are the same, we merge, if different, we
                   add the new lock after the old. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = plock->start - ex->start;
                        return 2;
                } else {
                        /* Merge. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = (plock->start + plock->size) - ex->start;
                        return 1;
                }
        }
/*********************************************
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                | plock   |
                +---------+

BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  |   ex    | - different lock types.
        +-------+---------+---------+
OR.... (merge)
        +---------------------------+
        |        ex                 | - same lock type.
        +---------------------------+
**********************************************/

        if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
                *lock_was_added = True;

                if (lock_types_differ) {

                        /* We have to split ex into two locks here. */

                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
                        memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

                        /* Adjust first existing size. */
                        lck_arr[0].size = plock->start - ex->start;

                        /* Adjust second existing start and size. */
                        lck_arr[2].start = plock->start + plock->size;
                        lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 3;
                } else {
                        /* Just eat plock. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                }
        }

        /* Never get here. */
        smb_panic("brlock_posix_split_merge\n");
        /* Notreached. */
        abort();
        /* Keep some compilers happy. */
        return 0;
}
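/*
 * Worked example (hypothetical helper, kept out of the build with #if 0):
 * unlocking the middle of a held POSIX range exercises the final split case
 * above. With ex = WRITE_LOCK over [0, 100) and plock = UNLOCK_LOCK over
 * [30, 40), the lock types differ, so the function returns 3 entries:
 * [0,30) WRITE, [30,40) UNLOCK, [40,100) WRITE. The caller
 * (brl_unlock_posix) then drops the UNLOCK entry, leaving two locks.
 */
#if 0
static void brl_split_example(void)
{
        struct lock_struct ex, un, out[3];
        BOOL added = False;
        unsigned int n;

        ZERO_STRUCT(ex);

        ex.lock_type = WRITE_LOCK;      ex.start = 0;   ex.size = 100;
        un = ex;                        /* same context */
        un.lock_type = UNLOCK_LOCK;     un.start = 30;  un.size = 10;

        n = brlock_posix_split_merge(out, &ex, &un, &added);

        SMB_ASSERT(n == 3);
        SMB_ASSERT(out[0].size == 30);                          /* [0, 30)  WRITE  */
        SMB_ASSERT(out[1].lock_type == UNLOCK_LOCK);            /* [30, 40) UNLOCK */
        SMB_ASSERT(out[2].start == 40 && out[2].size == 60);    /* [40, 100) WRITE */
}
#endif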
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
                        const struct lock_struct *plock,
                        BOOL *my_lock_ctx)
{
        unsigned int i, count;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        struct lock_struct *tp;
        BOOL lock_was_added = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, and add our lock,
           so we need at most 2 more entries. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }
        count = 0;
        for (i=0; i < br_lck->num_locks; i++) {
                if (locks[i].lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(&locks[i], plock)) {
                                /* Did we block ourselves ? */
                                if (brl_same_context(&locks[i].context, &plock->context)) {
                                        *my_lock_ctx = True;
                                }
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], &locks[i], sizeof(struct lock_struct));
                        count++;
                } else {
                        /* POSIX conflict semantics are different. */
                        if (brl_conflict_posix(&locks[i], plock)) {
                                /* Can't block ourselves with POSIX locks. */
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }

                        /* Work out overlaps. */
                        count += brlock_posix_split_merge(&tp[count], &locks[i], plock, &lock_was_added);
                }
        }
        /* We can get the POSIX lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? We tell the lower lock layer about the
           lock type so it can cope with the difference between
           Windows "stacking" locks and POSIX "flat" ones. */

#if 0
        /* FIXME - this call doesn't work correctly yet for POSIX locks... */

        if (plock->lock_type != PENDING_LOCK) {
                files_struct *fsp = br_lck->fsp;

                if (lp_posix_locking(SNUM(fsp->conn)) &&
                    !set_posix_lock(fsp, plock->start, plock->size, plock->lock_type, POSIX_LOCK)) {
                        if (errno == EACCES || errno == EAGAIN) {
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        SAFE_FREE(tp);
                        return map_nt_error_from_unix(errno);
                }
        }
#endif
        if (!lock_was_added) {
                memcpy(&tp[count], plock, sizeof(struct lock_struct));
                count++;
        }

        /* Realloc so we don't leak entries per lock call. */
        tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }

        br_lck->num_locks = count;
        br_lck->lock_data = (void *)tp;
        br_lck->modified = True;
        return NT_STATUS_OK;
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct byte_range_lock *br_lck,
                uint16 smbpid,
                struct process_id pid,
                br_off start,
                br_off size, 
                enum brl_type lock_type,
                enum brl_flavour lock_flav,
                BOOL *my_lock_ctx)
{
        NTSTATUS ret;
        struct lock_struct lock;

        *my_lock_ctx = False;

#if !ZERO_ZERO
        if (start == 0 && size == 0) {
                DEBUG(0,("client sent 0/0 lock - please report this\n"));
        }
#endif

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                ret = brl_lock_windows(br_lck, &lock, my_lock_ctx);
        } else {
                ret = brl_lock_posix(br_lck, &lock, my_lock_ctx);
        }

#if ZERO_ZERO
        /* sort the lock list */
        qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

        return ret;
}
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(struct lock_struct *lock, struct lock_struct *pend_lock)
{
        if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
                return True;
        if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
                return True;
        return False;
}
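/*
 * Worked example (hypothetical helper, kept out of the build with #if 0):
 * note the second test above is inclusive at the end point, so an unlock
 * starting exactly at pend_lock->start + pend_lock->size still counts as
 * an overlap and wakes the waiter.
 */
#if 0
static void brl_pending_overlap_example(void)
{
        struct lock_struct un, pend;

        ZERO_STRUCT(un);
        ZERO_STRUCT(pend);

        pend.start = 100;       pend.size = 50;         /* waiter wants [100, 150) */

        un.start = 90;          un.size = 20;           /* unlock [90, 110) overlaps */
        SMB_ASSERT(brl_pending_overlap(&un, &pend));

        un.start = 150;         un.size = 10;           /* starts at the boundary... */
        SMB_ASSERT(brl_pending_overlap(&un, &pend));    /* ...still treated as overlap */
}
#endif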
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

static BOOL brl_unlock_windows(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
        unsigned int i, j;
        struct lock_struct *lock = NULL;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

#if ZERO_ZERO
        /* Delete write locks by preference - the lock list
           is sorted in the zero-zero case. */

        for (i = 0; i < br_lck->num_locks; i++) {
                lock = &locks[i];

                if (lock->lock_type == WRITE_LOCK &&
                    brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {

                        /* found it - delete it */
                        if (i < br_lck->num_locks - 1) {
                                memmove(&locks[i], &locks[i+1], 
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                        }

                        br_lck->num_locks -= 1;
                        br_lck->modified = True;
                        return True;
                }
        }
#endif
        for (i = 0; i < br_lck->num_locks; i++) {
                lock = &locks[i];

                /* Only remove our own locks that match in start, size, and flavour. */
                if (brl_same_context(&lock->context, &plock->context) &&
                                lock->fnum == plock->fnum &&
                                lock->lock_flav == WINDOWS_LOCK &&
                                lock->start == plock->start &&
                                lock->size == plock->size ) {
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* we didn't find it */
                return False;
        }

        /* Unlock any POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
                release_posix_lock(br_lck->fsp, plock->start, plock->size);
        }

        /* Send unlock messages to any pending waiters that overlap. */
        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (pend_lock->lock_type != PENDING_LOCK) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(lock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                        procid_str_static(&pend_lock->context.pid )));

                        message_send_pid(pend_lock->context.pid,
                                        MSG_SMB_UNLOCK,
                                        NULL, 0, True);
                }
        }

        /* Actually delete the lock. */
        if (i < br_lck->num_locks - 1) {
                memmove(&locks[i], &locks[i+1], 
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;
        return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static BOOL brl_unlock_posix(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
        unsigned int i, j, count;
        struct lock_struct *lock = NULL;
        struct lock_struct *tp;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        BOOL overlap_found = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return False;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                DEBUG(10,("brl_unlock_posix: lock wrap\n"));
                return False;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, so we need at most
           1 more entry. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
        if (!tp) {
                DEBUG(10,("brl_unlock_posix: malloc fail\n"));
                return False;
        }

        count = 0;
        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct tmp_lock[3];
                BOOL lock_was_added = False;
                unsigned int tmp_count;

                lock = &locks[i];

                /* Only remove our own locks - ignore fnum. */
                if (lock->lock_type == PENDING_LOCK ||
                                !brl_same_context(&lock->context, &plock->context)) {
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }
                /* Work out overlaps. */
                tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

                if (tmp_count == 1) {
                        /* Either the locks didn't overlap, or the unlock completely
                           overlapped this lock. If it didn't overlap, then there's
                           no change in the locks. */
                        if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
                                SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                                /* No change in this lock. */
                                memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                                count++;
                        } else {
                                SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
                                overlap_found = True;
                        }
                        continue;
                } else if (tmp_count == 2) {
                        /* The unlock overlapped an existing lock. Copy the truncated
                           lock into the lock array. */
                        if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
                                SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                                SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
                                memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                        } else {
                                SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
                                SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
                                memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
                        }
                        count++;
                        overlap_found = True;
                        continue;
                } else {
                        /* tmp_count == 3 - (we split a lock range in two). */
                        SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                        SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
                        SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

                        memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                        count++;
                        memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
                        count++;

                        overlap_found = True;

                        /* Optimisation... */
                        /* We know we're finished here as we can't overlap any
                           more POSIX locks. Copy the rest of the lock array. */
                        if (i < br_lck->num_locks - 1) {
                                memcpy(&tp[count], &locks[i+1], 
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                                count += ((br_lck->num_locks-1) - i);
                        }
                        break;
                }
        }
        if (!overlap_found) {
                /* Just ignore - no change. */
                SAFE_FREE(tp);
                DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
                return True;
        }

#if 0
        /* FIXME - this call doesn't work correctly yet for POSIX locks... */

        /* Unlock any POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
                release_posix_lock(br_lck->fsp, plock->start, plock->size);
        }
#endif

        /* Realloc so we don't leak entries per unlock call. */
        if (count) {
                tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
                if (!tp) {
                        DEBUG(10,("brl_unlock_posix: realloc fail\n"));
                        return False;
                }
        } else {
                /* We deleted the last lock. */
                SAFE_FREE(tp);
                tp = NULL;
        }

        br_lck->num_locks = count;
        br_lck->lock_data = (void *)tp;
        locks = tp;
        br_lck->modified = True;
        /* Send unlock messages to any pending waiters that overlap. */

        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (pend_lock->lock_type != PENDING_LOCK) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(lock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                        procid_str_static(&pend_lock->context.pid )));

                        message_send_pid(pend_lock->context.pid,
                                        MSG_SMB_UNLOCK,
                                        NULL, 0, True);
                }
        }

        return True;
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(struct byte_range_lock *br_lck,
                uint16 smbpid,
                struct process_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        struct lock_struct lock;

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = UNLOCK_LOCK;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                return brl_unlock_windows(br_lck, &lock);
        } else {
                return brl_unlock_posix(br_lck, &lock);
        }
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

BOOL brl_locktest(struct byte_range_lock *br_lck,
                uint16 smbpid,
                struct process_id pid,
                br_off start,
                br_off size, 
                enum brl_type lock_type,
                enum brl_flavour lock_flav)
{
        BOOL ret = True;
        unsigned int i;
        struct lock_struct lock;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                /*
                 * Our own locks don't conflict.
                 */
                if (brl_conflict_other(&locks[i], &lock)) {
                        return False;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         * This only conflicts with Windows locks, not POSIX locks.
         */

        if(lp_posix_locking(fsp->conn->cnum) && (lock_flav == WINDOWS_LOCK)) {
                ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

                DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
                        (double)start, (double)size, ret ? "locked" : "unlocked",
                        fsp->fnum, fsp->fsp_name ));

                /* We need to return the inverse of is_posix_locked. */
                ret = !ret;
        }

        /* no conflicts - we could have added it */
        return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
                uint16 *psmbpid,
                struct process_id pid,
                br_off *pstart,
                br_off *psize, 
                enum brl_type *plock_type,
                enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct lock;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smbpid = *psmbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = *pstart;
        lock.size = *psize;
        lock.fnum = fsp->fnum;
        lock.lock_type = *plock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *exlock = &locks[i];
                BOOL conflict = False;

                if (exlock->lock_flav == WINDOWS_LOCK) {
                        conflict = brl_conflict(exlock, &lock);
                } else {
                        conflict = brl_conflict_posix(exlock, &lock);
                }

                if (conflict) {
                        *psmbpid = exlock->context.smbpid;
                        *pstart = exlock->start;
                        *psize = exlock->size;
                        *plock_type = exlock->lock_type;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         */

        if(lp_posix_locking(fsp->conn->cnum)) {
                BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

                DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
                        (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
                        fsp->fnum, fsp->fsp_name ));

                if (ret) {
                        /* Hmmm. No clue what to set smbpid to - use -1. */
                        *psmbpid = 0xFFFF;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        return NT_STATUS_OK;
}
/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

BOOL brl_remove_pending_lock(struct byte_range_lock *br_lck,
                uint16 smbpid,
                struct process_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        struct lock_context context;

        context.smbpid = smbpid;
        context.pid = pid;
        context.tid = br_lck->fsp->conn->cnum;

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                /* For pending locks we *always* care about the fnum. */
                if (brl_same_context(&lock->context, &context) &&
                                lock->fnum == br_lck->fsp->fnum &&
                                lock->lock_type == PENDING_LOCK &&
                                lock->lock_flav == lock_flav &&
                                lock->start == start &&
                                lock->size == size) {
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* Didn't find it. */
                return False;
        }

        if (i < br_lck->num_locks - 1) {
                /* Found this particular pending lock - delete it */
                memmove(&locks[i], &locks[i+1], 
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;
        return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
****************************************************************************/

void brl_close_fnum(struct byte_range_lock *br_lck, struct process_id pid)
{
        files_struct *fsp = br_lck->fsp;
        uint16 tid = fsp->conn->cnum;
        int fnum = fsp->fnum;
        unsigned int i, j, dcount=0;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

        /* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];
                BOOL del_this_lock = False;

                if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
                        if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
                                del_this_lock = True;
                        } else if (lock->lock_flav == POSIX_LOCK) {
                                del_this_lock = True;
                        }
                }

                if (del_this_lock) {
                        /* Send unlock messages to any pending waiters that overlap. */
                        for (j=0; j < br_lck->num_locks; j++) {
                                struct lock_struct *pend_lock = &locks[j];

                                /* Ignore our own or non-pending locks. */
                                if (pend_lock->lock_type != PENDING_LOCK) {
                                        continue;
                                }

                                /* Optimisation - don't send to this fnum as we're
                                   closing it. */
                                if (pend_lock->context.tid == tid &&
                                    procid_equal(&pend_lock->context.pid, &pid) &&
                                    pend_lock->fnum == fnum) {
                                        continue;
                                }

                                /* We could send specific lock info here... */
                                if (brl_pending_overlap(lock, pend_lock)) {
                                        message_send_pid(pend_lock->context.pid,
                                                        MSG_SMB_UNLOCK,
                                                        NULL, 0, True);
                                }
                        }

                        /* found it - delete it */
                        if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
                                memmove(&locks[i], &locks[i+1], 
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                        }
                        br_lck->num_locks--;
                        br_lck->modified = True;
                        i--;
                        dcount++;
                }
        }
}
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
        struct lock_struct *locks;
        struct lock_key *key;
        unsigned int i;

        BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;

        locks = (struct lock_struct *)dbuf.dptr;
        key = (struct lock_key *)kbuf.dptr;

        for (i=0;i<dbuf.dsize/sizeof(*locks);i++) {
                traverse_callback(key->device,
                                key->inode,
                                locks[i].context.pid,
                                locks[i].lock_type,
                                locks[i].lock_flav,
                                locks[i].start,
                                locks[i].size);
        }

        return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(BRLOCK_FN(fn))
{
        if (!tdb) {
                return 0;
        }
        return tdb_traverse(tdb, traverse_fn, (void *)fn);
}
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
        TDB_DATA key;

        key.dptr = (char *)&br_lck->key;
        key.dsize = sizeof(struct lock_key);

        if (!br_lck->modified) {
                goto done;
        }

        if (br_lck->num_locks == 0) {
                /* No locks - delete this entry. */
                if (tdb_delete(tdb, key) == -1) {
                        smb_panic("Could not delete byte range lock entry\n");
                }
        } else {
                TDB_DATA data;

                data.dptr = br_lck->lock_data;
                data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

                if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
                        smb_panic("Could not store byte range mode entry\n");
                }
        }

 done:

        tdb_chainunlock(tdb, key);
        SAFE_FREE(br_lck->lock_data);
        SAFE_FREE(br_lck);
        return 0;
}
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
********************************************************************/

struct byte_range_lock *brl_get_locks(files_struct *fsp)
{
        TDB_DATA key;
        TDB_DATA data;
        struct byte_range_lock *br_lck = SMB_MALLOC_P(struct byte_range_lock);

        if (br_lck == NULL) {
                return NULL;
        }

        br_lck->fsp = fsp;
        br_lck->num_locks = 0;
        br_lck->modified = False;
        memset(&br_lck->key, '\0', sizeof(struct lock_key));
        br_lck->key.device = fsp->dev;
        br_lck->key.inode = fsp->inode;

        key.dptr = (char *)&br_lck->key;
        key.dsize = sizeof(struct lock_key);

        if (tdb_chainlock(tdb, key) != 0) {
                DEBUG(3, ("Could not lock byte range lock entry\n"));
                SAFE_FREE(br_lck);
                return NULL;
        }

        data = tdb_fetch(tdb, key);
        br_lck->lock_data = (void *)data.dptr;
        br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

        if (DEBUGLEVEL >= 10) {
                unsigned int i;
                struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

                DEBUG(10,("brl_get_locks: %u current locks on dev=%.0f, inode=%.0f\n",
                        br_lck->num_locks,
                        (double)fsp->dev, (double)fsp->inode ));
                for( i = 0; i < br_lck->num_locks; i++) {
                        print_lock_struct(i, &locks[i]);
                }
        }
        return br_lck;
}
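/*
 * Typical calling pattern (an illustrative sketch, kept out of the build
 * with #if 0; the helper name is hypothetical): brl_get_locks() takes the
 * tdb chainlock and fetches the record, the lock/unlock calls mutate the
 * in-memory array, and byte_range_lock_destructor() stores the data back
 * (if modified), drops the chainlock and frees everything.
 */
#if 0
static NTSTATUS brl_usage_example(files_struct *fsp, uint16 smbpid,
                        struct process_id pid)
{
        NTSTATUS status;
        BOOL my_lock_ctx = False;
        struct byte_range_lock *br_lck = brl_get_locks(fsp);

        if (br_lck == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        /* Lock bytes [0, 4) with Windows semantics. */
        status = brl_lock(br_lck, smbpid, pid, 0, 4,
                        WRITE_LOCK, WINDOWS_LOCK, &my_lock_ctx);

        /* Write back and unlock the record. */
        byte_range_lock_destructor(br_lck);
        return status;
}
#endif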