/* 
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
32 /* This contains elements that differentiate locks. The smbpid is a
33 client supplied pid, and is essentially the locking context for
42 /* The data in brlock records is an unsorted linear array of these
43 records. It is unnecessary to store the count as tdb provides the
47 struct lock_context context;
51 enum brl_type lock_type;
54 /* The key used in the brlock database. */
61 /* The open brlock.tdb database. */
63 static TDB_CONTEXT *tdb;
65 /****************************************************************************
66 Create a locking key - ensuring zero filled for pad purposes.
67 ****************************************************************************/
69 static TDB_DATA locking_key(SMB_DEV_T dev, SMB_INO_T inode)
71 static struct lock_key key;
74 memset(&key, '\0', sizeof(key));
77 kbuf.dptr = (char *)&key;
78 kbuf.dsize = sizeof(key);
82 /****************************************************************************
83 See if two locking contexts are equal.
84 ****************************************************************************/
86 static BOOL brl_same_context(struct lock_context *ctx1,
87 struct lock_context *ctx2)
89 return (ctx1->pid == ctx2->pid) &&
90 (ctx1->smbpid == ctx2->smbpid) &&
91 (ctx1->tid == ctx2->tid);
94 /****************************************************************************
95 See if lock2 can be added when lock1 is in place.
96 ****************************************************************************/
98 static BOOL brl_conflict(struct lock_struct *lck1,
99 struct lock_struct *lck2)
101 if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
104 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
108 if (brl_same_context(&lck1->context, &lck2->context) &&
109 lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
113 if (lck1->start >= (lck2->start + lck2->size) ||
114 lck2->start >= (lck1->start + lck1->size)) {
122 static BOOL brl_conflict1(struct lock_struct *lck1,
123 struct lock_struct *lck2)
125 if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
128 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
132 if (brl_same_context(&lck1->context, &lck2->context) &&
133 lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
137 if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
141 if (lck1->start >= (lck2->start + lck2->size) ||
142 lck2->start >= (lck1->start + lck1->size)) {
150 /****************************************************************************
151 Check to see if this lock conflicts, but ignore our own locks on the
153 ****************************************************************************/
155 static BOOL brl_conflict_other(struct lock_struct *lck1, struct lock_struct *lck2)
157 if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
160 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
164 * Incoming WRITE locks conflict with existing READ locks even
165 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
168 if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
169 if (brl_same_context(&lck1->context, &lck2->context) &&
170 lck1->fnum == lck2->fnum)
174 if (lck1->start >= (lck2->start + lck2->size) ||
175 lck2->start >= (lck1->start + lck1->size)) return False;
182 /* doing this traversal could kill solaris machines under high load (tridge) */
183 /* delete any dead locks */
185 /****************************************************************************
186 Delete a record if it is for a dead process, if check_self is true, then
187 delete any records belonging to this pid also (there shouldn't be any).
188 ****************************************************************************/
190 static int delete_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
192 struct lock_struct *locks;
194 BOOL check_self = *(BOOL *)state;
195 pid_t mypid = sys_getpid();
197 tdb_chainlock(tdb, kbuf);
199 locks = (struct lock_struct *)dbuf.dptr;
201 count = dbuf.dsize / sizeof(*locks);
202 for (i=0; i<count; i++) {
203 struct lock_struct *lock = &locks[i];
205 /* If check_self is true we want to remove our own records. */
206 if (check_self && (mypid == lock->context.pid)) {
208 DEBUG(0,("brlock : delete_fn. LOGIC ERROR ! Shutting down and a record for my pid (%u) exists !\n",
209 (unsigned int)lock->context.pid ));
211 } else if (process_exists(lock->context.pid)) {
213 DEBUG(10,("brlock : delete_fn. pid %u exists.\n", (unsigned int)lock->context.pid ));
217 DEBUG(10,("brlock : delete_fn. Deleting record for process %u\n",
218 (unsigned int)lock->context.pid ));
220 if (count > 1 && i < count-1) {
221 memmove(&locks[i], &locks[i+1],
222 sizeof(*locks)*((count-1) - i));
229 tdb_delete(tdb, kbuf);
230 } else if (count < (dbuf.dsize / sizeof(*locks))) {
231 dbuf.dsize = count * sizeof(*locks);
232 tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
235 tdb_chainunlock(tdb, kbuf);
240 /****************************************************************************
241 Open up the brlock.tdb database.
242 ****************************************************************************/
244 void brl_init(int read_only)
248 tdb = tdb_open_log(lock_path("brlock.tdb"), 0, TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
249 read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644);
251 DEBUG(0,("Failed to open byte range locking database\n"));
256 /* doing this traversal could kill solaris machines under high load (tridge) */
257 /* delete any dead locks */
259 BOOL check_self = False;
260 tdb_traverse(tdb, delete_fn, &check_self);
265 /****************************************************************************
266 Close down the brlock.tdb database.
267 ****************************************************************************/
269 void brl_shutdown(int read_only)
275 /* doing this traversal could kill solaris machines under high load (tridge) */
276 /* delete any dead locks */
278 BOOL check_self = True;
279 tdb_traverse(tdb, delete_fn, &check_self);
287 /****************************************************************************
288 compare two locks for sorting
289 ****************************************************************************/
290 static int lock_compare(struct lock_struct *lck1,
291 struct lock_struct *lck2)
293 if (lck1->start != lck2->start) return (lck1->start - lck2->start);
294 if (lck2->size != lck1->size) {
295 return ((int)lck1->size - (int)lck2->size);
301 /****************************************************************************
302 Lock a range of bytes.
303 ****************************************************************************/
305 NTSTATUS brl_lock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
306 uint16 smbpid, pid_t pid, uint16 tid,
307 br_off start, br_off size,
308 enum brl_type lock_type)
312 struct lock_struct lock, *locks;
314 NTSTATUS status = NT_STATUS_OK;
315 static int last_failed = -1;
316 static br_off last_failed_start;
318 kbuf = locking_key(dev,ino);
323 if (start == 0 && size == 0) {
324 DEBUG(0,("client sent 0/0 lock - please report this\n"));
328 tdb_chainlock(tdb, kbuf);
329 dbuf = tdb_fetch(tdb, kbuf);
331 lock.context.smbpid = smbpid;
332 lock.context.pid = pid;
333 lock.context.tid = tid;
337 lock.lock_type = lock_type;
340 /* there are existing locks - make sure they don't conflict */
341 locks = (struct lock_struct *)dbuf.dptr;
342 count = dbuf.dsize / sizeof(*locks);
343 for (i=0; i<count; i++) {
344 if (brl_conflict(&locks[i], &lock)) {
345 status = NT_STATUS_LOCK_NOT_GRANTED;
349 if (lock.start == 0 && lock.size == 0 &&
350 locks[i].size == 0) {
357 /* no conflicts - add it to the list of locks */
358 tp = Realloc(dbuf.dptr, dbuf.dsize + sizeof(*locks));
360 status = NT_STATUS_NO_MEMORY;
365 memcpy(dbuf.dptr + dbuf.dsize, &lock, sizeof(lock));
366 dbuf.dsize += sizeof(lock);
369 /* sort the lock list */
370 qsort(dbuf.dptr, dbuf.dsize/sizeof(lock), sizeof(lock), lock_compare);
373 tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
375 SAFE_FREE(dbuf.dptr);
376 tdb_chainunlock(tdb, kbuf);
380 /* this is a nasty hack to try to simulate the lock result cache code in w2k.
381 It isn't completely accurate as I haven't yet worked out the correct
384 if (last_failed == fnum &&
385 last_failed_start == start &&
386 NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
387 status = NT_STATUS_FILE_LOCK_CONFLICT;
390 last_failed_start = start;
392 SAFE_FREE(dbuf.dptr);
393 tdb_chainunlock(tdb, kbuf);
397 /****************************************************************************
398 Check if an unlock overlaps a pending lock.
399 ****************************************************************************/
401 static BOOL brl_pending_overlap(struct lock_struct *lock, struct lock_struct *pend_lock)
403 if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
405 if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
410 /****************************************************************************
411 Unlock a range of bytes.
412 ****************************************************************************/
414 BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
415 uint16 smbpid, pid_t pid, uint16 tid,
416 br_off start, br_off size,
417 BOOL remove_pending_locks_only)
421 struct lock_struct *locks;
422 struct lock_context context;
424 kbuf = locking_key(dev,ino);
428 tdb_chainlock(tdb, kbuf);
429 dbuf = tdb_fetch(tdb, kbuf);
432 DEBUG(10,("brl_unlock: tdb_fetch failed !\n"));
436 context.smbpid = smbpid;
440 /* there are existing locks - find a match */
441 locks = (struct lock_struct *)dbuf.dptr;
442 count = dbuf.dsize / sizeof(*locks);
445 for (i=0; i<count; i++) {
446 struct lock_struct *lock = &locks[i];
448 if (lock->lock_type == WRITE_LOCK &&
449 brl_same_context(&lock->context, &context) &&
450 lock->fnum == fnum &&
451 lock->start == start &&
452 lock->size == size) {
453 /* found it - delete it */
455 tdb_delete(tdb, kbuf);
458 memmove(&locks[i], &locks[i+1],
459 sizeof(*locks)*((count-1) - i));
461 dbuf.dsize -= sizeof(*locks);
462 tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
465 SAFE_FREE(dbuf.dptr);
466 tdb_chainunlock(tdb, kbuf);
472 locks = (struct lock_struct *)dbuf.dptr;
473 count = dbuf.dsize / sizeof(*locks);
474 for (i=0; i<count; i++) {
475 struct lock_struct *lock = &locks[i];
477 if (brl_same_context(&lock->context, &context) &&
478 lock->fnum == fnum &&
479 lock->start == start &&
480 lock->size == size) {
482 if (remove_pending_locks_only && lock->lock_type != PENDING_LOCK)
485 if (lock->lock_type != PENDING_LOCK) {
486 /* Send unlock messages to any pending waiters that overlap. */
487 for (j=0; j<count; j++) {
488 struct lock_struct *pend_lock = &locks[j];
490 /* Ignore non-pending locks. */
491 if (pend_lock->lock_type != PENDING_LOCK)
494 /* We could send specific lock info here... */
495 if (brl_pending_overlap(lock, pend_lock)) {
496 DEBUG(10,("brl_unlock: sending unlock message to pid %u\n",
497 (unsigned int)pend_lock->context.pid ));
499 message_send_pid(pend_lock->context.pid,
506 /* found it - delete it */
508 tdb_delete(tdb, kbuf);
511 memmove(&locks[i], &locks[i+1],
512 sizeof(*locks)*((count-1) - i));
514 dbuf.dsize -= sizeof(*locks);
515 tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
518 SAFE_FREE(dbuf.dptr);
519 tdb_chainunlock(tdb, kbuf);
524 /* we didn't find it */
527 SAFE_FREE(dbuf.dptr);
528 tdb_chainunlock(tdb, kbuf);
533 /****************************************************************************
534 Test if we could add a lock if we wanted to.
535 ****************************************************************************/
537 BOOL brl_locktest(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
538 uint16 smbpid, pid_t pid, uint16 tid,
539 br_off start, br_off size,
540 enum brl_type lock_type, int check_self)
544 struct lock_struct lock, *locks;
546 kbuf = locking_key(dev,ino);
550 tdb_chainlock(tdb, kbuf);
551 dbuf = tdb_fetch(tdb, kbuf);
553 lock.context.smbpid = smbpid;
554 lock.context.pid = pid;
555 lock.context.tid = tid;
559 lock.lock_type = lock_type;
562 /* there are existing locks - make sure they don't conflict */
563 locks = (struct lock_struct *)dbuf.dptr;
564 count = dbuf.dsize / sizeof(*locks);
565 for (i=0; i<count; i++) {
567 if (brl_conflict(&locks[i], &lock))
571 * Our own locks don't conflict.
573 if (brl_conflict_other(&locks[i], &lock))
579 /* no conflicts - we could have added it */
580 SAFE_FREE(dbuf.dptr);
581 tdb_chainunlock(tdb, kbuf);
585 SAFE_FREE(dbuf.dptr);
586 tdb_chainunlock(tdb, kbuf);
590 /****************************************************************************
591 Remove any locks associated with a open file.
592 ****************************************************************************/
594 void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
597 int count, i, j, dcount=0;
598 struct lock_struct *locks;
600 kbuf = locking_key(dev,ino);
604 tdb_chainlock(tdb, kbuf);
605 dbuf = tdb_fetch(tdb, kbuf);
607 if (!dbuf.dptr) goto fail;
609 /* there are existing locks - remove any for this fnum */
610 locks = (struct lock_struct *)dbuf.dptr;
611 count = dbuf.dsize / sizeof(*locks);
613 for (i=0; i<count; i++) {
614 struct lock_struct *lock = &locks[i];
616 if (lock->context.tid == tid &&
617 lock->context.pid == pid &&
618 lock->fnum == fnum) {
620 /* Send unlock messages to any pending waiters that overlap. */
621 for (j=0; j<count; j++) {
622 struct lock_struct *pend_lock = &locks[j];
624 /* Ignore our own or non-pending locks. */
625 if (pend_lock->lock_type != PENDING_LOCK)
628 if (pend_lock->context.tid == tid &&
629 pend_lock->context.pid == pid &&
630 pend_lock->fnum == fnum)
633 /* We could send specific lock info here... */
634 if (brl_pending_overlap(lock, pend_lock))
635 message_send_pid(pend_lock->context.pid,
640 /* found it - delete it */
641 if (count > 1 && i < count-1) {
642 memmove(&locks[i], &locks[i+1],
643 sizeof(*locks)*((count-1) - i));
652 tdb_delete(tdb, kbuf);
653 } else if (count < (dbuf.dsize / sizeof(*locks))) {
654 dbuf.dsize -= dcount * sizeof(*locks);
655 tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
658 /* we didn't find it */
660 SAFE_FREE(dbuf.dptr);
661 tdb_chainunlock(tdb, kbuf);
664 /****************************************************************************
665 Traverse the whole database with this function, calling traverse_callback
667 ****************************************************************************/
669 static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
671 struct lock_struct *locks;
672 struct lock_key *key;
675 BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;
677 locks = (struct lock_struct *)dbuf.dptr;
678 key = (struct lock_key *)kbuf.dptr;
680 for (i=0;i<dbuf.dsize/sizeof(*locks);i++) {
681 traverse_callback(key->device, key->inode,
682 locks[i].context.pid,
690 /*******************************************************************
691 Call the specified function on each lock in the database.
692 ********************************************************************/
694 int brl_forall(BRLOCK_FN(fn))
697 return tdb_traverse(tdb, traverse_fn, (void *)fn);