2 Unix SMB/Netbios implementation.
4 byte range locking code
5 Updated to handle range splits/merges.
7 Copyright (C) Andrew Tridgell 1992-2000
8 Copyright (C) Jeremy Allison 1992-2000
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 /* This module implements a tdb based byte range locking service,
26 replacing the fcntl() based byte range locking previously
27 used. This allows us to provide the same semantics as NT */
/* Global debug level, defined elsewhere in Samba. */
31 extern int DEBUGLEVEL;
/* NOTE(review): the bodies of struct lock_context, struct lock_struct and
   struct lock_key are partially elided in this listing; only fragments of
   their comments and members are visible below. */
33 /* This contains elements that differentiate locks. The smbpid is a
34    client supplied pid, and is essentially the locking context for
43 /* The data in brlock records is an unsorted linear array of these
44    records. It is unnecessary to store the count as tdb provides the
/* Visible members of struct lock_struct: the owning context and lock type.
   Other members (fnum, start, size) are referenced by the code below but
   their declarations are elided here. */
48 	struct lock_context context;
52 	enum brl_type lock_type;
55 /* The key used in the brlock database. */
62 /* The open brlock.tdb database. */
/* Module-wide handle to brlock.tdb; opened once by brl_init(). */
64 static TDB_CONTEXT *tdb;
66 /****************************************************************************
67 See if two locking contexts are equal.
68 ****************************************************************************/
/* Two locking contexts are equal when the server pid, the client-supplied
   smbpid and the tree id (tid) all match.  The fnum is deliberately NOT part
   of the context comparison; callers that care about fnum check it
   separately (see brl_unlock).  NOTE(review): the function's braces are
   elided in this listing. */
70 static BOOL brl_same_context(struct lock_context *ctx1,
71 				struct lock_context *ctx2)
73 	return (ctx1->pid == ctx2->pid) &&
74 		(ctx1->smbpid == ctx2->smbpid) &&
75 		(ctx1->tid == ctx2->tid);
78 /****************************************************************************
79 See if lock2 can be added when lock1 is in place.
80 ****************************************************************************/
/* Decide whether lck2 may be granted while lck1 is held.
   Non-conflicting cases handled below:
     - two read locks never conflict;
     - a read lock from the same context as an existing lock never conflicts;
     - ranges that do not overlap never conflict.
   NOTE(review): the fall-through "return True" for the overlapping case and
   the early-return body of the read/read test are elided in this listing. */
82 static BOOL brl_conflict(struct lock_struct *lck1,
83 			 struct lock_struct *lck2)
85 	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
88 	if (brl_same_context(&lck1->context, &lck2->context) &&
89 	    lck2->lock_type == READ_LOCK) return False;
/* No overlap when one range starts at or beyond the end of the other. */
91 	if (lck1->start >= (lck2->start + lck2->size) ||
92 	    lck2->start >= (lck1->start + lck1->size)) return False;
98 /****************************************************************************
99 Open up the brlock.tdb database.
100 ****************************************************************************/
/* Open the brlock.tdb database, clearing it if we are the first opener
   (TDB_CLEAR_IF_FIRST).  Opens read-only or read-write/create depending on
   the read_only flag.  NOTE(review): the "if (!tdb)" guard around the DEBUG
   call is elided in this listing. */
102 void brl_init(int read_only)
105 	tdb = tdb_open(lock_path("brlock.tdb"), 0, TDB_CLEAR_IF_FIRST,
106 		       read_only?O_RDONLY:O_RDWR|O_CREAT, 0644);
108 		DEBUG(0,("Failed to open byte range locking database\n"));
113 /****************************************************************************
114 Lock a range of bytes.
115 ****************************************************************************/
/* Try to acquire a byte-range lock on the dev/inode pair.
   The tdb chain for this key is locked across the fetch/check/store sequence
   so the test-and-set is atomic with respect to other smbd processes.
   On success the new lock record is appended to the (unsorted) array stored
   under the key and the function returns True; on conflict or allocation
   failure control reaches the fail path.  NOTE(review): the key setup, the
   conflict "goto fail", the success return and the "fail:" label are elided
   in this listing. */
117 BOOL brl_lock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
118 	      uint16 smbpid, pid_t pid, uint16 tid,
119 	      br_off start, br_off size,
120 	      enum brl_type lock_type)
125 	struct lock_struct lock, *locks;
/* The tdb key is the raw bytes of the lock_key struct. */
129 	kbuf.dptr = (char *)&key;
130 	kbuf.dsize = sizeof(key);
/* Hold the chain lock for the whole read-modify-write cycle. */
134 	tdb_lockchain(tdb, kbuf);
135 	dbuf = tdb_fetch(tdb, kbuf);
/* Build the candidate lock record from the caller's context. */
137 	lock.context.smbpid = smbpid;
138 	lock.context.pid = pid;
139 	lock.context.tid = tid;
143 	lock.lock_type = lock_type;
146 	/* there are existing locks - make sure they don't conflict */
147 	locks = (struct lock_struct *)dbuf.dptr;
148 	count = dbuf.dsize / sizeof(*locks);
149 	for (i=0; i<count; i++) {
150 		if (brl_conflict(&locks[i], &lock)) {
156 	/* no conflicts - add it to the list of locks */
/* NOTE(review): Realloc overwrites dbuf.dptr directly; on failure the old
   buffer is unrecoverable here, but the code bails out via goto fail. */
157 	dbuf.dptr = Realloc(dbuf.dptr, dbuf.dsize + sizeof(*locks));
158 	if (!dbuf.dptr) goto fail;
159 	memcpy(dbuf.dptr + dbuf.dsize, &lock, sizeof(lock));
160 	dbuf.dsize += sizeof(lock);
161 	tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
164 	tdb_unlockchain(tdb, kbuf);
/* Shared failure path: free any fetched record and drop the chain lock. */
168  fail:	if (dbuf.dptr) free(dbuf.dptr);
169 	tdb_unlockchain(tdb, kbuf);
173 /****************************************************************************
174 Create a list of lock ranges that don't overlap a given range. Used in calculating
175 POSIX lock unlocks. This is a difficult function that requires ASCII art to
177 ****************************************************************************/
/* Given an initial unlock range list (ulhead) and a dev/inode pair, trim the
   list so it only covers byte ranges NOT overlapped by locks still held by
   this process (pid) in the brlock database.  Used when computing which
   POSIX fcntl unlocks may actually be issued to the kernel.  Five cases are
   handled per (lock, unlock-range) pair: no overlap, complete overlap
   (delete), high-end overlap (truncate from below), low-end overlap
   (truncate from above), and a full split into two ranges.
   Returns the (possibly modified) list head; all list nodes live on the
   caller's talloc context.  NOTE(review): several lines of this function
   (declarations, early returns, ASCII-art diagrams referenced by the header
   comment) are elided in this listing. */
179 struct unlock_list *brl_unlock_list(TALLOC_CTX *ctx, struct unlock_list *ulhead,
180 			pid_t pid, SMB_DEV_T dev, SMB_INO_T ino)
184 	struct lock_struct *locks;
188 	 * Setup the key for this fetch.
192 	kbuf.dptr = (char *)&key;
193 	kbuf.dsize = sizeof(key);
/* Chain lock held while we walk the stored lock array. */
197 	tdb_lockchain(tdb, kbuf);
198 	dbuf = tdb_fetch(tdb, kbuf);
/* NOTE(review): this early unlock presumably belongs to a "no record found"
   branch whose condition is elided. */
201 		tdb_unlockchain(tdb, kbuf);
205 	locks = (struct lock_struct *)dbuf.dptr;
206 	num_locks = dbuf.dsize / sizeof(*locks);
209 	 * Check the current lock list on this dev/inode pair.
210 	 * Quit if the list is deleted.
213 	DEBUG(10,("brl_unlock_list: curr: start=%.0f,size=%.0f\n",
214 		(double)ulhead->start, (double)ulhead->size ));
/* Outer loop: every stored lock; stops early if the unlock list empties. */
216 	for (i=0; i<num_locks && ulhead; i++) {
218 		struct lock_struct *lock = &locks[i];
219 		struct unlock_list *ul_curr;
221 		/* If it's not this process, ignore it. */
222 		if (lock->context.pid != pid)
226 		 * Walk the unlock list, checking for overlaps. Note that
227 		 * the unlock list can expand within this loop if the current
228 		 * range being examined needs to be split.
/* Inner loop: ul_curr is advanced manually inside each case below. */
231 		for (ul_curr = ulhead; ul_curr;) {
233 			DEBUG(10,("brl_unlock_list: lock: start=%.0f,size=%.0f:",
234 				(double)lock->start, (double)lock->size ));
/* Case 1: disjoint ranges - keep this unlock range untouched. */
236 			if ( (ul_curr->start >= (lock->start + lock->size)) ||
237 			     (lock->start > (ul_curr->start + ul_curr->size))) {
239 				/* No overlap with this lock - leave this range alone. */
240 /*********************************************
251 **********************************************/
253 				DEBUG(10,("no overlap case.\n" ));
255 				ul_curr = ul_curr->next;
/* Case 2: unlock range completely inside the lock - drop it entirely. */
257 			} else if ( (ul_curr->start >= lock->start) &&
258 				    (ul_curr->start + ul_curr->size <= lock->start + lock->size) ) {
261 				 * This unlock is completely overlapped by this existing lock range
262 				 * and thus should have no effect (not be unlocked). Delete it from the list.
264 /*********************************************
268         +---------------------------+
270         +---------------------------+
271 **********************************************/
272 				/* Save the next pointer */
273 				struct unlock_list *ul_next = ul_curr->next;
275 				DEBUG(10,("delete case.\n" ));
277 				DLIST_REMOVE(ulhead, ul_curr);
/* NOTE(review): the "ulhead empty" test guarding this break, and the
   "ul_curr = ul_next" continuation, are elided in this listing. */
279 					break; /* No more list... */
/* Case 3: overlap at the high end - move start past the lock's end. */
283 			} else if ( (ul_curr->start >= lock->start) &&
284 				    (ul_curr->start < lock->start + lock->size) &&
285 				    (ul_curr->start + ul_curr->size > lock->start + lock->size) ) {
288 				 * This unlock overlaps the existing lock range at the high end.
289 				 * Truncate by moving start to existing range end and reducing size.
291 /*********************************************
302 **********************************************/
/* Size must be recomputed before start is moved - order matters here. */
304 				ul_curr->size = (ul_curr->start + ul_curr->size) - (lock->start + lock->size);
305 				ul_curr->start = lock->start + lock->size;
307 				DEBUG(10,("truncate high case: start=%.0f,size=%.0f\n",
308 					(double)ul_curr->start, (double)ul_curr->size ));
310 				ul_curr = ul_curr->next;
/* Case 4: overlap at the low end - shrink so we stop at the lock's start. */
312 			} else if ( (ul_curr->start < lock->start) &&
313 				    (ul_curr->start + ul_curr->size > lock->start) ) {
316 				 * This unlock overlaps the existing lock range at the low end.
317 				 * Truncate by reducing size.
319 /*********************************************
330 **********************************************/
332 				ul_curr->size = lock->start - ul_curr->start;
334 				DEBUG(10,("truncate low case: start=%.0f,size=%.0f\n",
335 					(double)ul_curr->start, (double)ul_curr->size ));
337 				ul_curr = ul_curr->next;
/* Case 5: unlock range strictly contains the lock - split into two. */
339 			} else if ( (ul_curr->start < lock->start) &&
340 				    (ul_curr->start + ul_curr->size > lock->start + lock->size) ) {
342 				 * Worst case scenario. Unlock request completely overlaps an existing
343 				 * lock range. Split the request into two, push the new (upper) request
344 				 * into the dlink list, and continue with the entry after ul_new (as we
345 				 * know that ul_new will not overlap with this lock).
347 /*********************************************
348         +---------------------------+
350         +---------------------------+
355 +-------+                         +---------+
357 +-------+                         +---------+
358 **********************************************/
359 				struct unlock_list *ul_new = (struct unlock_list *)talloc(ctx,
360 									sizeof(struct unlock_list));
/* NOTE(review): the "if (ul_new == NULL)" test guarding this DEBUG/return
   is elided in this listing. */
363 					DEBUG(0,("brl_unlock_list: talloc fail.\n"));
364 					return NULL; /* The talloc_destroy takes care of cleanup. */
367 				ZERO_STRUCTP(ul_new);
/* Upper fragment: everything past the end of the lock. */
368 				ul_new->start = lock->start + lock->size;
369 				ul_new->size = ul_curr->start + ul_curr->size - ul_new->start;
370 				ul_new->smbpid = ul_curr->smbpid;
372 				/* Add into the dlink list after the ul_curr point - NOT at ulhead. */
373 				DLIST_ADD(ul_curr, ul_new);
375 				/* Truncate the ul_curr. */
376 				ul_curr->size = lock->start - ul_curr->start;
378 				DEBUG(10,("split case: curr: start=%.0f,size=%.0f \
379 new: start=%.0f,size=%.0f\n", (double)ul_curr->start, (double)ul_curr->size,
380 					(double)ul_new->start, (double)ul_new->size ));
/* Skip ul_new itself; it cannot overlap the current lock. */
382 				ul_curr = ul_new->next;
387 				 * This logic case should never happen. Ensure this is the
388 				 * case by forcing an abort.... Remove in production.
391 				smb_panic("logic flaw in cases...\n");
393 		} /* end for ( ul_curr = ulhead; ul_curr;) */
394 	} /* end for (i=0; i<num_locks && ul_head; i++) */
396 	tdb_unlockchain(tdb, kbuf);
404 /****************************************************************************
405 Unlock a range of bytes.
406 ****************************************************************************/
/* Release a byte-range lock: find the stored record whose context, fnum,
   start and size all match exactly and remove it from the array under the
   dev/inode key.  If it was the last record the whole tdb entry is deleted,
   otherwise the array is compacted and stored back.  Returns True on
   success; the not-found path falls through to the fail label.
   NOTE(review): key setup, the remaining context assignments (pid, tid),
   the "count == 1" test around tdb_delete, and the success/fail returns are
   elided in this listing. */
408 BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
409 		uint16 smbpid, pid_t pid, uint16 tid,
410 		br_off start, br_off size)
415 	struct lock_struct *locks;
416 	struct lock_context context;
420 	kbuf.dptr = (char *)&key;
421 	kbuf.dsize = sizeof(key);
/* Chain lock held across the whole find-and-delete cycle. */
425 	tdb_lockchain(tdb, kbuf);
426 	dbuf = tdb_fetch(tdb, kbuf);
429 		DEBUG(0,("brl_unlock: tdb_fetch failed !\n"));
/* Build the context to match against; pid/tid assignments are elided here. */
433 	context.smbpid = smbpid;
437 	/* there are existing locks - find a match */
438 	locks = (struct lock_struct *)dbuf.dptr;
439 	count = dbuf.dsize / sizeof(*locks);
440 	for (i=0; i<count; i++) {
442 		struct lock_struct *lock = &locks[i];
445 /* JRATEST - DEBUGGING INFO */
446 		if(!brl_same_context(&lock->context, &context)) {
447 			DEBUG(10,("brl_unlock: Not same context. l_smbpid = %u, l_pid = %u, l_tid = %u: \
448 smbpid = %u, pid = %u, tid = %u\n",
449 				lock->context.smbpid, lock->context.pid, lock->context.tid,
450 				context.smbpid, context.pid, context.tid ));
/* A record matches only on exact context AND fnum AND exact range. */
456 		if (brl_same_context(&lock->context, &context) &&
457 		    lock->fnum == fnum &&
458 		    lock->start == start &&
459 		    lock->size == size) {
460 			/* found it - delete it */
/* Last lock on the file: remove the whole record... */
462 				tdb_delete(tdb, kbuf);
/* ...otherwise shift the tail of the array down over the deleted slot. */
465 					memmove(&locks[i], &locks[i+1],
466 						sizeof(*locks)*((count-1) - i));
468 				dbuf.dsize -= sizeof(*locks);
469 				tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
473 			tdb_unlockchain(tdb, kbuf);
478 	/* we didn't find it */
481 	if (dbuf.dptr) free(dbuf.dptr);
482 	tdb_unlockchain(tdb, kbuf);
486 /****************************************************************************
487 Test if we could add a lock if we wanted to.
488 ****************************************************************************/
/* Test whether a lock with the given parameters COULD be granted, without
   actually taking it.  Same conflict scan as brl_lock but with no store;
   returns True when no existing record conflicts.  NOTE(review): key setup,
   the conflict "goto fail", and the final returns are elided in this
   listing. */
490 BOOL brl_locktest(SMB_DEV_T dev, SMB_INO_T ino,
491 		  uint16 smbpid, pid_t pid, uint16 tid,
492 		  br_off start, br_off size,
493 		  enum brl_type lock_type)
498 	struct lock_struct lock, *locks;
502 	kbuf.dptr = (char *)&key;
503 	kbuf.dsize = sizeof(key);
/* Chain lock so the answer is consistent with concurrent lockers. */
507 	tdb_lockchain(tdb, kbuf);
508 	dbuf = tdb_fetch(tdb, kbuf);
/* Candidate lock built exactly as in brl_lock. */
510 	lock.context.smbpid = smbpid;
511 	lock.context.pid = pid;
512 	lock.context.tid = tid;
515 	lock.lock_type = lock_type;
518 	/* there are existing locks - make sure they don't conflict */
519 	locks = (struct lock_struct *)dbuf.dptr;
520 	count = dbuf.dsize / sizeof(*locks);
521 	for (i=0; i<count; i++) {
522 		if (brl_conflict(&locks[i], &lock)) {
528 	/* no conflicts - we could have added it */
530 	tdb_unlockchain(tdb, kbuf);
534 	if (dbuf.dptr) free(dbuf.dptr);
535 	tdb_unlockchain(tdb, kbuf);
539 /****************************************************************************
540 Remove any locks associated with a open file.
541 ****************************************************************************/
/* Remove every lock owned by this pid/tid/fnum on the dev/inode pair - used
   when a file handle is closed.  After the scan, the record is either
   deleted outright (no locks left) or stored back compacted (some removed).
   NOTE(review): key setup, the count decrement inside the match branch, the
   loop-closing braces and the "count == 0" test before tdb_delete are
   elided in this listing. */
543 void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
548 	struct lock_struct *locks;
552 	kbuf.dptr = (char *)&key;
553 	kbuf.dsize = sizeof(key);
/* Chain lock held across the whole sweep. */
557 	tdb_lockchain(tdb, kbuf);
558 	dbuf = tdb_fetch(tdb, kbuf);
560 	if (!dbuf.dptr) goto fail;
562 	/* there are existing locks - remove any for this fnum */
563 	locks = (struct lock_struct *)dbuf.dptr;
564 	count = dbuf.dsize / sizeof(*locks);
565 	for (i=0; i<count; i++) {
566 		struct lock_struct *lock = &locks[i];
/* Match on tid + pid + fnum; range is irrelevant on close. */
568 		if (lock->context.tid == tid &&
569 		    lock->context.pid == pid &&
570 		    lock->fnum == fnum) {
571 			/* found it - delete it */
572 			if (count > 1 && i < count-1) {
573 				memmove(&locks[i], &locks[i+1],
574 					sizeof(*locks)*((count-1) - i));
/* All locks removed: drop the tdb record entirely... */
582 		tdb_delete(tdb, kbuf);
/* ...else, if the count shrank, store the compacted array back. */
583 	} else if (count < (dbuf.dsize / sizeof(*locks))) {
584 		tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
587 	/* we didn't find it */
589 	if (dbuf.dptr) free(dbuf.dptr);
590 	tdb_unlockchain(tdb, kbuf);
593 /****************************************************************************
594 Return a lock list associated with an open file.
595 ****************************************************************************/
/* Build a talloc'd unlock_list describing every lock held by this
   pid/tid/fnum on the dev/inode pair.  Each matching stored record becomes
   one list node carrying start, size and the client smbpid.  Returns NULL
   on talloc failure (caller's talloc_destroy cleans up) and presumably the
   list head otherwise - the final return is elided in this listing, as are
   key setup, the "record not found" condition at line 616's branch, and the
   NULL check guarding the talloc-fail DEBUG. */
597 struct unlock_list *brl_getlocklist( TALLOC_CTX *ctx, SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
602 	struct lock_struct *locks;
603 	struct unlock_list *ulist = NULL;
607 	kbuf.dptr = (char *)&key;
608 	kbuf.dsize = sizeof(key);
612 	tdb_lockchain(tdb, kbuf);
613 	dbuf = tdb_fetch(tdb, kbuf);
/* NOTE(review): early unlock presumably in a "no record" branch whose
   condition is elided. */
616 		tdb_unlockchain(tdb, kbuf);
620 	/* There are existing locks - allocate an entry for each one. */
621 	locks = (struct lock_struct *)dbuf.dptr;
622 	count = dbuf.dsize / sizeof(*locks);
624 	for (i=0; i<count; i++) {
625 		struct lock_struct *lock = &locks[i];
/* Only this handle's locks are reported. */
627 		if (lock->context.tid == tid &&
628 		    lock->context.pid == pid &&
629 		    lock->fnum == fnum) {
631 			struct unlock_list *ul_new = (struct unlock_list *)talloc(ctx,
632 							sizeof(struct unlock_list));
635 				DEBUG(0,("brl_getlocklist: talloc fail.\n"));
636 				return NULL; /* The talloc_destroy takes care of cleanup. */
639 			ZERO_STRUCTP(ul_new);
640 			ul_new->start = lock->start;
641 			ul_new->size = lock->size;
642 			ul_new->smbpid = lock->context.smbpid;
/* Prepend to the result list (DLIST_ADD adds at the head). */
644 			DLIST_ADD(ulist, ul_new);
650 	tdb_unlockchain(tdb, kbuf);
656 /****************************************************************************
657 Traverse the whole database with this function, calling traverse_callback
659 ****************************************************************************/
/* tdb_traverse callback: decode one record (key = lock_key, data = array of
   lock_struct) and invoke the user callback once per stored lock.
   NOTE(review): the remaining callback arguments and the return value are
   elided in this listing. */
661 static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
663 	struct lock_struct *locks;
664 	struct lock_key *key;
/* The user-supplied callback is smuggled through the opaque state pointer. */
667 	BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;
669 	locks = (struct lock_struct *)dbuf.dptr;
670 	key = (struct lock_key *)kbuf.dptr;
672 	for (i=0;i<dbuf.dsize/sizeof(*locks);i++) {
673 		traverse_callback(key->device, key->inode,
674 				  locks[i].context.pid,
682 /*******************************************************************
683 Call the specified function on each lock in the database.
684 ********************************************************************/
/* Walk the entire brlock database, calling fn once per stored lock via
   traverse_fn.  Returns tdb_traverse's result (number of records visited,
   or -1 on error per the tdb API).  NOTE(review): the closing brace lies
   past the end of this listing. */
686 int brl_forall(BRLOCK_FN(fn))
689 	return tdb_traverse(tdb, traverse_fn, (BRLOCK_FN_CAST())fn);