/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell 1999-2005
   Copyright (C) Paul `Rusty' Russell 2000
   Copyright (C) Jeremy Allison 2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/build_assert/build_assert.h>
/* If we were threaded, we could wait for unlock, but we're not, so fail. */
enum TDB_ERROR owner_conflict(struct tdb_context *tdb, const char *call)
{
	return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
			  "%s: lock owned by another tdb in this process.",
			  call);
}
/* If we fork, we no longer really own locks. */
bool check_lock_pid(struct tdb_context *tdb, const char *call, bool log)
{
	/* No locks?  No problem! */
	if (tdb->file->allrecord_lock.count == 0
	    && tdb->file->num_lockrecs == 0) {
		return true;
	}

	/* No fork?  No problem! */
	if (tdb->file->locker == getpid()) {
		return true;
	}

	if (log) {
		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
			   "%s: fork() detected after lock acquisition!"
			   " (%u vs %u)", call, tdb->file->locker, getpid());
	}
	return false;
}
int tdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
		   void *unused)
{
	struct flock fl = { .l_type = rw, .l_whence = SEEK_SET,
			    .l_start = off, .l_len = len };
	int ret;

	do {
		if (waitflag)
			ret = fcntl(fd, F_SETLKW, &fl);
		else
			ret = fcntl(fd, F_SETLK, &fl);
	} while (ret != 0 && errno == EINTR);
	return ret;
}
int tdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused)
{
	struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET,
			    .l_start = off, .l_len = len };
	int ret;

	do {
		ret = fcntl(fd, F_SETLKW, &fl);
	} while (ret != 0 && errno == EINTR);
	return ret;
}
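/* Take a byte-range lock via the pluggable lock callback, remembering
 * which pid grabbed the first lock and updating the lock statistics. */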
static int lock(struct tdb_context *tdb,
		int rw, off_t off, off_t len, bool waitflag)
{
	int ret;

	if (tdb->file->allrecord_lock.count == 0
	    && tdb->file->num_lockrecs == 0) {
		tdb->file->locker = getpid();
	}

	tdb->stats.lock_lowlevel++;
	ret = tdb->lock_fn(tdb->file->fd, rw, off, len, waitflag,
			   tdb->lock_data);
	if (!waitflag) {
		tdb->stats.lock_nonblock++;
		if (ret != 0)
			tdb->stats.lock_nonblock_fail++;
	}
	return ret;
}
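/* Drop a byte-range lock via the pluggable unlock callback.  The #if 0
 * block below is a debugging aid that cross-checks against /proc/locks. */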
static int unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
#if 0 /* Check they matched up locks and unlocks correctly. */
	char line[80];
	FILE *locks;
	bool found = false;

	locks = fopen("/proc/locks", "r");

	while (fgets(line, 80, locks)) {
		char *p;
		int type, start, l;

		/* eg. 1: FLOCK ADVISORY WRITE 2440 08:01:2180826 0 EOF */
		p = strchr(line, ':') + 1;
		if (strncmp(p, " POSIX ADVISORY ", strlen(" POSIX ADVISORY ")))
			continue;
		p += strlen(" FLOCK ADVISORY ");
		if (strncmp(p, "READ ", strlen("READ ")) == 0)
			type = F_RDLCK;
		else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
			type = F_WRLCK;
		else
			continue;
		p += 6;
		if (atoi(p) != getpid())
			continue;
		p = strchr(strchr(p, ' ') + 1, ' ') + 1;
		start = atoi(p);
		p = strchr(p, ' ') + 1;
		if (strncmp(p, "EOF", 3) == 0)
			l = 0;
		else
			l = atoi(p) - start + 1;

		if (off == start) {
			if (len != l) {
				fprintf(stderr, "Len %u should be %u: %s",
					(unsigned)l, (unsigned)len, line);
			} else if (type != rw) {
				fprintf(stderr, "Type %s wrong: %s",
					rw == F_RDLCK ? "READ" : "WRITE", line);
			} else {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		fprintf(stderr, "Unlock on %u@%u not found!",
			(unsigned)len, (unsigned)off);
	}
	fclose(locks);
#endif

	return tdb->unlock_fn(tdb->file->fd, rw, off, len, tdb->lock_data);
}
/* a byte range locking function - return 0 on success
   this function locks len bytes at the specified offset.

   note that a len of zero means lock to end of file
*/
static enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
				 int rw_type, tdb_off_t offset, tdb_off_t len,
				 enum tdb_lock_flags flags)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return TDB_SUCCESS;
	}

	if (rw_type == F_WRLCK && (tdb->flags & TDB_RDONLY)) {
		return tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
				  "Write lock attempted on read-only database");
	}

	/* A 32 bit system cannot open a 64-bit file, but it could have
	 * expanded since then: check here. */
	if ((size_t)(offset + len) != offset + len) {
		return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
				  "tdb_brlock: lock on giant offset %llu",
				  (long long)(offset + len));
	}

	ret = lock(tdb, rw_type, offset, len, flags & TDB_LOCK_WAIT);
	if (ret != 0) {
		/* Generic lock error. errno set by fcntl.
		 * EAGAIN is an expected return from non-blocking
		 * locks. */
		if (!(flags & TDB_LOCK_PROBE)
		    && (errno != EAGAIN && errno != EINTR)) {
			tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				   "tdb_brlock failed (fd=%d) at"
				   " offset %zu rw_type=%d flags=%d len=%zu:"
				   " %s",
				   tdb->file->fd, (size_t)offset, rw_type,
				   flags, (size_t)len, strerror(errno));
		}
		return TDB_ERR_LOCK;
	}
	return TDB_SUCCESS;
}
static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
				   int rw_type, tdb_off_t offset, size_t len)
{
	if (tdb->flags & TDB_NOLOCK) {
		return TDB_SUCCESS;
	}

	if (!check_lock_pid(tdb, "tdb_brunlock", true))
		return TDB_ERR_LOCK;

	if (unlock(tdb, rw_type, offset, len) == -1) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_brunlock failed (fd=%d) at offset %zu"
				  " rw_type=%d len=%zu: %s",
				  tdb->file->fd, (size_t)offset, rw_type,
				  (size_t)len, strerror(errno));
	}
	return TDB_SUCCESS;
}
/*
  upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as solaris) have too conservative
  deadlock detection and claim a deadlock when progress can be
  made. For those OSes we may loop for a while.
*/
enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb, off_t start)
{
	int count = 1000;

	if (!check_lock_pid(tdb, "tdb_transaction_prepare_commit", true))
		return TDB_ERR_LOCK;

	if (tdb->file->allrecord_lock.count != 1) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_allrecord_upgrade failed:"
				  " count %u too high",
				  tdb->file->allrecord_lock.count);
	}

	if (tdb->file->allrecord_lock.off != 1) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_allrecord_upgrade failed:"
				  " already upgraded?");
	}

	if (tdb->file->allrecord_lock.owner != tdb) {
		return owner_conflict(tdb, "tdb_allrecord_upgrade");
	}

	while (count--) {
		struct timeval tv;

		if (tdb_brlock(tdb, F_WRLCK, start, 0,
			       TDB_LOCK_WAIT|TDB_LOCK_PROBE) == TDB_SUCCESS) {
			tdb->file->allrecord_lock.ltype = F_WRLCK;
			tdb->file->allrecord_lock.off = 0;
			return TDB_SUCCESS;
		}
		if (errno != EDEADLK) {
			break;
		}
		/* sleep for as short a time as we can - more portable than usleep() */
		tv.tv_sec = 0;
		tv.tv_usec = 1;
		select(0, NULL, NULL, NULL, &tv);
	}

	if (errno != EAGAIN && errno != EINTR)
		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
			   "tdb_allrecord_upgrade failed");
	return TDB_ERR_LOCK;
}
static struct tdb_lock *find_nestlock(struct tdb_context *tdb, tdb_off_t offset,
				      const struct tdb_context *owner)
{
	unsigned int i;

	for (i=0; i<tdb->file->num_lockrecs; i++) {
		if (tdb->file->lockrecs[i].off == offset) {
			if (owner && tdb->file->lockrecs[i].owner != owner)
				break;
			return &tdb->file->lockrecs[i];
		}
	}
	return NULL;
}
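/* Take the allrecord and open locks (without recovery checks, to avoid
 * recursing), then run transaction recovery and drop both locks again. */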
enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
{
	enum TDB_ERROR ecode;

	if (!check_lock_pid(tdb, "tdb_transaction_prepare_commit", true))
		return TDB_ERR_LOCK;

	ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
				   false);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	ecode = tdb_lock_open(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK);
	if (ecode != TDB_SUCCESS) {
		tdb_allrecord_unlock(tdb, F_WRLCK);
		return ecode;
	}
	ecode = tdb_transaction_recover(tdb);
	tdb_unlock_open(tdb, F_WRLCK);
	tdb_allrecord_unlock(tdb, F_WRLCK);

	return ecode;
}
/* lock an offset in the database. */
static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
				    tdb_off_t offset, int ltype,
				    enum tdb_lock_flags flags)
{
	struct tdb_lock *new_lck;
	enum TDB_ERROR ecode;

	if (offset > (TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
		      + tdb->file->map_size / 8)) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_nest_lock: invalid offset %zu ltype=%d",
				  (size_t)offset, ltype);
	}

	if (tdb->flags & TDB_NOLOCK)
		return TDB_SUCCESS;

	if (!check_lock_pid(tdb, "tdb_nest_lock", true)) {
		return TDB_ERR_LOCK;
	}

	new_lck = find_nestlock(tdb, offset, NULL);
	if (new_lck) {
		if (new_lck->owner != tdb) {
			return owner_conflict(tdb, "tdb_nest_lock");
		}

		if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
			return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
					  "tdb_nest_lock:"
					  " offset %zu has read lock",
					  (size_t)offset);
		}
		/* Just increment the struct, posix locks don't stack. */
		new_lck->count++;
		return TDB_SUCCESS;
	}

	if (tdb->file->num_lockrecs
	    && offset >= TDB_HASH_LOCK_START
	    && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_nest_lock: already have a hash lock?");
	}

	new_lck = (struct tdb_lock *)realloc(
		tdb->file->lockrecs,
		sizeof(*tdb->file->lockrecs) * (tdb->file->num_lockrecs+1));
	if (new_lck == NULL) {
		return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
				  "tdb_nest_lock:"
				  " unable to allocate %zu lock struct",
				  tdb->file->num_lockrecs + 1);
	}
	tdb->file->lockrecs = new_lck;

	/* Since fcntl locks don't nest, we do a lock for the first one,
	   and simply bump the count for future ones */
	ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	/* First time we grab a lock, perhaps someone died in commit? */
	if (!(flags & TDB_LOCK_NOCHECK)
	    && tdb->file->num_lockrecs == 0) {
		tdb_bool_err berr = tdb_needs_recovery(tdb);
		if (berr != false) {
			tdb_brunlock(tdb, ltype, offset, 1);

			if (berr < 0)
				return TDB_OFF_TO_ERR(berr);
			ecode = tdb_lock_and_recover(tdb);
			if (ecode == TDB_SUCCESS) {
				ecode = tdb_brlock(tdb, ltype, offset, 1,
						   flags);
			}
			if (ecode != TDB_SUCCESS) {
				return ecode;
			}
		}
	}

	tdb->file->lockrecs[tdb->file->num_lockrecs].owner = tdb;
	tdb->file->lockrecs[tdb->file->num_lockrecs].off = offset;
	tdb->file->lockrecs[tdb->file->num_lockrecs].count = 1;
	tdb->file->lockrecs[tdb->file->num_lockrecs].ltype = ltype;
	tdb->file->num_lockrecs++;

	return TDB_SUCCESS;
}
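/* Drop one reference to a nested lock; the fcntl lock itself is only
 * released when the count reaches zero. */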
static enum TDB_ERROR tdb_nest_unlock(struct tdb_context *tdb,
				      tdb_off_t off, int ltype)
{
	struct tdb_lock *lck;
	enum TDB_ERROR ecode;

	if (tdb->flags & TDB_NOLOCK)
		return TDB_SUCCESS;

	lck = find_nestlock(tdb, off, tdb);
	if ((lck == NULL) || (lck->count == 0)) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_nest_unlock: no lock for %zu",
				  (size_t)off);
	}

	if (lck->count > 1) {
		lck->count--;
		return TDB_SUCCESS;
	}

	/*
	 * This lock has count==1 left, so we need to unlock it in the
	 * kernel. We don't bother with decrementing the in-memory array
	 * element, we're about to overwrite it with the last array element
	 * anyway.
	 */
	ecode = tdb_brunlock(tdb, ltype, off, 1);

	/*
	 * Shrink the array by overwriting the element just unlocked with the
	 * last array element.
	 */
	*lck = tdb->file->lockrecs[--tdb->file->num_lockrecs];

	return ecode;
}
/* get the transaction lock */
enum TDB_ERROR tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
	return tdb_nest_lock(tdb, TDB_TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/* release the transaction lock */
void tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
	tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
}
/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static enum TDB_ERROR tdb_lock_gradual(struct tdb_context *tdb,
				       int ltype, enum tdb_lock_flags flags,
				       tdb_off_t off, tdb_off_t len)
{
	enum TDB_ERROR ecode;
	enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

	if (len <= 1) {
		/* 0 would mean to end-of-file... */
		assert(len != 0);
		/* Single hash.  Just do blocking lock. */
		return tdb_brlock(tdb, ltype, off, len, flags);
	}

	/* First we try non-blocking. */
	ecode = tdb_brlock(tdb, ltype, off, len, nb_flags);
	if (ecode != TDB_ERR_LOCK) {
		return ecode;
	}

	/* Try locking first half, then second. */
	ecode = tdb_lock_gradual(tdb, ltype, flags, off, len / 2);
	if (ecode != TDB_SUCCESS)
		return ecode;

	ecode = tdb_lock_gradual(tdb, ltype, flags,
				 off + len / 2, len - len / 2);
	if (ecode != TDB_SUCCESS) {
		tdb_brunlock(tdb, ltype, off, len / 2);
	}
	return ecode;
}
/* lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (ie. transaction write lock). */
enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
				  enum tdb_lock_flags flags, bool upgradable)
{
	enum TDB_ERROR ecode;
	tdb_bool_err berr;

	if (tdb->flags & TDB_NOLOCK)
		return TDB_SUCCESS;

	if (!check_lock_pid(tdb, "tdb_allrecord_lock", true)) {
		return TDB_ERR_LOCK;
	}

	if (tdb->file->allrecord_lock.count) {
		if (tdb->file->allrecord_lock.owner != tdb) {
			return owner_conflict(tdb, "tdb_allrecord_lock");
		}

		if (ltype == F_RDLCK
		    || tdb->file->allrecord_lock.ltype == F_WRLCK) {
			tdb->file->allrecord_lock.count++;
			return TDB_SUCCESS;
		}

		/* a global lock of a different type exists */
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
				  "tdb_allrecord_lock: already have %s lock",
				  tdb->file->allrecord_lock.ltype == F_RDLCK
				  ? "read" : "write");
	}

	if (tdb_has_hash_locks(tdb)) {
		/* can't combine global and chain locks */
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
				  "tdb_allrecord_lock:"
				  " already have chain lock");
	}

	if (upgradable && ltype != F_RDLCK) {
		/* tdb error: you can't upgrade a write lock! */
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_allrecord_lock:"
				  " can't upgrade a write lock");
	}

again:
	/* Lock hashes, gradually. */
	ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
				 TDB_HASH_LOCK_RANGE);
	if (ecode != TDB_SUCCESS)
		return ecode;

	/* Lock free tables: there to end of file. */
	ecode = tdb_brlock(tdb, ltype,
			   TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
			   0, flags);
	if (ecode != TDB_SUCCESS) {
		tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
			     TDB_HASH_LOCK_RANGE);
		return ecode;
	}

	tdb->file->allrecord_lock.owner = tdb;
	tdb->file->allrecord_lock.count = 1;
	/* If it's upgradable, it's actually exclusive so we can treat
	 * it as a write lock. */
	tdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
	tdb->file->allrecord_lock.off = upgradable;

	/* Now check for needing recovery. */
	if (flags & TDB_LOCK_NOCHECK)
		return TDB_SUCCESS;

	berr = tdb_needs_recovery(tdb);
	if (likely(berr == false))
		return TDB_SUCCESS;

	tdb_allrecord_unlock(tdb, ltype);
	if (berr < 0)
		return TDB_OFF_TO_ERR(berr);
	ecode = tdb_lock_and_recover(tdb);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}
	goto again;
}
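/* Convenience wrappers for the open and expansion locks: single-byte
 * nested locks at fixed, well-known offsets. */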
enum TDB_ERROR tdb_lock_open(struct tdb_context *tdb,
			     int ltype, enum tdb_lock_flags flags)
{
	return tdb_nest_lock(tdb, TDB_OPEN_LOCK, ltype, flags);
}

void tdb_unlock_open(struct tdb_context *tdb, int ltype)
{
	tdb_nest_unlock(tdb, TDB_OPEN_LOCK, ltype);
}

bool tdb_has_open_lock(struct tdb_context *tdb)
{
	return !(tdb->flags & TDB_NOLOCK)
		&& find_nestlock(tdb, TDB_OPEN_LOCK, tdb) != NULL;
}

enum TDB_ERROR tdb_lock_expand(struct tdb_context *tdb, int ltype)
{
	/* Lock doesn't protect data, so don't check (we recurse if we do!) */
	return tdb_nest_lock(tdb, TDB_EXPANSION_LOCK, ltype,
			     TDB_LOCK_WAIT | TDB_LOCK_NOCHECK);
}

void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
{
	tdb_nest_unlock(tdb, TDB_EXPANSION_LOCK, ltype);
}
/* unlock entire db */
void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
	if (tdb->flags & TDB_NOLOCK)
		return;

	if (tdb->file->allrecord_lock.count == 0) {
		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
			   "tdb_allrecord_unlock: not locked!");
		return;
	}

	if (tdb->file->allrecord_lock.owner != tdb) {
		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
			   "tdb_allrecord_unlock: not locked by us!");
		return;
	}

	/* Upgradable locks are marked as write locks. */
	if (tdb->file->allrecord_lock.ltype != ltype
	    && (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
			   "tdb_allrecord_unlock: have %s lock",
			   tdb->file->allrecord_lock.ltype == F_RDLCK
			   ? "read" : "write");
		return;
	}

	if (tdb->file->allrecord_lock.count > 1) {
		tdb->file->allrecord_lock.count--;
		return;
	}

	tdb->file->allrecord_lock.count = 0;
	tdb->file->allrecord_lock.ltype = 0;

	tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
}
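/* Lock-ordering queries: do we already hold the expansion lock, or any
 * per-chain hash locks? */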
bool tdb_has_expansion_lock(struct tdb_context *tdb)
{
	return find_nestlock(tdb, TDB_EXPANSION_LOCK, tdb) != NULL;
}

bool tdb_has_hash_locks(struct tdb_context *tdb)
{
	unsigned int i;

	for (i=0; i<tdb->file->num_lockrecs; i++) {
		if (tdb->file->lockrecs[i].off >= TDB_HASH_LOCK_START
		    && tdb->file->lockrecs[i].off < (TDB_HASH_LOCK_START
						     + TDB_HASH_LOCK_RANGE))
			return true;
	}
	return false;
}
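/* Do we hold any free-list bucket locks (offsets beyond the hash-lock
 * range)? */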
static bool tdb_has_free_lock(struct tdb_context *tdb)
{
	unsigned int i;

	if (tdb->flags & TDB_NOLOCK)
		return false;

	for (i=0; i<tdb->file->num_lockrecs; i++) {
		if (tdb->file->lockrecs[i].off
		    > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE)
			return true;
	}
	return false;
}
enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
			       tdb_off_t hash_lock,
			       tdb_len_t hash_range,
			       int ltype, enum tdb_lock_flags waitflag)
{
	/* FIXME: Do this properly, using hlock_range */
	unsigned l = TDB_HASH_LOCK_START
		+ (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

	/* an allrecord lock allows us to avoid per chain locks */
	if (tdb->file->allrecord_lock.count) {
		if (!check_lock_pid(tdb, "tdb_lock_hashes", true))
			return TDB_ERR_LOCK;

		if (tdb->file->allrecord_lock.owner != tdb)
			return owner_conflict(tdb, "tdb_lock_hashes");
		if (ltype == tdb->file->allrecord_lock.ltype
		    || ltype == F_RDLCK) {
			return TDB_SUCCESS;
		}

		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
				  "tdb_lock_hashes:"
				  " already have %s allrecordlock",
				  tdb->file->allrecord_lock.ltype == F_RDLCK
				  ? "read" : "write");
	}

	if (tdb_has_free_lock(tdb)) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_lock_hashes: already have free lock");
	}

	if (tdb_has_expansion_lock(tdb)) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_lock_hashes:"
				  " already have expansion lock");
	}

	return tdb_nest_lock(tdb, l, ltype, waitflag);
}
enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
				 tdb_off_t hash_lock,
				 tdb_len_t hash_range, int ltype)
{
	unsigned l = TDB_HASH_LOCK_START
		+ (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

	if (tdb->flags & TDB_NOLOCK)
		return TDB_SUCCESS;

	/* an allrecord lock allows us to avoid per chain locks */
	if (tdb->file->allrecord_lock.count) {
		if (tdb->file->allrecord_lock.ltype == F_RDLCK
		    && ltype == F_WRLCK) {
			return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
					  "tdb_unlock_hashes RO allrecord!");
		}
		if (tdb->file->allrecord_lock.owner != tdb) {
			return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
					  "tdb_unlock_hashes:"
					  " not locked by us!");
		}
		return TDB_SUCCESS;
	}

	return tdb_nest_unlock(tdb, l, ltype);
}
/* Hash locks use TDB_HASH_LOCK_START + the next 30 bits.
 * Then we begin; bucket offsets are sizeof(tdb_len_t) apart, so we divide.
 * The result is that on 32 bit systems we don't use lock values > 2^31 on
 * files that are less than 4GB.
 */
static tdb_off_t free_lock_off(tdb_off_t b_off)
{
	return TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
		+ b_off / sizeof(tdb_off_t);
}
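/* Take the write lock covering a single free-list bucket, unless an
 * exclusive allrecord lock already covers it. */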
enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
				    enum tdb_lock_flags waitflag)
{
	assert(b_off >= sizeof(struct tdb_header));

	if (tdb->flags & TDB_NOLOCK)
		return TDB_SUCCESS;

	/* an allrecord lock allows us to avoid per chain locks */
	if (tdb->file->allrecord_lock.count) {
		if (!check_lock_pid(tdb, "tdb_lock_free_bucket", true))
			return TDB_ERR_LOCK;

		if (tdb->file->allrecord_lock.owner != tdb) {
			return owner_conflict(tdb, "tdb_lock_free_bucket");
		}

		if (tdb->file->allrecord_lock.ltype == F_WRLCK)
			return TDB_SUCCESS;
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_lock_free_bucket with"
				  " read-only allrecordlock!");
	}

	if (tdb_has_expansion_lock(tdb)) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_lock_free_bucket:"
				  " already have expansion lock");
	}

	return tdb_nest_lock(tdb, free_lock_off(b_off), F_WRLCK, waitflag);
}
void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
{
	if (tdb->file->allrecord_lock.count)
		return;

	tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
}
_PUBLIC_ enum TDB_ERROR tdb_lockall(struct tdb_context *tdb)
{
	return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

_PUBLIC_ void tdb_unlockall(struct tdb_context *tdb)
{
	tdb_allrecord_unlock(tdb, F_WRLCK);
}

_PUBLIC_ enum TDB_ERROR tdb_lockall_read(struct tdb_context *tdb)
{
	return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

_PUBLIC_ void tdb_unlockall_read(struct tdb_context *tdb)
{
	tdb_allrecord_unlock(tdb, F_RDLCK);
}
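/* Typical caller pattern for the public all-record lock (an illustrative
 * sketch, not code from this file; assumes an already-open tdb):
 *
 *	if (tdb_lockall(tdb) == TDB_SUCCESS) {
 *		// ... a batch of tdb_fetch()/tdb_store() calls sees a
 *		// consistent database with no other process writing ...
 *		tdb_unlockall(tdb);
 *	}
 *
 * tdb_lockall_read()/tdb_unlockall_read() are the shared (read-only)
 * variants of the same pattern. */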
void tdb_lock_cleanup(struct tdb_context *tdb)
{
	unsigned int i;

	/* We don't want to warn: they're allowed to close tdb after fork. */
	if (!check_lock_pid(tdb, "tdb_close", false))
		return;

	while (tdb->file->allrecord_lock.count
	       && tdb->file->allrecord_lock.owner == tdb) {
		tdb_allrecord_unlock(tdb, tdb->file->allrecord_lock.ltype);
	}

	for (i=0; i<tdb->file->num_lockrecs; i++) {
		if (tdb->file->lockrecs[i].owner == tdb) {
			tdb_nest_unlock(tdb,
					tdb->file->lockrecs[i].off,
					tdb->file->lockrecs[i].ltype);
		}
	}
}