/*
  Unix SMB/CIFS implementation.

  trivial database library

  Copyright (C) Andrew Tridgell              1999-2005
  Copyright (C) Paul `Rusty' Russell              2000
  Copyright (C) Jeremy Allison                    2000-2003

    ** NOTE! The following LGPL license applies to the tdb
    ** library. This does NOT imply that all of Samba is released
    ** under the LGPL

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 3 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "private.h"
#include <assert.h>
#include <ccan/build_assert/build_assert.h>

/* If we were threaded, we could wait for unlock, but we're not, so fail. */
enum TDB_ERROR owner_conflict(struct tdb_context *tdb, const char *call)
{
        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                          "%s: lock owned by another tdb in this process.",
                          call);
}

/* If we fork, we no longer really own locks. */
bool check_lock_pid(struct tdb_context *tdb, const char *call, bool log)
{
        /* No locks?  No problem! */
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                return true;
        }

        /* No fork?  No problem! */
        if (tdb->file->locker == getpid()) {
                return true;
        }

        if (log) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "%s: fork() detected after lock acquisition!"
                           " (%u vs %u)", call, tdb->file->locker, getpid());
        }
        return false;
}
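
/*
 * Illustrative sketch (not part of this file) of the hazard the check above
 * guards against: fcntl() locks belong to a process, so a child created by
 * fork() inherits the tdb_context but not lock ownership.  Assumes the
 * public tdb_chainlock()/tdb_chainunlock() API.
 *
 *      tdb_chainlock(tdb, key);        // parent acquires the chain lock
 *      if (fork() == 0) {
 *              // child: tdb->file->locker still records the parent's pid,
 *              // so check_lock_pid() makes further locking calls fail with
 *              // TDB_ERR_LOCK instead of touching locks the child doesn't own.
 *      }
 *      tdb_chainunlock(tdb, key);      // parent releases it as usual
 */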

int tdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
                   void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = rw;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                if (waitflag)
                        ret = fcntl(fd, F_SETLKW, &fl);
                else
                        ret = fcntl(fd, F_SETLK, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}

int tdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = F_UNLCK;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                ret = fcntl(fd, F_SETLKW, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}
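
/*
 * Illustrative sketch (an assumption, not part of this file): callers can
 * substitute their own byte-range locking primitives.  This assumes the
 * TDB_ATTRIBUTE_FLOCK attribute from tdb2.h, whose callbacks share the
 * prototypes of tdb_fcntl_lock()/tdb_fcntl_unlock() above.
 *
 *      union tdb_attribute locking;
 *
 *      locking.flock.base.attr = TDB_ATTRIBUTE_FLOCK;
 *      locking.flock.base.next = NULL;
 *      locking.flock.lock = my_lock_fn;       // same prototype as tdb_fcntl_lock
 *      locking.flock.unlock = my_unlock_fn;   // same prototype as tdb_fcntl_unlock
 *      locking.flock.data = my_state;         // passed back as the last argument
 *
 *      tdb = tdb_open("example.tdb", TDB_DEFAULT, O_RDWR|O_CREAT, 0600,
 *                     &locking);
 *
 * my_lock_fn, my_unlock_fn and my_state are hypothetical names.
 */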

static int lock(struct tdb_context *tdb,
                      int rw, off_t off, off_t len, bool waitflag)
{
        int ret;
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                tdb->file->locker = getpid();
        }

        tdb->stats.lock_lowlevel++;
        ret = tdb->lock_fn(tdb->file->fd, rw, off, len, waitflag,
                           tdb->lock_data);
        if (!waitflag) {
                tdb->stats.lock_nonblock++;
                if (ret != 0)
                        tdb->stats.lock_nonblock_fail++;
        }
        return ret;
}

static int unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
#if 0 /* Check they matched up locks and unlocks correctly. */
        char line[80];
        FILE *locks;
        bool found = false;

        locks = fopen("/proc/locks", "r");

        while (fgets(line, 80, locks)) {
                char *p;
                int type, start, l;

                /* eg. 1: POSIX  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
                p = strchr(line, ':') + 1;
                if (strncmp(p, " POSIX  ADVISORY  ", strlen(" POSIX  ADVISORY  ")))
                        continue;
                p += strlen(" POSIX  ADVISORY  ");
                if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
                        type = F_RDLCK;
                else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
                        type = F_WRLCK;
                else
                        abort();
                p += 6;
                if (atoi(p) != getpid())
                        continue;
                p = strchr(strchr(p, ' ') + 1, ' ') + 1;
                start = atoi(p);
                p = strchr(p, ' ') + 1;
                if (strncmp(p, "EOF", 3) == 0)
                        l = 0;
                else
                        l = atoi(p) - start + 1;

                if (off == start) {
                        if (len != l) {
                                fprintf(stderr, "Len %u should be %u: %s",
                                        (int)len, l, line);
                                abort();
                        }
                        if (type != rw) {
                                fprintf(stderr, "Type %s wrong: %s",
                                        rw == F_RDLCK ? "READ" : "WRITE", line);
                                abort();
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                fprintf(stderr, "Unlock on %u@%u not found!",
                        (int)off, (int)len);
                abort();
        }

        fclose(locks);
#endif

        return tdb->unlock_fn(tdb->file->fd, rw, off, len, tdb->lock_data);
}

/* a byte range locking function - return 0 on success
   this function locks len bytes at the specified offset.

   note that a len of zero means lock to end of file
*/
static enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
                                 int rw_type, tdb_off_t offset, tdb_off_t len,
                                 enum tdb_lock_flags flags)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (rw_type == F_WRLCK && (tdb->flags & TDB_RDONLY)) {
                return tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
                                  "Write lock attempted on read-only database");
        }

        /* A 32 bit system cannot open a 64-bit file, but it could have
         * expanded since then: check here. */
        if ((size_t)(offset + len) != offset + len) {
                return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
                                  "tdb_brlock: lock on giant offset %llu",
                                  (long long)(offset + len));
        }

        ret = lock(tdb, rw_type, offset, len, flags & TDB_LOCK_WAIT);
        if (ret != 0) {
                /* Generic lock error. errno set by fcntl.
                 * EAGAIN is an expected return from non-blocking
                 * locks. */
                if (!(flags & TDB_LOCK_PROBE)
                    && (errno != EAGAIN && errno != EINTR)) {
                        tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                   "tdb_brlock failed (fd=%d) at"
                                   " offset %zu rw_type=%d flags=%d len=%zu:"
                                   " %s",
                                   tdb->file->fd, (size_t)offset, rw_type,
                                   flags, (size_t)len, strerror(errno));
                }
                return TDB_ERR_LOCK;
        }
        return TDB_SUCCESS;
}

static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
                                   int rw_type, tdb_off_t offset, size_t len)
{
        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (!check_lock_pid(tdb, "tdb_brunlock", true))
                return TDB_ERR_LOCK;

        if (unlock(tdb, rw_type, offset, len) == -1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_brunlock failed (fd=%d) at offset %zu"
                                  " rw_type=%d len=%zu: %s",
                                  tdb->file->fd, (size_t)offset, rw_type,
                                  (size_t)len, strerror(errno));
        }
        return TDB_SUCCESS;
}

/*
  upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as Solaris) have overly conservative
  deadlock detection and claim a deadlock when progress can be
  made. For those OSes we may loop for a while.
*/
enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb, off_t start)
{
        int count = 1000;

        if (!check_lock_pid(tdb, "tdb_allrecord_upgrade", true))
                return TDB_ERR_LOCK;

        if (tdb->file->allrecord_lock.count != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " count %u too high",
                                  tdb->file->allrecord_lock.count);
        }

        if (tdb->file->allrecord_lock.off != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " already upgraded?");
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                return owner_conflict(tdb, "tdb_allrecord_upgrade");
        }

        while (count--) {
                struct timeval tv;
                if (tdb_brlock(tdb, F_WRLCK, start, 0,
                               TDB_LOCK_WAIT|TDB_LOCK_PROBE) == TDB_SUCCESS) {
                        tdb->file->allrecord_lock.ltype = F_WRLCK;
                        tdb->file->allrecord_lock.off = 0;
                        return TDB_SUCCESS;
                }
                if (errno != EDEADLK) {
                        break;
                }
                /* sleep for as short a time as we can - more portable than usleep() */
                tv.tv_sec = 0;
                tv.tv_usec = 1;
                select(0, NULL, NULL, NULL, &tv);
        }

        if (errno != EAGAIN && errno != EINTR)
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_upgrade failed");
        return TDB_ERR_LOCK;
}

static struct tdb_lock *find_nestlock(struct tdb_context *tdb, tdb_off_t offset,
                                      const struct tdb_context *owner)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off == offset) {
                        if (owner && tdb->file->lockrecs[i].owner != owner)
                                return NULL;
                        return &tdb->file->lockrecs[i];
                }
        }
        return NULL;
}

enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        if (!check_lock_pid(tdb, "tdb_lock_and_recover", true))
                return TDB_ERR_LOCK;

        ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
                                   false);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        ecode = tdb_lock_open(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK);
        if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_WRLCK);
                return ecode;
        }
        ecode = tdb_transaction_recover(tdb);
        tdb_unlock_open(tdb, F_WRLCK);
        tdb_allrecord_unlock(tdb, F_WRLCK);

        return ecode;
}

/* lock an offset in the database. */
static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
                                    tdb_off_t offset, int ltype,
                                    enum tdb_lock_flags flags)
{
        struct tdb_lock *new_lck;
        enum TDB_ERROR ecode;

        if (offset > (TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                      + tdb->file->map_size / 8)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: invalid offset %zu ltype=%d",
                                  (size_t)offset, ltype);
        }

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_nest_lock", true)) {
                return TDB_ERR_LOCK;
        }

        tdb->stats.locks++;

        new_lck = find_nestlock(tdb, offset, NULL);
        if (new_lck) {
                if (new_lck->owner != tdb) {
                        return owner_conflict(tdb, "tdb_nest_lock");
                }

                if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_nest_lock:"
                                          " offset %zu has read lock",
                                          (size_t)offset);
                }
                /* Just increment the struct, posix locks don't stack. */
                new_lck->count++;
                return TDB_SUCCESS;
        }

#if 0
        if (tdb->file->num_lockrecs
            && offset >= TDB_HASH_LOCK_START
            && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: already have a hash lock?");
        }
#endif

        new_lck = (struct tdb_lock *)realloc(
                tdb->file->lockrecs,
                sizeof(*tdb->file->lockrecs) * (tdb->file->num_lockrecs+1));
        if (new_lck == NULL) {
                return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
                                  "tdb_nest_lock:"
                                  " unable to allocate %zu lock struct",
                                  tdb->file->num_lockrecs + 1);
        }
        tdb->file->lockrecs = new_lck;

        /* Since fcntl locks don't nest, we do a lock for the first one,
           and simply bump the count for future ones */
        ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* First time we grab a lock, perhaps someone died in commit? */
        if (!(flags & TDB_LOCK_NOCHECK)
            && tdb->file->num_lockrecs == 0) {
                tdb_bool_err berr = tdb_needs_recovery(tdb);
                if (berr != false) {
                        tdb_brunlock(tdb, ltype, offset, 1);

                        if (berr < 0)
                                return TDB_OFF_TO_ERR(berr);
                        ecode = tdb_lock_and_recover(tdb);
                        if (ecode == TDB_SUCCESS) {
                                ecode = tdb_brlock(tdb, ltype, offset, 1,
                                                   flags);
                        }
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }
                }
        }

        tdb->file->lockrecs[tdb->file->num_lockrecs].owner = tdb;
        tdb->file->lockrecs[tdb->file->num_lockrecs].off = offset;
        tdb->file->lockrecs[tdb->file->num_lockrecs].count = 1;
        tdb->file->lockrecs[tdb->file->num_lockrecs].ltype = ltype;
        tdb->file->num_lockrecs++;

        return TDB_SUCCESS;
}

static enum TDB_ERROR tdb_nest_unlock(struct tdb_context *tdb,
                                      tdb_off_t off, int ltype)
{
        struct tdb_lock *lck;
        enum TDB_ERROR ecode;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        lck = find_nestlock(tdb, off, tdb);
        if ((lck == NULL) || (lck->count == 0)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_unlock: no lock for %zu",
                                  (size_t)off);
        }

        if (lck->count > 1) {
                lck->count--;
                return TDB_SUCCESS;
        }

        /*
         * This lock has count==1 left, so we need to unlock it in the
         * kernel. We don't bother with decrementing the in-memory array
         * element, we're about to overwrite it with the last array element
         * anyway.
         */
        ecode = tdb_brunlock(tdb, ltype, off, 1);

        /*
         * Shrink the array by overwriting the element just unlocked with the
         * last array element.
         */
        *lck = tdb->file->lockrecs[--tdb->file->num_lockrecs];

        return ecode;
}

/*
  get the transaction lock
 */
enum TDB_ERROR tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
        return tdb_nest_lock(tdb, TDB_TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/*
  release the transaction lock
 */
void tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
}

/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static enum TDB_ERROR tdb_lock_gradual(struct tdb_context *tdb,
                                       int ltype, enum tdb_lock_flags flags,
                                       tdb_off_t off, tdb_off_t len)
{
        enum TDB_ERROR ecode;
        enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

        if (len <= 1) {
                /* 0 would mean to end-of-file... */
                assert(len != 0);
                /* Single hash.  Just do blocking lock. */
                return tdb_brlock(tdb, ltype, off, len, flags);
        }

        /* First we try non-blocking. */
        ecode = tdb_brlock(tdb, ltype, off, len, nb_flags);
        if (ecode != TDB_ERR_LOCK) {
                return ecode;
        }

        /* Try locking first half, then second. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, off, len / 2);
        if (ecode != TDB_SUCCESS)
                return ecode;

        ecode = tdb_lock_gradual(tdb, ltype, flags,
                                 off + len / 2, len - len / 2);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, off, len / 2);
        }
        return ecode;
}
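
/*
 * Worked example of the recursion above (illustrative only): for len == 4
 * we first try a non-blocking lock of the whole range [off, off+4).  If
 * that conflicts, we recurse on each half, [off, off+2) and then
 * [off+2, off+4); each half again tries non-blocking before splitting,
 * bottoming out in blocking single-byte locks.  The effect is to grab
 * whatever is free immediately and only wait on the bytes that are
 * actually contended.
 */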

/* lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (ie. transaction write lock). */
enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
                                  enum tdb_lock_flags flags, bool upgradable)
{
        enum TDB_ERROR ecode;
        tdb_bool_err berr;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_allrecord_lock", true)) {
                return TDB_ERR_LOCK;
        }

        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.owner != tdb) {
                        return owner_conflict(tdb, "tdb_allrecord_lock");
                }

                if (ltype == F_RDLCK
                    || tdb->file->allrecord_lock.ltype == F_WRLCK) {
                        tdb->file->allrecord_lock.count++;
                        return TDB_SUCCESS;
                }

                /* a global lock of a different type exists */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock: already have %s lock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_hash_locks(tdb)) {
                /* can't combine global and chain locks */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock:"
                                  " already have chain lock");
        }

        if (upgradable && ltype != F_RDLCK) {
                /* tdb error: you can't upgrade a write lock! */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_lock:"
                                  " can't upgrade a write lock");
        }

        tdb->stats.locks++;
again:
        /* Lock hashes, gradually. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
                                 TDB_HASH_LOCK_RANGE);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* Lock free tables: there to end of file. */
        ecode = tdb_brlock(tdb, ltype,
                           TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
                           0, flags);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
                             TDB_HASH_LOCK_RANGE);
                return ecode;
        }

        tdb->file->allrecord_lock.owner = tdb;
        tdb->file->allrecord_lock.count = 1;
        /* If it's upgradable, it's actually exclusive so we can treat
         * it as a write lock. */
        tdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
        tdb->file->allrecord_lock.off = upgradable;

        /* Now check for needing recovery. */
        if (flags & TDB_LOCK_NOCHECK)
                return TDB_SUCCESS;

        berr = tdb_needs_recovery(tdb);
        if (likely(berr == false))
                return TDB_SUCCESS;

        tdb_allrecord_unlock(tdb, ltype);
        if (berr < 0)
                return TDB_OFF_TO_ERR(berr);
        ecode = tdb_lock_and_recover(tdb);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }
        goto again;
}

enum TDB_ERROR tdb_lock_open(struct tdb_context *tdb,
                             int ltype, enum tdb_lock_flags flags)
{
        return tdb_nest_lock(tdb, TDB_OPEN_LOCK, ltype, flags);
}

void tdb_unlock_open(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_OPEN_LOCK, ltype);
}

bool tdb_has_open_lock(struct tdb_context *tdb)
{
        return !(tdb->flags & TDB_NOLOCK)
                && find_nestlock(tdb, TDB_OPEN_LOCK, tdb) != NULL;
}

enum TDB_ERROR tdb_lock_expand(struct tdb_context *tdb, int ltype)
{
        /* Lock doesn't protect data, so don't check (we recurse if we do!) */
        return tdb_nest_lock(tdb, TDB_EXPANSION_LOCK, ltype,
                             TDB_LOCK_WAIT | TDB_LOCK_NOCHECK);
}

void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_EXPANSION_LOCK, ltype);
}

/* unlock entire db */
void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
        if (tdb->flags & TDB_NOLOCK)
                return;

        if (tdb->file->allrecord_lock.count == 0) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked!");
                return;
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked by us!");
                return;
        }

        /* Upgradable locks are marked as write locks. */
        if (tdb->file->allrecord_lock.ltype != ltype
            && (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_unlock: have %s lock",
                           tdb->file->allrecord_lock.ltype == F_RDLCK
                           ? "read" : "write");
                return;
        }

        if (tdb->file->allrecord_lock.count > 1) {
                tdb->file->allrecord_lock.count--;
                return;
        }

        tdb->file->allrecord_lock.count = 0;
        tdb->file->allrecord_lock.ltype = 0;

        tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
}

bool tdb_has_expansion_lock(struct tdb_context *tdb)
{
        return find_nestlock(tdb, TDB_EXPANSION_LOCK, tdb) != NULL;
}

bool tdb_has_hash_locks(struct tdb_context *tdb)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off >= TDB_HASH_LOCK_START
                    && tdb->file->lockrecs[i].off < (TDB_HASH_LOCK_START
                                                     + TDB_HASH_LOCK_RANGE))
                        return true;
        }
        return false;
}

static bool tdb_has_free_lock(struct tdb_context *tdb)
{
        unsigned int i;

        if (tdb->flags & TDB_NOLOCK)
                return false;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off
                    > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE)
                        return true;
        }
        return false;
}

enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
                               tdb_off_t hash_lock,
                               tdb_len_t hash_range,
                               int ltype, enum tdb_lock_flags waitflag)
{
        /* FIXME: Do this properly, using hlock_range */
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        /* An allrecord lock allows us to avoid per-chain locks */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_hashes", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.owner != tdb)
                        return owner_conflict(tdb, "tdb_lock_hashes");
                if (ltype == tdb->file->allrecord_lock.ltype
                    || ltype == F_RDLCK) {
                        return TDB_SUCCESS;
                }

                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have %s allrecordlock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_free_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes: already have free lock");
        }

        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have expansion lock");
        }

        return tdb_nest_lock(tdb, l, ltype, waitflag);
}

enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
                                 tdb_off_t hash_lock,
                                 tdb_len_t hash_range, int ltype)
{
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* An allrecord lock allows us to avoid per-chain locks */
        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.ltype == F_RDLCK
                    && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_unlock_hashes RO allrecord!");
                }
                if (tdb->file->allrecord_lock.owner != tdb) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                          "tdb_unlock_hashes:"
                                          " not locked by us!");
                }
                return TDB_SUCCESS;
        }

        return tdb_nest_unlock(tdb, l, ltype);
}

/* Hash locks use TDB_HASH_LOCK_START + the next 30 bits.
 * Then we begin; bucket offsets are sizeof(tdb_off_t) apart, so we divide.
 * The result is that on 32 bit systems we don't use lock values > 2^31 on
 * files that are less than 4GB.
 */
static tdb_off_t free_lock_off(tdb_off_t b_off)
{
        return TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                + b_off / sizeof(tdb_off_t);
}
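
/*
 * Worked example (illustrative, assuming an 8-byte tdb_off_t): a free
 * bucket stored at file offset b_off == 64 maps to lock offset
 * TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + 8, so adjacent buckets get
 * adjacent lock bytes and never collide with the hash-lock range below.
 */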

enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
                                    enum tdb_lock_flags waitflag)
{
        assert(b_off >= sizeof(struct tdb_header));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* An allrecord lock allows us to avoid per-chain locks */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_free_bucket", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.owner != tdb) {
                        return owner_conflict(tdb, "tdb_lock_free_bucket");
                }

                if (tdb->file->allrecord_lock.ltype == F_WRLCK)
                        return TDB_SUCCESS;
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket with"
                                  " read-only allrecordlock!");
        }

#if 0 /* FIXME */
        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket:"
                                  " already have expansion lock");
        }
#endif

        return tdb_nest_lock(tdb, free_lock_off(b_off), F_WRLCK, waitflag);
}

void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
{
        if (tdb->file->allrecord_lock.count)
                return;

        tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
}

_PUBLIC_ enum TDB_ERROR tdb_lockall(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

_PUBLIC_ void tdb_unlockall(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_WRLCK);
}

_PUBLIC_ enum TDB_ERROR tdb_lockall_read(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

_PUBLIC_ void tdb_unlockall_read(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_RDLCK);
}
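
/*
 * Illustrative usage sketch (not part of this file; error handling and the
 * surrounding open/close trimmed): the read allrecord lock brackets a bulk
 * read so the traversal sees a consistent snapshot.  Assumes the public
 * tdb_traverse() API; count_cb and count are hypothetical.
 *
 *      if (tdb_lockall_read(tdb) == TDB_SUCCESS) {
 *              tdb_traverse(tdb, count_cb, &count);
 *              tdb_unlockall_read(tdb);
 *      }
 */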

void tdb_lock_cleanup(struct tdb_context *tdb)
{
        unsigned int i;

        /* We don't want to warn: they're allowed to close tdb after fork. */
        if (!check_lock_pid(tdb, "tdb_close", false))
                return;

        while (tdb->file->allrecord_lock.count
               && tdb->file->allrecord_lock.owner == tdb) {
                tdb_allrecord_unlock(tdb, tdb->file->allrecord_lock.ltype);
        }

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].owner == tdb) {
                        tdb_nest_unlock(tdb,
                                        tdb->file->lockrecs[i].off,
                                        tdb->file->lockrecs[i].ltype);
                        i--;
                }
        }
}