/* 
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#define ZERO_ZERO 0

/* This contains elements that differentiate locks. The smbpid is a
   client supplied pid, and is essentially the locking context for
   this client */

struct lock_context {
        uint16 smbpid;
        uint16 tid;
        pid_t pid;
};

/* The data in brlock records is an unsorted linear array of these
   records.  It is unnecessary to store the count as tdb provides the
   size of the record */

struct lock_struct {
        struct lock_context context;
        br_off start;
        br_off size;
        int fnum;
        enum brl_type lock_type;
};

/* The key used in the brlock database. */

struct lock_key {
        SMB_DEV_T device;
        SMB_INO_T inode;
};

/* The open brlock.tdb database. */

static TDB_CONTEXT *tdb;

/****************************************************************************
 Create a locking key - ensuring zero filled for pad purposes.
****************************************************************************/

static TDB_DATA locking_key(SMB_DEV_T dev, SMB_INO_T inode)
{
        static struct lock_key key;
        TDB_DATA kbuf;

        memset(&key, '\0', sizeof(key));
        key.device = dev;
        key.inode = inode;
        kbuf.dptr = (char *)&key;
        kbuf.dsize = sizeof(key);
        return kbuf;
}
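
/* Illustrative sketch (not built): how one brlock.tdb record is fetched with
   the key above and interpreted as the unsorted array of lock_structs
   described earlier. The helper name is hypothetical; the pattern matches
   what the functions below do. */
#if 0
static void example_dump_locks(SMB_DEV_T dev, SMB_INO_T ino)
{
        TDB_DATA kbuf, dbuf;
        struct lock_struct *locks;
        int count, i;

        kbuf = locking_key(dev, ino);             /* zero-filled (device,inode) key */
        dbuf = tdb_fetch(tdb, kbuf);              /* whole record, dptr == NULL if none */
        if (!dbuf.dptr)
                return;

        locks = (struct lock_struct *)dbuf.dptr;
        count = dbuf.dsize / sizeof(*locks);      /* count is implied by the record size */

        for (i=0; i<count; i++) {
                DEBUG(10,("lock %d: fnum=%d type=%d\n",
                          i, locks[i].fnum, (int)locks[i].lock_type));
        }
        SAFE_FREE(dbuf.dptr);
}
#endif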

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static BOOL brl_same_context(struct lock_context *ctx1, 
                             struct lock_context *ctx2)
{
        return (ctx1->pid == ctx2->pid) &&
                (ctx1->smbpid == ctx2->smbpid) &&
                (ctx1->tid == ctx2->tid);
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(struct lock_struct *lck1,
                        struct lock_struct *lck2)
{
        /* this extra check is not redundant - it copes with locks
           that go beyond the end of 64 bit file space */
        if (lck1->size != 0 &&
            lck1->start == lck2->start &&
            lck1->size == lck2->size) {
                return True;
        }

        if (lck1->start >= (lck2->start+lck2->size) ||
            lck2->start >= (lck1->start+lck1->size)) {
                return False;
        }
        return True;
}
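
/* Worked example of why the equality check above is not redundant
   (illustrative, assuming br_off is an unsigned 64 bit type): for two
   identical locks with start = 0xFFFFFFFFFFFFFFF0 and size = 0x20, start +
   size wraps around the top of 64 bit file space, so the range comparison
   below would wrongly report no overlap; the explicit start/size equality
   test catches that case first. */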

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(struct lock_struct *lck1, 
                         struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static BOOL brl_conflict1(struct lock_struct *lck1, 
                         struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
                return True;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only.
****************************************************************************/

static BOOL brl_conflict_other(struct lock_struct *lck1, struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) 
                return False;

        /*
         * Incoming WRITE locks conflict with existing READ locks even
         * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
         */

        if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
                if (brl_same_context(&lck1->context, &lck2->context) &&
                                        lck1->fnum == lck2->fnum)
                        return False;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size))
                return False;

        return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(struct lock_struct *lock)
{
        static struct lock_struct last_lock_failure;

        if (brl_same_context(&lock->context, &last_lock_failure.context) &&
                        lock->fnum == last_lock_failure.fnum &&
                        lock->start == last_lock_failure.start &&
                        lock->size == last_lock_failure.size) {
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }
        last_lock_failure = *lock;
        if (lock->start >= 0xEF000000 &&
                        (lock->start >> 63) == 0) {
                /* amazing the little things you learn with a test
                   suite. Locks beyond this offset (as a 64 bit
                   number!) always generate the conflict error code,
                   unless the top bit is set */
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }
        return NT_STATUS_LOCK_NOT_GRANTED;
}

#if DONT_DO_THIS
        /* doing this traversal could kill solaris machines under high load (tridge) */
        /* delete any dead locks */

/****************************************************************************
 Delete a record if it is for a dead process, if check_self is true, then
 delete any records belonging to this pid also (there shouldn't be any).
****************************************************************************/

static int delete_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
        struct lock_struct *locks;
        int count, i;
        BOOL check_self = *(BOOL *)state;
        pid_t mypid = sys_getpid();

        tdb_chainlock(tdb, kbuf);

        locks = (struct lock_struct *)dbuf.dptr;

        count = dbuf.dsize / sizeof(*locks);
        for (i=0; i<count; i++) {
                struct lock_struct *lock = &locks[i];

                /* If check_self is true we want to remove our own records. */
                if (check_self && (mypid == lock->context.pid)) {

                        DEBUG(0,("brlock : delete_fn. LOGIC ERROR ! Shutting down and a record for my pid (%u) exists !\n",
                                        (unsigned int)lock->context.pid ));

                } else if (process_exists(lock->context.pid)) {

                        DEBUG(10,("brlock : delete_fn. pid %u exists.\n", (unsigned int)lock->context.pid ));
                        continue;
                }

                DEBUG(10,("brlock : delete_fn. Deleting record for process %u\n",
                                (unsigned int)lock->context.pid ));

                if (count > 1 && i < count-1) {
                        memmove(&locks[i], &locks[i+1], 
                                sizeof(*locks)*((count-1) - i));
                }
                count--;
                i--;
        }

        if (count == 0) {
                tdb_delete(tdb, kbuf);
        } else if (count < (dbuf.dsize / sizeof(*locks))) {
                dbuf.dsize = count * sizeof(*locks);
                tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
        }

        tdb_chainunlock(tdb, kbuf);
        return 0;
}
#endif

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
        if (tdb)
                return;
        tdb = tdb_open_log(lock_path("brlock.tdb"), 0,  TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
                       read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
        if (!tdb) {
                DEBUG(0,("Failed to open byte range locking database\n"));
                return;
        }

#if DONT_DO_THIS
        /* doing this traversal could kill solaris machines under high load (tridge) */
        /* delete any dead locks */
        if (!read_only) {
                BOOL check_self = False;
                tdb_traverse(tdb, delete_fn, &check_self);
        }
#endif
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
        if (!tdb)
                return;

#if DONT_DO_THIS
        /* doing this traversal could kill solaris machines under high load (tridge) */
        /* delete any dead locks */
        if (!read_only) {
                BOOL check_self = True;
                tdb_traverse(tdb, delete_fn, &check_self);
        }
#endif

        tdb_close(tdb);
}
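
/* Minimal usage sketch (illustrative, not built): a server process opens the
   database once at startup and closes it on shutdown. Passing read_only as
   False here is an assumption made for the example. */
#if 0
static void example_brl_lifecycle(void)
{
        brl_init(False);        /* opens brlock.tdb read/write, clearing it if first */
        /* ... byte range locking calls are made here ... */
        brl_shutdown(False);    /* closes the database */
}
#endif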

#if ZERO_ZERO
/****************************************************************************
compare two locks for sorting
****************************************************************************/
static int lock_compare(struct lock_struct *lck1, 
                         struct lock_struct *lck2)
{
        if (lck1->start != lck2->start) return (lck1->start - lck2->start);
        if (lck2->size != lck1->size) {
                return ((int)lck1->size - (int)lck2->size);
        }
        return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
                  uint16 smbpid, pid_t pid, uint16 tid,
                  br_off start, br_off size, 
                  enum brl_type lock_type, BOOL *my_lock_ctx)
{
        TDB_DATA kbuf, dbuf;
        int count, i;
        struct lock_struct lock, *locks;
        char *tp;
        NTSTATUS status = NT_STATUS_OK;

        *my_lock_ctx = False;
        kbuf = locking_key(dev,ino);

        dbuf.dptr = NULL;

#if !ZERO_ZERO
        if (start == 0 && size == 0) {
                DEBUG(0,("client sent 0/0 lock - please report this\n"));
        }
#endif

        tdb_chainlock(tdb, kbuf);
        dbuf = tdb_fetch(tdb, kbuf);

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = tid;
        lock.start = start;
        lock.size = size;
        lock.fnum = fnum;
        lock.lock_type = lock_type;

        if (dbuf.dptr) {
                /* there are existing locks - make sure they don't conflict */
                locks = (struct lock_struct *)dbuf.dptr;
                count = dbuf.dsize / sizeof(*locks);
                for (i=0; i<count; i++) {
                        if (brl_conflict(&locks[i], &lock)) {
                                status = brl_lock_failed(&lock);
                                /* Did we block ourselves ? */
                                if (brl_same_context(&locks[i].context, &lock.context))
                                        *my_lock_ctx = True;
                                goto fail;
                        }
#if ZERO_ZERO
                        if (lock.start == 0 && lock.size == 0 && 
                            locks[i].size == 0) {
                                break;
                        }
#endif
                }
        }

        /* no conflicts - add it to the list of locks */
        tp = Realloc(dbuf.dptr, dbuf.dsize + sizeof(*locks));
        if (!tp) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        } else {
                dbuf.dptr = tp;
        }
        memcpy(dbuf.dptr + dbuf.dsize, &lock, sizeof(lock));
        dbuf.dsize += sizeof(lock);

#if ZERO_ZERO
        /* sort the lock list */
        qsort(dbuf.dptr, dbuf.dsize/sizeof(lock), sizeof(lock), lock_compare);
#endif

        if (tdb_store(tdb, kbuf, dbuf, TDB_REPLACE) != 0) {
                status = NT_STATUS_INTERNAL_DB_CORRUPTION;
                goto fail;
        }

        SAFE_FREE(dbuf.dptr);
        tdb_chainunlock(tdb, kbuf);
        return NT_STATUS_OK;

 fail:

        SAFE_FREE(dbuf.dptr);
        tdb_chainunlock(tdb, kbuf);
        return status;
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(struct lock_struct *lock, struct lock_struct *pend_lock)
{
        if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
                return True;
        if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
                return True;
        return False;
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
                uint16 smbpid, pid_t pid, uint16 tid,
                br_off start, br_off size,
                BOOL remove_pending_locks_only,
                void (*pre_unlock_fn)(void *),
                void *pre_unlock_data)
{
        TDB_DATA kbuf, dbuf;
        int count, i, j;
        struct lock_struct *locks;
        struct lock_context context;

        kbuf = locking_key(dev,ino);

        dbuf.dptr = NULL;

        tdb_chainlock(tdb, kbuf);
        dbuf = tdb_fetch(tdb, kbuf);

        if (!dbuf.dptr) {
                DEBUG(10,("brl_unlock: tdb_fetch failed !\n"));
                goto fail;
        }

        context.smbpid = smbpid;
        context.pid = pid;
        context.tid = tid;

        /* there are existing locks - find a match */
        locks = (struct lock_struct *)dbuf.dptr;
        count = dbuf.dsize / sizeof(*locks);

#if ZERO_ZERO
        for (i=0; i<count; i++) {
                struct lock_struct *lock = &locks[i];

                if (lock->lock_type == WRITE_LOCK &&
                    brl_same_context(&lock->context, &context) &&
                    lock->fnum == fnum &&
                    lock->start == start &&
                    lock->size == size) {

                        if (pre_unlock_fn)
                                (*pre_unlock_fn)(pre_unlock_data);

                        /* found it - delete it */
                        if (count == 1) {
                                tdb_delete(tdb, kbuf);
                        } else {
                                if (i < count-1) {
                                        memmove(&locks[i], &locks[i+1], 
                                                sizeof(*locks)*((count-1) - i));
                                }
                                dbuf.dsize -= sizeof(*locks);
                                tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
                        }

                        SAFE_FREE(dbuf.dptr);
                        tdb_chainunlock(tdb, kbuf);
                        return True;
                }
        }
#endif

        locks = (struct lock_struct *)dbuf.dptr;
        count = dbuf.dsize / sizeof(*locks);
        for (i=0; i<count; i++) {
                struct lock_struct *lock = &locks[i];

                if (brl_same_context(&lock->context, &context) &&
                                lock->fnum == fnum &&
                                lock->start == start &&
                                lock->size == size) {

                        if (remove_pending_locks_only && lock->lock_type != PENDING_LOCK)
                                continue;

                        if (lock->lock_type != PENDING_LOCK) {

                                /* Do any POSIX unlocks needed. */
                                if (pre_unlock_fn)
                                        (*pre_unlock_fn)(pre_unlock_data);

                                /* Send unlock messages to any pending waiters that overlap. */
                                for (j=0; j<count; j++) {
                                        struct lock_struct *pend_lock = &locks[j];

                                        /* Ignore non-pending locks. */
                                        if (pend_lock->lock_type != PENDING_LOCK)
                                                continue;

                                        /* We could send specific lock info here... */
                                        if (brl_pending_overlap(lock, pend_lock)) {
                                                DEBUG(10,("brl_unlock: sending unlock message to pid %u\n",
                                                                        (unsigned int)pend_lock->context.pid ));

                                                message_send_pid(pend_lock->context.pid,
                                                                MSG_SMB_UNLOCK,
                                                                NULL, 0, True);
                                        }
                                }
                        }

                        /* found it - delete it */
                        if (count == 1) {
                                tdb_delete(tdb, kbuf);
                        } else {
                                if (i < count-1) {
                                        memmove(&locks[i], &locks[i+1], 
                                                sizeof(*locks)*((count-1) - i));
                                }
                                dbuf.dsize -= sizeof(*locks);
                                tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
                        }

                        SAFE_FREE(dbuf.dptr);
                        tdb_chainunlock(tdb, kbuf);
                        return True;
                }
        }

        /* we didn't find it */

 fail:
        SAFE_FREE(dbuf.dptr);
        tdb_chainunlock(tdb, kbuf);
        return False;
}
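
/* Usage sketch (illustrative, not built): taking and then releasing a write
   lock on one byte range. The fnum, smbpid and tid values are hypothetical;
   a real caller (the SMB locking layer) supplies them from the request. */
#if 0
static NTSTATUS example_lock_then_unlock(SMB_DEV_T dev, SMB_INO_T ino)
{
        BOOL my_lock_ctx = False;
        NTSTATUS status;

        /* Try to add a WRITE_LOCK covering offset 100, length 10. */
        status = brl_lock(dev, ino, 1 /* fnum */,
                          0x1234 /* smbpid */, sys_getpid(), 1 /* tid */,
                          100, 10, WRITE_LOCK, &my_lock_ctx);
        if (!NT_STATUS_IS_OK(status)) {
                /* my_lock_ctx tells the caller whether its own locking
                   context caused the conflict (we blocked ourselves). */
                return status;
        }

        /* Release it again. Pending waiters overlapping this range are sent
           MSG_SMB_UNLOCK; no POSIX pre-unlock hook is used in this sketch. */
        brl_unlock(dev, ino, 1, 0x1234, sys_getpid(), 1,
                   100, 10, False, NULL, NULL);
        return NT_STATUS_OK;
}
#endif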


/****************************************************************************
 Test if we could add a lock if we wanted to.
****************************************************************************/

BOOL brl_locktest(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
                  uint16 smbpid, pid_t pid, uint16 tid,
                  br_off start, br_off size, 
                  enum brl_type lock_type, int check_self)
{
        TDB_DATA kbuf, dbuf;
        int count, i;
        struct lock_struct lock, *locks;

        kbuf = locking_key(dev,ino);

        dbuf.dptr = NULL;

        dbuf = tdb_fetch(tdb, kbuf);

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = tid;
        lock.start = start;
        lock.size = size;
        lock.fnum = fnum;
        lock.lock_type = lock_type;

        if (dbuf.dptr) {
                /* there are existing locks - make sure they don't conflict */
                locks = (struct lock_struct *)dbuf.dptr;
                count = dbuf.dsize / sizeof(*locks);
                for (i=0; i<count; i++) {
                        if (check_self) {
                                if (brl_conflict(&locks[i], &lock))
                                        goto fail;
                        } else {
                                /*
                                 * Our own locks don't conflict.
                                 */
                                if (brl_conflict_other(&locks[i], &lock))
                                        goto fail;
                        }
                }
        }

        /* no conflicts - we could have added it */
        SAFE_FREE(dbuf.dptr);
        return True;

 fail:
        SAFE_FREE(dbuf.dptr);
        return False;
}
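
/* Sketch (illustrative, not built): probing whether a lock could be granted
   without actually taking it. With check_self False the test uses
   brl_conflict_other above, so locks held by our own context on the same
   fnum are generally not counted as conflicts. */
#if 0
static BOOL example_could_read(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
                               uint16 smbpid, uint16 tid,
                               br_off start, br_off size)
{
        return brl_locktest(dev, ino, fnum, smbpid, sys_getpid(), tid,
                            start, size, READ_LOCK, False);
}
#endif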

/****************************************************************************
 Remove any locks associated with an open file.
****************************************************************************/

void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
{
        TDB_DATA kbuf, dbuf;
        int count, i, j, dcount=0;
        struct lock_struct *locks;

        kbuf = locking_key(dev,ino);

        dbuf.dptr = NULL;

        tdb_chainlock(tdb, kbuf);
        dbuf = tdb_fetch(tdb, kbuf);

        if (!dbuf.dptr) goto fail;

        /* there are existing locks - remove any for this fnum */
        locks = (struct lock_struct *)dbuf.dptr;
        count = dbuf.dsize / sizeof(*locks);

        for (i=0; i<count; i++) {
                struct lock_struct *lock = &locks[i];

                if (lock->context.tid == tid &&
                    lock->context.pid == pid &&
                    lock->fnum == fnum) {

                        /* Send unlock messages to any pending waiters that overlap. */
                        for (j=0; j<count; j++) {
                                struct lock_struct *pend_lock = &locks[j];

                                /* Ignore our own or non-pending locks. */
                                if (pend_lock->lock_type != PENDING_LOCK)
                                        continue;

                                if (pend_lock->context.tid == tid &&
                                    pend_lock->context.pid == pid &&
                                    pend_lock->fnum == fnum)
                                        continue;

                                /* We could send specific lock info here... */
                                if (brl_pending_overlap(lock, pend_lock))
                                        message_send_pid(pend_lock->context.pid,
                                                        MSG_SMB_UNLOCK,
                                                        NULL, 0, True);
                        }

                        /* found it - delete it */
                        if (count > 1 && i < count-1) {
                                memmove(&locks[i], &locks[i+1], 
                                        sizeof(*locks)*((count-1) - i));
                        }
                        count--;
                        i--;
                        dcount++;
                }
        }

        if (count == 0) {
                tdb_delete(tdb, kbuf);
        } else if (count < (dbuf.dsize / sizeof(*locks))) {
                dbuf.dsize -= dcount * sizeof(*locks);
                tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
        }

 fail:
        SAFE_FREE(dbuf.dptr);
        tdb_chainunlock(tdb, kbuf);
}

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
        struct lock_struct *locks;
        struct lock_key *key;
        int i;

        BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;

        locks = (struct lock_struct *)dbuf.dptr;
        key = (struct lock_key *)kbuf.dptr;

        for (i=0;i<dbuf.dsize/sizeof(*locks);i++) {
                traverse_callback(key->device, key->inode,
                                  locks[i].context.pid,
                                  locks[i].lock_type,
                                  locks[i].start,
                                  locks[i].size);
        }
        return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(BRLOCK_FN(fn))
{
        if (!tdb) return 0;
        return tdb_traverse(tdb, traverse_fn, (void *)fn);
}
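
/* Sketch (illustrative, not built): a callback for brl_forall. The parameter
   list is assumed to match what traverse_fn passes above (device, inode,
   owning pid, lock type, start, size); the authoritative prototype comes
   from the BRLOCK_FN macro defined elsewhere in the tree. */
#if 0
static void example_brl_dump(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid,
                             enum brl_type lock_type,
                             br_off start, br_off size)
{
        DEBUG(0,("brl: pid=%u type=%d start=%.0f size=%.0f\n",
                 (unsigned int)pid, (int)lock_type,
                 (double)start, (double)size));
}

/* ... brl_forall(example_brl_dump); ... */
#endif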