/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* This contains elements that differentiate locks. The smbpid is a
   client supplied pid, and is essentially the locking context for
   this client */

struct lock_context {
        uint16 smbpid;
        uint16 tid;
        struct process_id pid;
};

/* The data in brlock records is an unsorted linear array of these
   records.  It is unnecessary to store the count as tdb provides the
   size of the record */

struct lock_struct {
        struct lock_context context;
        br_off start;
        br_off size;
        int fnum;
        enum brl_type lock_type;
        enum brl_flavour lock_flav;
};

/* The key used in the brlock database. */

struct lock_key {
        SMB_DEV_T device;
        SMB_INO_T inode;
};

/* The open brlock.tdb database. */

static TDB_CONTEXT *tdb;

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
        DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
                        i,
                        (unsigned int)pls->context.smbpid,
                        (unsigned int)pls->context.tid,
                        (unsigned int)procid_to_pid(&pls->context.pid) ));

        DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
                (double)pls->start,
                (double)pls->size,
                pls->fnum,
                lock_type_name(pls->lock_type),
                lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 Create a locking key - ensuring zero filled for pad purposes.
****************************************************************************/

static TDB_DATA locking_key(SMB_DEV_T dev, SMB_INO_T inode)
{
        static struct lock_key key;
        TDB_DATA kbuf;

        memset(&key, '\0', sizeof(key));
        key.device = dev;
        key.inode = inode;
        kbuf.dptr = (char *)&key;
        kbuf.dsize = sizeof(key);
        return kbuf;
}

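/* Note (added): the memset() above matters. struct lock_key may contain
   compiler padding between or after its members, and the whole struct -
   padding included - is used byte-for-byte as the tdb key, so any
   uninitialised padding would make otherwise identical dev/inode keys
   compare unequal. */
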
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static BOOL brl_same_context(const struct lock_context *ctx1,
                             const struct lock_context *ctx2)
{
        return (procid_equal(&ctx1->pid, &ctx2->pid) &&
                (ctx1->smbpid == ctx2->smbpid) &&
                (ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        /* this extra check is not redundant - it copes with locks
           that go beyond the end of 64 bit file space */
        if (lck1->size != 0 &&
            lck1->start == lck2->start &&
            lck1->size == lck2->size) {
                return True;
        }

        if (lck1->start >= (lck2->start+lck2->size) ||
            lck2->start >= (lck1->start+lck1->size)) {
                return False;
        }
        return True;
}

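/* Worked example (added, illustrative): for lck1 = {start=100, size=50}
   (bytes 100..149) and lck2 = {start=149, size=10} (bytes 149..158),
   neither start is at or beyond the other's end (100 < 159 and 149 < 150),
   so the ranges overlap and brl_overlap() returns True. The separate
   equality check up front catches ranges whose start+size arithmetic
   wraps past the end of 64 bit file space, where the comparisons below
   it would give the wrong answer. */
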
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        /* Ignore PENDING locks. */
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        return brl_overlap(lck1, lck2);
}

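/* Example (added, illustrative): Windows locks "stack" - if the same
   context already holds a WRITE lock on bytes 0..9 of an fnum, a READ
   lock request over bytes 0..9 from that same context and fnum passes
   the third test above and is allowed, whereas any other context asking
   for the same range falls through to the overlap check and conflicts. */
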
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static BOOL brl_conflict_posix(const struct lock_struct *lck1,
                                const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
        SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
        SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

        /* Ignore PENDING locks. */
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* Locks on the same context don't conflict. Ignore fnum. */
        if (brl_same_context(&lck1->context, &lck2->context)) {
                return False;
        }

        /* One is read, the other write, or the context is different,
           do they overlap ? */
        return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static BOOL brl_conflict1(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
                return True;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
                return False;

        /* POSIX flavour locks never conflict here - this is only called
           in the read/write path. */

        if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
                return False;

        /*
         * Incoming WRITE locks conflict with existing READ locks even
         * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
         */

        if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
                if (brl_same_context(&lck1->context, &lck2->context) &&
                                        lck1->fnum == lck2->fnum)
                        return False;
        }

        return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(const struct lock_struct *lock)
{
        static struct lock_struct last_lock_failure;

        if (brl_same_context(&lock->context, &last_lock_failure.context) &&
                        lock->fnum == last_lock_failure.fnum &&
                        lock->start == last_lock_failure.start &&
                        lock->size == last_lock_failure.size) {
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }
        last_lock_failure = *lock;
        if (lock->start >= 0xEF000000 &&
                        (lock->start >> 63) == 0) {
                /* amazing the little things you learn with a test
                   suite. Locks beyond this offset (as a 64 bit
                   number!) always generate the conflict error code,
                   unless the top bit is set */
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }
        return NT_STATUS_LOCK_NOT_GRANTED;
}

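/* Example (added, illustrative): a failed lock at start=0xFFFF0000 (above
   the 0xEF000000 boundary, top bit clear) returns
   NT_STATUS_FILE_LOCK_CONFLICT on its first failure, while a failed lock
   at start=0x1000 first returns NT_STATUS_LOCK_NOT_GRANTED and only
   switches to NT_STATUS_FILE_LOCK_CONFLICT when the identical lock fails
   again - matching the observed w2k3 behaviour described above. */
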
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
        if (tdb) {
                return;
        }
        tdb = tdb_open_log(lock_path("brlock.tdb"),
                        lp_open_files_db_hash_size(),
                        TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
                        read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
        if (!tdb) {
                DEBUG(0,("Failed to open byte range locking database %s\n",
                        lock_path("brlock.tdb")));
                return;
        }
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
        if (!tdb) {
                return;
        }
        tdb_close(tdb);
}

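/* Lifecycle sketch (added; the call sites are assumptions, not taken from
   this file): a server process would call brl_init(False) once at startup
   to open the database read-write, and brl_shutdown(False) at exit.
   brl_init() is idempotent - a second call returns early because the
   static tdb context is already open. TDB_CLEAR_IF_FIRST is only requested
   in read-write mode, so the first read-write opener clears stale records. */
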
#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        if (lck1->start != lck2->start) {
                return (lck1->start - lck2->start);
        }
        if (lck2->size != lck1->size) {
                return ((int)lck1->size - (int)lck2->size);
        }
        return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
                        const struct lock_struct *plock,
                        BOOL *my_lock_ctx)
{
        unsigned int i;
        files_struct *fsp = br_lck->fsp;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

        for (i=0; i < br_lck->num_locks; i++) {
                /* Do any Windows or POSIX locks conflict ? */
                if (brl_conflict(&locks[i], plock)) {
                        NTSTATUS status = brl_lock_failed(plock);
                        /* Did we block ourselves ? */
                        if (brl_same_context(&locks[i].context, &plock->context)) {
                                *my_lock_ctx = True;
                        }
                        return status;
                }
#if ZERO_ZERO
                if (plock->start == 0 && plock->size == 0 &&
                                locks[i].size == 0) {
                        break;
                }
#endif
        }

        /* We can get the Windows lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? We tell the lower lock layer about the
           lock type so it can cope with the difference between
           Windows "stacking" locks and POSIX "flat" ones. */

        if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(fsp->conn))) {
                if (!set_posix_lock(fsp, plock->start, plock->size, plock->lock_type, WINDOWS_LOCK)) {
                        if (errno == EACCES || errno == EAGAIN) {
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        } else {
                                return map_nt_error_from_unix(errno);
                        }
                }
        }

        /* no conflicts - add it to the list of locks */
        locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
        if (!locks) {
                return NT_STATUS_NO_MEMORY;
        }

        memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
        br_lck->num_locks += 1;
        br_lck->lock_data = (void *)locks;
        br_lck->modified = True;

        return NT_STATUS_OK;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,
                                                const struct lock_struct *ex,
                                                const struct lock_struct *plock,
                                                BOOL *lock_was_added)
{
        BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

        /* We can't merge non-conflicting locks on different context - ignore fnum. */

        if (!brl_same_context(&ex->context, &plock->context)) {
                /* Just copy. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

        /* We now know we have the same context. */

        /* Did we overlap ? */

/*********************************************
                                             +---------+
                                             | ex      |
                                             +---------+
                                +-------+
                                | plock |
                                +-------+
OR....
             +---------+
             |  ex     |
             +---------+
**********************************************/

        if ( (ex->start >= (plock->start + plock->size)) ||
                        (plock->start >= (ex->start + ex->size))) {
                /* No overlap with this lock - copy existing. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

/*********************************************
                +---------+
                |  ex     |
                +---------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {
                memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                *lock_was_added = True;
                return 1;
        }

/*********************************************
                +---------------+
                |  ex           |
                +---------------+
        +---------------+
        |   plock       |
        +---------------+
BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR....
        +-----------------------+
        |   ex                  | - same lock type.
        +-----------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                                (ex->start < plock->start + plock->size) &&
                                (ex->start + ex->size > plock->start + plock->size) ) {

                *lock_was_added = True;

                /* If the lock types are the same, we merge, if different, we
                   add the new lock before the old. */

                if (lock_types_differ) {
                        /* Add new. */
                        memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
                        /* Adjust existing start and size. */
                        lck_arr[1].start = plock->start + plock->size;
                        lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 2;
                } else {
                        /* Merge. */
                        memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                        /* Set new start and size. */
                        lck_arr[0].start = plock->start;
                        lck_arr[0].size = (ex->start + ex->size) - plock->start;
                        return 1;
                }
        }

/*********************************************
   +---------------+
   |  ex           |
   +---------------+
           +---------------+
           |   plock       |
           +---------------+
BECOMES....
   +-------+---------------+
   | ex    |   plock       | - different lock types
   +-------+---------------+

OR
   +-----------------------+
   | ex                    | - same lock type.
   +-----------------------+

**********************************************/

        if ( (ex->start < plock->start) &&
                        (ex->start + ex->size > plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {

                *lock_was_added = True;

                /* If the lock types are the same, we merge, if different, we
                   add the new lock after the old. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = plock->start - ex->start;
                        return 2;
                } else {
                        /* Merge. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = (plock->start + plock->size) - ex->start;
                        return 1;
                }
        }

/*********************************************
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        ex                 | - same lock type.
        +---------------------------+
**********************************************/

        if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
                *lock_was_added = True;

                if (lock_types_differ) {

                        /* We have to split ex into two locks here. */

                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
                        memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

                        /* Adjust first existing size. */
                        lck_arr[0].size = plock->start - ex->start;

                        /* Adjust second existing start and size. */
                        lck_arr[2].start = plock->start + plock->size;
                        lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 3;
                } else {
                        /* Just eat plock. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                }
        }

        /* Never get here. */
        smb_panic("brlock_posix_split_merge\n");
        /* Notreached. */
        abort();
}

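/* Worked example (added, illustrative): suppose ex is a WRITE lock over
   bytes 100..199 ({start=100, size=100}) and plock is an UNLOCK_LOCK (or
   READ lock) over bytes 140..159 ({start=140, size=20}) in the same
   context. That is the final case above: ex straddles plock and the lock
   types differ, so the function emits three entries - lck_arr[0] =
   {start=100, size=40} and lck_arr[2] = {start=160, size=40} keeping ex's
   type, with plock itself as lck_arr[1] - and returns 3. At most one
   existing lock can straddle the new range this way, which is why
   brl_lock_posix() below only needs to allocate two extra entries. */
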
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
                        const struct lock_struct *plock,
                        BOOL *my_lock_ctx)
{
        unsigned int i, count;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        struct lock_struct *tp;
        files_struct *fsp = br_lck->fsp;
        BOOL lock_was_added = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, and add our lock,
           so we need at most 2 more entries. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }

        count = 0;
        for (i=0; i < br_lck->num_locks; i++) {
                if (locks[i].lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(&locks[i], plock)) {
                                /* Did we block ourselves ? */
                                if (brl_same_context(&locks[i].context, &plock->context)) {
                                        *my_lock_ctx = True;
                                }
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], &locks[i], sizeof(struct lock_struct));
                        count++;
                } else {
                        /* POSIX conflict semantics are different. */
                        if (brl_conflict_posix(&locks[i], plock)) {
                                /* Can't block ourselves with POSIX locks. */
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }

                        /* Work out overlaps. */
                        count += brlock_posix_split_merge(&tp[count], &locks[i], plock, &lock_was_added);
                }
        }

        /* We can get the POSIX lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? We tell the lower lock layer about the
           lock type so it can cope with the difference between
           Windows "stacking" locks and POSIX "flat" ones. */

#if 0
        /* FIXME - this call doesn't work correctly yet for POSIX locks... */

        if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(fsp->conn))) {

                if (!set_posix_lock(fsp, plock->start, plock->size, plock->lock_type, POSIX_LOCK)) {
                        if (errno == EACCES || errno == EAGAIN) {
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        } else {
                                SAFE_FREE(tp);
                                return map_nt_error_from_unix(errno);
                        }
                }
        }
#endif

        if (!lock_was_added) {
                memcpy(&tp[count], plock, sizeof(struct lock_struct));
                count++;
        }

        /* Realloc so we don't leak entries per lock call. */
        tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }
        br_lck->num_locks = count;
        br_lck->lock_data = (void *)tp;
        br_lck->modified = True;
        return NT_STATUS_OK;
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct byte_range_lock *br_lck,
                uint16 smbpid,
                struct process_id pid,
                br_off start,
                br_off size,
                enum brl_type lock_type,
                enum brl_flavour lock_flav,
                BOOL *my_lock_ctx)
{
        NTSTATUS ret;
        struct lock_struct lock;

        *my_lock_ctx = False;

#if !ZERO_ZERO
        if (start == 0 && size == 0) {
                DEBUG(0,("client sent 0/0 lock - please report this\n"));
        }
#endif

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                ret = brl_lock_windows(br_lck, &lock, my_lock_ctx);
        } else {
                ret = brl_lock_posix(br_lck, &lock, my_lock_ctx);
        }

#if ZERO_ZERO
        /* sort the lock list */
        qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

        return ret;
}

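/* Usage sketch (added; the variable names and values are illustrative
   assumptions, not taken from any caller in this file):

        BOOL my_lock_ctx = False;
        NTSTATUS status = brl_lock(br_lck, smbpid, procid_self(),
                                   1024, 512,
                                   WRITE_LOCK, WINDOWS_LOCK,
                                   &my_lock_ctx);

   On failure, my_lock_ctx tells the caller whether it was blocked by a
   lock held under its own locking context, which matters when deciding
   whether queueing a blocking lock could ever succeed. */
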
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(struct lock_struct *lock, struct lock_struct *pend_lock)
{
        if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
                return True;
        if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
                return True;
        return False;
}

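/* Example (added, illustrative): an unlock of bytes 100..149
   ({start=100, size=50}) overlaps a pending lock at {start=140, size=20}
   via the first test (100 <= 140 and 150 > 140), and a pending lock at
   {start=60, size=40} via the second (100 >= 60 and 100 <= 100). Note
   the second test uses <=, so a pending lock ending exactly at the
   unlock's start is also treated as overlapping. */
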
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

static BOOL brl_unlock_windows(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
        unsigned int i, j;
        struct lock_struct *lock = NULL;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

#if ZERO_ZERO
        for (i = 0; i < br_lck->num_locks; i++) {
                lock = &locks[i];

                if (lock->lock_type == WRITE_LOCK &&
                    brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {

                        /* found it - delete it */
                        if (i < br_lck->num_locks - 1) {
                                memmove(&locks[i], &locks[i+1],
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                        }

                        br_lck->num_locks -= 1;
                        br_lck->modified = True;
                        return True;
                }
        }
#endif

        for (i = 0; i < br_lck->num_locks; i++) {
                lock = &locks[i];

                /* Only remove our own locks that match in start, size, and flavour. */
                if (brl_same_context(&lock->context, &plock->context) &&
                                        lock->fnum == plock->fnum &&
                                        lock->lock_flav == WINDOWS_LOCK &&
                                        lock->start == plock->start &&
                                        lock->size == plock->size ) {
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* we didn't find it */
                return False;
        }

        /* Unlock any POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
                release_posix_lock(br_lck->fsp, plock->start, plock->size);
        }

        /* Send unlock messages to any pending waiters that overlap. */
        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (pend_lock->lock_type != PENDING_LOCK) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(lock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                procid_str_static(&pend_lock->context.pid )));

                        become_root();
                        message_send_pid(pend_lock->context.pid,
                                        MSG_SMB_UNLOCK,
                                        NULL, 0, True);
                        unbecome_root();
                }
        }

        /* Actually delete the lock. */
        if (i < br_lck->num_locks - 1) {
                memmove(&locks[i], &locks[i+1],
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;
        return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static BOOL brl_unlock_posix(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
        unsigned int i, j, count;
        struct lock_struct *lock = NULL;
        struct lock_struct *tp;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        BOOL overlap_found = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return False;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                DEBUG(10,("brl_unlock_posix: lock wrap\n"));
                return False;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, so we need at most
           1 more entry. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
        if (!tp) {
                DEBUG(10,("brl_unlock_posix: malloc fail\n"));
                return False;
        }

        count = 0;
        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct tmp_lock[3];
                BOOL lock_was_added = False;
                unsigned int tmp_count;

                lock = &locks[i];

                /* Only remove our own locks - ignore fnum. */
                if (lock->lock_type == PENDING_LOCK ||
                                !brl_same_context(&lock->context, &plock->context)) {
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                /* Work out overlaps. */
                tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

                if (tmp_count == 1) {
                        /* Either the locks didn't overlap, or the unlock completely
                           overlapped this lock. If it didn't overlap, then there's
                           no change in the locks. */
                        if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
                                SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                                /* No change in this lock. */
                                memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                                count++;
                        } else {
                                SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
                                overlap_found = True;
                        }
                        continue;
                } else if (tmp_count == 2) {
                        /* The unlock overlapped an existing lock. Copy the truncated
                           lock into the lock array. */
                        if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
                                SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                                SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
                                memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                        } else {
                                SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
                                SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
                                memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
                        }
                        count++;
                        overlap_found = True;
                        continue;
                } else {
                        /* tmp_count == 3 - (we split a lock range in two).
                           Both remaining pieces keep the existing lock's type. */
                        SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                        SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
                        SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

                        memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                        count++;
                        memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
                        count++;
                        overlap_found = True;
                        /* Optimisation... */
                        /* We know we're finished here as we can't overlap any
                           more POSIX locks. Copy the rest of the lock array. */
                        if (i < br_lck->num_locks - 1) {
                                memcpy(&tp[count], &locks[i+1],
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                                count += ((br_lck->num_locks-1) - i);
                        }
                        break;
                }
        }

        if (!overlap_found) {
                /* Just ignore - no change. */
                SAFE_FREE(tp);
                DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
                return True;
        }

#if 0
        /* FIXME - this call doesn't work correctly yet for POSIX locks... */

        /* Unlock any POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
                release_posix_lock(br_lck->fsp, plock->start, plock->size);
        }
#endif

        /* Realloc so we don't leak entries per unlock call. */
        if (count) {
                tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
                if (!tp) {
                        DEBUG(10,("brl_unlock_posix: realloc fail\n"));
                        return False;
                }
        } else {
                /* We deleted the last lock. */
                SAFE_FREE(tp);
                tp = NULL;
        }

        br_lck->num_locks = count;
        br_lck->lock_data = (void *)tp;
        br_lck->modified = True;

        /* Send unlock messages to any pending waiters that overlap. */
        locks = tp;

        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (pend_lock->lock_type != PENDING_LOCK) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(lock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                procid_str_static(&pend_lock->context.pid )));

                        become_root();
                        message_send_pid(pend_lock->context.pid,
                                        MSG_SMB_UNLOCK,
                                        NULL, 0, True);
                        unbecome_root();
                }
        }

        return True;
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(struct byte_range_lock *br_lck,
                uint16 smbpid,
                struct process_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        struct lock_struct lock;

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = UNLOCK_LOCK;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                return brl_unlock_windows(br_lck, &lock);
        } else {
                return brl_unlock_posix(br_lck, &lock);
        }
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

BOOL brl_locktest(struct byte_range_lock *br_lck,
                uint16 smbpid,
                struct process_id pid,
                br_off start,
                br_off size,
                enum brl_type lock_type,
                enum brl_flavour lock_flav)
{
        BOOL ret = True;
        unsigned int i;
        struct lock_struct lock;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                /*
                 * Our own locks don't conflict.
                 */
                if (brl_conflict_other(&locks[i], &lock)) {
                        return False;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         * This only conflicts with Windows locks, not POSIX locks.
         */

        if(lp_posix_locking(fsp->conn->cnum) && (lock_flav == WINDOWS_LOCK)) {
                ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

                DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
                        (double)start, (double)size, ret ? "locked" : "unlocked",
                        fsp->fnum, fsp->fsp_name ));

                /* We need to return the inverse of is_posix_locked. */
                ret = !ret;
        }

        /* no conflicts - we could have added it */
        return ret;
}

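/* Example (added, illustrative): testing a READ lock over a range we
   already hold ourselves returns True (lockable), since
   brl_conflict_other() ignores our own compatible locks on the same fnum;
   testing a WRITE lock over our own READ lock returns False, matching the
   LOCKTEST7 behaviour noted above brl_conflict_other(). The
   is_posix_locked() probe runs only for WINDOWS_LOCK flavour requests
   because, as the comment above says, an external POSIX lock only
   conflicts with Windows locks, not POSIX locks. */
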
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
                uint16 *psmbpid,
                struct process_id pid,
                br_off *pstart,
                br_off *psize,
                enum brl_type *plock_type,
                enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct lock;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smbpid = *psmbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = *pstart;
        lock.size = *psize;
        lock.fnum = fsp->fnum;
        lock.lock_type = *plock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *exlock = &locks[i];
                BOOL conflict = False;

                if (exlock->lock_flav == WINDOWS_LOCK) {
                        conflict = brl_conflict(exlock, &lock);
                } else {
                        conflict = brl_conflict_posix(exlock, &lock);
                }

                if (conflict) {
                        *psmbpid = exlock->context.smbpid;
                        *pstart = exlock->start;
                        *psize = exlock->size;
                        *plock_type = exlock->lock_type;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         */

        if(lp_posix_locking(fsp->conn->cnum)) {
                BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

                DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
                        (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
                        fsp->fnum, fsp->fsp_name ));

                if (ret) {
                        /* Hmmm. No clue what to set smbpid to - use -1. */
                        *psmbpid = 0xFFFF;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        return NT_STATUS_OK;
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

BOOL brl_remove_pending_lock(struct byte_range_lock *br_lck,
                uint16 smbpid,
                struct process_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        struct lock_context context;

        context.smbpid = smbpid;
        context.pid = pid;
        context.tid = br_lck->fsp->conn->cnum;

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                /* For pending locks we *always* care about the fnum. */
                if (brl_same_context(&lock->context, &context) &&
                                lock->fnum == br_lck->fsp->fnum &&
                                lock->lock_type == PENDING_LOCK &&
                                lock->lock_flav == lock_flav &&
                                lock->start == start &&
                                lock->size == size) {
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* Didn't find it. */
                return False;
        }

        if (i < br_lck->num_locks - 1) {
                /* Found this particular pending lock - delete it */
                memmove(&locks[i], &locks[i+1],
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;
        return True;
}

/****************************************************************************
 Remove any locks associated with an open file.
****************************************************************************/

void brl_close_fnum(struct byte_range_lock *br_lck, struct process_id pid)
{
        files_struct *fsp = br_lck->fsp;
        uint16 tid = fsp->conn->cnum;
        int fnum = fsp->fnum;
        unsigned int i, j, dcount=0;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

        /* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];
                BOOL del_this_lock = False;

                if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
                        if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
                                del_this_lock = True;
                        } else if (lock->lock_flav == POSIX_LOCK) {
                                del_this_lock = True;
                        }
                }

                if (del_this_lock) {
                        /* Send unlock messages to any pending waiters that overlap. */
                        for (j=0; j < br_lck->num_locks; j++) {
                                struct lock_struct *pend_lock = &locks[j];

                                /* Ignore our own or non-pending locks. */
                                if (pend_lock->lock_type != PENDING_LOCK) {
                                        continue;
                                }

                                /* Optimisation - don't send to this fnum as we're
                                   closing it. */
                                if (pend_lock->context.tid == tid &&
                                    procid_equal(&pend_lock->context.pid, &pid) &&
                                    pend_lock->fnum == fnum) {
                                        continue;
                                }

                                /* We could send specific lock info here... */
                                if (brl_pending_overlap(lock, pend_lock)) {
                                        become_root();
                                        message_send_pid(pend_lock->context.pid,
                                                        MSG_SMB_UNLOCK,
                                                        NULL, 0, True);
                                        unbecome_root();
                                }
                        }

                        /* found it - delete it */
                        if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
                                memmove(&locks[i], &locks[i+1],
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                        }
                        br_lck->num_locks--;
                        br_lck->modified = True;
                        i--;
                        dcount++;
                }
        }
}

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
        struct lock_struct *locks;
        struct lock_key *key;
        int i;

        BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;

        locks = (struct lock_struct *)dbuf.dptr;
        key = (struct lock_key *)kbuf.dptr;

        for (i=0;i<dbuf.dsize/sizeof(*locks);i++) {
                traverse_callback(key->device,
                                  key->inode,
                                  locks[i].context.pid,
                                  locks[i].lock_type,
                                  locks[i].lock_flav,
                                  locks[i].start,
                                  locks[i].size);
        }
        return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(BRLOCK_FN(fn))
{
        if (!tdb) {
                return 0;
        }
        return tdb_traverse(tdb, traverse_fn, (void *)fn);
}

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(void *p)
{
        struct byte_range_lock *br_lck =
                talloc_get_type_abort(p, struct byte_range_lock);
        TDB_DATA key = locking_key(br_lck->fsp->dev, br_lck->fsp->inode);

        if (!br_lck->modified) {
                goto done;
        }

        if (br_lck->num_locks == 0) {
                /* No locks - delete this entry. */
                if (tdb_delete(tdb, key) == -1) {
                        smb_panic("Could not delete byte range lock entry\n");
                }
        } else {
                TDB_DATA data;
                data.dptr = br_lck->lock_data;
                data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

                if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
                        smb_panic("Could not store byte range mode entry\n");
                }
        }

 done:

        SAFE_FREE(br_lck->lock_data);
        tdb_chainunlock(tdb, key);
        return 0;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
********************************************************************/

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
                                        files_struct *fsp)
{
        TDB_DATA key = locking_key(fsp->dev, fsp->inode);
        TDB_DATA data;
        struct byte_range_lock *br_lck;

        br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
        if (br_lck == NULL) {
                return NULL;
        }

        br_lck->fsp = fsp;
        br_lck->num_locks = 0;
        br_lck->modified = False;

        if (tdb_chainlock(tdb, key) != 0) {
                DEBUG(3, ("Could not lock byte range lock entry\n"));
                TALLOC_FREE(br_lck);
                return NULL;
        }

        talloc_set_destructor(br_lck, byte_range_lock_destructor);

        data = tdb_fetch(tdb, key);
        br_lck->lock_data = (void *)data.dptr;
        br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

        if (DEBUGLEVEL >= 10) {
                unsigned int i;
                struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
                DEBUG(10,("brl_get_locks: %u current locks on dev=%.0f, inode=%.0f\n",
                        br_lck->num_locks,
                        (double)fsp->dev, (double)fsp->inode ));
                for( i = 0; i < br_lck->num_locks; i++) {
                        print_lock_struct(i, &locks[i]);
                }
        }
        return br_lck;
}
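
/* Typical call pattern (added sketch; the surrounding variables and values
   are assumptions, not taken from any caller in this file):

        struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);
        if (br_lck != NULL) {
                BOOL my_lock_ctx = False;
                NTSTATUS status = brl_lock(br_lck, smbpid, procid_self(),
                                           0, 4096, READ_LOCK, WINDOWS_LOCK,
                                           &my_lock_ctx);
                TALLOC_FREE(br_lck);
        }

   Freeing br_lck fires byte_range_lock_destructor(), which writes the
   record back (or deletes it if no locks remain) only when
   br_lck->modified is set, and always drops the tdb chainlock taken
   here - so the record stays locked for exactly the lifetime of the
   byte_range_lock object. */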