r16987: Fix the logic errors in ref-counting Windows locks.
source3/locking/brlock.c
/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
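
/* A sketch of the record layout (an assumption from how this file
   reads and writes the data, not a formal spec): each brlock.tdb
   record is keyed on the file's dev/ino pair and holds a flat array
   of struct lock_struct entries; br_lck->lock_data below is that
   array in memory and br_lck->num_locks its length. */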

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static TDB_CONTEXT *tdb;

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
        DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
                        i,
                        (unsigned int)pls->context.smbpid,
                        (unsigned int)pls->context.tid,
                        (unsigned int)procid_to_pid(&pls->context.pid) ));

        DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
                (double)pls->start,
                (double)pls->size,
                pls->fnum,
                lock_type_name(pls->lock_type),
                lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

BOOL brl_same_context(const struct lock_context *ctx1, 
                             const struct lock_context *ctx2)
{
        return (procid_equal(&ctx1->pid, &ctx2->pid) &&
                (ctx1->smbpid == ctx2->smbpid) &&
                (ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        /* this extra check is not redundant - it copes with locks
           that extend beyond the end of 64 bit file space */
        if (lck1->size != 0 &&
            lck1->start == lck2->start &&
            lck1->size == lck2->size) {
                return True;
        }

        if (lck1->start >= (lck2->start+lck2->size) ||
            lck2->start >= (lck1->start+lck1->size)) {
                return False;
        }
        return True;
}
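
/* Worked example (illustrative only): lck1 = [start 0, size 10) and
   lck2 = [start 10, size 5) do not overlap, as 10 >= 0 + 10. Shrink
   lck2 to [start 9, size 5) and neither start reaches the other
   lock's end, so brl_overlap() returns True. */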

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(const struct lock_struct *lck1, 
                         const struct lock_struct *lck2)
{
        /* Ignore PENDING locks. */
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        return brl_overlap(lck1, lck2);
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static BOOL brl_conflict_posix(const struct lock_struct *lck1, 
                                const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
        SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
        SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

        /* Ignore PENDING locks. */
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* Locks on the same context don't conflict. Ignore fnum. */
        if (brl_same_context(&lck1->context, &lck2->context)) {
                return False;
        }

        /* One is read, the other write, or the context is different,
           do they overlap ? */
        return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static BOOL brl_conflict1(const struct lock_struct *lck1, 
                         const struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
                return True;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
        if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) 
                return False;

        /* POSIX flavour locks never conflict here - this is only called
           in the read/write path. */

        if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
                return False;

        /*
         * Incoming WRITE locks conflict with existing READ locks even
         * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
         */

        if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
                if (brl_same_context(&lck1->context, &lck2->context) &&
                                        lck1->fnum == lck2->fnum)
                        return False;
        }

        return brl_overlap(lck1, lck2);
}

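/* Illustrative consequence of the LOCKTEST7 rule above: if a context
   already holds a READ lock on [0, 4) on some fnum, an incoming WRITE
   on [0, 4) from the same context and fnum skips the same-context
   shortcut and falls through to brl_overlap(), so it conflicts. */
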
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(const struct lock_struct *lock)
{
        static struct lock_struct last_lock_failure;

        if (brl_same_context(&lock->context, &last_lock_failure.context) &&
                        lock->fnum == last_lock_failure.fnum &&
                        lock->start == last_lock_failure.start &&
                        lock->size == last_lock_failure.size) {
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }
        last_lock_failure = *lock;
        if (lock->start >= 0xEF000000 &&
                        (lock->start >> 63) == 0) {
                /* amazing the little things you learn with a test
                   suite. Locks beyond this offset (as a 64 bit
                   number!) always generate the conflict error code,
                   unless the top bit is set */
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }
        return NT_STATUS_LOCK_NOT_GRANTED;
}
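
/* Note: last_lock_failure is function-static, so this "same as the
   last failure" cache is per smbd process. Each client connection is
   served by its own smbd, so this matches the per-client behaviour
   described above (an assumption about the process model, not
   something enforced here). */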

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
        if (tdb) {
                return;
        }
        tdb = tdb_open_log(lock_path("brlock.tdb"),
                        lp_open_files_db_hash_size(),
                        TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
                        read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
        if (!tdb) {
                DEBUG(0,("Failed to open byte range locking database %s\n",
                        lock_path("brlock.tdb")));
                return;
        }
}
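
/* TDB_CLEAR_IF_FIRST wipes the database when this is the first
   process to open it, so byte range locks never survive a restart of
   all smbd processes - stale entries from a crashed run are discarded
   rather than enforced. Read-only openers skip the flag so they can
   never clear live state. */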

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
        if (!tdb) {
                return;
        }
        tdb_close(tdb);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1, 
                         const struct lock_struct *lck2)
{
        if (lck1->start != lck2->start) {
                return (lck1->start - lck2->start);
        }
        if (lck2->size != lck1->size) {
                return ((int)lck1->size - (int)lck2->size);
        }
        return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
                        const struct lock_struct *plock,
                        BOOL *my_lock_ctx)
{
        unsigned int i;
        files_struct *fsp = br_lck->fsp;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

        for (i=0; i < br_lck->num_locks; i++) {
                /* Do any Windows or POSIX locks conflict ? */
                if (brl_conflict(&locks[i], plock)) {
                        NTSTATUS status = brl_lock_failed(plock);
                        /* Did we block ourselves ? */
                        if (brl_same_context(&locks[i].context, &plock->context)) {
                                *my_lock_ctx = True;
                        }
                        return status;
                }
#if ZERO_ZERO
                if (plock->start == 0 && plock->size == 0 && 
                                locks[i].size == 0) {
                        break;
                }
#endif
        }

        /* We can get the Windows lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(fsp->conn))) {
                int errno_ret;
                if (!set_posix_lock_windows_flavour(fsp,
                                plock->start,
                                plock->size,
                                plock->lock_type,
                                &plock->context,
                                locks,
                                br_lck->num_locks,
                                &errno_ret)) {
                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        } else {
                                return map_nt_error_from_unix(errno);
                        }
                }
        }

        /* no conflicts - add it to the list of locks */
        locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
        if (!locks) {
                return NT_STATUS_NO_MEMORY;
        }

        memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
        br_lck->num_locks += 1;
        br_lck->lock_data = (void *)locks;
        br_lck->modified = True;

        return NT_STATUS_OK;
}
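
/* Note the ordering above: the in-memory lock array is only grown
   after the system-level POSIX lock (if any) has been acquired, so a
   failure in the lower layer leaves br_lck unmodified. */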

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,               /* Output array. */
                                                const struct lock_struct *ex,           /* existing lock. */
                                                const struct lock_struct *plock,        /* proposed lock. */
                                                BOOL *lock_was_added)
{
        BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

        /* We can't merge non-conflicting locks on different context - ignore fnum. */

        if (!brl_same_context(&ex->context, &plock->context)) {
                /* Just copy. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

        /* We now know we have the same context. */

        /* Did we overlap ? */

/*********************************************
                                             +---------+
                                             | ex      |
                                             +---------+
                              +-------+
                              | plock |
                              +-------+
OR....
             +---------+
             |  ex     |
             +---------+
**********************************************/

        if ( (ex->start > (plock->start + plock->size)) ||
                        (plock->start > (ex->start + ex->size))) {
                /* No overlap with this lock - copy existing. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {
                memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                *lock_was_added = True;
                return 1;
        }

/*********************************************
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR....
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   ex                  | - same lock type.
        +-----------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                                (ex->start <= plock->start + plock->size) &&
                                (ex->start + ex->size > plock->start + plock->size) ) {

                *lock_was_added = True;

                /* If the lock types are the same, we merge, if different, we
                   add the new lock before the old. */

                if (lock_types_differ) {
                        /* Add new. */
                        memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
                        /* Adjust existing start and size. */
                        lck_arr[1].start = plock->start + plock->size;
                        lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 2;
                } else {
                        /* Merge. */
                        memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                        /* Set new start and size. */
                        lck_arr[0].start = plock->start;
                        lck_arr[0].size = (ex->start + ex->size) - plock->start;
                        return 1;
                }
        }

/*********************************************
   +-----------------------+
   |  ex                   |
   +-----------------------+
           +---------------+
           |   plock       |
           +---------------+
OR....
   +-------+
   |  ex   |
   +-------+
           +---------------+
           |   plock       |
           +---------------+
BECOMES....
   +-------+---------------+
   | ex    |   plock       | - different lock types
   +-------+---------------+

OR.... (merge)
   +-----------------------+
   | ex                    | - same lock type.
   +-----------------------+

**********************************************/

        if ( (ex->start < plock->start) &&
                        (ex->start + ex->size >= plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {

                *lock_was_added = True;

                /* If the lock types are the same, we merge, if different, we
                   add the new lock after the old. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = plock->start - ex->start;
                        return 2;
                } else {
                        /* Merge. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = (plock->start + plock->size) - ex->start;
                        return 1;
                }
        }

/*********************************************
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        ex                 | - same lock type.
        +---------------------------+
**********************************************/

        if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
                *lock_was_added = True;

                if (lock_types_differ) {

                        /* We have to split ex into two locks here. */

                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
                        memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

                        /* Adjust first existing size. */
                        lck_arr[0].size = plock->start - ex->start;

                        /* Adjust second existing start and size. */
                        lck_arr[2].start = plock->start + plock->size;
                        lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 3;
                } else {
                        /* Just eat plock. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                }
        }

        /* Never get here. */
        smb_panic("brlock_posix_split_merge\n");
        /* Notreached. */
        abort();
        /* Keep some compilers happy. */
        return 0;
}
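
/* Summary of the return values above: 1 means lck_arr[0] holds an
   untouched copy of ex, a merged range, or plock replacing ex; 2 means
   plock plus a truncated ex (order depends on which end overlapped);
   3 means ex was split around plock into [ex head][plock][ex tail].
   A single call can therefore emit up to 3 entries - hence the
   tmp_lock[3] scratch array in brl_unlock_posix() and the
   num_locks + 2 sizing in brl_lock_posix() below. */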

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
                        const struct lock_struct *plock,
                        BOOL *my_lock_ctx)
{
        unsigned int i, count;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        struct lock_struct *tp;
        BOOL lock_was_added = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, and add our lock,
           so we need at most 2 more entries. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }

        count = 0;
        for (i=0; i < br_lck->num_locks; i++) {
                if (locks[i].lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(&locks[i], plock)) {
                                /* Did we block ourselves ? */
                                if (brl_same_context(&locks[i].context, &plock->context)) {
                                        *my_lock_ctx = True;
                                }
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], &locks[i], sizeof(struct lock_struct));
                        count++;
                } else {
                        /* POSIX conflict semantics are different. */
                        if (brl_conflict_posix(&locks[i], plock)) {
                                /* Can't block ourselves with POSIX locks. */
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }

                        /* Work out overlaps. */
                        count += brlock_posix_split_merge(&tp[count], &locks[i], plock, &lock_was_added);
                }
        }

        if (!lock_was_added) {
                memcpy(&tp[count], plock, sizeof(struct lock_struct));
                count++;
        }

        /* We can get the POSIX lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(br_lck->fsp->conn))) {
                int errno_ret;

                /* The lower layer just needs to attempt to
                   get the system POSIX lock. We've weeded out
                   any conflicts above. */

                if (!set_posix_lock_posix_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                plock->lock_type,
                                &errno_ret)) {
                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        } else {
                                SAFE_FREE(tp);
                                return map_nt_error_from_unix(errno);
                        }
                }
        }

        /* Realloc so we don't leak entries per lock call. */
        tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }
        br_lck->num_locks = count;
        br_lck->lock_data = (void *)tp;
        br_lck->modified = True;
        return NT_STATUS_OK;
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct byte_range_lock *br_lck,
                uint32 smbpid,
                struct process_id pid,
                br_off start,
                br_off size, 
                enum brl_type lock_type,
                enum brl_flavour lock_flav,
                BOOL *my_lock_ctx)
{
        NTSTATUS ret;
        struct lock_struct lock;

        *my_lock_ctx = False;

#if !ZERO_ZERO
        if (start == 0 && size == 0) {
                DEBUG(0,("client sent 0/0 lock - please report this\n"));
        }
#endif

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                ret = brl_lock_windows(br_lck, &lock, my_lock_ctx);
        } else {
                ret = brl_lock_posix(br_lck, &lock, my_lock_ctx);
        }

#if ZERO_ZERO
        /* sort the lock list */
        qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

        return ret;
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(struct lock_struct *lock, struct lock_struct *pend_lock)
{
        if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
                return True;
        if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
                return True;
        return False;
}
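
/* The second test above is deliberately inclusive (<=), so an unlock
   starting exactly at the end of a pending range still counts as
   overlapping. Presumably this errs on the side of a spurious wakeup:
   a woken waiter just retries and blocks again, while a missed wakeup
   would leave it stuck forever. */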

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

static BOOL brl_unlock_windows(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
        unsigned int i, j;
        struct lock_struct *lock = NULL;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

#if ZERO_ZERO
        /* Delete write locks by preference... The lock list
           is sorted in the zero zero case. */

        for (i = 0; i < br_lck->num_locks; i++) {
                lock = &locks[i];

                if (lock->lock_type == WRITE_LOCK &&
                    brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {

                        /* found it - delete it */
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i != br_lck->num_locks) {
                /* We found it - don't search again. */
                goto unlock_continue;
        }
#endif

        for (i = 0; i < br_lck->num_locks; i++) {
                lock = &locks[i];

                /* Only remove our own locks that match in start, size, and flavour. */
                if (brl_same_context(&lock->context, &plock->context) &&
                                        lock->fnum == plock->fnum &&
                                        lock->lock_flav == WINDOWS_LOCK &&
                                        lock->start == plock->start &&
                                        lock->size == plock->size ) {
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* we didn't find it */
                return False;
        }

#if ZERO_ZERO
  unlock_continue:
#endif

        /* Actually delete the lock. */
        if (i < br_lck->num_locks - 1) {
                memmove(&locks[i], &locks[i+1], 
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;

        /* Unlock the underlying POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
                release_posix_lock_windows_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                deleted_lock_type,
                                &plock->context,
                                locks,
                                br_lck->num_locks);
        }

        /* Send unlock messages to any pending waiters that overlap. */
        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (pend_lock->lock_type != PENDING_LOCK) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(lock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                procid_str_static(&pend_lock->context.pid )));

                        become_root();
                        message_send_pid(pend_lock->context.pid,
                                        MSG_SMB_UNLOCK,
                                        NULL, 0, True);
                        unbecome_root();
                }
        }

        return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static BOOL brl_unlock_posix(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
        unsigned int i, j, count;
        struct lock_struct *lock = NULL;
        struct lock_struct *tp;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        BOOL overlap_found = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return False;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                DEBUG(10,("brl_unlock_posix: lock wrap\n"));
                return False;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, so we need at most
           1 more entry. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
        if (!tp) {
                DEBUG(10,("brl_unlock_posix: malloc fail\n"));
                return False;
        }

        count = 0;
        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct tmp_lock[3];
                BOOL lock_was_added = False;
                unsigned int tmp_count;

                lock = &locks[i];

                /* Only remove our own locks - ignore fnum. */
                if (lock->lock_type == PENDING_LOCK ||
                                !brl_same_context(&lock->context, &plock->context)) {
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                /* Work out overlaps. */
                tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

                if (tmp_count == 1) {
                        /* Either the locks didn't overlap, or the unlock completely
                           overlapped this lock. If it didn't overlap, then there's
                           no change in the locks. */
                        if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
                                SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                                /* No change in this lock. */
                                memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                                count++;
                        } else {
                                SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
                                overlap_found = True;
                        }
                        continue;
                } else if (tmp_count == 2) {
                        /* The unlock overlapped an existing lock. Copy the truncated
                           lock into the lock array. */
                        if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
                                SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                                SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
                                memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                                if (tmp_lock[0].size != locks[i].size) {
                                        overlap_found = True;
                                }
                        } else {
                                SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
                                SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
                                memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
                                if (tmp_lock[1].start != locks[i].start) {
                                        overlap_found = True;
                                }
                        }
                        count++;
                        continue;
                } else {
                        /* tmp_count == 3 - (we split a lock range in two). */
                        SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                        SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
                        SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

                        memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                        count++;
                        memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
                        count++;
                        overlap_found = True;
                        /* Optimisation... */
                        /* We know we're finished here as we can't overlap any
                           more POSIX locks. Copy the rest of the lock array. */
                        if (i < br_lck->num_locks - 1) {
                                memcpy(&tp[count], &locks[i+1], 
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                                count += ((br_lck->num_locks-1) - i);
                        }
                        break;
                }
        }

        if (!overlap_found) {
                /* Just ignore - no change. */
                SAFE_FREE(tp);
                DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
                return True;
        }

        /* Unlock any POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
                release_posix_lock_posix_flavour(br_lck->fsp,
                                                plock->start,
                                                plock->size,
                                                &plock->context,
                                                tp,
                                                count);
        }

        /* Realloc so we don't leak entries per unlock call. */
        if (count) {
                tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
                if (!tp) {
                        DEBUG(10,("brl_unlock_posix: realloc fail\n"));
                        return False;
                }
        } else {
                /* We deleted the last lock. */
                SAFE_FREE(tp);
                tp = NULL;
        }

        br_lck->num_locks = count;
        br_lck->lock_data = (void *)tp;
        br_lck->modified = True;

        /* Send unlock messages to any pending waiters that overlap. */
        locks = tp;

        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (pend_lock->lock_type != PENDING_LOCK) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(lock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                procid_str_static(&pend_lock->context.pid )));

                        become_root();
                        message_send_pid(pend_lock->context.pid,
                                        MSG_SMB_UNLOCK,
                                        NULL, 0, True);
                        unbecome_root();
                }
        }

        return True;
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(struct byte_range_lock *br_lck,
                uint32 smbpid,
                struct process_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        struct lock_struct lock;

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = UNLOCK_LOCK;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                return brl_unlock_windows(br_lck, &lock);
        } else {
                return brl_unlock_posix(br_lck, &lock);
        }
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

BOOL brl_locktest(struct byte_range_lock *br_lck,
                uint32 smbpid,
                struct process_id pid,
                br_off start,
                br_off size, 
                enum brl_type lock_type,
                enum brl_flavour lock_flav)
{
        BOOL ret = True;
        unsigned int i;
        struct lock_struct lock;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                /*
                 * Our own locks don't conflict.
                 */
                if (brl_conflict_other(&locks[i], &lock)) {
                        return False;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         * This only conflicts with Windows locks, not POSIX locks.
         */

        if(lp_posix_locking(fsp->conn->cnum) && (lock_flav == WINDOWS_LOCK)) {
                ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

                DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
                        (double)start, (double)size, ret ? "locked" : "unlocked",
                        fsp->fnum, fsp->fsp_name ));

                /* We need to return the inverse of is_posix_locked. */
                ret = !ret;
        }

        /* no conflicts - we could have added it */
        return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
                uint32 *psmbpid,
                struct process_id pid,
                br_off *pstart,
                br_off *psize, 
                enum brl_type *plock_type,
                enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct lock;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smbpid = *psmbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = *pstart;
        lock.size = *psize;
        lock.fnum = fsp->fnum;
        lock.lock_type = *plock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *exlock = &locks[i];
                BOOL conflict = False;

                if (exlock->lock_flav == WINDOWS_LOCK) {
                        conflict = brl_conflict(exlock, &lock);
                } else {        
                        conflict = brl_conflict_posix(exlock, &lock);
                }

                if (conflict) {
                        *psmbpid = exlock->context.smbpid;
                        *pstart = exlock->start;
                        *psize = exlock->size;
                        *plock_type = exlock->lock_type;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         */

        if(lp_posix_locking(fsp->conn->cnum)) {
                BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

                DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
                        (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
                        fsp->fnum, fsp->fsp_name ));

                if (ret) {
                        /* Hmmm. No clue what to set smbpid to - use -1. */
                        *psmbpid = 0xFFFF;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        return NT_STATUS_OK;
}


/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

BOOL brl_remove_pending_lock(struct byte_range_lock *br_lck,
                uint32 smbpid,
                struct process_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        struct lock_context context;

        context.smbpid = smbpid;
        context.pid = pid;
        context.tid = br_lck->fsp->conn->cnum;

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                /* For pending locks we *always* care about the fnum. */
                if (brl_same_context(&lock->context, &context) &&
                                lock->fnum == br_lck->fsp->fnum &&
                                lock->lock_type == PENDING_LOCK &&
                                lock->lock_flav == lock_flav &&
                                lock->start == start &&
                                lock->size == size) {
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* Didn't find it. */
                return False;
        }

        if (i < br_lck->num_locks - 1) {
                /* Found this particular pending lock - delete it */
                memmove(&locks[i], &locks[i+1], 
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;
        return True;
}

/****************************************************************************
 Remove any locks associated with an open file. If this process owns
 Windows locks on other fnums for the same dev/ino pair, each lock is
 released individually so the underlying system POSIX locks stay
 correct; otherwise the locks are bulk deleted from the record.
****************************************************************************/

void brl_close_fnum(struct byte_range_lock *br_lck)
{
        files_struct *fsp = br_lck->fsp;
        uint16 tid = fsp->conn->cnum;
        int fnum = fsp->fnum;
        unsigned int i, j, dcount=0;
        int num_deleted_windows_locks = 0;
        struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
        struct process_id pid = procid_self();
        BOOL unlock_individually = False;

        if(lp_posix_locking(fsp->conn->cnum)) {

                /* Check if there are any Windows locks associated with this dev/ino
                   pair that are not this fnum. If so we need to call unlock on each
                   one in order to release the system POSIX locks correctly. */

                for (i=0; i < br_lck->num_locks; i++) {
                        struct lock_struct *lock = &locks[i];

                        if (!procid_equal(&lock->context.pid, &pid)) {
                                continue;
                        }

                        if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
                                continue; /* Ignore pending. */
                        }

                        if (lock->context.tid != tid || lock->fnum != fnum) {
                                unlock_individually = True;
                                break;
                        }
                }

                if (unlock_individually) {
                        struct lock_struct *locks_copy;

                        /* Copy the current lock array. */
                        locks_copy = TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
                        if (!locks_copy) {
                                DEBUG(0,("brl_close_fnum: talloc fail.\n"));
                                /* Can't safely walk a NULL copy - bail out
                                   rather than dereference it below. */
                                return;
                        }

                        for (i=0; i < br_lck->num_locks; i++) {
                                struct lock_struct *lock = &locks_copy[i];

                                if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
                                                (lock->fnum == fnum)) {
                                        brl_unlock(br_lck,
                                                lock->context.smbpid,
                                                pid,
                                                lock->start,
                                                lock->size,
                                                lock->lock_flav);
                                }
                        }
                        return;
                }
        }

        /* We can bulk delete - any POSIX locks will be removed when the fd closes. */

        /* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];
                BOOL del_this_lock = False;

                if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
                        if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
                                del_this_lock = True;
                                num_deleted_windows_locks++;
                        } else if (lock->lock_flav == POSIX_LOCK) {
                                del_this_lock = True;
                        }
                }

                if (del_this_lock) {
                        /* Send unlock messages to any pending waiters that overlap. */
                        for (j=0; j < br_lck->num_locks; j++) {
                                struct lock_struct *pend_lock = &locks[j];

                                /* Ignore our own or non-pending locks. */
                                if (pend_lock->lock_type != PENDING_LOCK) {
                                        continue;
                                }

                                /* Optimisation - don't send to this fnum as we're
                                   closing it. */
                                if (pend_lock->context.tid == tid &&
                                    procid_equal(&pend_lock->context.pid, &pid) &&
                                    pend_lock->fnum == fnum) {
                                        continue;
                                }

                                /* We could send specific lock info here... */
                                if (brl_pending_overlap(lock, pend_lock)) {
                                        become_root();
                                        message_send_pid(pend_lock->context.pid,
                                                        MSG_SMB_UNLOCK,
                                                        NULL, 0, True);
                                        unbecome_root();
                                }
                        }

                        /* found it - delete it */
                        if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
                                memmove(&locks[i], &locks[i+1], 
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                        }
                        br_lck->num_locks--;
                        br_lck->modified = True;
                        i--;
                        dcount++;
                }
        }

        if (num_deleted_windows_locks) {
                /* Reduce the Windows lock reference count on this dev/ino pair. */
                reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
        }
}
1344
1345 /****************************************************************************
1346  Ensure this set of lock entries is valid.
1347 ****************************************************************************/
1348
1349 static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
1350 {
1351         unsigned int i;
1352         unsigned int num_valid_entries = 0;
1353         struct lock_struct *locks = *pplocks;
1354
1355         for (i = 0; i < *pnum_entries; i++) {
1356                 struct lock_struct *lock_data = &locks[i];
1357                 if (!process_exists(lock_data->context.pid)) {
1358                         /* This process no longer exists - mark this
1359                            entry as invalid by zeroing it. */
1360                         ZERO_STRUCTP(lock_data);
1361                 } else {
1362                         num_valid_entries++;
1363                 }
1364         }
1365
1366         if (num_valid_entries != *pnum_entries) {
1367                 struct lock_struct *new_lock_data = NULL;
1368
1369                 if (num_valid_entries) {
1370                         new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
1371                         if (!new_lock_data) {
1372                                 DEBUG(3, ("malloc fail\n"));
1373                                 return False;
1374                         }
1375
1376                         num_valid_entries = 0;
1377                         for (i = 0; i < *pnum_entries; i++) {
1378                                 struct lock_struct *lock_data = &locks[i];
1379                                 if (lock_data->context.smbpid &&
1380                                                 lock_data->context.tid) {
1381                                         /* Valid (nonzero) entry - copy it. */
1382                                         memcpy(&new_lock_data[num_valid_entries],
1383                                                 lock_data, sizeof(struct lock_struct));
1384                                         num_valid_entries++;
1385                                 }
1386                         }
1387                 }
1388
1389                 SAFE_FREE(*pplocks);
1390                 *pplocks = new_lock_data;
1391                 *pnum_entries = num_valid_entries;
1392         }
1393
1394         return True;
1395 }
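
/*
 * Usage sketch (illustrative only; it mirrors the traverse_fn() caller
 * below): callers own a malloc'ed array and pass both the count and the
 * array pointer by reference, since the array may be replaced or freed:
 *
 *      unsigned int n = dbuf.dsize / sizeof(struct lock_struct);
 *      struct lock_struct *locks =
 *              (struct lock_struct *)memdup(dbuf.dptr, dbuf.dsize);
 *
 *      if (!validate_lock_entries(&n, &locks)) {
 *              SAFE_FREE(locks);
 *              return -1;
 *      }
 *      ... only the n surviving entries remain in locks ...
 */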

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
        struct lock_struct *locks;
        struct lock_key *key;
        unsigned int i;
        unsigned int num_locks = 0;
        unsigned int orig_num_locks = 0;

        BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;

        /* In a traverse function we must make a copy of
           dbuf before modifying it. */

        locks = (struct lock_struct *)memdup(dbuf.dptr, dbuf.dsize);
        if (!locks) {
                return -1; /* Terminate traversal. */
        }

        key = (struct lock_key *)kbuf.dptr;
        orig_num_locks = num_locks = dbuf.dsize/sizeof(*locks);

        /* Ensure the lock db is clean of entries from invalid processes. */

        if (!validate_lock_entries(&num_locks, &locks)) {
                SAFE_FREE(locks);
                return -1; /* Terminate traversal. */
        }

        /* If validation removed entries, write the cleaned record back
           (or delete it if nothing is left). */
        if (orig_num_locks != num_locks) {
                dbuf.dptr = (char *)locks;
                dbuf.dsize = num_locks * sizeof(*locks);

                if (dbuf.dsize) {
                        tdb_store(ttdb, kbuf, dbuf, TDB_REPLACE);
                } else {
                        tdb_delete(ttdb, kbuf);
                }
        }

        for (i=0; i<num_locks; i++) {
                traverse_callback(key->device,
                                  key->inode,
                                  locks[i].context.pid,
                                  locks[i].lock_type,
                                  locks[i].lock_flav,
                                  locks[i].start,
                                  locks[i].size);
        }

        SAFE_FREE(locks);
        return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(BRLOCK_FN(fn))
{
        if (!tdb) {
                return 0;
        }
        return tdb_traverse(tdb, traverse_fn, (void *)fn);
}
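
/*
 * Usage sketch (hypothetical callback; the parameter list is inferred
 * from the traverse_callback() invocation in traverse_fn() above):
 *
 *      static void print_fn(SMB_DEV_T dev, SMB_INO_T ino,
 *                           struct process_id pid,
 *                           enum brl_type lock_type,
 *                           enum brl_flavour lock_flav,
 *                           br_off start, br_off size)
 *      {
 *              ... print or tally this lock ...
 *      }
 *
 *      int num_records = brl_forall(print_fn);
 */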

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(void *p)
{
        struct byte_range_lock *br_lck =
                talloc_get_type_abort(p, struct byte_range_lock);
        TDB_DATA key;

        key.dptr = (char *)&br_lck->key;
        key.dsize = sizeof(struct lock_key);

        if (!br_lck->modified) {
                goto done;
        }

        if (br_lck->num_locks == 0) {
                /* No locks - delete this entry. */
                if (tdb_delete(tdb, key) == -1) {
                        smb_panic("Could not delete byte range lock entry\n");
                }
        } else {
                TDB_DATA data;
                data.dptr = (char *)br_lck->lock_data;
                data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

                if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
                        smb_panic("Could not store byte range lock entry\n");
                }
        }

 done:

        tdb_chainunlock(tdb, key);
        SAFE_FREE(br_lck->lock_data);
        return 0;
}
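
/*
 * Note: the destructor above is never called directly. It fires from
 * TALLOC_FREE(br_lck) (see brl_get_locks() below), which is how a
 * caller flushes any modifications and drops the tdb chainlock.
 */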

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
                                        files_struct *fsp)
{
        TDB_DATA key;
        TDB_DATA data;
        struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

        if (br_lck == NULL) {
                return NULL;
        }

        br_lck->fsp = fsp;
        br_lck->num_locks = 0;
        br_lck->modified = False;
        memset(&br_lck->key, '\0', sizeof(struct lock_key));
        br_lck->key.device = fsp->dev;
        br_lck->key.inode = fsp->inode;

        key.dptr = (char *)&br_lck->key;
        key.dsize = sizeof(struct lock_key);

        if (tdb_chainlock(tdb, key) != 0) {
                /* The destructor is not yet set, so failing here does
                   not trigger a spurious chainunlock. */
                DEBUG(3, ("Could not lock byte range lock entry\n"));
                TALLOC_FREE(br_lck);
                return NULL;
        }

        talloc_set_destructor(br_lck, byte_range_lock_destructor);

        data = tdb_fetch(tdb, key);
        br_lck->lock_data = (void *)data.dptr;
        br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

        if (!fsp->lockdb_clean) {

                /* This is the first time we've accessed this. */
                /* Go through and ensure all entries exist - remove any that don't. */
                /* Makes the lockdb self cleaning at low cost. */

                struct lock_struct *locks =
                        (struct lock_struct *)br_lck->lock_data;

                if (!validate_lock_entries(&br_lck->num_locks, &locks)) {
                        SAFE_FREE(br_lck->lock_data);
                        TALLOC_FREE(br_lck);
                        return NULL;
                }

                /*
                 * validate_lock_entries might have changed locks. We can't
                 * use a direct pointer here because otherwise gcc warns
                 * about strict aliasing rules being violated.
                 */
                br_lck->lock_data = locks;

                /* Mark the lockdb as "clean" as seen from this open file. */
                fsp->lockdb_clean = True;
        }

        if (DEBUGLEVEL >= 10) {
                unsigned int i;
                struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
                DEBUG(10,("brl_get_locks: %u current locks on dev=%.0f, inode=%.0f\n",
                        br_lck->num_locks,
                        (double)fsp->dev, (double)fsp->inode ));
                for (i = 0; i < br_lck->num_locks; i++) {
                        print_lock_struct(i, &locks[i]);
                }
        }
        return br_lck;
}
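
/*
 * Typical lifecycle (sketch only; error handling elided, and the NULL
 * talloc context is just for illustration):
 *
 *      struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);
 *      if (br_lck == NULL) {
 *              ... handle failure ...
 *      }
 *      ... call brl_lock()/brl_unlock() against br_lck ...
 *      TALLOC_FREE(br_lck);    <- stores any changes, unlocks the record
 */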