r17276: Don't change the POSIX lock ref count if posix locking = no.
source3/locking/brlock.c
/* 
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0
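
/* ZERO_ZERO guards an experimental, default-off code path that
   special-cases 0/0 (start == size == 0) lock ranges and keeps the
   lock list sorted - see the #if ZERO_ZERO blocks below. */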

/* The open brlock.tdb database. */

static TDB_CONTEXT *tdb;

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
			i,
			(unsigned int)pls->context.smbpid,
			(unsigned int)pls->context.tid,
			(unsigned int)procid_to_pid(&pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

BOOL brl_same_context(const struct lock_context *ctx1, 
			     const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smbpid == ctx2->smbpid) &&
		(ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
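	/* Example: if both locks have start = 0xFFFFFFFFFFFFFF00 and
	   size = 0x200, start + size wraps past 2^64, so the end-point
	   comparisons below would wrongly report no overlap; the
	   equality test here catches that case first. */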
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(const struct lock_struct *lck1, 
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static BOOL brl_conflict_posix(const struct lock_struct *lck1, 
			        const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static BOOL brl_conflict1(const struct lock_struct *lck1, 
			 const struct lock_struct *lck2)
{
	if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK )
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) 
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
					lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, BOOL blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
	if (tdb) {
		return;
	}
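
	/* TDB_CLEAR_IF_FIRST wipes any stale records when the first
	   process attaches to the tdb. A read-only opener cannot (and
	   must not) clear the database, so the flag is dropped below
	   in that case. */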
	tdb = tdb_open_log(lock_path("brlock.tdb"),
			lp_open_files_db_hash_size(),
			TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
			read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!tdb) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
	if (!tdb) {
		return;
	}
	tdb_close(tdb);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1, 
			 const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
			const struct lock_struct *plock, BOOL blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 && 
				locks[i].size == 0) {
			break;
		}
#endif
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(fsp->conn))) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {
			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				return map_nt_error_from_unix(errno);
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		return NT_STATUS_NO_MEMORY;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = (void *)locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
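 Writes one, two or three entries into lck_arr and returns the count;
 callers must size the output array for the worst case (an existing
 lock split into three pieces).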
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
						const struct lock_struct *ex,		/* existing lock. */
						const struct lock_struct *plock,	/* proposed lock. */
						BOOL *lock_was_added)
{
	BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                             +---------+
                                             | ex      |
                                             +---------+
                              +-------+
                              | plock |
                              +-------+
OR....
             +---------+
             |  ex     |
             +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
			(plock->start > (ex->start + ex->size))) {
		/* No overlap with this lock - copy existing. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {
		memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
		*lock_was_added = True;
		return 1;
	}

/*********************************************
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR....
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   ex                  | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
				(ex->start <= plock->start + plock->size) &&
				(ex->start + ex->size > plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock before the old. */

		if (lock_types_differ) {
			/* Add new. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			/* Set new start and size. */
			lck_arr[0].start = plock->start;
			lck_arr[0].size = (ex->start + ex->size) - plock->start;
			return 1;
		}
	}

/*********************************************
   +-----------------------+
   |  ex                   |
   +-----------------------+
           +---------------+
           |   plock       |
           +---------------+
OR....
   +-------+        
   |  ex   |
   +-------+
           +---------------+
           |   plock       |
           +---------------+
BECOMES....
   +-------+---------------+
   | ex    |   plock       | - different lock types
   +-------+---------------+

OR.... (merge)
   +-----------------------+
   | ex                    | - same lock type.
   +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock after the old. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = (plock->start + plock->size) - ex->start;
			return 1;
		}
	}

/*********************************************
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        ex                 | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
		*lock_was_added = True;

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[2].start = plock->start + plock->size;
			lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 3;
		} else {
			/* Just eat plock. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge\n");
	/* Notreached. */
	abort();
	/* Keep some compilers happy. */
	return 0;
}

565
566 /****************************************************************************
567  Lock a range of bytes - POSIX lock semantics.
568  We must cope with range splits and merges.
569 ****************************************************************************/
570
571 static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
572                         const struct lock_struct *plock)
573 {
574         unsigned int i, count;
575         struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
576         struct lock_struct *tp;
577         BOOL lock_was_added = False;
578
579         /* No zero-zero locks for POSIX. */
580         if (plock->start == 0 && plock->size == 0) {
581                 return NT_STATUS_INVALID_PARAMETER;
582         }
583
584         /* Don't allow 64-bit lock wrap. */
585         if (plock->start + plock->size < plock->start ||
586                         plock->start + plock->size < plock->size) {
587                 return NT_STATUS_INVALID_PARAMETER;
588         }
589
590         /* The worst case scenario here is we have to split an
591            existing POSIX lock range into two, and add our lock,
592            so we need at most 2 more entries. */
593
594         tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
595         if (!tp) {
596                 return NT_STATUS_NO_MEMORY;
597         }
598         
599         count = 0;
600         for (i=0; i < br_lck->num_locks; i++) {
601                 if (locks[i].lock_flav == WINDOWS_LOCK) {
602                         /* Do any Windows flavour locks conflict ? */
603                         if (brl_conflict(&locks[i], plock)) {
604                                 /* No games with error messages. */
605                                 SAFE_FREE(tp);
606                                 return NT_STATUS_FILE_LOCK_CONFLICT;
607                         }
608                         /* Just copy the Windows lock into the new array. */
609                         memcpy(&tp[count], &locks[i], sizeof(struct lock_struct));
610                         count++;
611                 } else {
612                         /* POSIX conflict semantics are different. */
613                         if (brl_conflict_posix(&locks[i], plock)) {
614                                 /* Can't block ourselves with POSIX locks. */
615                                 /* No games with error messages. */
616                                 SAFE_FREE(tp);
617                                 return NT_STATUS_FILE_LOCK_CONFLICT;
618                         }
619
620                         /* Work out overlaps. */
621                         count += brlock_posix_split_merge(&tp[count], &locks[i], plock, &lock_was_added);
622                 }
623         }
624
625         if (!lock_was_added) {
626                 memcpy(&tp[count], plock, sizeof(struct lock_struct));
627                 count++;
628         }
629
	/* We can grant the POSIX lock at the SMB level, now see if it
	   needs to be mapped into a lower level system POSIX lock, and
	   if so can we get it ? */

	if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(br_lck->fsp->conn))) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {
			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				SAFE_FREE(tp);
				return map_nt_error_from_unix(errno);
			}
		}
	}

	/* Realloc so we don't leak entries per lock call. */
	tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}
	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = (void *)tp;
	br_lck->modified = True;
	return NT_STATUS_OK;
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size, 
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		BOOL blocking_lock)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = brl_lock_windows(br_lck, &lock, blocking_lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

	return ret;
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
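	/* Note: the second test below is inclusive at its top end (<=),
	   so an unlock merely adjacent to a pending range still counts
	   as overlapping. A spurious wakeup only makes the waiter retry;
	   a missed wakeup would stall it. */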
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

static BOOL brl_unlock_windows(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
					lock->fnum == plock->fnum &&
					lock->lock_flav == WINDOWS_LOCK &&
					lock->start == plock->start &&
					lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1], 
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (pend_lock->lock_type != PENDING_LOCK) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			become_root();
			message_send_pid(pend_lock->context.pid,
					MSG_SMB_UNLOCK,
					NULL, 0, True);
			unbecome_root();
		}
	}

	return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static BOOL brl_unlock_posix(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	BOOL overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		struct lock_struct tmp_lock[3];
		BOOL lock_was_added = False;
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (lock->lock_type == PENDING_LOCK ||
				!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

		if (tmp_count == 1) {
			/* Either the locks didn't overlap, or the unlock
			   completely overlapped this lock. If it didn't
			   overlap, then there's no change in the locks. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				/* No change in this lock. */
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				count++;
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				overlap_found = True;
			}
			continue;
		} else if (tmp_count == 2) {
			/* The unlock overlapped an existing lock. Copy the truncated
			   lock into the lock array. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				if (tmp_lock[0].size != locks[i].size) {
					overlap_found = True;
				}
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
				memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
				if (tmp_lock[1].start != locks[i].start) {
					overlap_found = True;
				}
			}
			count++;
			continue;
		} else {
			/* tmp_count == 3 - (we split a lock range in two). */
			SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
			SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
			SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

			memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
			count++;
			memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
			count++;
			overlap_found = True;
			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */
			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1], 
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = br_lck->lock_data = (void *)tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (pend_lock->lock_type != PENDING_LOCK) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			become_root();
			message_send_pid(pend_lock->context.pid,
					MSG_SMB_UNLOCK,
					NULL, 0, True);
			unbecome_root();
		}
	}

	return True;
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return brl_unlock_windows(br_lck, &lock);
	} else {
		return brl_unlock_posix(br_lck, &lock);
	}
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

BOOL brl_locktest(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size, 
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	BOOL ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->cnum) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint32 *psmbpid,
		struct process_id pid,
		br_off *pstart,
		br_off *psize, 
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = *psmbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		BOOL conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmbpid = exlock->context.smbpid;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->cnum)) {
		BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		if (ret) {
			/* Hmmm. No clue what to set smbpid to - use -1. */
			*psmbpid = 0xFFFF;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

BOOL brl_lock_cancel(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	struct lock_context context;

	context.smbpid = smbpid;
	context.pid = pid;
	context.tid = br_lck->fsp->conn->cnum;

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &context) &&
				lock->fnum == br_lck->fsp->fnum &&
				lock->lock_type == PENDING_LOCK &&
				lock->lock_flav == lock_flav &&
				lock->start == start &&
				lock->size == size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1], 
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}

/****************************************************************************
 Remove any locks associated with an open file.
 If this process holds Windows locks on the same dev/ino pair through
 other fnums, this fnum's locks are released individually so the
 underlying system POSIX locks stay correctly reference-counted.
****************************************************************************/

void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	struct process_id pid = procid_self();
	BOOL unlock_individually = False;

	if(lp_posix_locking(fsp->conn->cnum)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			locks_copy = TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
			if (!locks_copy) {
				smb_panic("brl_close_fnum: talloc fail.\n");
			}
			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(br_lck,
						lock->context.smbpid,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		BOOL del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore our own or non-pending locks. */
				if (pend_lock->lock_type != PENDING_LOCK) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
				    procid_equal(&pend_lock->context.pid, &pid) &&
				    pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					become_root();
					message_send_pid(pend_lock->context.pid,
							MSG_SMB_UNLOCK,
							NULL, 0, True);
					unbecome_root();
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1], 
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

1329         if (lp_posix_locking(fsp->conn->cnum) && num_deleted_windows_locks) {
1330                 /* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
1331                 reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
1332         }
1333 }
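
/****************************************************************************
 Editor's sketch - NOT part of the original file. The close path above
 compacts the lock array in place: memmove() slides the tail down one
 slot and i-- makes the loop re-test whatever slid into slot i (safe
 for an unsigned counter, since the loop's i++ undoes the wrap). A
 minimal standalone instance of the same idiom; should_delete() is a
 hypothetical predicate, not a brlock.c function.
****************************************************************************/

static unsigned int compact_locks_sketch(struct lock_struct *locks,
                unsigned int num_locks,
                BOOL (*should_delete)(const struct lock_struct *))
{
        unsigned int i;

        for (i=0; i < num_locks; i++) {
                if (!should_delete(&locks[i])) {
                        continue;
                }
                /* Slide the tail down over the deleted slot. */
                if (num_locks > 1 && i < num_locks - 1) {
                        memmove(&locks[i], &locks[i+1],
                                sizeof(*locks)*((num_locks-1) - i));
                }
                num_locks--;
                i--; /* Re-test the entry now occupying slot i. */
        }
        return num_locks;
}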
1334
1335 /****************************************************************************
1336  Ensure this set of lock entries is valid.
1337 ****************************************************************************/
1338
1339 static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
1340 {
1341         unsigned int i;
1342         unsigned int num_valid_entries = 0;
1343         struct lock_struct *locks = *pplocks;
1344
1345         for (i = 0; i < *pnum_entries; i++) {
1346                 struct lock_struct *lock_data = &locks[i];
1347                 if (!process_exists(lock_data->context.pid)) {
1348                         /* This process no longer exists - mark this
1349                            entry as invalid by zeroing it. */
1350                         ZERO_STRUCTP(lock_data);
1351                 } else {
1352                         num_valid_entries++;
1353                 }
1354         }
1355
1356         if (num_valid_entries != *pnum_entries) {
1357                 struct lock_struct *new_lock_data = NULL;
1358
1359                 if (num_valid_entries) {
1360                         new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
1361                         if (!new_lock_data) {
1362                                 DEBUG(3, ("malloc fail\n"));
1363                                 return False;
1364                         }
1365
1366                         num_valid_entries = 0;
1367                         for (i = 0; i < *pnum_entries; i++) {
1368                                 struct lock_struct *lock_data = &locks[i];
1369                                 if (lock_data->context.smbpid &&
1370                                                 lock_data->context.tid) {
1371                                         /* Valid (nonzero) entry - copy it. */
1372                                         memcpy(&new_lock_data[num_valid_entries],
1373                                                 lock_data, sizeof(struct lock_struct));
1374                                         num_valid_entries++;
1375                                 }
1376                         }
1377                 }
1378
1379                 SAFE_FREE(*pplocks);
1380                 *pplocks = new_lock_data;
1381                 *pnum_entries = num_valid_entries;
1382         }
1383
1384         return True;
1385 }
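
/****************************************************************************
 Editor's sketch - NOT part of the original file. The contract of
 validate_lock_entries() above: pass one marks dead entries with
 ZERO_STRUCTP(), pass two keeps only entries with a nonzero smbpid and
 tid, so a fully zeroed lock_struct can never be mistaken for a valid
 lock. A hedged caller fragment mirroring the two real call sites
 below; 'data' is assumed to be a record fetched from the lock tdb.
****************************************************************************/

static BOOL scrub_record_sketch(TDB_DATA data,
                struct lock_struct **pplocks,
                unsigned int *pnum_locks)
{
        *pplocks = (struct lock_struct *)data.dptr;
        *pnum_locks = data.dsize / sizeof(struct lock_struct);

        /* On success this may free data.dptr and replace it with a
           freshly malloc'ed, compacted array (NULL if no lock owner
           is still alive). */
        return validate_lock_entries(pnum_locks, pplocks);
}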
1386
1387 /****************************************************************************
1388  Traverse the whole database with this function, calling traverse_callback
1389  on each lock.
1390 ****************************************************************************/
1391
1392 static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
1393 {
1394         struct lock_struct *locks;
1395         struct lock_key *key;
1396         unsigned int i;
1397         unsigned int num_locks = 0;
1398         unsigned int orig_num_locks = 0;
1399
1400         BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;
1401
1402         /* In a traverse function we must make a copy of
1403            dbuf before modifying it. */
1404
1405         locks = (struct lock_struct *)memdup(dbuf.dptr, dbuf.dsize);
1406         if (!locks) {
1407                 return -1; /* Terminate traversal. */
1408         }
1409
1410         key = (struct lock_key *)kbuf.dptr;
1411         orig_num_locks = num_locks = dbuf.dsize/sizeof(*locks);
1412
1413         /* Ensure the lock db is clean of entries from invalid processes. */
1414
1415         if (!validate_lock_entries(&num_locks, &locks)) {
1416                 SAFE_FREE(locks);
1417                 return -1; /* Terminate traversal */
1418         }
1419
1420         if (orig_num_locks != num_locks) {
1421                 dbuf.dptr = (char *)locks;
1422                 dbuf.dsize = num_locks * sizeof(*locks);
1423
1424                 if (dbuf.dsize) {
1425                         tdb_store(ttdb, kbuf, dbuf, TDB_REPLACE);
1426                 } else {
1427                         tdb_delete(ttdb, kbuf);
1428                 }
1429         }
1430
1431         for (i=0; i < num_locks; i++) {
1432                 traverse_callback(key->device,
1433                                   key->inode,
1434                                   locks[i].context.pid,
1435                                   locks[i].lock_type,
1436                                   locks[i].lock_flav,
1437                                   locks[i].start,
1438                                   locks[i].size);
1439         }
1440
1441         SAFE_FREE(locks);
1442         return 0;
1443 }
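
/****************************************************************************
 Editor's note - NOT part of the original file. Within tdb_traverse()
 the dbuf handed to the callback must be treated as read-only, which is
 why traverse_fn() memdup()s it before validation can modify the
 entries. Read-only callbacks need no copy; a minimal sketch that just
 counts lock records into a caller-supplied counter:
****************************************************************************/

static int count_locks_sketch(TDB_CONTEXT *ttdb, TDB_DATA kbuf,
                TDB_DATA dbuf, void *state)
{
        unsigned int *pcount = (unsigned int *)state;

        *pcount += dbuf.dsize / sizeof(struct lock_struct);
        return 0; /* 0 continues the traversal, -1 terminates it. */
}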
1444
1445 /*******************************************************************
1446  Call the specified function on each lock in the database.
1447 ********************************************************************/
1448
1449 int brl_forall(BRLOCK_FN(fn))
1450 {
1451         if (!tdb) {
1452                 return 0;
1453         }
1454         return tdb_traverse(tdb, traverse_fn, (void *)fn);
1455 }
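
/****************************************************************************
 Editor's sketch - NOT part of the original file. A BRLOCK_FN()
 callback for brl_forall(); the parameter order follows the
 traverse_callback() invocation in traverse_fn() above, but the exact
 typedef lives in the headers, so treat this signature as an
 assumption. An smbstatus-style lock lister would look like this:
****************************************************************************/

static void print_brl_sketch(SMB_DEV_T dev, SMB_INO_T ino,
                struct process_id pid,
                enum brl_type lock_type,
                enum brl_flavour lock_flav,
                br_off start, br_off size)
{
        DEBUG(0,("%s %s pid=%u dev=%.0f ino=%.0f range=%.0f:%.0f\n",
                lock_type_name(lock_type),
                lock_flav_name(lock_flav),
                (unsigned int)procid_to_pid(&pid),
                (double)dev, (double)ino,
                (double)start, (double)size));
}

/* Usage: (void)brl_forall(print_brl_sketch); */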
1456
1457 /*******************************************************************
1458  Store a potentially modified set of byte range lock data back into
1459  the database.
1460  Unlock the record.
1461 ********************************************************************/
1462
1463 static int byte_range_lock_destructor(void *p)
1464 {
1465         struct byte_range_lock *br_lck =
1466                 talloc_get_type_abort(p, struct byte_range_lock);
1467         TDB_DATA key;
1468
1469         key.dptr = (char *)&br_lck->key;
1470         key.dsize = sizeof(struct lock_key);
1471
1472         if (!br_lck->modified) {
1473                 goto done;
1474         }
1475
1476         if (br_lck->num_locks == 0) {
1477                 /* No locks - delete this entry. */
1478                 if (tdb_delete(tdb, key) == -1) {
1479                         smb_panic("Could not delete byte range lock entry\n");
1480                 }
1481         } else {
1482                 TDB_DATA data;
1483                 data.dptr = (char *)br_lck->lock_data;
1484                 data.dsize = br_lck->num_locks * sizeof(struct lock_struct);
1485
1486                 if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
1487                         smb_panic("Could not store byte range lock entry\n");
1488                 }
1489         }
1490
1491  done:
1492
1493         tdb_chainunlock(tdb, key);
1494         SAFE_FREE(br_lck->lock_data);
1495         return 0;
1496 }
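
/****************************************************************************
 Editor's note - NOT part of the original file. The destructor above is
 the only place the record is written back and the chainlock dropped,
 so callers never store or unlock explicitly - they just TALLOC_FREE()
 the handle. A minimal sketch of the same old-style talloc destructor
 pattern, using a hypothetical struct:
****************************************************************************/

struct example_rec_sketch {
        BOOL modified;
};

static int example_rec_destructor_sketch(void *p)
{
        struct example_rec_sketch *rec =
                talloc_get_type_abort(p, struct example_rec_sketch);

        if (rec->modified) {
                /* Flush state here - this runs exactly once, when the
                   talloc'ed handle is freed. */
        }
        return 0; /* Returning 0 allows the free to proceed. */
}

/* Usage: rec = TALLOC_P(mem_ctx, struct example_rec_sketch);
          talloc_set_destructor(rec, example_rec_destructor_sketch);
          then later TALLOC_FREE(rec) fires the destructor. */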
1497
1498 /*******************************************************************
1499  Fetch a set of byte range lock data from the database.
1500  Leave the record locked.
1501  TALLOC_FREE(brl) will release the lock in the destructor.
1502 ********************************************************************/
1503
1504 struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
1505                                         files_struct *fsp)
1506 {
1507         TDB_DATA key;
1508         TDB_DATA data;
1509         struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
1510
1511         if (br_lck == NULL) {
1512                 return NULL;
1513         }
1514
1515         br_lck->fsp = fsp;
1516         br_lck->num_locks = 0;
1517         br_lck->modified = False;
1518         memset(&br_lck->key, '\0', sizeof(struct lock_key));
1519         br_lck->key.device = fsp->dev;
1520         br_lck->key.inode = fsp->inode;
1521
1522         key.dptr = (char *)&br_lck->key;
1523         key.dsize = sizeof(struct lock_key);
1524
1525         if (tdb_chainlock(tdb, key) != 0) {
1526                 DEBUG(3, ("Could not lock byte range lock entry\n"));
1527                 TALLOC_FREE(br_lck);
1528                 return NULL;
1529         }
1530
1531         talloc_set_destructor(br_lck, byte_range_lock_destructor);
1532
1533         data = tdb_fetch(tdb, key);
1534         br_lck->lock_data = (void *)data.dptr;
1535         br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
1536
1537         if (!fsp->lockdb_clean) {
1538
1539                 /* This is the first time we've accessed this record. */
1540                 /* Check that every entry's owning process still exists - remove any that don't. */
1541                 /* This makes the lockdb self-cleaning at low cost. */
1542
1543                 struct lock_struct *locks =
1544                         (struct lock_struct *)br_lck->lock_data;
1545
1546                 if (!validate_lock_entries(&br_lck->num_locks, &locks)) {
1547                         SAFE_FREE(br_lck->lock_data);
1548                         TALLOC_FREE(br_lck);
1549                         return NULL;
1550                 }
1551
1552                 /*
1553                  * validate_lock_entries might have changed locks. We can't
1554                  * use a direct pointer here because otherwise gcc warns
1555                  * about strict aliasing rules being violated.
1556                  */
1557                 br_lck->lock_data = locks;
1558
1559                 /* Mark the lockdb as "clean" as seen from this open file. */
1560                 fsp->lockdb_clean = True;
1561         }
1562
1563         if (DEBUGLEVEL >= 10) {
1564                 unsigned int i;
1565                 struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
1566                 DEBUG(10,("brl_get_locks: %u current locks on dev=%.0f, inode=%.0f\n",
1567                         br_lck->num_locks,
1568                         (double)fsp->dev, (double)fsp->inode ));
1569                 for (i = 0; i < br_lck->num_locks; i++) {
1570                         print_lock_struct(i, &locks[i]);
1571                 }
1572         }
1573         return br_lck;
1574 }
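
/****************************************************************************
 Editor's sketch - NOT part of the original file. The caller pattern
 implied by the comments above: fetch the record under a chainlock,
 mutate it through the brl_* calls, then TALLOC_FREE() the handle so
 the destructor stores the result and releases the lock. The body is
 illustrative only.
****************************************************************************/

static NTSTATUS locked_update_sketch(files_struct *fsp)
{
        struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);

        if (br_lck == NULL) {
                /* Either the talloc or the tdb chainlock failed. */
                return NT_STATUS_NO_MEMORY;
        }

        /* ... call brl_lock()/brl_unlock() on br_lck here; they set
           br_lck->modified whenever the record changes ... */

        TALLOC_FREE(br_lck); /* Stores or deletes the record, then
                                tdb_chainunlock(), via the destructor. */
        return NT_STATUS_OK;
}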