smbd: Add brl_num_locks access function
source3/locking/brlock.c (obnox/samba/samba-obnox.git)
/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"
#include "system/filesys.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"
#include "util_tdb.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
        DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
                        i,
                        (unsigned long long)pls->context.smblctx,
                        (unsigned int)pls->context.tid,
                        server_id_str(talloc_tos(), &pls->context.pid) ));

        DEBUG(10,("start = %.0f, size = %.0f, fnum = %llu, %s %s\n",
                (double)pls->start,
                (double)pls->size,
                (unsigned long long)pls->fnum,
                lock_type_name(pls->lock_type),
                lock_flav_name(pls->lock_flav) ));
}

unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
        return brl->num_locks;
}

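/* Editor's usage sketch (illustrative, not part of the original file):
   external callers are expected to use this accessor rather than reach
   into struct byte_range_lock directly, e.g.

        struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
        if (br_lck != NULL && brl_num_locks(br_lck) == 0) {
                ... fast path: no byte range locks held on this file ...
        }

   brl_get_locks() is the usual way such a handle is obtained; the
   fast-path body is hypothetical. */
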
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static bool brl_same_context(const struct lock_context *ctx1,
                             const struct lock_context *ctx2)
{
        return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
                (ctx1->smblctx == ctx2->smblctx) &&
                (ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        /* XXX Remove for Win7 compatibility. */
        /* this extra check is not redundant - it copes with locks
           that go beyond the end of 64 bit file space */
        if (lck1->size != 0 &&
            lck1->start == lck2->start &&
            lck1->size == lck2->size) {
                return True;
        }

        if (lck1->start >= (lck2->start+lck2->size) ||
            lck2->start >= (lck1->start+lck1->size)) {
                return False;
        }
        return True;
}

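/* Worked example (editor's note): ranges behave as half-open intervals,
   so {start=100, size=50} covers bytes 100..149 and does not overlap
   {start=150, size=50}, since 150 >= 100 + 50.  The explicit equality
   check above matters once start + size wraps the 64 bit space: for two
   identical locks with start=0xFFFFFFFFFFFFFFFF and size=2 the sum
   wraps to 1, and the generic interval test alone would wrongly report
   no overlap. */
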
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        /* Ignore PENDING locks. */
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* A READ lock can stack on top of a WRITE lock if they have the same
         * context & fnum. */
        if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
            brl_same_context(&lck1->context, &lck2->context) &&
            lck1->fnum == lck2->fnum) {
                return False;
        }

        return brl_overlap(lck1, lck2);
}

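/* Summary (editor's note): for two non-pending locks the checks above
   reduce to three rules: READ never conflicts with READ; our own READ
   stacked on our own WRITE (same context and fnum) never conflicts;
   every other combination conflicts exactly when the byte ranges
   overlap. */
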
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
                                const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
        SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
        SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

        /* Ignore PENDING locks. */
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* Locks on the same context don't conflict. Ignore fnum. */
        if (brl_same_context(&lck1->context, &lck2->context)) {
                return False;
        }

        /* One is read, the other write, or the context is different,
           do they overlap ? */
        return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
                return True;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
                return False;

        /* POSIX flavour locks never conflict here - this is only called
           in the read/write path. */

        if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
                return False;

        /*
         * Incoming WRITE locks conflict with existing READ locks even
         * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
         */

        if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
                if (brl_same_context(&lck1->context, &lck2->context) &&
                                        lck1->fnum == lck2->fnum)
                        return False;
        }

        return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
        if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
                return True;
        if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
                return True;
        return False;
}

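/* Editor's note: the second test uses an inclusive bound (<=), so an
   unlock whose start sits exactly at the pending range's end offset
   (start + size) is still treated as overlapping.  Erring on the
   generous side here only risks a spurious wakeup and one failed retry
   by the waiter. */
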
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp,
                                const struct lock_struct *lock,
                                bool blocking_lock)
{
        if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
                /* amazing the little things you learn with a test
                   suite. Locks beyond this offset (as a 64 bit
                   number!) always generate the conflict error code,
                   unless the top bit is set */
                if (!blocking_lock) {
                        fsp->last_lock_failure = *lock;
                }
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
                        lock->context.tid == fsp->last_lock_failure.context.tid &&
                        lock->fnum == fsp->last_lock_failure.fnum &&
                        lock->start == fsp->last_lock_failure.start) {
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        if (!blocking_lock) {
                fsp->last_lock_failure = *lock;
        }
        return NT_STATUS_LOCK_NOT_GRANTED;
}

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
        int tdb_flags;

        if (brlock_db) {
                return;
        }

        tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

        if (!lp_clustering()) {
                /*
                 * We can't use the SEQNUM trick to cache brlock
                 * entries in the clustering case because ctdb seqnum
                 * propagation has a delay.
                 */
                tdb_flags |= TDB_SEQNUM;
        }

        brlock_db = db_open(NULL, lock_path("brlock.tdb"),
                            lp_open_files_db_hash_size(), tdb_flags,
                            read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
                            DBWRAP_LOCK_ORDER_2);
        if (!brlock_db) {
                DEBUG(0,("Failed to open byte range locking database %s\n",
                        lock_path("brlock.tdb")));
                return;
        }
}

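/* Editor's note (illustrative): smbd initialises this once during
   server startup, conceptually

        brl_init(false);        (before any SMB locking request arrives)

   while read-only status and inspection tools may pass read_only=true
   so brlock.tdb is opened without O_CREAT or write access. */
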
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
        TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        if (lck1->start != lck2->start) {
                return (lck1->start - lck2->start);
        }
        if (lck2->size != lck1->size) {
                return ((int)lck1->size - (int)lck2->size);
        }
        return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
    struct lock_struct *plock, bool blocking_lock)
{
        unsigned int i;
        files_struct *fsp = br_lck->fsp;
        struct lock_struct *locks = br_lck->lock_data;
        NTSTATUS status;

        SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

        if ((plock->start + plock->size - 1 < plock->start) &&
                        plock->size != 0) {
                return NT_STATUS_INVALID_LOCK_RANGE;
        }

        for (i=0; i < br_lck->num_locks; i++) {
                /* Do any Windows or POSIX locks conflict ? */
                if (brl_conflict(&locks[i], plock)) {
                        /* Remember who blocked us. */
                        plock->context.smblctx = locks[i].context.smblctx;
                        return brl_lock_failed(fsp,plock,blocking_lock);
                }
#if ZERO_ZERO
                if (plock->start == 0 && plock->size == 0 &&
                                locks[i].size == 0) {
                        break;
                }
#endif
        }

        if (!IS_PENDING_LOCK(plock->lock_type)) {
                contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
        }

        /* We can get the Windows lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
                int errno_ret;
                if (!set_posix_lock_windows_flavour(fsp,
                                plock->start,
                                plock->size,
                                plock->lock_type,
                                &plock->context,
                                locks,
                                br_lck->num_locks,
                                &errno_ret)) {

                        /* We don't know who blocked us. */
                        plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                status = NT_STATUS_FILE_LOCK_CONFLICT;
                                goto fail;
                        } else {
                                /* Use the errno saved by the lower layer -
                                   the global errno may be stale by now. */
                                status = map_nt_error_from_unix(errno_ret);
                                goto fail;
                        }
                }
        }

        /* no conflicts - add it to the list of locks */
        locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
        if (!locks) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
        br_lck->num_locks += 1;
        br_lck->lock_data = locks;
        br_lck->modified = True;

        return NT_STATUS_OK;
 fail:
        if (!IS_PENDING_LOCK(plock->lock_type)) {
                contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
        }
        return status;
}

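/* Flow summary (editor's note): the Windows path above is (1) scan
   every existing lock for a conflict, (2) flag level2 oplock
   contention, (3) map the region onto a system POSIX lock when "posix
   locking" is enabled, (4) append the new entry and mark br_lck
   modified so the record is written back to brlock.tdb when the handle
   is released. */
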
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,       /* Output array. */
                                                struct lock_struct *ex,         /* existing lock. */
                                                struct lock_struct *plock)      /* proposed lock. */
{
        bool lock_types_differ = (ex->lock_type != plock->lock_type);

        /* We can't merge non-conflicting locks from different contexts - ignore fnum. */

        if (!brl_same_context(&ex->context, &plock->context)) {
                /* Just copy. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

        /* We now know we have the same context. */

        /* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

        if ( (ex->start > (plock->start + plock->size)) ||
                (plock->start > (ex->start + ex->size))) {

                /* No overlap with this lock - copy existing. */

                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+

**********************************************/

        if ( (ex->start >= plock->start) &&
                (ex->start + ex->size <= plock->start + plock->size) ) {

                /* Replace - discard existing lock. */

                return 0;
        }

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

        if (plock->start + plock->size == ex->start) {

                /* If the lock types are the same, we merge, if different, we
                   add the remainder of the old lock. */

                if (lock_types_differ) {
                        /* Add existing. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->size += ex->size;
                        return 0;
                }
        }

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

        if (ex->start + ex->size == plock->start) {

                /* If the lock types are the same, we merge, if different, we
                   add the existing lock. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->start = ex->start;
                        plock->size += ex->size;
                        return 0;
                }
        }

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                (ex->start <= plock->start + plock->size) &&
                (ex->start + ex->size > plock->start + plock->size) ) {

                /* If the lock types are the same, we merge, if different, we
                   add the remainder of the old lock. */

                if (lock_types_differ) {
                        /* Add remaining existing. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing start and size. */
                        lck_arr[0].start = plock->start + plock->size;
                        lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->size += (ex->start + ex->size) - (plock->start + plock->size);
                        return 0;
                }
        }

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

        if ( (ex->start < plock->start) &&
                        (ex->start + ex->size >= plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {

                /* If the lock types are the same, we merge, if different, we
                   add the truncated old lock. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = plock->start - ex->start;
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. MUST ADJUST plock SIZE FIRST ! */
                        plock->size += (plock->start - ex->start);
                        plock->start = ex->start;
                        return 0;
                }
        }

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

        if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

                if (lock_types_differ) {

                        /* We have to split ex into two locks here. */

                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

                        /* Adjust first existing size. */
                        lck_arr[0].size = plock->start - ex->start;

                        /* Adjust second existing start and size. */
                        lck_arr[1].start = plock->start + plock->size;
                        lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 2;
                } else {
                        /* Just eat the existing locks, merge them into plock. */
                        plock->start = ex->start;
                        plock->size = ex->size;
                        return 0;
                }
        }

        /* Never get here. */
        smb_panic("brlock_posix_split_merge");
        /* Notreached. */

        /* Keep some compilers happy. */
        return 0;
}

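/* Worked example (editor's illustration): with an existing READ lock
   ex = {start=0, size=100} and an incoming WRITE lock
   plock = {start=40, size=20} in the same context, the "complete
   overlap" case returns 2 and yields

        lck_arr[0] = {start=0,  size=40}    (leading remainder of ex)
        lck_arr[1] = {start=60, size=40}    (trailing remainder of ex)

   with plock itself inserted by the caller, so one lock becomes three.
   Had the lock types matched, the function would instead return 0 and
   grow plock to {start=0, size=100}. */
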
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
                               struct byte_range_lock *br_lck,
                               struct lock_struct *plock)
{
        unsigned int i, count, posix_count;
        struct lock_struct *locks = br_lck->lock_data;
        struct lock_struct *tp;
        bool signal_pending_read = False;
        bool break_oplocks = false;
        NTSTATUS status;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size - 1 < plock->start) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, and add our lock,
           so we need at most 2 more entries. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }

        count = posix_count = 0;

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *curr_lock = &locks[i];

                /* If we have a pending read lock, a lock downgrade should
                   trigger a lock re-evaluation. */
                if (curr_lock->lock_type == PENDING_READ_LOCK &&
                                brl_pending_overlap(plock, curr_lock)) {
                        signal_pending_read = True;
                }

                if (curr_lock->lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(curr_lock, plock)) {
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                /* Remember who blocked us. */
                                plock->context.smblctx = curr_lock->context.smblctx;
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
                        count++;
                } else {
                        unsigned int tmp_count = 0;

                        /* POSIX conflict semantics are different. */
                        if (brl_conflict_posix(curr_lock, plock)) {
                                /* Can't block ourselves with POSIX locks. */
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                /* Remember who blocked us. */
                                plock->context.smblctx = curr_lock->context.smblctx;
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }

                        /* Work out overlaps. */
                        tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
                        posix_count += tmp_count;
                        count += tmp_count;
                }
        }

        /*
         * Break oplocks while we hold a brl. Since lock() and unlock() calls
         * are not symmetric with POSIX semantics, we cannot guarantee our
         * contend_level2_oplocks_begin/end calls will be acquired and
         * released one-for-one as with Windows semantics. Therefore we only
         * call contend_level2_oplocks_begin if this is the first POSIX brl on
         * the file.
         */
        break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
                         posix_count == 0);
        if (break_oplocks) {
                contend_level2_oplocks_begin(br_lck->fsp,
                                             LEVEL2_CONTEND_POSIX_BRL);
        }

        /* Try and add the lock in order, sorted by lock start. */
        for (i=0; i < count; i++) {
                struct lock_struct *curr_lock = &tp[i];

                if (curr_lock->start <= plock->start) {
                        continue;
                }
                /* Found the insertion point. */
                break;
        }

        if (i < count) {
                memmove(&tp[i+1], &tp[i],
                        (count - i)*sizeof(struct lock_struct));
        }
        memcpy(&tp[i], plock, sizeof(struct lock_struct));
        count++;

        /* We can get the POSIX lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
                int errno_ret;

                /* The lower layer just needs to attempt to
                   get the system POSIX lock. We've weeded out
                   any conflicts above. */

                if (!set_posix_lock_posix_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                plock->lock_type,
                                &errno_ret)) {

                        /* We don't know who blocked us. */
                        plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                SAFE_FREE(tp);
                                status = NT_STATUS_FILE_LOCK_CONFLICT;
                                goto fail;
                        } else {
                                SAFE_FREE(tp);
                                /* Use the errno saved by the lower layer -
                                   the global errno may be stale by now. */
                                status = map_nt_error_from_unix(errno_ret);
                                goto fail;
                        }
                }
        }

        /* If we didn't use all the allocated space,
         * realloc so we don't leak entries per lock call. */
        if (count < br_lck->num_locks + 2) {
                tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
                if (!tp) {
                        status = NT_STATUS_NO_MEMORY;
                        goto fail;
                }
        }

        br_lck->num_locks = count;
        SAFE_FREE(br_lck->lock_data);
        br_lck->lock_data = tp;
        locks = tp;
        br_lck->modified = True;

        /* A successful downgrade from write to read lock can trigger a lock
           re-evaluation where waiting readers can now proceed. */

        if (signal_pending_read) {
                /* Send unlock messages to any pending read waiters that overlap. */
                for (i=0; i < br_lck->num_locks; i++) {
                        struct lock_struct *pend_lock = &locks[i];

                        /* Ignore non-pending locks. */
                        if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                                continue;
                        }

                        if (pend_lock->lock_type == PENDING_READ_LOCK &&
                                        brl_pending_overlap(plock, pend_lock)) {
                                DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
                                        procid_str_static(&pend_lock->context.pid )));

                                messaging_send(msg_ctx, pend_lock->context.pid,
                                               MSG_SMB_UNLOCK, &data_blob_null);
                        }
                }
        }

        return NT_STATUS_OK;
 fail:
        if (break_oplocks) {
                contend_level2_oplocks_end(br_lck->fsp,
                                           LEVEL2_CONTEND_POSIX_BRL);
        }
        return status;
}

NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
                                       struct byte_range_lock *br_lck,
                                       struct lock_struct *plock,
                                       bool blocking_lock,
                                       struct blocking_lock_record *blr)
{
        VFS_FIND(brl_lock_windows);
        return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
                                                blocking_lock, blr);
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
                struct byte_range_lock *br_lck,
                uint64_t smblctx,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_type lock_type,
                enum brl_flavour lock_flav,
                bool blocking_lock,
                uint64_t *psmblctx,
                struct blocking_lock_record *blr)
{
        NTSTATUS ret;
        struct lock_struct lock;

#if !ZERO_ZERO
        if (start == 0 && size == 0) {
                DEBUG(0,("client sent 0/0 lock - please report this\n"));
        }
#endif

#ifdef DEVELOPER
        /* Quieten valgrind on test. */
        ZERO_STRUCT(lock);
#endif

        lock.context.smblctx = smblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
                    &lock, blocking_lock, blr);
        } else {
                ret = brl_lock_posix(msg_ctx, br_lck, &lock);
        }

#if ZERO_ZERO
        /* sort the lock list */
        TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

        /* If we're returning an error, return who blocked us. */
        if (!NT_STATUS_IS_OK(ret) && psmblctx) {
                *psmblctx = lock.context.smblctx;
        }
        return ret;
}

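/* Editor's usage sketch (illustrative; the real call sites live in the
   higher-level locking code, not in this file):

        status = brl_lock(msg_ctx, br_lck, smblctx, self,
                          start, size, WRITE_LOCK, WINDOWS_LOCK,
                          false, &blocker_smblctx, NULL);

   where start/size come off the wire and blocker_smblctx receives the
   blocker's smblctx on failure.  The variable names are hypothetical. */
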
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
                               struct byte_range_lock *br_lck,
                               const struct lock_struct *plock)
{
        unsigned int i, j;
        struct lock_struct *locks = br_lck->lock_data;
        enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

        SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
        /* Delete write locks by preference... The lock list
           is sorted in the zero zero case. */

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                if (lock->lock_type == WRITE_LOCK &&
                    brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {

                        /* found it - delete it */
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i != br_lck->num_locks) {
                /* We found it - don't search again. */
                goto unlock_continue;
        }
#endif

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                if (IS_PENDING_LOCK(lock->lock_type)) {
                        continue;
                }

                /* Only remove our own locks that match in start, size, and flavour. */
                if (brl_same_context(&lock->context, &plock->context) &&
                                        lock->fnum == plock->fnum &&
                                        lock->lock_flav == WINDOWS_LOCK &&
                                        lock->start == plock->start &&
                                        lock->size == plock->size ) {
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* we didn't find it */
                return False;
        }

#if ZERO_ZERO
  unlock_continue:
#endif

        /* Actually delete the lock. */
        if (i < br_lck->num_locks - 1) {
                memmove(&locks[i], &locks[i+1],
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;

        /* Unlock the underlying POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->params)) {
                release_posix_lock_windows_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                deleted_lock_type,
                                &plock->context,
                                locks,
                                br_lck->num_locks);
        }

        /* Send unlock messages to any pending waiters that overlap. */
        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(plock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                procid_str_static(&pend_lock->context.pid )));

                        messaging_send(msg_ctx, pend_lock->context.pid,
                                       MSG_SMB_UNLOCK, &data_blob_null);
                }
        }

        contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
        return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
                             struct byte_range_lock *br_lck,
                             struct lock_struct *plock)
{
        unsigned int i, j, count;
        struct lock_struct *tp;
        struct lock_struct *locks = br_lck->lock_data;
        bool overlap_found = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return False;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                DEBUG(10,("brl_unlock_posix: lock wrap\n"));
                return False;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, so we need at most
           1 more entry. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
        if (!tp) {
                DEBUG(10,("brl_unlock_posix: malloc fail\n"));
                return False;
        }

        count = 0;
        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];
                unsigned int tmp_count;

                /* Only remove our own locks - ignore fnum. */
                if (IS_PENDING_LOCK(lock->lock_type) ||
                                !brl_same_context(&lock->context, &plock->context)) {
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                if (lock->lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(lock, plock)) {
                                SAFE_FREE(tp);
                                return false;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                /* Work out overlaps. */
                tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

                if (tmp_count == 0) {
                        /* plock overlapped the existing lock completely,
                           or replaced it. Don't copy the existing lock. */
                        overlap_found = true;
                } else if (tmp_count == 1) {
                        /* Either no overlap, (simple copy of existing lock) or
                         * an overlap of an existing lock. */
                        /* If the lock changed size, we had an overlap. */
                        if (tp[count].size != lock->size) {
                                overlap_found = true;
                        }
                        count += tmp_count;
                } else if (tmp_count == 2) {
                        /* We split a lock range in two. */
                        overlap_found = true;
                        count += tmp_count;

                        /* Optimisation... */
                        /* We know we're finished here as we can't overlap any
                           more POSIX locks. Copy the rest of the lock array. */

                        if (i < br_lck->num_locks - 1) {
                                memcpy(&tp[count], &locks[i+1],
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                                count += ((br_lck->num_locks-1) - i);
                        }
                        break;
                }

        }

        if (!overlap_found) {
                /* Just ignore - no change. */
                SAFE_FREE(tp);
                DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
                return True;
        }

        /* Unlock any POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->params)) {
                release_posix_lock_posix_flavour(br_lck->fsp,
                                                plock->start,
                                                plock->size,
                                                &plock->context,
                                                tp,
                                                count);
        }

        /* Realloc so we don't leak entries per unlock call. */
        if (count) {
                tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
                if (!tp) {
                        DEBUG(10,("brl_unlock_posix: realloc fail\n"));
                        return False;
                }
        } else {
                /* We deleted the last lock. */
                SAFE_FREE(tp);
                tp = NULL;
        }

        contend_level2_oplocks_end(br_lck->fsp,
                                   LEVEL2_CONTEND_POSIX_BRL);

        br_lck->num_locks = count;
        SAFE_FREE(br_lck->lock_data);
        locks = tp;
        br_lck->lock_data = tp;
        br_lck->modified = True;

        /* Send unlock messages to any pending waiters that overlap. */

        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(plock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                procid_str_static(&pend_lock->context.pid )));

                        messaging_send(msg_ctx, pend_lock->context.pid,
                                       MSG_SMB_UNLOCK, &data_blob_null);
                }
        }

        return True;
}

bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
                                     struct messaging_context *msg_ctx,
                                     struct byte_range_lock *br_lck,
                                     const struct lock_struct *plock)
{
        VFS_FIND(brl_unlock_windows);
        return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
                                                  plock);
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
                struct byte_range_lock *br_lck,
                uint64_t smblctx,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        struct lock_struct lock;

        lock.context.smblctx = smblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = UNLOCK_LOCK;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
                    br_lck, &lock);
        } else {
                return brl_unlock_posix(msg_ctx, br_lck, &lock);
        }
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
                uint64_t smblctx,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_type lock_type,
                enum brl_flavour lock_flav)
{
        bool ret = True;
        unsigned int i;
        struct lock_struct lock;
        const struct lock_struct *locks = br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smblctx = smblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                /*
                 * Our own locks don't conflict.
                 */
                if (brl_conflict_other(&locks[i], &lock)) {
                        return False;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         * This only conflicts with Windows locks, not POSIX locks.
         */

        if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
                ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

                DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for %s file %s\n",
                        (double)start, (double)size, ret ? "locked" : "unlocked",
                        fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

                /* We need to return the inverse of is_posix_locked. */
                ret = !ret;
        }

        /* no conflicts - we could have added it */
        return ret;
}

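/* Editor's usage sketch (illustrative): this is the query-only path
   used by checks such as strict locking on read/write, conceptually

        if (!brl_locktest(br_lck, smblctx, self, offset, len,
                          READ_LOCK, WINDOWS_LOCK)) {
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

   Nothing is inserted into the database; the names above are
   hypothetical. */
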
1338 /****************************************************************************
1339  Query for existing locks.
1340 ****************************************************************************/
1341
1342 NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
1343                 uint64_t *psmblctx,
1344                 struct server_id pid,
1345                 br_off *pstart,
1346                 br_off *psize,
1347                 enum brl_type *plock_type,
1348                 enum brl_flavour lock_flav)
1349 {
1350         unsigned int i;
1351         struct lock_struct lock;
1352         const struct lock_struct *locks = br_lck->lock_data;
1353         files_struct *fsp = br_lck->fsp;
1354
1355         lock.context.smblctx = *psmblctx;
1356         lock.context.pid = pid;
1357         lock.context.tid = br_lck->fsp->conn->cnum;
1358         lock.start = *pstart;
1359         lock.size = *psize;
1360         lock.fnum = fsp->fnum;
1361         lock.lock_type = *plock_type;
1362         lock.lock_flav = lock_flav;
1363
1364         /* Make sure existing locks don't conflict */
1365         for (i=0; i < br_lck->num_locks; i++) {
1366                 const struct lock_struct *exlock = &locks[i];
1367                 bool conflict = False;
1368
1369                 if (exlock->lock_flav == WINDOWS_LOCK) {
1370                         conflict = brl_conflict(exlock, &lock);
1371                 } else {
1372                         conflict = brl_conflict_posix(exlock, &lock);
1373                 }
1374
1375                 if (conflict) {
1376                         *psmblctx = exlock->context.smblctx;
1377                         *pstart = exlock->start;
1378                         *psize = exlock->size;
1379                         *plock_type = exlock->lock_type;
1380                         return NT_STATUS_LOCK_NOT_GRANTED;
1381                 }
1382         }
1383
1384         /*
1385          * No lock is held by an SMB daemon. Check whether there is
1386          * a POSIX lock from a UNIX or NFS process.
1387          */
1388
1389         if (lp_posix_locking(fsp->conn->params)) {
1390                 bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);
1391
1392                 DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for %s file %s\n",
1393                         (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
1394                         fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));
1395
1396                 if (ret) {
1397                         /* The smblctx of a POSIX lock cannot be determined - use -1 (all ones). */
1398                         *psmblctx = 0xFFFFFFFFFFFFFFFFLL;
1399                         return NT_STATUS_LOCK_NOT_GRANTED;
1400                 }
1401         }
1402
1403         return NT_STATUS_OK;
1404 }
1405
1406
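/****************************************************************************
 VFS dispatcher for cancelling a pending Windows lock: finds the first
 VFS module implementing brl_cancel_windows and invokes it.
****************************************************************************/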
1407 bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
1408                                      struct byte_range_lock *br_lck,
1409                                      struct lock_struct *plock,
1410                                      struct blocking_lock_record *blr)
1411 {
1412         VFS_FIND(brl_cancel_windows);
1413         return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock, blr);
1414 }
1415
1416 /****************************************************************************
1417  Remove a particular pending lock.
1418 ****************************************************************************/
1419 bool brl_lock_cancel(struct byte_range_lock *br_lck,
1420                 uint64_t smblctx,
1421                 struct server_id pid,
1422                 br_off start,
1423                 br_off size,
1424                 enum brl_flavour lock_flav,
1425                 struct blocking_lock_record *blr)
1426 {
1427         bool ret;
1428         struct lock_struct lock;
1429
1430         lock.context.smblctx = smblctx;
1431         lock.context.pid = pid;
1432         lock.context.tid = br_lck->fsp->conn->cnum;
1433         lock.start = start;
1434         lock.size = size;
1435         lock.fnum = br_lck->fsp->fnum;
1436         lock.lock_flav = lock_flav;
1437         /* lock.lock_type doesn't matter */
1438
1439         if (lock_flav == WINDOWS_LOCK) {
1440                 ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
1441                     &lock, blr);
1442         } else {
1443                 ret = brl_lock_cancel_default(br_lck, &lock);
1444         }
1445
1446         return ret;
1447 }
1448
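/****************************************************************************
 Default implementation of pending lock cancel: find the matching pending
 lock in the array and remove it, marking the lock data as modified.
****************************************************************************/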
1449 bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
1450                 struct lock_struct *plock)
1451 {
1452         unsigned int i;
1453         struct lock_struct *locks = br_lck->lock_data;
1454
1455         SMB_ASSERT(plock);
1456
1457         for (i = 0; i < br_lck->num_locks; i++) {
1458                 struct lock_struct *lock = &locks[i];
1459
1460                 /* For pending locks we *always* care about the fnum. */
1461                 if (brl_same_context(&lock->context, &plock->context) &&
1462                                 lock->fnum == plock->fnum &&
1463                                 IS_PENDING_LOCK(lock->lock_type) &&
1464                                 lock->lock_flav == plock->lock_flav &&
1465                                 lock->start == plock->start &&
1466                                 lock->size == plock->size) {
1467                         break;
1468                 }
1469         }
1470
1471         if (i == br_lck->num_locks) {
1472                 /* Didn't find it. */
1473                 return False;
1474         }
1475
1476         if (i < br_lck->num_locks - 1) {
1477                 /* Found this particular pending lock - delete it */
1478                 memmove(&locks[i], &locks[i+1],
1479                         sizeof(*locks)*((br_lck->num_locks-1) - i));
1480         }
1481
1482         br_lck->num_locks -= 1;
1483         br_lck->modified = True;
1484         return True;
1485 }
1486
1487 /****************************************************************************
1488  Remove any locks associated with an open file.
1489  Works on a copy of the lock array, as brl_unlock modifies the
1490  array in place.
1491 ****************************************************************************/
1492
1493 void brl_close_fnum(struct messaging_context *msg_ctx,
1494                     struct byte_range_lock *br_lck)
1495 {
1496         files_struct *fsp = br_lck->fsp;
1497         uint32_t tid = fsp->conn->cnum;
1498         uint64_t fnum = fsp->fnum;
1499         unsigned int i;
1500         struct lock_struct *locks = br_lck->lock_data;
1501         struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
1502         struct lock_struct *locks_copy;
1503         unsigned int num_locks_copy;
1504
1505         /* Copy the current lock array. */
1506         if (br_lck->num_locks) {
1507                 locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
1508                 if (!locks_copy) {
1509                         smb_panic("brl_close_fnum: talloc failed");
1510                 }
1511         } else {
1512                 locks_copy = NULL;
1513         }
1514
1515         num_locks_copy = br_lck->num_locks;
1516
1517         for (i=0; i < num_locks_copy; i++) {
1518                 struct lock_struct *lock = &locks_copy[i];
1519
1520                 if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) &&
1521                                 (lock->fnum == fnum)) {
1522                         brl_unlock(msg_ctx,
1523                                 br_lck,
1524                                 lock->context.smblctx,
1525                                 pid,
1526                                 lock->start,
1527                                 lock->size,
1528                                 lock->lock_flav);
1529                 }
1530         }
1531 }
1532
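/****************************************************************************
 Mark all brlock entries of a disconnected durable handle: stamp each lock
 with a disconnected server_id and invalid tid/fnum so the entries survive
 until a durable reconnect. Returns false, leaving the database unchanged,
 if any entry does not belong to this durable handle.
****************************************************************************/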
1533 bool brl_mark_disconnected(struct files_struct *fsp)
1534 {
1535         uint32_t tid = fsp->conn->cnum;
1536         uint64_t smblctx = fsp->op->global->open_persistent_id;
1537         uint64_t fnum = fsp->fnum;
1538         unsigned int i;
1539         struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
1540         struct byte_range_lock *br_lck = NULL;
1541
1542         if (!fsp->op->global->durable) {
1543                 return false;
1544         }
1545
1546         if (fsp->current_lock_count == 0) {
1547                 return true;
1548         }
1549
1550         br_lck = brl_get_locks(talloc_tos(), fsp);
1551         if (br_lck == NULL) {
1552                 return false;
1553         }
1554
1555         for (i=0; i < br_lck->num_locks; i++) {
1556                 struct lock_struct *lock = &br_lck->lock_data[i];
1557
1558                 /*
1559                  * As this is a durable handle, we only expect locks
1560                  * belonging to the current file handle.
1561                  */
1562
1563                 if (lock->context.smblctx != smblctx) {
1564                         TALLOC_FREE(br_lck);
1565                         return false;
1566                 }
1567
1568                 if (lock->context.tid != tid) {
1569                         TALLOC_FREE(br_lck);
1570                         return false;
1571                 }
1572
1573                 if (!serverid_equal(&lock->context.pid, &self)) {
1574                         TALLOC_FREE(br_lck);
1575                         return false;
1576                 }
1577
1578                 if (lock->fnum != fnum) {
1579                         TALLOC_FREE(br_lck);
1580                         return false;
1581                 }
1582
1583                 server_id_set_disconnected(&lock->context.pid);
1584                 lock->context.tid = TID_FIELD_INVALID;
1585                 lock->fnum = FNUM_FIELD_INVALID;
1586         }
1587
1588         br_lck->modified = true;
1589         TALLOC_FREE(br_lck);
1590         return true;
1591 }
1592
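/****************************************************************************
 Counterpart of brl_mark_disconnected: on durable reconnect, reclaim the
 disconnected brlock entries by stamping them with the new server_id, tid
 and fnum. All entries must still be in the disconnected state, otherwise
 false is returned and nothing is changed.
****************************************************************************/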
1593 bool brl_reconnect_disconnected(struct files_struct *fsp)
1594 {
1595         uint32_t tid = fsp->conn->cnum;
1596         uint64_t smblctx = fsp->op->global->open_persistent_id;
1597         uint64_t fnum = fsp->fnum;
1598         unsigned int i;
1599         struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
1600         struct byte_range_lock *br_lck = NULL;
1601
1602         if (!fsp->op->global->durable) {
1603                 return false;
1604         }
1605
1606         /*
1607          * When reconnecting, we do not want to validate the brlock entries
1608          * and thereby remove our own (disconnected) entries but reactivate
1609          * them instead.
1610          */
1611         fsp->lockdb_clean = true;
1612
1613         br_lck = brl_get_locks(talloc_tos(), fsp);
1614         if (br_lck == NULL) {
1615                 return false;
1616         }
1617
1618         if (br_lck->num_locks == 0) {
1619                 TALLOC_FREE(br_lck);
1620                 return true;
1621         }
1622
1623         for (i=0; i < br_lck->num_locks; i++) {
1624                 struct lock_struct *lock = &br_lck->lock_data[i];
1625
1626                 /*
1627                  * As this is a durable handle, we only expect locks
1628                  * belonging to the current file handle.
1629                  */
1630
1631                 if (lock->context.smblctx != smblctx) {
1632                         TALLOC_FREE(br_lck);
1633                         return false;
1634                 }
1635
1636                 if (lock->context.tid != TID_FIELD_INVALID) {
1637                         TALLOC_FREE(br_lck);
1638                         return false;
1639                 }
1640
1641                 if (!server_id_is_disconnected(&lock->context.pid)) {
1642                         TALLOC_FREE(br_lck);
1643                         return false;
1644                 }
1645
1646                 if (lock->fnum != FNUM_FIELD_INVALID) {
1647                         TALLOC_FREE(br_lck);
1648                         return false;
1649                 }
1650
1651                 lock->context.pid = self;
1652                 lock->context.tid = tid;
1653                 lock->fnum = fnum;
1654         }
1655
1656         fsp->current_lock_count = br_lck->num_locks;
1657         br_lck->modified = true;
1658         TALLOC_FREE(br_lck);
1659         return true;
1660 }
1661
1662 /****************************************************************************
1663  Ensure this set of lock entries is valid.
1664 ****************************************************************************/
1665 static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks,
1666                                   bool keep_disconnected)
1667 {
1668         unsigned int i;
1669         unsigned int num_valid_entries = 0;
1670         struct lock_struct *locks = *pplocks;
1671         TALLOC_CTX *frame = talloc_stackframe();
1672         struct server_id *ids;
1673         bool *exists;
1674
1675         ids = talloc_array(frame, struct server_id, *pnum_entries);
1676         if (ids == NULL) {
1677                 DEBUG(0, ("validate_lock_entries: "
1678                           "talloc_array(struct server_id, %u) failed\n",
1679                           *pnum_entries));
1680                 talloc_free(frame);
1681                 return false;
1682         }
1683
1684         exists = talloc_array(frame, bool, *pnum_entries);
1685         if (exists == NULL) {
1686                 DEBUG(0, ("validate_lock_entries: "
1687                           "talloc_array(bool, %u) failed\n",
1688                           *pnum_entries));
1689                 talloc_free(frame);
1690                 return false;
1691         }
1692
1693         for (i = 0; i < *pnum_entries; i++) {
1694                 ids[i] = locks[i].context.pid;
1695         }
1696
1697         if (!serverids_exist(ids, *pnum_entries, exists)) {
1698                 DEBUG(3, ("validate_lock_entries: serverids_exist failed\n"));
1699                 talloc_free(frame);
1700                 return false;
1701         }
1702
1703         for (i = 0; i < *pnum_entries; i++) {
1704                 if (exists[i]) {
1705                         num_valid_entries++;
1706                         continue;
1707                 }
1708
1709                 if (keep_disconnected &&
1710                     server_id_is_disconnected(&ids[i]))
1711                 {
1712                         num_valid_entries++;
1713                         continue;
1714                 }
1715
1716                 /* This process no longer exists - mark this
1717                    entry as invalid by zeroing it. */
1718                 ZERO_STRUCTP(&locks[i]);
1719         }
1720         TALLOC_FREE(frame);
1721
1722         if (num_valid_entries != *pnum_entries) {
1723                 struct lock_struct *new_lock_data = NULL;
1724
1725                 if (num_valid_entries) {
1726                         new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
1727                         if (!new_lock_data) {
1728                                 DEBUG(3, ("malloc fail\n"));
1729                                 return False;
1730                         }
1731
1732                         num_valid_entries = 0;
1733                         for (i = 0; i < *pnum_entries; i++) {
1734                                 struct lock_struct *lock_data = &locks[i];
1735                                 if (lock_data->context.smblctx &&
1736                                                 lock_data->context.tid) {
1737                                         /* Valid (nonzero) entry - copy it. */
1738                                         memcpy(&new_lock_data[num_valid_entries],
1739                                                 lock_data, sizeof(struct lock_struct));
1740                                         num_valid_entries++;
1741                                 }
1742                         }
1743                 }
1744
1745                 SAFE_FREE(*pplocks);
1746                 *pplocks = new_lock_data;
1747                 *pnum_entries = num_valid_entries;
1748         }
1749
1750         return True;
1751 }
1752
1753 struct brl_forall_cb {
1754         void (*fn)(struct file_id id, struct server_id pid,
1755                    enum brl_type lock_type,
1756                    enum brl_flavour lock_flav,
1757                    br_off start, br_off size,
1758                    void *private_data);
1759         void *private_data;
1760 };
1761
1762 /****************************************************************************
1763  Traverse the whole database with this function, calling traverse_callback
1764  on each lock.
1765 ****************************************************************************/
1766
1767 static int brl_traverse_fn(struct db_record *rec, void *state)
1768 {
1769         struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
1770         struct lock_struct *locks;
1771         struct file_id *key;
1772         unsigned int i;
1773         unsigned int num_locks = 0;
1774         unsigned int orig_num_locks = 0;
1775         TDB_DATA dbkey;
1776         TDB_DATA value;
1777
1778         dbkey = dbwrap_record_get_key(rec);
1779         value = dbwrap_record_get_value(rec);
1780
1781         /* In a traverse function we must make a copy of the
1782            record's value before modifying it. */
1783
1784         locks = (struct lock_struct *)memdup(value.dptr, value.dsize);
1785         if (!locks) {
1786                 return -1; /* Terminate traversal. */
1787         }
1788
1789         key = (struct file_id *)dbkey.dptr;
1790         orig_num_locks = num_locks = value.dsize/sizeof(*locks);
1791
1792         /* Ensure the lock db is clean of entries from invalid processes. */
1793
1794         if (!validate_lock_entries(&num_locks, &locks, true)) {
1795                 SAFE_FREE(locks);
1796                 return -1; /* Terminate traversal */
1797         }
1798
1799         if (orig_num_locks != num_locks) {
1800                 if (num_locks) {
1801                         TDB_DATA data;
1802                         data.dptr = (uint8_t *)locks;
1803                         data.dsize = num_locks*sizeof(struct lock_struct);
1804                         dbwrap_record_store(rec, data, TDB_REPLACE);
1805                 } else {
1806                         dbwrap_record_delete(rec);
1807                 }
1808         }
1809
1810         if (cb->fn) {
1811                 for (i = 0; i < num_locks; i++) {
1812                         cb->fn(*key,
1813                                 locks[i].context.pid,
1814                                 locks[i].lock_type,
1815                                 locks[i].lock_flav,
1816                                 locks[i].start,
1817                                 locks[i].size,
1818                                 cb->private_data);
1819                 }
1820         }
1821
1822         SAFE_FREE(locks);
1823         return 0;
1824 }
1825
1826 /*******************************************************************
1827  Call the specified function on each lock in the database.
1828 ********************************************************************/
1829
1830 int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
1831                           enum brl_type lock_type,
1832                           enum brl_flavour lock_flav,
1833                           br_off start, br_off size,
1834                           void *private_data),
1835                void *private_data)
1836 {
1837         struct brl_forall_cb cb;
1838         NTSTATUS status;
1839         int count = 0;
1840
1841         if (!brlock_db) {
1842                 return 0;
1843         }
1844         cb.fn = fn;
1845         cb.private_data = private_data;
1846         status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);
1847
1848         if (!NT_STATUS_IS_OK(status)) {
1849                 return -1;
1850         } else {
1851                 return count;
1852         }
1853 }
1854
1855 /*******************************************************************
1856  Store a potentially modified set of byte range lock data back into
1857  the database.
1858  Unlock the record.
1859 ********************************************************************/
1860
1861 static void byte_range_lock_flush(struct byte_range_lock *br_lck)
1862 {
1863         if (br_lck->read_only) {
1864                 SMB_ASSERT(!br_lck->modified);
1865         }
1866
1867         if (!br_lck->modified) {
1868                 goto done;
1869         }
1870
1871         if (br_lck->num_locks == 0) {
1872                 /* No locks - delete this entry. */
1873                 NTSTATUS status = dbwrap_record_delete(br_lck->record);
1874                 if (!NT_STATUS_IS_OK(status)) {
1875                         DEBUG(0, ("delete_rec returned %s\n",
1876                                   nt_errstr(status)));
1877                         smb_panic("Could not delete byte range lock entry");
1878                 }
1879         } else {
1880                 TDB_DATA data;
1881                 NTSTATUS status;
1882
1883                 data.dptr = (uint8_t *)br_lck->lock_data;
1884                 data.dsize = br_lck->num_locks * sizeof(struct lock_struct);
1885
1886                 status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
1887                 if (!NT_STATUS_IS_OK(status)) {
1888                         DEBUG(0, ("store returned %s\n", nt_errstr(status)));
1889                         smb_panic("Could not store byte range lock entry");
1890                 }
1891         }
1892
1893  done:
1894
1895         br_lck->read_only = true;
1896         br_lck->modified = false;
1897
1898         TALLOC_FREE(br_lck->record);
1899 }
1900
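/****************************************************************************
 Destructor: flush any pending modifications back to the database and
 free the malloc'ed lock array.
****************************************************************************/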
1901 static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
1902 {
1903         byte_range_lock_flush(br_lck);
1904         SAFE_FREE(br_lck->lock_data);
1905         return 0;
1906 }
1907
1908 /*******************************************************************
1909  Fetch a set of byte range lock data from the database.
1910  Leave the record locked.
1911  TALLOC_FREE(brl) will release the lock in the destructor.
1912 ********************************************************************/
1913
1914 static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
1915                                         files_struct *fsp, bool read_only)
1916 {
1917         TDB_DATA key, data;
1918         struct byte_range_lock *br_lck = talloc(mem_ctx, struct byte_range_lock);
1919         bool do_read_only = read_only;
1920
1921         if (br_lck == NULL) {
1922                 return NULL;
1923         }
1924
1925         br_lck->fsp = fsp;
1926         br_lck->num_locks = 0;
1927         br_lck->modified = False;
1928         br_lck->key = fsp->file_id;
1929
1930         key.dptr = (uint8_t *)&br_lck->key;
1931         key.dsize = sizeof(struct file_id);
1932
1933         if (!fsp->lockdb_clean) {
1934                 /* We must be read/write to clean
1935                    the dead entries. */
1936                 do_read_only = false;
1937         }
1938
1939         if (do_read_only) {
1940                 NTSTATUS status;
1941                 status = dbwrap_fetch(brlock_db, br_lck, key, &data);
1942                 if (!NT_STATUS_IS_OK(status)) {
1943                         DEBUG(3, ("Could not fetch byte range lock record\n"));
1944                         TALLOC_FREE(br_lck);
1945                         return NULL;
1946                 }
1947                 br_lck->record = NULL;
1948         } else {
1949                 br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);
1950
1951                 if (br_lck->record == NULL) {
1952                         DEBUG(3, ("Could not lock byte range lock entry\n"));
1953                         TALLOC_FREE(br_lck);
1954                         return NULL;
1955                 }
1956
1957                 data = dbwrap_record_get_value(br_lck->record);
1958         }
1959
1960         br_lck->read_only = do_read_only;
1961         br_lck->lock_data = NULL;
1962
1963         talloc_set_destructor(br_lck, byte_range_lock_destructor);
1964
1965         br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
1966
1967         if (br_lck->num_locks != 0) {
1968                 br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
1969                                                      br_lck->num_locks);
1970                 if (br_lck->lock_data == NULL) {
1971                         DEBUG(0, ("malloc failed\n"));
1972                         TALLOC_FREE(br_lck);
1973                         return NULL;
1974                 }
1975
1976                 memcpy(br_lck->lock_data, data.dptr, data.dsize);
1977         }
1978
1979         if (!fsp->lockdb_clean) {
1980                 int orig_num_locks = br_lck->num_locks;
1981
1982                 /*
1983                  * This is the first time we access the byte range lock
1984                  * record with this fsp. Go through and ensure all entries
1985                  * are valid - remove any that don't.
1986                  * This makes the lockdb self cleaning at low cost.
1987                  *
1988                  * Note: Disconnected entries belong to disconnected
1989                  * durable handles. So at this point, we have a new
1990                  * handle on the file and the disconnected durable has
1991                  * already been closed (we are not a durable reconnect).
1992                  * So we need to clean the disconnected brl entry.
1993                  */
1994
1995                 if (!validate_lock_entries(&br_lck->num_locks,
1996                                            &br_lck->lock_data, false)) {
1997                         SAFE_FREE(br_lck->lock_data);
1998                         TALLOC_FREE(br_lck);
1999                         return NULL;
2000                 }
2001
2002                 /* Ensure invalid locks are cleaned up in the destructor. */
2003                 if (orig_num_locks != br_lck->num_locks) {
2004                         br_lck->modified = True;
2005                 }
2006
2007                 /* Mark the lockdb as "clean" as seen from this open file. */
2008                 fsp->lockdb_clean = True;
2009         }
2010
2011         if (DEBUGLEVEL >= 10) {
2012                 unsigned int i;
2013                 struct lock_struct *locks = br_lck->lock_data;
2014                 DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
2015                           br_lck->num_locks,
2016                           file_id_string_tos(&fsp->file_id)));
2017                 for (i = 0; i < br_lck->num_locks; i++) {
2018                         print_lock_struct(i, &locks[i]);
2019                 }
2020         }
2021
2022         if (do_read_only != read_only) {
2023                 /*
2024                  * This stores the record and releases the write
2025                  * lock that was needed for the cleanup.
2026                  */
2027                 byte_range_lock_flush(br_lck);
2028         }
2029
2030         return br_lck;
2031 }
2032
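/****************************************************************************
 Fetch the byte range lock data read/write, keeping the record locked
 until the returned struct is freed.
****************************************************************************/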
2033 struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
2034                                         files_struct *fsp)
2035 {
2036         return brl_get_locks_internal(mem_ctx, fsp, False);
2037 }
2038
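/****************************************************************************
 Fetch the byte range lock data read-only. Outside of clustering the
 result is cached on the fsp and refetched only when the brlock.tdb
 sequence number changes.
****************************************************************************/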
2039 struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
2040 {
2041         struct byte_range_lock *br_lock;
2042
2043         if (lp_clustering()) {
2044                 return brl_get_locks_internal(talloc_tos(), fsp, true);
2045         }
2046
2047         if ((fsp->brlock_rec != NULL)
2048             && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
2049                 return fsp->brlock_rec;
2050         }
2051
2052         TALLOC_FREE(fsp->brlock_rec);
2053
2054         br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
2055         if (br_lock == NULL) {
2056                 return NULL;
2057         }
2058         fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);
2059
2060         fsp->brlock_rec = talloc_move(fsp, &br_lock);
2061
2062         return fsp->brlock_rec;
2063 }
2064
2065 struct brl_revalidate_state {
2066         ssize_t array_size;
2067         uint32_t num_pids;
2068         struct server_id *pids;
2069 };
2070
2071 /*
2072  * Collect PIDs of all processes with pending entries
2073  */
2074
2075 static void brl_revalidate_collect(struct file_id id, struct server_id pid,
2076                                    enum brl_type lock_type,
2077                                    enum brl_flavour lock_flav,
2078                                    br_off start, br_off size,
2079                                    void *private_data)
2080 {
2081         struct brl_revalidate_state *state =
2082                 (struct brl_revalidate_state *)private_data;
2083
2084         if (!IS_PENDING_LOCK(lock_type)) {
2085                 return;
2086         }
2087
2088         add_to_large_array(state, sizeof(pid), (void *)&pid,
2089                            &state->pids, &state->num_pids,
2090                            &state->array_size);
2091 }
2092
2093 /*
2094  * qsort callback to sort the processes
2095  */
2096
2097 static int compare_procids(const void *p1, const void *p2)
2098 {
2099         const struct server_id *i1 = (const struct server_id *)p1;
2100         const struct server_id *i2 = (const struct server_id *)p2;
2101
2102         if (i1->pid < i2->pid) return -1;
2103         if (i1->pid > i2->pid) return 1;
2104         return 0;
2105 }
2106
2107 /*
2108  * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
2109  * locks so that they retry. Mainly used in the cluster code after a node has
2110  * died.
2111  *
2112  * Done in two steps to avoid double-sends: First we collect all entries in an
2113  * array, then qsort that array and only send to non-dupes.
2114  */
2115
2116 void brl_revalidate(struct messaging_context *msg_ctx,
2117                     void *private_data,
2118                     uint32_t msg_type,
2119                     struct server_id server_id,
2120                     DATA_BLOB *data)
2121 {
2122         struct brl_revalidate_state *state;
2123         uint32_t i;
2124         struct server_id last_pid;
2125
2126         if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
2127                 DEBUG(0, ("talloc failed\n"));
2128                 return;
2129         }
2130
2131         brl_forall(brl_revalidate_collect, state);
2132
2133         if (state->array_size == -1) {
2134                 DEBUG(0, ("talloc failed\n"));
2135                 goto done;
2136         }
2137
2138         if (state->num_pids == 0) {
2139                 goto done;
2140         }
2141
2142         TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);
2143
2144         ZERO_STRUCT(last_pid);
2145
2146         for (i=0; i<state->num_pids; i++) {
2147                 if (serverid_equal(&last_pid, &state->pids[i])) {
2148                         /*
2149                          * We've seen that one already
2150                          */
2151                         continue;
2152                 }
2153
2154                 messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
2155                                &data_blob_null);
2156                 last_pid = state->pids[i];
2157         }
2158
2159  done:
2160         TALLOC_FREE(state);
2161         return;
2162 }
2163
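/****************************************************************************
 Remove the brlock record of a file if all of its entries belong to the
 given disconnected persistent open. Fails without modifying anything if
 any entry is still in use or belongs to a different open.
****************************************************************************/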
2164 bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
2165 {
2166         bool ret = false;
2167         TALLOC_CTX *frame = talloc_stackframe();
2168         TDB_DATA key, val;
2169         struct db_record *rec;
2170         struct lock_struct *lock;
2171         unsigned n, num;
2172         NTSTATUS status;
2173
2174         key = make_tdb_data((void*)&fid, sizeof(fid));
2175
2176         rec = dbwrap_fetch_locked(brlock_db, frame, key);
2177         if (rec == NULL) {
2178                 DEBUG(5, ("brl_cleanup_disconnected: failed to fetch record "
2179                           "for file %s\n", file_id_string(frame, &fid)));
2180                 goto done;
2181         }
2182
2183         val = dbwrap_record_get_value(rec);
2184         lock = (struct lock_struct*)val.dptr;
2185         num = val.dsize / sizeof(struct lock_struct);
2186         if (lock == NULL) {
2187                 DEBUG(10, ("brl_cleanup_disconnected: no byte range locks for "
2188                            "file %s\n", file_id_string(frame, &fid)));
2189                 ret = true;
2190                 goto done;
2191         }
2192
2193         for (n=0; n<num; n++) {
2194                 struct lock_context *ctx = &lock[n].context;
2195
2196                 if (!server_id_is_disconnected(&ctx->pid)) {
2197                         DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
2198                                   "%s used by server %s, do not cleanup\n",
2199                                   file_id_string(frame, &fid),
2200                                   server_id_str(frame, &ctx->pid)));
2201                         goto done;
2202                 }
2203
2204                 if (ctx->smblctx != open_persistent_id) {
2205                         DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
2206                                   "%s expected smblctx %llu but found %llu"
2207                                   ", do not cleanup\n",
2208                                   file_id_string(frame, &fid),
2209                                   (unsigned long long)open_persistent_id,
2210                                   (unsigned long long)ctx->smblctx));
2211                         goto done;
2212                 }
2213         }
2214
2215         status = dbwrap_record_delete(rec);
2216         if (!NT_STATUS_IS_OK(status)) {
2217                 DEBUG(5, ("brl_cleanup_disconnected: failed to delete record "
2218                           "for file %s from %s, open %llu: %s\n",
2219                           file_id_string(frame, &fid), dbwrap_name(brlock_db),
2220                           (unsigned long long)open_persistent_id,
2221                           nt_errstr(status)));
2222                 goto done;
2223         }
2224
2225         DEBUG(10, ("brl_cleanup_disconnected: "
2226                    "file %s cleaned up %u entries from open %llu\n",
2227                    file_id_string(frame, &fid), num,
2228                    (unsigned long long)open_persistent_id));
2229
2230         ret = true;
2231 done:
2232         talloc_free(frame);
2233         return ret;
2234 }