410fb3fc0bdea667a02e7eb25bed82a7aa437e74
[samba.git] / source3 / locking / brlock.c
1 /* 
2    Unix SMB/Netbios implementation.
3    Version 3.0
4    byte range locking code
5    Updated to handle range splits/merges.
6
7    Copyright (C) Andrew Tridgell 1992-2000
8    Copyright (C) Jeremy Allison 1992-2000
9    
10    This program is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2 of the License, or
13    (at your option) any later version.
14    
15    This program is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19    
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25 /* This module implements a tdb based byte range locking service,
26    replacing the fcntl() based byte range locking previously
27    used. This allows us to provide the same semantics as NT */
28
29 #include "includes.h"
30
31 extern int DEBUGLEVEL;
32
33 /* This contains elements that differentiate locks. The smbpid is a
34    client supplied pid, and is essentially the locking context for
35    this client */
36
37 struct lock_context {
38         uint16 smbpid;  /* Client-supplied SMB pid - distinguishes lock owners within one smbd. */
39         uint16 tid;     /* Tree (share connection) id the lock was taken on. */
40         pid_t pid;      /* Unix process id of the owning smbd. */
41 };
42
43 /* The data in brlock records is an unsorted linear array of these
44    records.  It is unnecessary to store the count as tdb provides the
45    size of the record */
46
47 struct lock_struct {
48         struct lock_context context;  /* Who owns this lock (smbpid/tid/pid triple). */
49         br_off start;                 /* First byte of the locked range. */
50         br_off size;                  /* Length of the locked range in bytes. */
51         int fnum;                     /* File handle the lock was taken on - used for cleanup on close. */
52         enum brl_type lock_type;      /* READ_LOCK or WRITE_LOCK. */
53 };
54
55 /* The key used in the brlock database. */
56
57 struct lock_key {
58         SMB_DEV_T device;  /* Device the file lives on. */
59         SMB_INO_T inode;   /* Inode number - device+inode uniquely name the file. */
60 };
61 /* NOTE(review): tdb compares keys bytewise, and SMB_DEV_T/SMB_INO_T are
62    platform-dependent so this struct may contain padding - callers should
63    zero the whole struct before filling it in; verify each call site. */
61
62 /* The open brlock.tdb database. */
63
64 /* Per-process handle, opened once by brl_init() and shared by all routines here. */
64 static TDB_CONTEXT *tdb;
65
66 /****************************************************************************
67  See if two locking contexts are equal.
68 ****************************************************************************/
69
70 static BOOL brl_same_context(struct lock_context *ctx1, 
71                              struct lock_context *ctx2)
72 {
73         return (ctx1->pid == ctx2->pid) &&
74                 (ctx1->smbpid == ctx2->smbpid) &&
75                 (ctx1->tid == ctx2->tid);
76 }
77
78 /****************************************************************************
79  See if lock2 can be added when lock1 is in place.
80 ****************************************************************************/
81
82 static BOOL brl_conflict(struct lock_struct *lck1, 
83                          struct lock_struct *lck2)
84 {
85         if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) 
86                 return False;
87
88         if (brl_same_context(&lck1->context, &lck2->context) &&
89             lck2->lock_type == READ_LOCK) return False;
90
91         if (lck1->start >= (lck2->start + lck2->size) ||
92             lck2->start >= (lck1->start + lck1->size)) return False;
93             
94         return True;
95
96
97
98 /****************************************************************************
99  Open up the brlock.tdb database.
100 ****************************************************************************/
101
102 void brl_init(int read_only)
103 {
104         if (tdb) return;
105         tdb = tdb_open(lock_path("brlock.tdb"), 0, TDB_CLEAR_IF_FIRST, 
106                        read_only?O_RDONLY:O_RDWR|O_CREAT, 0644);
107         if (!tdb) {
108                 DEBUG(0,("Failed to open byte range locking database\n"));
109         }
110 }
111
112
113 /****************************************************************************
114  Lock a range of bytes.
115 ****************************************************************************/
116
117 BOOL brl_lock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
118               uint16 smbpid, pid_t pid, uint16 tid,
119               br_off start, br_off size, 
120               enum brl_type lock_type)
121 {
122         struct lock_key key;
123         TDB_DATA kbuf, dbuf;
124         int count, i;
125         struct lock_struct lock, *locks;
126
127         key.device = dev;
128         key.inode = ino;
129         kbuf.dptr = (char *)&key;
130         kbuf.dsize = sizeof(key);
131
132         dbuf.dptr = NULL;
133
134         tdb_lockchain(tdb, kbuf);
135         dbuf = tdb_fetch(tdb, kbuf);
136
137         lock.context.smbpid = smbpid;
138         lock.context.pid = pid;
139         lock.context.tid = tid;
140         lock.start = start;
141         lock.size = size;
142         lock.fnum = fnum;
143         lock.lock_type = lock_type;
144
145         if (dbuf.dptr) {
146                 /* there are existing locks - make sure they don't conflict */
147                 locks = (struct lock_struct *)dbuf.dptr;
148                 count = dbuf.dsize / sizeof(*locks);
149                 for (i=0; i<count; i++) {
150                         if (brl_conflict(&locks[i], &lock)) {
151                                 goto fail;
152                         }
153                 }
154         }
155
156         /* no conflicts - add it to the list of locks */
157         dbuf.dptr = Realloc(dbuf.dptr, dbuf.dsize + sizeof(*locks));
158         if (!dbuf.dptr) goto fail;
159         memcpy(dbuf.dptr + dbuf.dsize, &lock, sizeof(lock));
160         dbuf.dsize += sizeof(lock);
161         tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
162
163         free(dbuf.dptr);
164         tdb_unlockchain(tdb, kbuf);
165         return True;
166
167  fail:
168         if (dbuf.dptr) free(dbuf.dptr);
169         tdb_unlockchain(tdb, kbuf);
170         return False;
171 }
172
173 /****************************************************************************
174  Create a list of lock ranges that don't overlap a given range. Used in calculating
175  POSIX lock unlocks. This is a difficult function that requires ASCII art to
176  understand it :-).
177 ****************************************************************************/
178
179 struct unlock_list *brl_unlock_list(TALLOC_CTX *ctx, struct unlock_list *ulhead,
180                                                         pid_t pid, SMB_DEV_T dev, SMB_INO_T ino)
181 {
182         struct lock_key key;
183         TDB_DATA kbuf, dbuf;
184         struct lock_struct *locks;
185         int num_locks, i;
186
187         /*
188          * Setup the key for this fetch.
189          */
190         key.device = dev;
191         key.inode = ino;
192         kbuf.dptr = (char *)&key;
193         kbuf.dsize = sizeof(key);
194
195         dbuf.dptr = NULL;
196
197         tdb_lockchain(tdb, kbuf);
198         dbuf = tdb_fetch(tdb, kbuf);
199
200         if (!dbuf.dptr) {
201                 tdb_unlockchain(tdb, kbuf);
202                 return ulhead;
203         }
204         
205         locks = (struct lock_struct *)dbuf.dptr;
206         num_locks = dbuf.dsize / sizeof(*locks);
207
208         /*
209          * Check the current lock list on this dev/inode pair.
210          * Quit if the list is deleted.
211          */
212
213         DEBUG(10,("brl_unlock_list: curr: start=%.0f,size=%.0f\n",
214                 (double)ulhead->start, (double)ulhead->size ));
215
216         for (i=0; i<num_locks && ulhead; i++) {
217
218                 struct lock_struct *lock = &locks[i];
219                 struct unlock_list *ul_curr;
220
221                 /* If it's not this process, ignore it. */
222                 if (lock->context.pid != pid)
223                         continue;
224
225                 /*
226                  * Walk the unlock list, checking for overlaps. Note that
227                  * the unlock list can expand within this loop if the current
228                  * range being examined needs to be split.
229                  */
230
231                 for (ul_curr = ulhead; ul_curr;) {
232
233                         DEBUG(10,("brl_unlock_list: lock: start=%.0f,size=%.0f:",
234                                 (double)lock->start, (double)lock->size ));
235
236                         if ( (ul_curr->start >= (lock->start + lock->size)) ||
237                                  (lock->start > (ul_curr->start + ul_curr->size))) {
238
239                                 /* No overlap with this lock - leave this range alone. */
240 /*********************************************
241                                              +---------+
242                                              | ul_curr |
243                                              +---------+
244                                 +-------+
245                                 | lock  |
246                                 +-------+
247 OR....
248              +---------+
249              | ul_curr |
250              +---------+
251 **********************************************/
252
253                                 DEBUG(10,("no overlap case.\n" ));
254
255                                 ul_curr = ul_curr->next;
256
257                         } else if ( (ul_curr->start >= lock->start) &&
258                                                 (ul_curr->start + ul_curr->size <= lock->start + lock->size) ) {
259
260                                 /*
261                                  * This unlock is completely overlapped by this existing lock range
262                                  * and thus should have no effect (not be unlocked). Delete it from the list.
263                                  */
264 /*********************************************
265                 +---------+
266                 | ul_curr |
267                 +---------+
268         +---------------------------+
269         |       lock                |
270         +---------------------------+
271 **********************************************/
272                                 /* Save the next pointer */
273                                 struct unlock_list *ul_next = ul_curr->next;
274
275                                 DEBUG(10,("delete case.\n" ));
276
277                                 DLIST_REMOVE(ulhead, ul_curr);
278                                 if(ulhead == NULL)
279                                         break; /* No more list... */
280
281                                 ul_curr = ul_next;
282                                 
283                         } else if ( (ul_curr->start >= lock->start) &&
284                                                 (ul_curr->start < lock->start + lock->size) &&
285                                                 (ul_curr->start + ul_curr->size > lock->start + lock->size) ) {
286
287                                 /*
288                                  * This unlock overlaps the existing lock range at the high end.
289                                  * Truncate by moving start to existing range end and reducing size.
290                                  */
291 /*********************************************
292                 +---------------+
293                 | ul_curr       |
294                 +---------------+
295         +---------------+
296         |    lock       |
297         +---------------+
298 BECOMES....
299                         +-------+
300                         |ul_curr|
301                         +-------+
302 **********************************************/
303
304                                 ul_curr->size = (ul_curr->start + ul_curr->size) - (lock->start + lock->size);
305                                 ul_curr->start = lock->start + lock->size;
306
307                                 DEBUG(10,("truncate high case: start=%.0f,size=%.0f\n",
308                                                                 (double)ul_curr->start, (double)ul_curr->size ));
309
310                                 ul_curr = ul_curr->next;
311
312                         } else if ( (ul_curr->start < lock->start) &&
313                                                 (ul_curr->start + ul_curr->size > lock->start) ) {
314
315                                 /*
316                                  * This unlock overlaps the existing lock range at the low end.
317                                  * Truncate by reducing size.
318                                  */
319 /*********************************************
320    +---------------+
321    | ul_curr       |
322    +---------------+
323            +---------------+
324            |    lock       |
325            +---------------+
326 BECOMES....
327    +-------+
328    |ul_curr|
329    +-------+
330 **********************************************/
331
332                                 ul_curr->size = lock->start - ul_curr->start;
333
334                                 DEBUG(10,("truncate low case: start=%.0f,size=%.0f\n",
335                                                                 (double)ul_curr->start, (double)ul_curr->size ));
336
337                                 ul_curr = ul_curr->next;
338                 
339                         } else if ( (ul_curr->start < lock->start) &&
340                                                 (ul_curr->start + ul_curr->size > lock->start + lock->size) ) {
341                                 /*
342                                  * Worst case scenario. Unlock request completely overlaps an existing
343                                  * lock range. Split the request into two, push the new (upper) request
344                                  * into the dlink list, and continue with the entry after ul_new (as we
345                                  * know that ul_new will not overlap with this lock).
346                                  */
347 /*********************************************
348         +---------------------------+
349         |       ul_curr             |
350         +---------------------------+
351                 +---------+
352                 | lock    |
353                 +---------+
354 BECOMES.....
355         +-------+         +---------+
356         |ul_curr|         |ul_new   |
357         +-------+         +---------+
358 **********************************************/
359                                 struct unlock_list *ul_new = (struct unlock_list *)talloc(ctx,
360                                                                                                         sizeof(struct unlock_list));
361
362                                 if(ul_new == NULL) {
363                                         DEBUG(0,("brl_unlock_list: talloc fail.\n"));
364                                         return NULL; /* The talloc_destroy takes care of cleanup. */
365                                 }
366
367                                 ZERO_STRUCTP(ul_new);
368                                 ul_new->start = lock->start + lock->size;
369                                 ul_new->size = ul_curr->start + ul_curr->size - ul_new->start;
370                                 ul_new->smbpid = ul_curr->smbpid;
371
372                                 /* Add into the dlink list after the ul_curr point - NOT at ulhead. */
373                                 DLIST_ADD(ul_curr, ul_new);
374
375                                 /* Truncate the ul_curr. */
376                                 ul_curr->size = lock->start - ul_curr->start;
377
378                                 DEBUG(10,("split case: curr: start=%.0f,size=%.0f \
379 new: start=%.0f,size=%.0f\n", (double)ul_curr->start, (double)ul_curr->size,
380                                                                 (double)ul_new->start, (double)ul_new->size ));
381
382                                 ul_curr = ul_new->next;
383
384                         } else {
385
386                                 /*
387                                  * This logic case should never happen. Ensure this is the
388                                  * case by forcing an abort.... Remove in production.
389                                  */
390
391                                 smb_panic("logic flaw in cases...\n");
392                         }
393                 } /* end for ( ul_curr = ulhead; ul_curr;) */
394         } /* end for (i=0; i<num_locks && ul_head; i++) */
395
396         tdb_unlockchain(tdb, kbuf);
397
398         if (dbuf.dptr)
399                 free(dbuf.dptr);
400         
401         return ulhead;
402 }
403
404 /****************************************************************************
405  Unlock a range of bytes.
406 ****************************************************************************/
407
408 BOOL brl_unlock(SMB_DEV_T dev, SMB_INO_T ino, int fnum,
409                 uint16 smbpid, pid_t pid, uint16 tid,
410                 br_off start, br_off size)
411 {
412         struct lock_key key;
413         TDB_DATA kbuf, dbuf;
414         int count, i;
415         struct lock_struct *locks;
416         struct lock_context context;
417
418         key.device = dev;
419         key.inode = ino;
420         kbuf.dptr = (char *)&key;
421         kbuf.dsize = sizeof(key);
422
423         dbuf.dptr = NULL;
424
425         tdb_lockchain(tdb, kbuf);
426         dbuf = tdb_fetch(tdb, kbuf);
427
428         if (!dbuf.dptr) {
429                 DEBUG(0,("brl_unlock: tdb_fetch failed !\n"));
430                 goto fail;
431         }
432
433         context.smbpid = smbpid;
434         context.pid = pid;
435         context.tid = tid;
436
437         /* there are existing locks - find a match */
438         locks = (struct lock_struct *)dbuf.dptr;
439         count = dbuf.dsize / sizeof(*locks);
440         for (i=0; i<count; i++) {
441
442                 struct lock_struct *lock = &locks[i];
443
444 #if 0
445                 /* JRATEST - DEBUGGING INFO */
446                 if(!brl_same_context(&lock->context, &context)) {
447                         DEBUG(10,("brl_unlock: Not same context. l_smbpid = %u, l_pid = %u, l_tid = %u: \
448 smbpid = %u, pid = %u, tid = %u\n",
449                                 lock->context.smbpid, lock->context.pid, lock->context.tid,
450                                 context.smbpid, context.pid, context.tid ));
451
452                 }
453                 /* JRATEST */
454 #endif
455
456                 if (brl_same_context(&lock->context, &context) &&
457                     lock->fnum == fnum &&
458                     lock->start == start &&
459                     lock->size == size) {
460                         /* found it - delete it */
461                         if (count == 1) {
462                                 tdb_delete(tdb, kbuf);
463                         } else {
464                                 if (i < count-1) {
465                                         memmove(&locks[i], &locks[i+1], 
466                                                 sizeof(*locks)*((count-1) - i));
467                                 }
468                                 dbuf.dsize -= sizeof(*locks);
469                                 tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
470                         }
471
472                         free(dbuf.dptr);
473                         tdb_unlockchain(tdb, kbuf);
474                         return True;
475                 }
476         }
477
478         /* we didn't find it */
479
480  fail:
481         if (dbuf.dptr) free(dbuf.dptr);
482         tdb_unlockchain(tdb, kbuf);
483         return False;
484 }
485
486 /****************************************************************************
487  Test if we could add a lock if we wanted to.
488 ****************************************************************************/
489
490 BOOL brl_locktest(SMB_DEV_T dev, SMB_INO_T ino, 
491                   uint16 smbpid, pid_t pid, uint16 tid,
492                   br_off start, br_off size, 
493                   enum brl_type lock_type)
494 {
495         struct lock_key key;
496         TDB_DATA kbuf, dbuf;
497         int count, i;
498         struct lock_struct lock, *locks;
499
500         key.device = dev;
501         key.inode = ino;
502         kbuf.dptr = (char *)&key;
503         kbuf.dsize = sizeof(key);
504
505         dbuf.dptr = NULL;
506
507         tdb_lockchain(tdb, kbuf);
508         dbuf = tdb_fetch(tdb, kbuf);
509
510         lock.context.smbpid = smbpid;
511         lock.context.pid = pid;
512         lock.context.tid = tid;
513         lock.start = start;
514         lock.size = size;
515         lock.lock_type = lock_type;
516
517         if (dbuf.dptr) {
518                 /* there are existing locks - make sure they don't conflict */
519                 locks = (struct lock_struct *)dbuf.dptr;
520                 count = dbuf.dsize / sizeof(*locks);
521                 for (i=0; i<count; i++) {
522                         if (brl_conflict(&locks[i], &lock)) {
523                                 goto fail;
524                         }
525                 }
526         }
527
528         /* no conflicts - we could have added it */
529         free(dbuf.dptr);
530         tdb_unlockchain(tdb, kbuf);
531         return True;
532
533  fail:
534         if (dbuf.dptr) free(dbuf.dptr);
535         tdb_unlockchain(tdb, kbuf);
536         return False;
537 }
538
539 /****************************************************************************
540  Remove any locks associated with a open file.
541 ****************************************************************************/
542
543 void brl_close(SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
544 {
545         struct lock_key key;
546         TDB_DATA kbuf, dbuf;
547         int count, i;
548         struct lock_struct *locks;
549
550         key.device = dev;
551         key.inode = ino;
552         kbuf.dptr = (char *)&key;
553         kbuf.dsize = sizeof(key);
554
555         dbuf.dptr = NULL;
556
557         tdb_lockchain(tdb, kbuf);
558         dbuf = tdb_fetch(tdb, kbuf);
559
560         if (!dbuf.dptr) goto fail;
561
562         /* there are existing locks - remove any for this fnum */
563         locks = (struct lock_struct *)dbuf.dptr;
564         count = dbuf.dsize / sizeof(*locks);
565         for (i=0; i<count; i++) {
566                 struct lock_struct *lock = &locks[i];
567
568                 if (lock->context.tid == tid &&
569                     lock->context.pid == pid &&
570                     lock->fnum == fnum) {
571                         /* found it - delete it */
572                         if (count > 1 && i < count-1) {
573                                 memmove(&locks[i], &locks[i+1], 
574                                         sizeof(*locks)*((count-1) - i));
575                         }
576                         count--;
577                         i--;
578                 }
579         }
580
581         if (count == 0) {
582                 tdb_delete(tdb, kbuf);
583         } else if (count < (dbuf.dsize / sizeof(*locks))) {
584                 tdb_store(tdb, kbuf, dbuf, TDB_REPLACE);
585         }
586
587         /* we didn't find it */
588  fail:
589         if (dbuf.dptr) free(dbuf.dptr);
590         tdb_unlockchain(tdb, kbuf);
591 }
592
593 /****************************************************************************
594  Return a lock list associated with an open file.
595 ****************************************************************************/
596
597 struct unlock_list *brl_getlocklist( TALLOC_CTX *ctx, SMB_DEV_T dev, SMB_INO_T ino, pid_t pid, int tid, int fnum)
598 {
599         struct lock_key key;
600         TDB_DATA kbuf, dbuf;
601         int i, count;
602         struct lock_struct *locks;
603         struct unlock_list *ulist = NULL;
604
605         key.device = dev;
606         key.inode = ino;
607         kbuf.dptr = (char *)&key;
608         kbuf.dsize = sizeof(key);
609
610         dbuf.dptr = NULL;
611
612         tdb_lockchain(tdb, kbuf);
613         dbuf = tdb_fetch(tdb, kbuf);
614
615         if (!dbuf.dptr) {
616                 tdb_unlockchain(tdb, kbuf);
617                 return NULL;
618         }
619
620         /* There are existing locks - allocate an entry for each one. */
621         locks = (struct lock_struct *)dbuf.dptr;
622         count = dbuf.dsize / sizeof(*locks);
623
624         for (i=0; i<count; i++) {
625                 struct lock_struct *lock = &locks[i];
626
627                 if (lock->context.tid == tid &&
628                     lock->context.pid == pid &&
629                     lock->fnum == fnum) {
630
631                                 struct unlock_list *ul_new = (struct unlock_list *)talloc(ctx,
632                                                                                                         sizeof(struct unlock_list));
633
634                                 if(ul_new == NULL) {
635                                         DEBUG(0,("brl_getlocklist: talloc fail.\n"));
636                                         return NULL; /* The talloc_destroy takes care of cleanup. */
637                                 }
638
639                                 ZERO_STRUCTP(ul_new);
640                                 ul_new->start = lock->start;
641                                 ul_new->size = lock->size;
642                                 ul_new->smbpid = lock->context.smbpid;
643
644                                 DLIST_ADD(ulist, ul_new);
645                 }
646         }
647
648         if (dbuf.dptr)
649                 free(dbuf.dptr);
650         tdb_unlockchain(tdb, kbuf);
651
652         return ulist;
653 }
654
655
656 /****************************************************************************
657  Traverse the whole database with this function, calling traverse_callback
658  on each lock.
659 ****************************************************************************/
660
661 static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
662 {
663         struct lock_struct *locks;
664         struct lock_key *key;
665         int i;
666
667         BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;
668
669         locks = (struct lock_struct *)dbuf.dptr;
670         key = (struct lock_key *)kbuf.dptr;
671
672         for (i=0;i<dbuf.dsize/sizeof(*locks);i++) {
673                 traverse_callback(key->device, key->inode,
674                                   locks[i].context.pid,
675                                   locks[i].lock_type,
676                                   locks[i].start,
677                                   locks[i].size);
678         }
679         return 0;
680 }
681
682 /*******************************************************************
683  Call the specified function on each lock in the database.
684 ********************************************************************/
685
686 int brl_forall(BRLOCK_FN(fn))
687 {
688         if (!tdb) return 0;
689         return tdb_traverse(tdb, traverse_fn, (BRLOCK_FN_CAST())fn);
690 }