r3031: added support for lock cancelation, which effectively just triggers an early...
source4/ntvfs/posix/pvfs_lock.c
/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "include/includes.h"
#include "vfs_posix.h"


/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
                         struct pvfs_file *f,
                         uint16_t smbpid,
                         uint64_t offset, uint64_t count,
                         enum brl_type rw)
{
        if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
                return NT_STATUS_OK;
        }

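        /* with strict locking enabled, ask the byte range lock database
           whether the requested range conflicts with an existing lock */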
        return brl_locktest(pvfs->brl_context,
                            &f->locking_key,
                            f->fnum,
                            smbpid,
                            offset, count, rw);
}

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
        struct pvfs_pending_lock *next, *prev;
        struct pvfs_state *pvfs;
        union smb_lock *lck;
        struct pvfs_file *f;
        struct smbsrv_request *req;
        int pending_lock;
        void *wait_handle;
        time_t end_time;
};
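
/*
  a pending lock is allocated in pvfs_lock() when a lockingX request
  carries a non-zero timeout. If one of the requested locks cannot be
  granted immediately the structure is queued on f->pending_list, and
  pvfs_wait_message() arranges for pvfs_pending_lock_continue() to run
  when either a MSG_BRL_RETRY notification arrives or end_time is
  reached. pending_lock records the index of the lock entry being
  waited on.
*/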

/*
  a secondary attempt to setup a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
                                   struct smbsrv_request *req,
                                   struct pvfs_file *f,
                                   struct smb_lock_entry *locks,
                                   int i,
                                   NTSTATUS status)
{
        /* undo the locks we just did */
        for (i=i-1;i>=0;i--) {
                brl_unlock(pvfs->brl_context,
                           &f->locking_key,
                           locks[i].pid,
                           f->fnum,
                           locks[i].offset,
                           locks[i].count);
        }
        req->async.status = status;
        req->async.send_fn(req);
}


/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
{
        struct pvfs_pending_lock *pending = private;
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
        struct smbsrv_request *req = pending->req;
        union smb_lock *lck = pending->lck;
        struct smb_lock_entry *locks;
        enum brl_type rw;
        NTSTATUS status;
        int i;

        locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = READ_LOCK;
        } else {
                rw = WRITE_LOCK;
        }

        DLIST_REMOVE(f->pending_list, pending);

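        /* retry the lock we were waiting on, this time as an ordinary
           (non-pending) lock with no pending state attached */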
        status = brl_lock(pvfs->brl_context,
                          &f->locking_key,
                          req->smbpid,
                          f->fnum,
                          locks[pending->pending_lock].offset,
                          locks[pending->pending_lock].count,
                          rw, NULL);

        /* if we have failed and timed out, or succeeded, then we
           don't need the pending lock any more */
        if (NT_STATUS_IS_OK(status) || timed_out) {
                NTSTATUS status2;
                status2 = brl_remove_pending(pvfs->brl_context, &f->locking_key, pending);
                if (!NT_STATUS_IS_OK(status2)) {
                        DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
                }
                talloc_free(pending->wait_handle);
        }

        if (!NT_STATUS_IS_OK(status)) {
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
                } else {
                        /* we can try again */
                        DLIST_ADD(f->pending_list, pending);
                }
                return;
        }

        /* if we haven't timed out yet, then we can do more pending locks */
        if (timed_out) {
                pending = NULL;
        } else {
                if (rw == READ_LOCK) {
                        rw = PENDING_READ_LOCK;
                } else {
                        rw = PENDING_WRITE_LOCK;
                }
        }

        /* we've now got the pending lock. try and get the rest, which might
           lead to more pending locks */
        for (i=pending->pending_lock;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  &f->locking_key,
                                  req->smbpid,
                                  f->fnum,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - setup a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
                                } else {
                                        DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
                        pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
                        return;
                }
        }

        brl_unlock(pvfs->brl_context,
                   &f->locking_key,
                   req->smbpid,
                   f->fnum,
                   lck->lock.in.offset,
                   lck->lock.in.count);

        /* we've managed to get all the locks. Tell the client */
        req->async.status = NT_STATUS_OK;
        req->async.send_fn(req);
}


/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request *req, union smb_lock *lck,
                                 struct pvfs_file *f)
{
        struct pvfs_pending_lock *p;

        for (p=f->pending_list;p;p=p->next) {
                /* check if the lock request matches exactly - you can only cancel with exact matches */
                if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
                    p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
                    p->lck->lockx.in.fnum      == lck->lockx.in.fnum &&
                    p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
                        int i;

                        for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
                                if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
                                    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
                                    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
                                        break;
                                }
                        }
                        if (i < lck->lockx.in.ulock_cnt) continue;

                        /* an exact match! we can cancel it, which is equivalent
                           to triggering the timeout early */
                        pvfs_pending_lock_continue(p, True);
                        return NT_STATUS_OK;
                }
        }

        return NT_STATUS_UNSUCCESSFUL;
}


/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                   struct smbsrv_request *req, union smb_lock *lck)
{
        struct pvfs_state *pvfs = ntvfs->private_data;
        struct pvfs_file *f;
        struct smb_lock_entry *locks;
        int i;
        enum brl_type rw;
        struct pvfs_pending_lock *pending = NULL;

        f = pvfs_find_fd(pvfs, req, lck->generic.in.fnum);
        if (!f) {
                return NT_STATUS_INVALID_HANDLE;
        }

        switch (lck->generic.level) {
        case RAW_LOCK_LOCK:
                return brl_lock(pvfs->brl_context,
                                &f->locking_key,
                                req->smbpid,
                                f->fnum,
                                lck->lock.in.offset,
                                lck->lock.in.count,
                                WRITE_LOCK, NULL);

        case RAW_LOCK_UNLOCK:
                return brl_unlock(pvfs->brl_context,
                                  &f->locking_key,
                                  req->smbpid,
                                  f->fnum,
                                  lck->lock.in.offset,
                                  lck->lock.in.count);

        case RAW_LOCK_GENERIC:
                return NT_STATUS_INVALID_LEVEL;

        case RAW_LOCK_LOCKX:
                /* fall through to the most complex case */
                break;
        }

        /* now the lockingX case, most common and also most complex */
        if (lck->lockx.in.timeout != 0) {
                pending = talloc_p(req, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }

                pending->pvfs = pvfs;
                pending->lck = lck;
                pending->f = f;
                pending->req = req;

                /* round up to the nearest second */
                pending->end_time = time(NULL) + ((lck->lockx.in.timeout+999)/1000);
        }

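        /* when the client supplied a timeout the locks below are taken as
           PENDING_ variants and brl_lock() is handed the pending state, so
           the byte range locking code can notify us (MSG_BRL_RETRY) when an
           overlapping range is unlocked and the lock can be retried */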
        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = pending? PENDING_READ_LOCK : READ_LOCK;
        } else {
                rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
                return pvfs_lock_cancel(pvfs, req, lck, f);
        }

        if (lck->lockx.in.mode &
            (LOCKING_ANDX_OPLOCK_RELEASE |
             LOCKING_ANDX_CHANGE_LOCKTYPE |
             LOCKING_ANDX_CANCEL_LOCK)) {
                /* todo: need to add support for these */
                return NT_STATUS_NOT_IMPLEMENTED;
        }


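        /* a lockingX request carries a single locks[] array: the first
           ulock_cnt entries are ranges to unlock, followed by lock_cnt
           ranges to lock */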
        /* the unlocks happen first */
        locks = lck->lockx.in.locks;

        for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
                NTSTATUS status;
                status = brl_unlock(pvfs->brl_context,
                                    &f->locking_key,
                                    locks[i].pid,
                                    f->fnum,
                                    locks[i].offset,
                                    locks[i].count);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
        }

        locks += i;

        for (i=0;i<lck->lockx.in.lock_cnt;i++) {
                NTSTATUS status;

                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  &f->locking_key,
                                  locks[i].pid,
                                  f->fnum,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - setup a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        return NT_STATUS_NO_MEMORY;
                                }
                                DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }
                        /* undo the locks we just did */
                        for (i=i-1;i>=0;i--) {
                                brl_unlock(pvfs->brl_context,
                                           &f->locking_key,
                                           locks[i].pid,
                                           f->fnum,
                                           locks[i].offset,
                                           locks[i].count);
                        }
                        return status;
                }
        }

        return NT_STATUS_OK;
}