r3034: - fixed a bug in message dispatch, when the dispatch function called messagin...
[jelmer/samba4-debian.git] / source / ntvfs / posix / pvfs_lock.c
1 /* 
2    Unix SMB/CIFS implementation.
3
4    POSIX NTVFS backend - locking
5
6    Copyright (C) Andrew Tridgell 2004
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 2 of the License, or
11    (at your option) any later version.
12    
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17    
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, write to the Free Software
20    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23 #include "include/includes.h"
24 #include "vfs_posix.h"
25
26
27 /*
28   check if we can perform IO on a range that might be locked
29 */
30 NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
31                          struct pvfs_file *f,
32                          uint16_t smbpid,
33                          uint64_t offset, uint64_t count,
34                          enum brl_type rw)
35 {
36         if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
37                 return NT_STATUS_OK;
38         }
39
40         return brl_locktest(pvfs->brl_context,
41                             &f->locking_key,
42                             f->fnum,
43                             smbpid,
44                             offset, count, rw);
45 }
46
/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
	struct pvfs_pending_lock *next, *prev;	/* links for f->pending_list (DLIST macros) */
	struct pvfs_state *pvfs;	/* backend state this pending lock belongs to */
	union smb_lock *lck;		/* the original client lock request */
	struct pvfs_file *f;		/* the open file the locks apply to */
	struct smbsrv_request *req;	/* client request answered asynchronously when resolved */
	int pending_lock;		/* index of the lock entry we are currently waiting on */
	void *wait_handle;		/* handle from pvfs_wait_message() for retry/timeout notification */
	time_t end_time;		/* absolute time at which the lock attempt times out */
};
58
59 /*
60   a secondary attempt to setup a lock has failed - back out
61   the locks we did get and send an error
62 */
63 static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
64                                    struct smbsrv_request *req,
65                                    struct pvfs_file *f,
66                                    struct smb_lock_entry *locks,
67                                    int i,
68                                    NTSTATUS status)
69 {
70         /* undo the locks we just did */
71         for (i=i-1;i>=0;i--) {
72                 brl_unlock(pvfs->brl_context,
73                            &f->locking_key,
74                            locks[i].pid,
75                            f->fnum,
76                            locks[i].offset,
77                            locks[i].count);
78         }
79         req->async.status = status;
80         req->async.send_fn(req);
81 }
82
83
/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
90 static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
91 {
92         struct pvfs_pending_lock *pending = private;
93         struct pvfs_state *pvfs = pending->pvfs;
94         struct pvfs_file *f = pending->f;
95         struct smbsrv_request *req = pending->req;
96         union smb_lock *lck = pending->lck;
97         struct smb_lock_entry *locks;
98         enum brl_type rw;
99         NTSTATUS status;
100         int i;
101
102         locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;
103
104         if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
105                 rw = READ_LOCK;
106         } else {
107                 rw = WRITE_LOCK;
108         }
109
110         DLIST_REMOVE(f->pending_list, pending);
111
112         status = brl_lock(pvfs->brl_context,
113                           &f->locking_key,
114                           req->smbpid,
115                           f->fnum,
116                           locks[pending->pending_lock].offset,
117                           locks[pending->pending_lock].count,
118                           rw, NULL);
119
120         /* if we have failed and timed out, or succeeded, then we
121            don't need the pending lock any more */
122         if (NT_STATUS_IS_OK(status) || timed_out) {
123                 NTSTATUS status2;
124                 status2 = brl_remove_pending(pvfs->brl_context, &f->locking_key, pending);
125                 if (!NT_STATUS_IS_OK(status2)) {
126                         DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
127                 }
128                 talloc_free(pending->wait_handle);
129         }
130
131         if (!NT_STATUS_IS_OK(status)) {
132                 if (timed_out) {
133                         /* no more chances */
134                         pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
135                 } else {
136                         /* we can try again */
137                         DLIST_ADD(f->pending_list, pending);
138                 }
139                 return;
140         }
141
142         /* if we haven't timed out yet, then we can do more pending locks */
143         if (timed_out) {
144                 pending = NULL;
145         } else {
146                 if (rw == READ_LOCK) {
147                         rw = PENDING_READ_LOCK;
148                 } else {
149                         rw = PENDING_WRITE_LOCK;
150                 }
151         }
152
153         /* we've now got the pending lock. try and get the rest, which might
154            lead to more pending locks */
155         for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {          
156                 if (pending) {
157                         pending->pending_lock = i;
158                 }
159
160                 status = brl_lock(pvfs->brl_context,
161                                   &f->locking_key,
162                                   req->smbpid,
163                                   f->fnum,
164                                   locks[i].offset,
165                                   locks[i].count,
166                                   rw, pending);
167                 if (!NT_STATUS_IS_OK(status)) {
168                         if (pending) {
169                                 /* a timed lock failed - setup a wait message to handle
170                                    the pending lock notification or a timeout */
171                                 pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY, 
172                                                                          pending->end_time,
173                                                                          pvfs_pending_lock_continue,
174                                                                          pending);
175                                 if (pending->wait_handle == NULL) {
176                                         pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
177                                 } else {
178                                         DLIST_ADD(f->pending_list, pending);
179                                 }
180                                 return;
181                         }
182                         pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
183                         return;
184                 }
185         }
186
187         /* we've managed to get all the locks. Tell the client */
188         req->async.status = NT_STATUS_OK;
189         req->async.send_fn(req);
190 }
191
192
193 /*
194   called when we close a file that might have pending locks
195 */
196 void pvfs_lock_close_pending(struct pvfs_state *pvfs, struct pvfs_file *f)
197 {
198         struct pvfs_pending_lock *p, *next;
199         NTSTATUS status;
200
201         for (p=f->pending_list;p;p=next) {
202                 next = p->next;
203                 DLIST_REMOVE(f->pending_list, p);
204                 status = brl_remove_pending(pvfs->brl_context, &f->locking_key, p);
205                 if (!NT_STATUS_IS_OK(status)) {
206                         DEBUG(0,("pvfs_lock_close_pending: failed to remove pending lock - %s\n", 
207                                  nt_errstr(status)));
208                 }
209                 talloc_free(p->wait_handle);
210                 p->req->async.status = NT_STATUS_RANGE_NOT_LOCKED;
211                 p->req->async.send_fn(p->req);
212         }
213
214 }
215
216
217 /*
218   cancel a set of locks
219 */
220 static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request *req, union smb_lock *lck,
221                                  struct pvfs_file *f)
222 {
223         struct pvfs_pending_lock *p;
224
225         for (p=f->pending_list;p;p=p->next) {
226                 /* check if the lock request matches exactly - you can only cancel with exact matches */
227                 if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
228                     p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
229                     p->lck->lockx.in.fnum      == lck->lockx.in.fnum &&
230                     p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
231                         int i;
232
233                         for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
234                                 if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
235                                     p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
236                                     p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
237                                         break;
238                                 }
239                         }
240                         if (i < lck->lockx.in.ulock_cnt) continue;
241
242                         /* an exact match! we can cancel it, which is equivalent
243                            to triggering the timeout early */
244                         pvfs_pending_lock_continue(p ,True);
245                         return NT_STATUS_OK;
246                 }
247         }
248
249         return NT_STATUS_UNSUCCESSFUL;
250 }
251
252
253 /*
254   lock or unlock a byte range
255 */
/*
  lock or unlock a byte range

  Handles all RAW_LOCK_* levels. The simple LOCK/UNLOCK levels map
  directly onto single brlock calls; LOCKX requests may carry multiple
  unlock and lock entries and may be asynchronous (pending) when a
  non-zero timeout is given.
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
		   struct smbsrv_request *req, union smb_lock *lck)
{
	struct pvfs_state *pvfs = ntvfs->private_data;
	struct pvfs_file *f;
	struct smb_lock_entry *locks;
	int i;
	enum brl_type rw;
	struct pvfs_pending_lock *pending = NULL;

	f = pvfs_find_fd(pvfs, req, lck->generic.in.fnum);
	if (!f) {
		return NT_STATUS_INVALID_HANDLE;
	}

	switch (lck->generic.level) {
	case RAW_LOCK_LOCK:
		/* old-style single-range lock: always an exclusive
		   (write) lock with no timeout */
		return brl_lock(pvfs->brl_context,
				&f->locking_key,
				req->smbpid,
				f->fnum,
				lck->lock.in.offset,
				lck->lock.in.count,
				WRITE_LOCK, NULL);
				
	case RAW_LOCK_UNLOCK:
		/* old-style single-range unlock */
		return brl_unlock(pvfs->brl_context,
				  &f->locking_key,
				  req->smbpid,
				  f->fnum,
				  lck->lock.in.offset,
				  lck->lock.in.count);

	case RAW_LOCK_GENERIC:
		/* callers must map generic requests to a concrete level first */
		return NT_STATUS_INVALID_LEVEL;

	case RAW_LOCK_LOCKX:
		/* fall through to the most complex case */
		break;
	}

	/* now the lockingX case, most common and also most complex */
	if (lck->lockx.in.timeout != 0) {
		/* a non-zero timeout means the lock may go async; the
		   pending state is talloc'd off req so it is cleaned up
		   with the request if we never register it */
		pending = talloc_p(req, struct pvfs_pending_lock);
		if (pending == NULL) {
			return NT_STATUS_NO_MEMORY;
		}

		pending->pvfs = pvfs;
		pending->lck = lck;
		pending->f = f;
		pending->req = req;

		/* round up to the nearest second */
		pending->end_time = time(NULL) + ((lck->lockx.in.timeout+999)/1000);
	}

	/* pending requests use the PENDING_* lock types so the brlock
	   layer will notify us when a conflicting lock is released */
	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = pending? PENDING_READ_LOCK : READ_LOCK;
	} else {
		rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
		/* cancel a previously issued pending lock request */
		return pvfs_lock_cancel(pvfs, req, lck, f);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
		/* this seems to not be supported by any windows server,
		   or used by any clients */
		return NT_STATUS_UNSUCCESSFUL;
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
		DEBUG(0,("received unexpected oplock break\n"));
		return NT_STATUS_NOT_IMPLEMENTED;
	}


	/* the unlocks happen first */
	locks = lck->lockx.in.locks;

	for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
		NTSTATUS status;
		status = brl_unlock(pvfs->brl_context,
				    &f->locking_key,
				    locks[i].pid,
				    f->fnum,
				    locks[i].offset,
				    locks[i].count);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	}

	/* advance past the unlock entries; the lock entries follow */
	locks += i;

	for (i=0;i<lck->lockx.in.lock_cnt;i++) {
		NTSTATUS status;

		/* remember which lock we are on in case it goes pending */
		if (pending) {
			pending->pending_lock = i;
		}

		status = brl_lock(pvfs->brl_context,
				  &f->locking_key,
				  locks[i].pid,
				  f->fnum,
				  locks[i].offset,
				  locks[i].count,
				  rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY, 
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					return NT_STATUS_NO_MEMORY;
				}
				DLIST_ADD(f->pending_list, pending);
				/* the reply will be sent asynchronously by
				   pvfs_pending_lock_continue() */
				return NT_STATUS_OK;
			}
			/* undo the locks we just did */
			for (i=i-1;i>=0;i--) {
				brl_unlock(pvfs->brl_context,
					   &f->locking_key,
					   locks[i].pid,
					   f->fnum,
					   locks[i].offset,
					   locks[i].count);
			}
			return status;
		}
	}

	return NT_STATUS_OK;
}
396