r3336: use a struct ntvfs_async_state to be able to do async chaining of ntvfs modules
samba.git: source4/ntvfs/posix/pvfs_lock.c
/* 
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "include/includes.h"
#include "vfs_posix.h"


/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
                         struct pvfs_file *f,
                         uint16_t smbpid,
                         uint64_t offset, uint64_t count,
                         enum brl_type rw)
{
        if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
                return NT_STATUS_OK;
        }

        return brl_locktest(pvfs->brl_context,
                            &f->locking_key,
                            f->fnum,
                            smbpid,
                            offset, count, rw);
}
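
/*
  usage note: callers in the read/write paths are expected to call
  pvfs_check_lock() before doing IO on a byte range. A minimal sketch of
  such a caller (the variable names here are hypothetical, not part of
  this file):

    status = pvfs_check_lock(pvfs, f, req->smbpid, offset, count, READ_LOCK);
    if (!NT_STATUS_IS_OK(status)) {
            return status;
    }
*/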

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
        struct pvfs_pending_lock *next, *prev;
        struct pvfs_state *pvfs;
        union smb_lock *lck;
        struct pvfs_file *f;
        struct smbsrv_request *req;
        int pending_lock;     /* index of the lock entry we are waiting on */
        void *wait_handle;    /* from pvfs_wait_message(); freed to cancel the wait */
        time_t end_time;      /* absolute time at which the blocking request gives up */
};
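
/*
  overview of the blocking lock path implemented below: when a lock with
  a non-zero timeout cannot be granted immediately, a pvfs_pending_lock
  is queued on f->pending_list together with a pvfs_wait_message()
  handle. The lock is then retried from pvfs_pending_lock_continue(),
  either because MSG_BRL_RETRY tells us that an overlapping range has
  been unlocked, or because the timeout expired, in which case it gets
  one final chance before the request is failed.
*/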

/*
  a secondary attempt to setup a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
                                   struct smbsrv_request *req,
                                   struct pvfs_file *f,
                                   struct smb_lock_entry *locks,
                                   int i,
                                   NTSTATUS status)
{
        /* undo the locks we just did */
        for (i=i-1;i>=0;i--) {
                brl_unlock(pvfs->brl_context,
                           &f->locking_key,
                           locks[i].pid,
                           f->fnum,
                           locks[i].offset,
                           locks[i].count);
                f->lock_count--;
        }
        req->async_states->status = status;
        req->async_states->send_fn(req);
}


/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
{
        struct pvfs_pending_lock *pending = private;
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
        struct smbsrv_request *req = pending->req;
        union smb_lock *lck = pending->lck;
        struct smb_lock_entry *locks;
        enum brl_type rw;
        NTSTATUS status;
        int i;
        /* the index of the lock we were waiting on, taken now because we
           may set the pending pointer to NULL below */
        int pending_lock = pending->pending_lock;

        locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = READ_LOCK;
        } else {
                rw = WRITE_LOCK;
        }

        DLIST_REMOVE(f->pending_list, pending);

        status = brl_lock(pvfs->brl_context,
                          &f->locking_key,
                          req->smbpid,
                          f->fnum,
                          locks[pending_lock].offset,
                          locks[pending_lock].count,
                          rw, NULL);

        if (NT_STATUS_IS_OK(status)) {
                f->lock_count++;
        }

        /* if we have failed and timed out, or succeeded, then we
           don't need the pending lock any more */
        if (NT_STATUS_IS_OK(status) || timed_out) {
                NTSTATUS status2;
                status2 = brl_remove_pending(pvfs->brl_context, &f->locking_key, pending);
                if (!NT_STATUS_IS_OK(status2)) {
                        DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
                }
                talloc_free(pending->wait_handle);
        }

        if (!NT_STATUS_IS_OK(status)) {
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending_lock, status);
                } else {
                        /* we can try again */
                        DLIST_ADD(f->pending_list, pending);
                }
                return;
        }

        /* if we haven't timed out yet, then we can do more pending locks */
        if (timed_out) {
                pending = NULL;
        } else {
                if (rw == READ_LOCK) {
                        rw = PENDING_READ_LOCK;
                } else {
                        rw = PENDING_WRITE_LOCK;
                }
        }

        /* we've now got the pending lock. try and get the rest, which might
           lead to more pending locks */
        for (i=pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  &f->locking_key,
                                  req->smbpid,
                                  f->fnum,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - setup a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY, 
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
                                } else {
                                        DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
                        pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
                        return;
                }

                f->lock_count++;
        }

        /* we've managed to get all the locks. Tell the client */
        req->async_states->status = NT_STATUS_OK;
        req->async_states->send_fn(req);
}
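
/*
  note on the retry mechanism above: a lock that could not be granted
  immediately is passed to brl_lock() with a PENDING_READ_LOCK or
  PENDING_WRITE_LOCK type and the pvfs_pending_lock pointer as the
  notify argument, so the brl layer can wake us up (via a MSG_BRL_RETRY
  message routed through pvfs_wait_message()) when an overlapping range
  is unlocked. brl_remove_pending() drops that record once the lock has
  either been granted or has finally timed out.
*/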


/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
        struct pvfs_pending_lock *p, *next;

        if (f->lock_count || f->pending_list) {
                DEBUG(5,("pvfs_lock: removing %.0f locks on close\n", 
                         (double)f->lock_count));
                brl_close(f->pvfs->brl_context, &f->locking_key, f->fnum);
                f->lock_count = 0;
        }

        /* reply to all the pending lock requests, telling them the 
           lock failed */
        for (p=f->pending_list;p;p=next) {
                next = p->next;
                DLIST_REMOVE(f->pending_list, p);
                talloc_free(p->wait_handle);
                p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
                p->req->async_states->send_fn(p->req);
        }
}


/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request *req, union smb_lock *lck,
                                 struct pvfs_file *f)
{
        struct pvfs_pending_lock *p;

        for (p=f->pending_list;p;p=p->next) {
                /* check if the lock request matches exactly - you can only cancel with exact matches */
                if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
                    p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
                    p->lck->lockx.in.fnum      == lck->lockx.in.fnum &&
                    p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
                        int i;

                        for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
                                if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
                                    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
                                    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
                                        break;
                                }
                        }
                        if (i < lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt) continue;

                        /* an exact match! we can cancel it, which is equivalent
                           to triggering the timeout early */
                        pvfs_pending_lock_continue(p, True);
                        return NT_STATUS_OK;
                }
        }

        return NT_STATUS_UNSUCCESSFUL;
}
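
/*
  note that cancelling reuses the retry path above:
  pvfs_pending_lock_continue() is called with timed_out set to True, so
  the pending lock gets one last attempt and the waiting request is then
  completed via its async send function, either with success or with the
  failure status from that final attempt.
*/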


/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                   struct smbsrv_request *req, union smb_lock *lck)
{
        struct pvfs_state *pvfs = ntvfs->private_data;
        struct pvfs_file *f;
        struct smb_lock_entry *locks;
        int i;
        enum brl_type rw;
        struct pvfs_pending_lock *pending = NULL;
        NTSTATUS status;

        if (lck->generic.level != RAW_LOCK_GENERIC) {
                return ntvfs_map_lock(req, lck, ntvfs);
        }

        f = pvfs_find_fd(pvfs, req, lck->lockx.in.fnum);
        if (!f) {
                return NT_STATUS_INVALID_HANDLE;
        }

        if (f->name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY) {
                return NT_STATUS_FILE_IS_A_DIRECTORY;
        }

        /* a timeout of zero means a non-blocking lock request. We only
           set up pending state when the client is prepared to wait and
           the ntvfs layer allows us to reply asynchronously */
        if (lck->lockx.in.timeout != 0 && 
            (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                pending = talloc_p(req, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }

                pending->pvfs = pvfs;
                pending->lck = lck;
                pending->f = f;
                pending->req = req;

                /* round up to the nearest second */
                pending->end_time = time(NULL) + ((lck->lockx.in.timeout+999)/1000);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = pending? PENDING_READ_LOCK : READ_LOCK;
        } else {
                rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
                return pvfs_lock_cancel(pvfs, req, lck, f);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
                /* this seems to not be supported by any windows server,
                   or used by any clients */
                return NT_STATUS_UNSUCCESSFUL;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
                DEBUG(0,("received unexpected oplock break\n"));
                return NT_STATUS_NOT_IMPLEMENTED;
        }


        /* the unlocks happen first */
        locks = lck->lockx.in.locks;

        for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
                status = brl_unlock(pvfs->brl_context,
                                    &f->locking_key,
                                    locks[i].pid,
                                    f->fnum,
                                    locks[i].offset,
                                    locks[i].count);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
                f->lock_count--;
        }

        locks += i;

        /* now the lock requests, which may become pending locks */
        for (i=0;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  &f->locking_key,
                                  locks[i].pid,
                                  f->fnum,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - setup a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY, 
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        return NT_STATUS_NO_MEMORY;
                                }
                                DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }
                        /* undo the locks we just did */
                        for (i=i-1;i>=0;i--) {
                                brl_unlock(pvfs->brl_context,
                                           &f->locking_key,
                                           locks[i].pid,
                                           f->fnum,
                                           locks[i].offset,
                                           locks[i].count);
                                f->lock_count--;
                        }
                        return status;
                }
                f->lock_count++;
        }

        return NT_STATUS_OK;
}