/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "includes.h"
#include "vfs_posix.h"
#include "system/time.h"
#include "dlinklist.h"
#include "messages.h"


/*
  check if we can perform IO on a range that might be locked
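  - with strict locking disabled the check is skipped entirely;
  otherwise brl_locktest() asks the byte-range lock database whether a
  conflicting lock covers the range. Callers in the read/write paths
  are expected to invoke this before performing the IO itself.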
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
                         struct pvfs_file *f,
                         uint16_t smbpid,
                         uint64_t offset, uint64_t count,
                         enum brl_type rw)
{
        if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
                return NT_STATUS_OK;
        }

        return brl_locktest(pvfs->brl_context,
                            &f->handle->locking_key,
                            f->fnum,
                            smbpid,
                            offset, count, rw);
}

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
        struct pvfs_pending_lock *next, *prev;
        struct pvfs_state *pvfs;
        union smb_lock *lck;
        struct pvfs_file *f;
        struct smbsrv_request *req;
        int pending_lock;
        void *wait_handle;
        struct timeval end_time;
};
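
/* lifecycle note: each pvfs_pending_lock is talloc'd off the pvfs_file
   in pvfs_lock(), linked into f->pending_list and given ownership of
   its pvfs_wait handle via talloc_steal(); pending_lock is the index
   of the lock entry we are still waiting for */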

/*
  a secondary attempt to set up a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
                                   struct smbsrv_request *req,
                                   struct pvfs_file *f,
                                   struct smb_lock_entry *locks,
                                   int i,
                                   NTSTATUS status)
{
        /* undo the locks [0, i) that were granted before entry i failed */
        for (i=i-1;i>=0;i--) {
                brl_unlock(pvfs->brl_context,
                           &f->handle->locking_key,
                           locks[i].pid,
                           f->fnum,
                           locks[i].offset,
                           locks[i].count);
                f->lock_count--;
        }
        req->async_states->status = status;
        req->async_states->send_fn(req);
}


/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
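  The reason comes from the pvfs_wait layer: PVFS_WAIT_EVENT means a
  MSG_BRL_RETRY message arrived because an overlapping lock was
  released; any other reason is treated as a timeout (a cancel is
  deliberately handled the same way).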
*/
static void pvfs_pending_lock_continue(void *private, enum pvfs_wait_notice reason)
{
        struct pvfs_pending_lock *pending = private;
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
        struct smbsrv_request *req = pending->req;
        union smb_lock *lck = pending->lck;
        struct smb_lock_entry *locks;
        enum brl_type rw;
        NTSTATUS status;
        int i, pending_lock;
        BOOL timed_out;

        /* we consider a cancel to be a timeout */
        timed_out = (reason != PVFS_WAIT_EVENT);

        /* the lock entries follow the unlock entries in the array */
        locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = READ_LOCK;
        } else {
                rw = WRITE_LOCK;
        }

        DLIST_REMOVE(f->pending_list, pending);

        /* remember which entry we were waiting on - "pending" is
           cleared below once we have timed out */
        pending_lock = pending->pending_lock;

        status = brl_lock(pvfs->brl_context,
                          &f->handle->locking_key,
                          req->smbpid,
                          f->fnum,
                          locks[pending_lock].offset,
                          locks[pending_lock].count,
                          rw, NULL);

        if (NT_STATUS_IS_OK(status)) {
                f->lock_count++;
        }

        /* if we have failed and timed out, or succeeded, then we
           don't need the pending lock any more */
        if (NT_STATUS_IS_OK(status) || timed_out) {
                NTSTATUS status2;
                status2 = brl_remove_pending(pvfs->brl_context,
                                             &f->handle->locking_key, pending);
                if (!NT_STATUS_IS_OK(status2)) {
                        DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
                }
                talloc_free(pending->wait_handle);
        }

        if (!NT_STATUS_IS_OK(status)) {
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending_lock, status);
                } else {
                        /* we can try again */
                        DLIST_ADD(f->pending_list, pending);
                }
                return;
        }

        /* if we haven't timed out yet, then we can do more pending locks */
        if (timed_out) {
                pending = NULL;
        } else {
                if (rw == READ_LOCK) {
                        rw = PENDING_READ_LOCK;
                } else {
                        rw = PENDING_WRITE_LOCK;
                }
        }
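
        /* with a PENDING_* lock type, brl_lock() is expected to record
           the pending entry so that a conflicting unlock triggers a
           MSG_BRL_RETRY message back to us rather than a plain failure */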

        /* we've now got the pending lock. try and get the rest, which might
           lead to more pending locks */
        for (i=pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  &f->handle->locking_key,
                                  req->smbpid,
                                  f->fnum,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - set up a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
                                } else {
                                        talloc_steal(pending, pending->wait_handle);
                                        DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
                        pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
                        return;
                }

                f->lock_count++;
        }

        /* we've managed to get all the locks. Tell the client */
        req->async_states->status = NT_STATUS_OK;
        req->async_states->send_fn(req);
}


/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
        struct pvfs_pending_lock *p, *next;

        if (f->lock_count || f->pending_list) {
                DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
                         (double)f->lock_count));
                brl_close(f->pvfs->brl_context, &f->handle->locking_key, f->fnum);
                f->lock_count = 0;
        }

        /* reply to all the pending lock requests, telling them the
           lock failed */
        for (p=f->pending_list;p;p=next) {
                next = p->next;
                DLIST_REMOVE(f->pending_list, p);
                p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
                p->req->async_states->send_fn(p->req);
        }
}


/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request *req, union smb_lock *lck,
                                 struct pvfs_file *f)
{
        struct pvfs_pending_lock *p;

        for (p=f->pending_list;p;p=p->next) {
                /* check if the lock request matches exactly - you can only cancel with exact matches */
                if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
                    p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
                    p->lck->lockx.in.fnum      == lck->lockx.in.fnum &&
                    p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
                        int i;

                        for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
                                if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
                                    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
                                    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
                                        break;
                                }
                        }
                        /* if the inner loop broke early then one of the lock
                           entries differed, so this is not an exact match */
                        if (i < lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt) continue;

                        /* an exact match! we can cancel it, which is equivalent
                           to triggering the timeout early */
                        pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
                        return NT_STATUS_OK;
                }
        }

        return NT_STATUS_UNSUCCESSFUL;
}


/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                   struct smbsrv_request *req, union smb_lock *lck)
{
        struct pvfs_state *pvfs = ntvfs->private_data;
        struct pvfs_file *f;
        struct smb_lock_entry *locks;
        int i;
        enum brl_type rw;
        struct pvfs_pending_lock *pending = NULL;
        NTSTATUS status;

        if (lck->generic.level != RAW_LOCK_GENERIC) {
                return ntvfs_map_lock(req, lck, ntvfs);
        }

        f = pvfs_find_fd(pvfs, req, lck->lockx.in.fnum);
        if (!f) {
                return NT_STATUS_INVALID_HANDLE;
        }

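        /* directory handles are opened without a backing fd, so
           handle->fd == -1 is the primary indicator of a directory -
           byte range locks only make sense on regular files */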
        if (f->handle->fd == -1) {
                return NT_STATUS_FILE_IS_A_DIRECTORY;
        }

        if (lck->lockx.in.timeout != 0 &&
            (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                pending = talloc_p(f, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }

                pending->pvfs = pvfs;
                pending->lck = lck;
                pending->f = f;
                pending->req = req;

                pending->end_time =
                        timeval_current_ofs(lck->lockx.in.timeout/1000,
                                            1000*(lck->lockx.in.timeout%1000));
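                /* the client timeout is in milliseconds; e.g. 2500ms
                   becomes an offset of 2 seconds + 500000 microseconds */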
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = pending? PENDING_READ_LOCK : READ_LOCK;
        } else {
                rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
                return pvfs_lock_cancel(pvfs, req, lck, f);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
                /* this does not seem to be supported by any windows server,
                   or used by any client */
                return NT_STATUS_UNSUCCESSFUL;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
                DEBUG(0,("received unexpected oplock break\n"));
                return NT_STATUS_NOT_IMPLEMENTED;
        }

        /* the unlocks happen first */
        locks = lck->lockx.in.locks;

        for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
                status = brl_unlock(pvfs->brl_context,
                                    &f->handle->locking_key,
                                    locks[i].pid,
                                    f->fnum,
                                    locks[i].offset,
                                    locks[i].count);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
                f->lock_count--;
        }

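        /* the lock entries follow the ulock_cnt unlock entries in the
           same array, so step over the ones we've just processed */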
        locks += i;

        for (i=0;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  &f->handle->locking_key,
                                  locks[i].pid,
                                  f->fnum,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - set up a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        return NT_STATUS_NO_MEMORY;
                                }
                                talloc_steal(pending, pending->wait_handle);
                                DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }
                        /* undo the locks we just did */
                        for (i=i-1;i>=0;i--) {
                                brl_unlock(pvfs->brl_context,
                                           &f->handle->locking_key,
                                           locks[i].pid,
                                           f->fnum,
                                           locks[i].offset,
                                           locks[i].count);
                                f->lock_count--;
                        }
                        return status;
                }
                f->lock_count++;
        }

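        /* every unlock and lock entry was processed synchronously */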
        return NT_STATUS_OK;
}