Align the Python and EJS ldap tests.
[kai/samba.git] / source / ntvfs / posix / pvfs_lock.c
1 /* 
2    Unix SMB/CIFS implementation.
3
4    POSIX NTVFS backend - locking
5
6    Copyright (C) Andrew Tridgell 2004
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12    
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17    
18    You should have received a copy of the GNU General Public License
19    along with this program.  If not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include "includes.h"
23 #include "vfs_posix.h"
24 #include "system/time.h"
25 #include "lib/util/dlinklist.h"
26 #include "messaging/messaging.h"
27
28
29 /*
30   check if we can perform IO on a range that might be locked
31 */
32 NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
33                          struct pvfs_file *f,
34                          uint16_t smbpid,
35                          uint64_t offset, uint64_t count,
36                          enum brl_type rw)
37 {
38         if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
39                 return NT_STATUS_OK;
40         }
41
42         return brl_locktest(pvfs->brl_context,
43                             f->brl_handle,
44                             smbpid,
45                             offset, count, rw);
46 }
47
/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
        struct pvfs_pending_lock *next, *prev;  /* entry in pvfs_file.pending_list */
        struct pvfs_state *pvfs;
        union smb_lock *lck;            /* the original lock request */
        struct pvfs_file *f;            /* file the pending lock applies to */
        struct ntvfs_request *req;      /* request to complete once resolved */
        int pending_lock;               /* index into the lock array of the entry we are waiting on */
        struct pvfs_wait *wait_handle;  /* handle for the MSG_BRL_RETRY wait */
        struct timeval end_time;        /* when this lock request times out */
};
59
60 /*
61   a secondary attempt to setup a lock has failed - back out
62   the locks we did get and send an error
63 */
64 static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
65                                    struct ntvfs_request *req,
66                                    struct pvfs_file *f,
67                                    struct smb_lock_entry *locks,
68                                    int i,
69                                    NTSTATUS status)
70 {
71         /* undo the locks we just did */
72         for (i--;i>=0;i--) {
73                 brl_unlock(pvfs->brl_context,
74                            f->brl_handle,
75                            locks[i].pid,
76                            locks[i].offset,
77                            locks[i].count);
78                 f->lock_count--;
79         }
80         req->async_states->status = status;
81         req->async_states->send_fn(req);
82 }
83
84
/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked a overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private, enum pvfs_wait_notice reason)
{
        struct pvfs_pending_lock *pending = private;
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
        struct ntvfs_request *req = pending->req;
        union smb_lock *lck = pending->lck;
        struct smb_lock_entry *locks;
        enum brl_type rw;
        NTSTATUS status;
        int i;
        bool timed_out;

        /* anything other than an unlock event (i.e. timeout or cancel)
           counts as timed out for the retry logic below */
        timed_out = (reason != PVFS_WAIT_EVENT);

        /* the lock entries follow the unlock entries in the array */
        locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = READ_LOCK;
        } else {
                rw = WRITE_LOCK;
        }

        /* take ourselves off the pending list while we retry; we are
           re-added below if another pending wait is set up */
        DLIST_REMOVE(f->pending_list, pending);

        /* we don't retry on a cancel */
        if (reason == PVFS_WAIT_CANCEL) {
                status = NT_STATUS_FILE_LOCK_CONFLICT;
        } else {
                /* 
                 * here it's important to pass the pending pointer
                 * because with this we'll get the correct error code
                 * FILE_LOCK_CONFLICT in the error case
                 */
                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[pending->pending_lock].pid,
                                  locks[pending->pending_lock].offset,
                                  locks[pending->pending_lock].count,
                                  rw, pending);
        }
        if (NT_STATUS_IS_OK(status)) {
                f->lock_count++;
                /* even a timeout notification succeeds if the last-chance
                   lock attempt above worked */
                timed_out = false;
        }

        /* if we have failed and timed out, or succeeded, then we
           don't need the pending lock any more */
        if (NT_STATUS_IS_OK(status) || timed_out) {
                NTSTATUS status2;
                status2 = brl_remove_pending(pvfs->brl_context, 
                                             f->brl_handle, pending);
                if (!NT_STATUS_IS_OK(status2)) {
                        DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
                }
                talloc_free(pending->wait_handle);
        }

        if (!NT_STATUS_IS_OK(status)) {
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
                        talloc_free(pending);
                } else {
                        /* we can try again */
                        DLIST_ADD(f->pending_list, pending);
                }
                return;
        }

        /* if we haven't timed out yet, then we can do more pending locks */
        if (rw == READ_LOCK) {
                rw = PENDING_READ_LOCK;
        } else {
                rw = PENDING_WRITE_LOCK;
        }

        /* we've now got the pending lock. try and get the rest, which might
           lead to more pending locks */
        for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[i].pid,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - setup a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY, 
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
                                        talloc_free(pending);
                                } else {
                                        /* keep the wait handle alive as long as the
                                           pending record itself */
                                        talloc_steal(pending, pending->wait_handle);
                                        DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
                        pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
                        talloc_free(pending);
                        return;
                }

                f->lock_count++;
        }

        /* we've managed to get all the locks. Tell the client */
        req->async_states->status = NT_STATUS_OK;
        req->async_states->send_fn(req);
        talloc_free(pending);
}
211
212
213 /*
214   called when we close a file that might have locks
215 */
216 void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
217 {
218         struct pvfs_pending_lock *p, *next;
219
220         if (f->lock_count || f->pending_list) {
221                 DEBUG(5,("pvfs_lock: removing %.0f locks on close\n", 
222                          (double)f->lock_count));
223                 brl_close(f->pvfs->brl_context, f->brl_handle);
224                 f->lock_count = 0;
225         }
226
227         /* reply to all the pending lock requests, telling them the 
228            lock failed */
229         for (p=f->pending_list;p;p=next) {
230                 next = p->next;
231                 DLIST_REMOVE(f->pending_list, p);
232                 p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
233                 p->req->async_states->send_fn(p->req);
234         }
235 }
236
237
238 /*
239   cancel a set of locks
240 */
241 static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
242                                  struct pvfs_file *f)
243 {
244         struct pvfs_pending_lock *p;
245
246         for (p=f->pending_list;p;p=p->next) {
247                 /* check if the lock request matches exactly - you can only cancel with exact matches */
248                 if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
249                     p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
250                     p->lck->lockx.in.file.ntvfs== lck->lockx.in.file.ntvfs &&
251                     p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
252                         int i;
253
254                         for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
255                                 if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
256                                     p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
257                                     p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
258                                         break;
259                                 }
260                         }
261                         if (i < lck->lockx.in.ulock_cnt) continue;
262
263                         /* an exact match! we can cancel it, which is equivalent
264                            to triggering the timeout early */
265                         pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
266                         return NT_STATUS_OK;
267                 }
268         }
269
270         return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
271 }
272
273
/*
  lock or unlock a byte range

  entry point for the NTVFS lock operation: dispatches oplock
  releases and cancels, performs the unlocks first, then attempts
  the locks, setting up an async pending lock when a timeout was
  requested and a lock cannot be granted immediately
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                   struct ntvfs_request *req, union smb_lock *lck)
{
        struct pvfs_state *pvfs = ntvfs->private_data;
        struct pvfs_file *f;
        struct smb_lock_entry *locks;
        int i;
        enum brl_type rw;
        struct pvfs_pending_lock *pending = NULL;
        NTSTATUS status;

        /* map non-generic lock levels onto the generic form first */
        if (lck->generic.level != RAW_LOCK_GENERIC) {
                return ntvfs_map_lock(ntvfs, req, lck);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
                return pvfs_oplock_release(ntvfs, req, lck);
        }

        f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
        if (!f) {
                return NT_STATUS_INVALID_HANDLE;
        }

        /* fd == -1 here means the handle refers to a directory */
        if (f->handle->fd == -1) {
                return NT_STATUS_FILE_IS_A_DIRECTORY;
        }

        status = pvfs_break_level2_oplocks(f);
        NT_STATUS_NOT_OK_RETURN(status);

        /* a non-zero timeout plus async capability means we may leave
           the request pending instead of failing immediately */
        if (lck->lockx.in.timeout != 0 && 
            (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                pending = talloc(f, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }

                pending->pvfs = pvfs;
                pending->lck = lck;
                pending->f = f;
                pending->req = req;

                /* timeout is in milliseconds; split into secs/usecs */
                pending->end_time = 
                        timeval_current_ofs(lck->lockx.in.timeout/1000,
                                            1000*(lck->lockx.in.timeout%1000));
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = pending? PENDING_READ_LOCK : READ_LOCK;
        } else {
                rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
                talloc_free(pending);
                return pvfs_lock_cancel(pvfs, req, lck, f);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
                /* this seems to not be supported by any windows server,
                   or used by any clients */
                talloc_free(pending);
                return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
        }

        /* the unlocks happen first */
        locks = lck->lockx.in.locks;

        for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
                status = brl_unlock(pvfs->brl_context,
                                    f->brl_handle,
                                    locks[i].pid,
                                    locks[i].offset,
                                    locks[i].count);
                if (!NT_STATUS_IS_OK(status)) {
                        talloc_free(pending);
                        return status;
                }
                f->lock_count--;
        }

        /* the lock entries follow the unlock entries */
        locks += i;

        for (i=0;i<lck->lockx.in.lock_cnt;i++) {
                /* remember which entry we are on, so a retry can
                   resume from here */
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[i].pid,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - setup a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY, 
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        talloc_free(pending);
                                        return NT_STATUS_NO_MEMORY;
                                }
                                /* keep the wait handle alive as long as the
                                   pending record */
                                talloc_steal(pending, pending->wait_handle);
                                DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }

                        /* undo the locks we just did */
                        for (i--;i>=0;i--) {
                                brl_unlock(pvfs->brl_context,
                                           f->brl_handle,
                                           locks[i].pid,
                                           locks[i].offset,
                                           locks[i].count);
                                f->lock_count--;
                        }
                        talloc_free(pending);
                        return status;
                }
                f->lock_count++;
        }

        talloc_free(pending);
        return NT_STATUS_OK;
}