/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "vfs_posix.h"
#include "system/time.h"
#include "lib/util/dlinklist.h"
#include "messaging/messaging.h"


/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
                         struct pvfs_file *f,
                         uint16_t smbpid,
                         uint64_t offset, uint64_t count,
                         enum brl_type rw)
{
        if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
                return NT_STATUS_OK;
        }

        return brl_locktest(pvfs->brl_context,
                            f->brl_handle,
                            smbpid,
                            offset, count, rw);
}
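
/*
  Usage sketch (illustrative, not part of the original file): a caller
  about to do IO would typically check the byte range first, e.g.

        status = pvfs_check_lock(pvfs, f, req->smbpid,
                                 offset, count, READ_LOCK);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

  With "strict locking" disabled, the check above short-circuits to
  NT_STATUS_OK, trading strict SMB lock semantics for speed.
*/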

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
        struct pvfs_pending_lock *next, *prev;
        struct pvfs_state *pvfs;
        union smb_lock *lck;            /* the original lock request */
        struct pvfs_file *f;
        struct ntvfs_request *req;
        int pending_lock;               /* index in lck->lockx.in.locks we are waiting on */
        void *wait_handle;              /* handle from pvfs_wait_message() */
        struct timeval end_time;        /* when the lock request times out */
};

/*
  a secondary attempt to set up a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
                                   struct ntvfs_request *req,
                                   struct pvfs_file *f,
                                   struct smb_lock_entry *locks,
                                   int i,
                                   NTSTATUS status)
{
        /* in SMB2 mode we also try to unlock the failing lock */
        if (req->ctx->protocol != PROTOCOL_SMB2) {
                i--;
        }

        /* undo the locks we just did */
        for (;i>=0;i--) {
                brl_unlock(pvfs->brl_context,
                           f->brl_handle,
                           locks[i].pid,
                           locks[i].offset,
                           locks[i].count);
                f->lock_count--;
        }
        req->async_states->status = status;
        req->async_states->send_fn(req);
}
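
/*
  For example, if pvfs_lock_async_failed() is reached with three lock
  entries and i == 2 (locks[2] failed): under SMB1 the i-- means only
  locks[1] and locks[0] are undone, while under SMB2 the failing range
  locks[2] is also passed to brl_unlock() before the error is sent.
*/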


/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private, enum pvfs_wait_notice reason)
{
        struct pvfs_pending_lock *pending = private;
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
        struct ntvfs_request *req = pending->req;
        union smb_lock *lck = pending->lck;
        struct smb_lock_entry *locks;
        enum brl_type rw;
        NTSTATUS status;
        int i;
        BOOL timed_out;

        timed_out = (reason != PVFS_WAIT_EVENT);

        /* the lock entries to (re)acquire start after the unlock entries */
        locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = READ_LOCK;
        } else {
                rw = WRITE_LOCK;
        }

        DLIST_REMOVE(f->pending_list, pending);

        /* we don't retry on a cancel */
        if (reason == PVFS_WAIT_CANCEL) {
                status = NT_STATUS_FILE_LOCK_CONFLICT;
        } else {
                /*
                 * here it's important to pass the pending pointer
                 * because with this we'll get the correct error code
                 * FILE_LOCK_CONFLICT in the error case
                 */
                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[pending->pending_lock].pid,
                                  locks[pending->pending_lock].offset,
                                  locks[pending->pending_lock].count,
                                  rw, pending);
        }
        if (NT_STATUS_IS_OK(status)) {
                f->lock_count++;
                timed_out = False;
        }

        /* if we have failed and timed out, or succeeded, then we
           don't need the pending lock any more */
        if (NT_STATUS_IS_OK(status) || timed_out) {
                NTSTATUS status2;
                status2 = brl_remove_pending(pvfs->brl_context,
                                             f->brl_handle, pending);
                if (!NT_STATUS_IS_OK(status2)) {
                        DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
                }
                talloc_free(pending->wait_handle);
        }

        if (!NT_STATUS_IS_OK(status)) {
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
                } else {
                        /* we can try again */
                        DLIST_ADD(f->pending_list, pending);
                }
                return;
        }

        /* if we haven't timed out yet, then we can do more pending locks */
        if (rw == READ_LOCK) {
                rw = PENDING_READ_LOCK;
        } else {
                rw = PENDING_WRITE_LOCK;
        }

        /* we've now got the pending lock. try and get the rest, which might
           lead to more pending locks */
        for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[i].pid,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - set up a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
                                } else {
                                        talloc_steal(pending, pending->wait_handle);
                                        DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
                        pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
                        return;
                }

                f->lock_count++;
        }

        /* we've managed to get all the locks. Tell the client */
        req->async_states->status = NT_STATUS_OK;
        req->async_states->send_fn(req);
}
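
/*
  The retry machinery above is driven by pvfs_wait_message(): when a
  conflicting lock is dropped, a MSG_BRL_RETRY message re-enters this
  continuation with PVFS_WAIT_EVENT; reaching end_time delivers
  PVFS_WAIT_TIMEOUT, which still gets one final brl_lock() attempt;
  and PVFS_WAIT_CANCEL fails the request immediately with
  FILE_LOCK_CONFLICT. An explicit LOCKING_ANDX_CANCEL_LOCK request is
  mapped to the timeout path instead - see pvfs_lock_cancel() below.
*/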


/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
        struct pvfs_pending_lock *p, *next;

        if (f->lock_count || f->pending_list) {
                DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
                         (double)f->lock_count));
                brl_close(f->pvfs->brl_context, f->brl_handle);
                f->lock_count = 0;
        }

        /* reply to all the pending lock requests, telling them the
           lock failed */
        for (p=f->pending_list;p;p=next) {
                next = p->next;
                DLIST_REMOVE(f->pending_list, p);
                p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
                p->req->async_states->send_fn(p->req);
        }
}


/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
                                 struct pvfs_file *f)
{
        struct pvfs_pending_lock *p;

        for (p=f->pending_list;p;p=p->next) {
                /* check if the lock request matches exactly - you can only cancel with exact matches */
                if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
                    p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
                    p->lck->lockx.in.file.ntvfs == lck->lockx.in.file.ntvfs &&
                    p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
                        int i;

                        for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
                                if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
                                    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
                                    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
                                        break;
                                }
                        }
                        /* every entry must match for this to be a cancel target */
                        if (i < lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt) continue;

                        /* an exact match! we can cancel it, which is equivalent
                           to triggering the timeout early */
                        pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
                        return NT_STATUS_OK;
                }
        }

        return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
}
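
/*
  Note that a cancel must mirror the original request exactly: the same
  unlock/lock counts, the same file handle, the same mode (minus the
  LOCKING_ANDX_CANCEL_LOCK bit) and identical pid/offset/count on every
  entry. Anything less specific fails with ERRcancelviolation rather
  than guessing which pending lock was meant.
*/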


/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                   struct ntvfs_request *req, union smb_lock *lck)
{
        struct pvfs_state *pvfs = ntvfs->private_data;
        struct pvfs_file *f;
        struct smb_lock_entry *locks;
        int i;
        enum brl_type rw;
        struct pvfs_pending_lock *pending = NULL;
        NTSTATUS status;

        if (lck->generic.level != RAW_LOCK_GENERIC) {
                return ntvfs_map_lock(ntvfs, req, lck);
        }

        f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
        if (!f) {
                return NT_STATUS_INVALID_HANDLE;
        }

        if (f->handle->fd == -1) {
                return NT_STATUS_FILE_IS_A_DIRECTORY;
        }

        if (lck->lockx.in.timeout != 0 &&
            (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                pending = talloc(f, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }

                pending->pvfs = pvfs;
                pending->lck = lck;
                pending->f = f;
                pending->req = req;

                pending->end_time =
                        timeval_current_ofs(lck->lockx.in.timeout/1000,
                                            1000*(lck->lockx.in.timeout%1000));
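                /* lockx.in.timeout is in milliseconds, so the line above
                   splits it into whole seconds plus the remainder in
                   microseconds: e.g. a 2500ms timeout becomes
                   timeval_current_ofs(2, 500000) */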
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = pending? PENDING_READ_LOCK : READ_LOCK;
        } else {
                rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
                return pvfs_lock_cancel(pvfs, req, lck, f);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
                /* this does not seem to be supported by any Windows
                   server, or used by any client */
                return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
                DEBUG(0,("received unexpected oplock break\n"));
                return NT_STATUS_NOT_IMPLEMENTED;
        }


        /* the unlocks happen first */
        locks = lck->lockx.in.locks;

        for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
                status = brl_unlock(pvfs->brl_context,
                                    f->brl_handle,
                                    locks[i].pid,
                                    locks[i].offset,
                                    locks[i].count);
                if (!NT_STATUS_IS_OK(status)) {
                        return status;
                }
                f->lock_count--;
        }

        /* then the locks, which start after the unlock entries */
        locks += i;

        for (i=0;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brl_lock(pvfs->brl_context,
                                  f->brl_handle,
                                  locks[i].pid,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - set up a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        return NT_STATUS_NO_MEMORY;
                                }
                                talloc_steal(pending, pending->wait_handle);
                                DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }
                        /* in SMB2 mode we also try to unlock the failing lock */
                        if (req->ctx->protocol != PROTOCOL_SMB2) {
                                i--;
                        }
                        /* undo the locks we just did */
                        for (;i>=0;i--) {
                                brl_unlock(pvfs->brl_context,
                                           f->brl_handle,
                                           locks[i].pid,
                                           locks[i].offset,
                                           locks[i].count);
                                f->lock_count--;
                        }
                        return status;
                }
                f->lock_count++;
        }

        return NT_STATUS_OK;
}
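
/*
  Worked example (illustrative): a LockingAndX request with ulock_cnt=1
  and lock_cnt=1 carries two smb_lock_entry structures. pvfs_lock()
  first calls brl_unlock() on locks[0], then advances the locks pointer
  and calls brl_lock() on the remaining entry. If that lock conflicts
  and a non-zero timeout was given, the request is parked on
  f->pending_list and completed later by pvfs_pending_lock_continue().
*/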