dns: Use new DNS debugclass in DNS server
[kai/samba.git] / source4 / ntvfs / posix / pvfs_lock.c
1 /* 
2    Unix SMB/CIFS implementation.
3
4    POSIX NTVFS backend - locking
5
6    Copyright (C) Andrew Tridgell 2004
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12    
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17    
18    You should have received a copy of the GNU General Public License
19    along with this program.  If not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include "includes.h"
23 #include "vfs_posix.h"
24 #include "system/time.h"
25 #include "../lib/util/dlinklist.h"
26 #include "messaging/messaging.h"
27
28
29 /*
30   check if we can perform IO on a range that might be locked
31 */
32 NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
33                          struct pvfs_file *f,
34                          uint32_t smbpid,
35                          uint64_t offset, uint64_t count,
36                          enum brl_type rw)
37 {
38         if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
39                 return NT_STATUS_OK;
40         }
41
42         return brlock_locktest(pvfs->brl_context,
43                             f->brl_handle,
44                             smbpid,
45                             offset, count, rw);
46 }
47
/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
	struct pvfs_pending_lock *next, *prev;	/* links for f->pending_list */
	struct pvfs_state *pvfs;	/* backend state the lock belongs to */
	union smb_lock *lck;		/* the original client lock request */
	struct pvfs_file *f;		/* the open file being locked */
	struct ntvfs_request *req;	/* request to complete once the lock resolves */
	int pending_lock;		/* index (into the lock entries after the
					   unlocks) of the entry we are blocked on */
	struct pvfs_wait *wait_handle;	/* MSG_BRL_RETRY wait; freed once the
					   lock is granted or finally fails */
	struct timeval end_time;	/* absolute deadline for the blocking lock */
};
59
60 /*
61   a secondary attempt to setup a lock has failed - back out
62   the locks we did get and send an error
63 */
64 static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
65                                    struct ntvfs_request *req,
66                                    struct pvfs_file *f,
67                                    struct smb_lock_entry *locks,
68                                    int i,
69                                    NTSTATUS status)
70 {
71         /* undo the locks we just did */
72         for (i--;i>=0;i--) {
73                 brlock_unlock(pvfs->brl_context,
74                            f->brl_handle,
75                            locks[i].pid,
76                            locks[i].offset,
77                            locks[i].count);
78                 f->lock_count--;
79         }
80         req->async_states->status = status;
81         req->async_states->send_fn(req);
82 }
83
84
85 /*
86   called when we receive a pending lock notification. It means that
87   either our lock timed out or someone else has unlocked a overlapping
88   range, so we should try the lock again. Note that on timeout we
89   do retry the lock, giving it a last chance.
90 */
/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked a overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private_data, enum pvfs_wait_notice reason)
{
	struct pvfs_pending_lock *pending = talloc_get_type(private_data,
					    struct pvfs_pending_lock);
	struct pvfs_state *pvfs = pending->pvfs;
	struct pvfs_file *f = pending->f;
	struct ntvfs_request *req = pending->req;
	union smb_lock *lck = pending->lck;
	struct smb_lock_entry *locks;
	enum brl_type rw;
	NTSTATUS status;
	int i;
	bool timed_out;

	/* anything other than an unlock event (i.e. timeout or cancel)
	   counts as timed out for the retry logic below */
	timed_out = (reason != PVFS_WAIT_EVENT);

	/* the lock entries follow the unlock entries in the lockx array */
	locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = READ_LOCK;
	} else {
		rw = WRITE_LOCK;
	}

	/* take ourselves off the pending list while we retry; we may be
	   re-added below if the lock still conflicts */
	DLIST_REMOVE(f->pending_list, pending);

	/* we don't retry on a cancel */
	if (reason == PVFS_WAIT_CANCEL) {
		/* SMB1 and SMB2 report a cancelled blocking lock differently */
		if (pvfs->ntvfs->ctx->protocol < PROTOCOL_SMB2_02) {
			status = NT_STATUS_FILE_LOCK_CONFLICT;
		} else {
			status = NT_STATUS_CANCELLED;
		}
	} else {
		/* 
		 * here it's important to pass the pending pointer
		 * because with this we'll get the correct error code
		 * FILE_LOCK_CONFLICT in the error case
		 */
		status = brlock_lock(pvfs->brl_context,
				  f->brl_handle,
				  locks[pending->pending_lock].pid,
				  locks[pending->pending_lock].offset,
				  locks[pending->pending_lock].count,
				  rw, pending);
	}
	if (NT_STATUS_IS_OK(status)) {
		f->lock_count++;
		/* the lock was granted after all - do not treat this
		   invocation as a timeout */
		timed_out = false;
	}

	/* if we have failed and timed out, or succeeded, then we
	   don't need the pending lock any more */
	if (NT_STATUS_IS_OK(status) || timed_out) {
		NTSTATUS status2;
		status2 = brlock_remove_pending(pvfs->brl_context, 
					     f->brl_handle, pending);
		if (!NT_STATUS_IS_OK(status2)) {
			DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
		}
		/* the wait is over; drop the notification handle */
		talloc_free(pending->wait_handle);
	}

	if (!NT_STATUS_IS_OK(status)) {
		if (timed_out) {
			/* no more chances */
			pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
			talloc_free(pending);
		} else {
			/* we can try again */
			DLIST_ADD(f->pending_list, pending);
		}
		return;
	}

	/* if we haven't timed out yet, then we can do more pending locks */
	if (rw == READ_LOCK) {
		rw = PENDING_READ_LOCK;
	} else {
		rw = PENDING_WRITE_LOCK;
	}

	/* we've now got the pending lock. try and get the rest, which might
	   lead to more pending locks */
	for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {		
		if (pending) {
			/* remember which entry to retry if we block again */
			pending->pending_lock = i;
		}

		status = brlock_lock(pvfs->brl_context,
				  f->brl_handle,
				  locks[i].pid,
				  locks[i].offset,
				  locks[i].count,
				  rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY, 
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
					talloc_free(pending);
				} else {
					/* tie the wait's lifetime to the pending
					   record and park it on the list again */
					talloc_steal(pending, pending->wait_handle);
					DLIST_ADD(f->pending_list, pending);
				}
				return;
			}
			pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
			talloc_free(pending);
			return;
		}

		f->lock_count++;
	}

	/* we've managed to get all the locks. Tell the client */
	req->async_states->status = NT_STATUS_OK;
	req->async_states->send_fn(req);
	talloc_free(pending);
}
216
217
218 /*
219   called when we close a file that might have locks
220 */
221 void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
222 {
223         struct pvfs_pending_lock *p, *next;
224
225         if (f->lock_count || f->pending_list) {
226                 DEBUG(5,("pvfs_lock: removing %.0f locks on close\n", 
227                          (double)f->lock_count));
228                 brlock_close(f->pvfs->brl_context, f->brl_handle);
229                 f->lock_count = 0;
230         }
231
232         /* reply to all the pending lock requests, telling them the 
233            lock failed */
234         for (p=f->pending_list;p;p=next) {
235                 next = p->next;
236                 DLIST_REMOVE(f->pending_list, p);
237                 p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
238                 p->req->async_states->send_fn(p->req);
239         }
240 }
241
242
243 /*
244   cancel a set of locks
245 */
246 static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
247                                  struct pvfs_file *f)
248 {
249         struct pvfs_pending_lock *p;
250
251         for (p=f->pending_list;p;p=p->next) {
252                 /* check if the lock request matches exactly - you can only cancel with exact matches */
253                 if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
254                     p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
255                     p->lck->lockx.in.file.ntvfs== lck->lockx.in.file.ntvfs &&
256                     p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
257                         int i;
258
259                         for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
260                                 if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
261                                     p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
262                                     p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
263                                         break;
264                                 }
265                         }
266                         if (i < lck->lockx.in.ulock_cnt) continue;
267
268                         /* an exact match! we can cancel it, which is equivalent
269                            to triggering the timeout early */
270                         pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
271                         return NT_STATUS_OK;
272                 }
273         }
274
275         return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
276 }
277
278
279 /*
280   lock or unlock a byte range
281 */
/*
  lock or unlock a byte range

  Handles the generic lockx form: performs all requested unlocks
  first, then attempts the locks. If a lock conflicts and the request
  has a non-zero timeout and may go async, a pending lock is set up
  and NT_STATUS_OK is returned with the reply deferred to
  pvfs_pending_lock_continue().
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
		   struct ntvfs_request *req, union smb_lock *lck)
{
	struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
				  struct pvfs_state);
	struct pvfs_file *f;
	struct smb_lock_entry *locks;
	int i;
	enum brl_type rw;
	struct pvfs_pending_lock *pending = NULL;
	NTSTATUS status;

	/* map non-generic lock levels onto the generic form first */
	if (lck->generic.level != RAW_LOCK_GENERIC) {
		return ntvfs_map_lock(ntvfs, req, lck);
	}

	/* oplock break acknowledgements arrive via the lock call */
	if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
		return pvfs_oplock_release(ntvfs, req, lck);
	}

	f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
	if (!f) {
		return NT_STATUS_INVALID_HANDLE;
	}

	/* no underlying fd means this is a directory handle - can't lock it */
	if (f->handle->fd == -1) {
		return NT_STATUS_FILE_IS_A_DIRECTORY;
	}

	status = pvfs_break_level2_oplocks(f);
	NT_STATUS_NOT_OK_RETURN(status);

	/* a non-zero timeout on an async-capable request means the lock
	   may block: prepare the pending state up front so a conflict
	   below can be turned into a pending lock */
	if (lck->lockx.in.timeout != 0 && 
	    (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		pending = talloc(f, struct pvfs_pending_lock);
		if (pending == NULL) {
			return NT_STATUS_NO_MEMORY;
		}

		pending->pvfs = pvfs;
		pending->lck = lck;
		pending->f = f;
		pending->req = req;

		/* absolute deadline for the blocking lock */
		pending->end_time = 
			timeval_current_ofs_msec(lck->lockx.in.timeout);
	}

	/* use the PENDING_* lock types when we might block, so the brlock
	   layer can notify us on unlock of a conflicting range */
	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = pending? PENDING_READ_LOCK : READ_LOCK;
	} else {
		rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
		talloc_free(pending);
		return pvfs_lock_cancel(pvfs, req, lck, f);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
		/* this seems to not be supported by any windows server,
		   or used by any clients */
		talloc_free(pending);
		return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
	}

	/* the unlocks happen first */
	locks = lck->lockx.in.locks;

	for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
		status = brlock_unlock(pvfs->brl_context,
				    f->brl_handle,
				    locks[i].pid,
				    locks[i].offset,
				    locks[i].count);
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(pending);
			return status;
		}
		f->lock_count--;
	}

	/* the lock entries follow the unlock entries in the array */
	locks += i;

	for (i=0;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			/* remember which entry to retry if we go async */
			pending->pending_lock = i;
		}

		status = brlock_lock(pvfs->brl_context,
				  f->brl_handle,
				  locks[i].pid,
				  locks[i].offset,
				  locks[i].count,
				  rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY, 
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					talloc_free(pending);
					return NT_STATUS_NO_MEMORY;
				}
				/* tie the wait's lifetime to the pending record */
				talloc_steal(pending, pending->wait_handle);
				DLIST_ADD(f->pending_list, pending);
				/* reply is deferred until the retry resolves */
				return NT_STATUS_OK;
			}

			/* undo the locks we just did */
			for (i--;i>=0;i--) {
				brlock_unlock(pvfs->brl_context,
					   f->brl_handle,
					   locks[i].pid,
					   locks[i].offset,
					   locks[i].count);
				f->lock_count--;
			}
			talloc_free(pending);
			return status;
		}
		f->lock_count++;
	}

	/* all locks were granted synchronously */
	talloc_free(pending);
	return NT_STATUS_OK;
}