2 Unix SMB/CIFS implementation.
4 POSIX NTVFS backend - locking
6 Copyright (C) Andrew Tridgell 2004
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include "include/includes.h"
24 #include "vfs_posix.h"
/*
  Test whether a read/write on [offset, offset+count) would conflict with an
  existing byte-range lock on the file.
  NOTE(review): this view of the source is truncated — part of the parameter
  list and the function body/closing brace are missing.
*/
28 check if we can perform IO on a range that might be locked
30 NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
33 uint64_t offset, uint64_t count,
/* strict locking disabled for this share: skip the brl check and allow the IO */
36 if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
/* delegate the conflict test to the byte-range lock (brl) subsystem */
40 return brl_locktest(pvfs->brl_context,
47 /* this state structure holds information about a lock we are waiting on */
48 struct pvfs_pending_lock {
/* doubly-linked list pointers; entries live on f->pending_list and are
   managed with the DLIST_ADD/DLIST_REMOVE macros (see users below) */
49 struct pvfs_pending_lock *next, *prev;
/* owning backend state, used to reach pvfs->brl_context on retry */
50 struct pvfs_state *pvfs;
/* the client request to complete (via req->async_states->send_fn) once the
   pending lock succeeds, fails, or times out */
53 struct smbsrv_request *req;
/* NOTE(review): further members (f, lck, pending_lock, wait_handle,
   end_time — all referenced by the functions below) are missing from this
   truncated view */
/*
  Roll back partial progress of an async multi-lock request: release the
  byte-range locks acquired so far, then complete the client request with
  the failure status.
*/
60 a secondary attempt to setup a lock has failed - back out
61 the locks we did get and send an error
63 static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
64 struct smbsrv_request *req,
66 struct smb_lock_entry *locks,
/* walk backwards from the last successfully acquired lock, undoing each one;
   NOTE(review): 'i' is presumably the index of the lock that failed —
   the parameter declaration is missing from this truncated view */
70 /* undo the locks we just did */
71 for (i=i-1;i>=0;i--) {
72 brl_unlock(pvfs->brl_context,
/* hand the failure status back to the client and fire the async completion */
80 req->async_states->status = status;
81 req->async_states->send_fn(req);
/*
  Retry callback for a pending (blocked) byte-range lock.
  Invoked from the wait handle set up by pvfs_lock()/this function, either
  because another client released an overlapping range (timed_out == False)
  or because our timeout expired (timed_out == True — we still get one last
  retry attempt before failing the request).
*/
86 called when we receive a pending lock notification. It means that
87 either our lock timed out or someone else has unlocked an overlapping
88 range, so we should try the lock again. Note that on timeout we
89 do retry the lock, giving it a last chance.
91 static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
/* unpack the state captured when the lock first blocked */
93 struct pvfs_pending_lock *pending = private;
94 struct pvfs_state *pvfs = pending->pvfs;
95 struct pvfs_file *f = pending->f;
96 struct smbsrv_request *req = pending->req;
97 union smb_lock *lck = pending->lck;
98 struct smb_lock_entry *locks;
/* the lock entries follow the unlock entries in the LOCKING_ANDX array;
   skip past the unlocks to the locks */
103 locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;
105 if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
/* take ourselves off the file's pending list before retrying */
111 DLIST_REMOVE(f->pending_list, pending);
/* retry the lock that originally blocked (index pending->pending_lock) */
113 status = brl_lock(pvfs->brl_context,
117 locks[pending->pending_lock].offset,
118 locks[pending->pending_lock].count,
121 if (NT_STATUS_IS_OK(status)) {
125 /* if we have failed and timed out, or succeeded, then we
126 don't need the pending lock any more */
127 if (NT_STATUS_IS_OK(status) || timed_out) {
129 status2 = brl_remove_pending(pvfs->brl_context, &f->locking_key, pending);
130 if (!NT_STATUS_IS_OK(status2)) {
131 DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
/* drop the wait handle so no further notifications/timeouts fire */
133 talloc_free(pending->wait_handle);
136 if (!NT_STATUS_IS_OK(status)) {
/* timed out AND the final retry failed: back out and answer the client */
138 /* no more chances */
139 pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
/* not timed out yet: re-queue and wait for another notification */
141 /* we can try again */
142 DLIST_ADD(f->pending_list, pending);
147 /* if we haven't timed out yet, then we can do more pending locks */
/* convert the lock type to its PENDING_* variant so brl_lock registers us
   for a retry notification instead of failing outright */
151 if (rw == READ_LOCK) {
152 rw = PENDING_READ_LOCK;
154 rw = PENDING_WRITE_LOCK;
158 /* we've now got the pending lock. try and get the rest, which might
159 lead to more pending locks */
160 for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
/* remember which lock we are on, in case this one blocks too */
162 pending->pending_lock = i;
165 status = brl_lock(pvfs->brl_context,
172 if (!NT_STATUS_IS_OK(status)) {
174 /* a timed lock failed - setup a wait message to handle
175 the pending lock notification or a timeout */
176 pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
178 pvfs_pending_lock_continue,
180 if (pending->wait_handle == NULL) {
181 pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
183 DLIST_ADD(f->pending_list, pending);
/* lock failed and we cannot (or may not) wait: undo everything and reply */
187 pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
194 /* we've managed to get all the locks. Tell the client */
195 req->async_states->status = NT_STATUS_OK;
196 req->async_states->send_fn(req);
/*
  File-close hook: release every byte-range lock the handle still holds and
  fail any lock requests that are still waiting on this file.
*/
201 called when we close a file that might have locks
203 void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
205 struct pvfs_pending_lock *p, *next;
/* only touch the brl database if there is anything to clean up */
207 if (f->lock_count || f->pending_list) {
208 DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
209 (double)f->lock_count));
/* drop all locks held by this fnum in one call */
210 brl_close(f->pvfs->brl_context, &f->locking_key, f->fnum);
/* each waiter is removed, its wait handle freed (cancelling the retry
   notification), and its request completed with RANGE_NOT_LOCKED;
   'next' is captured by the loop so removal during iteration is safe */
214 /* reply to all the pending lock requests, telling them the
216 for (p=f->pending_list;p;p=next) {
218 DLIST_REMOVE(f->pending_list, p);
219 talloc_free(p->wait_handle);
220 p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
221 p->req->async_states->send_fn(p->req);
/*
  Handle LOCKING_ANDX_CANCEL_LOCK: find a pending lock request that exactly
  matches this one (same counts, fnum, mode and identical lock entries) and
  cancel it by firing its timeout path early. Returns NT_STATUS_UNSUCCESSFUL
  when no matching pending request exists.
*/
227 cancel a set of locks
229 static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request *req, union smb_lock *lck,
232 struct pvfs_pending_lock *p;
234 for (p=f->pending_list;p;p=p->next) {
235 /* check if the lock request matches exactly - you can only cancel with exact matches */
/* the CANCEL bit itself is masked out of the incoming mode before comparing */
236 if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
237 p->lck->lockx.in.lock_cnt == lck->lockx.in.lock_cnt &&
238 p->lck->lockx.in.fnum == lck->lockx.in.fnum &&
239 p->lck->lockx.in.mode == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
/* compare every unlock and lock entry; a mismatch breaks out early */
242 for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
243 if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
244 p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
245 p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
/* NOTE(review): this compares i against ulock_cnt only, while the scan above
   runs to ulock_cnt + lock_cnt — looks like it should test the full count to
   reject partial matches; verify against the unabridged source before changing */
249 if (i < lck->lockx.in.ulock_cnt) continue;
251 /* an exact match! we can cancel it, which is equivalent
252 to triggering the timeout early */
253 pvfs_pending_lock_continue(p ,True);
/* no pending request matched */
258 return NT_STATUS_UNSUCCESSFUL;
/*
  NTVFS entry point for SMB LOCKING_ANDX: perform the requested unlocks, then
  the requested locks, optionally going async (pending) when a lock blocks
  and the request allows a timeout.
  NOTE(review): this view is truncated — several statements and the tail of
  the function fall outside the visible range.
*/
263 lock or unlock a byte range
265 NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
266 struct smbsrv_request *req, union smb_lock *lck)
268 struct pvfs_state *pvfs = ntvfs->private_data;
270 struct smb_lock_entry *locks;
273 struct pvfs_pending_lock *pending = NULL;
/* non-generic lock levels are converted to the generic form and re-dispatched */
276 if (lck->generic.level != RAW_LOCK_GENERIC) {
277 return ntvfs_map_lock(req, lck, ntvfs);
/* resolve the client's fnum to our file state */
280 f = pvfs_find_fd(pvfs, req, lck->lockx.in.fnum);
282 return NT_STATUS_INVALID_HANDLE;
/* byte-range locks are not valid on directory handles */
285 if (f->name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY) {
286 return NT_STATUS_FILE_IS_A_DIRECTORY;
/* only allocate async pending state when the client asked for a timeout and
   this request is allowed to complete asynchronously */
289 if (lck->lockx.in.timeout != 0 &&
290 (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
291 pending = talloc_p(req, struct pvfs_pending_lock);
292 if (pending == NULL) {
293 return NT_STATUS_NO_MEMORY;
296 pending->pvfs = pvfs;
/* convert the client timeout from milliseconds to an absolute time_t */
301 /* round up to the nearest second */
302 pending->end_time = time(NULL) + ((lck->lockx.in.timeout+999)/1000);
/* choose read vs write lock; PENDING_* variants register for retry
   notification when we are prepared to wait */
305 if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
306 rw = pending? PENDING_READ_LOCK : READ_LOCK;
308 rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
/* cancel requests are routed to the pending-lock matcher */
311 if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
312 return pvfs_lock_cancel(pvfs, req, lck, f);
315 if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
316 /* this seems to not be supported by any windows server,
317 or used by any clients */
318 return NT_STATUS_UNSUCCESSFUL;
/* oplock break responses should not arrive through the lock path */
321 if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
322 DEBUG(0,("received unexpected oplock break\n"));
323 return NT_STATUS_NOT_IMPLEMENTED;
/* per LOCKING_ANDX semantics the unlock entries are processed before locks */
327 /* the unlocks happen first */
328 locks = lck->lockx.in.locks;
330 for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
331 status = brl_unlock(pvfs->brl_context,
337 if (!NT_STATUS_IS_OK(status)) {
/* now attempt each lock entry in order */
345 for (i=0;i<lck->lockx.in.lock_cnt;i++) {
347 pending->pending_lock = i;
350 status = brl_lock(pvfs->brl_context,
357 if (!NT_STATUS_IS_OK(status)) {
359 /* a timed lock failed - setup a wait message to handle
360 the pending lock notification or a timeout */
361 pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
363 pvfs_pending_lock_continue,
365 if (pending->wait_handle == NULL) {
366 return NT_STATUS_NO_MEMORY;
/* queue the pending state; pvfs_pending_lock_continue will finish the job */
368 DLIST_ADD(f->pending_list, pending);
/* synchronous failure: release the locks already acquired in this request */
371 /* undo the locks we just did */
372 for (i=i-1;i>=0;i--) {
373 brl_unlock(pvfs->brl_context,