r3035: if the ntvfs layers prior to us have said that we can't perform an
[bbaumbach/samba-autobuild/.git] / source4 / ntvfs / posix / pvfs_lock.c
index 548c5bd82ce0db93180d75e39debafde907a05fe..e32fcb2e3a44e2b1ef64456081766b4c527aca9f 100644 (file)
@@ -45,7 +45,8 @@ NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
 }
 
 /* this state structure holds information about a lock we are waiting on */
-struct pending_state {
+struct pvfs_pending_lock {
+       struct pvfs_pending_lock *next, *prev;
        struct pvfs_state *pvfs;
        union smb_lock *lck;
        struct pvfs_file *f;
@@ -55,7 +56,6 @@ struct pending_state {
        time_t end_time;
 };
 
-
 /*
   a secondary attempt to setup a lock has failed - back out
   the locks we did get and send an error
@@ -89,7 +89,7 @@ static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
 */
 static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
 {
-       struct pending_state *pending = private;
+       struct pvfs_pending_lock *pending = private;
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
        struct smbsrv_request *req = pending->req;
@@ -107,6 +107,8 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
                rw = WRITE_LOCK;
        }
 
+       DLIST_REMOVE(f->pending_list, pending);
+
        status = brl_lock(pvfs->brl_context,
                          &f->locking_key,
                          req->smbpid,
@@ -130,8 +132,10 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
+               } else {
+                       /* we can try again */
+                       DLIST_ADD(f->pending_list, pending);
                }
-               /* we can try again */
                return;
        }
 
@@ -148,7 +152,7 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
 
        /* we've now got the pending lock. try and get the rest, which might
           lead to more pending locks */
-       for (i=pending->pending_lock;i<lck->lockx.in.lock_cnt;i++) {            
+       for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {          
                if (pending) {
                        pending->pending_lock = i;
                }
@@ -170,6 +174,8 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
+                               } else {
+                                       DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
@@ -178,19 +184,72 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
                }
        }
 
-       brl_unlock(pvfs->brl_context,
-                  &f->locking_key,
-                  req->smbpid,
-                  f->fnum,
-                  lck->lock.in.offset,
-                  lck->lock.in.count);
-
        /* we've managed to get all the locks. Tell the client */
        req->async.status = NT_STATUS_OK;
        req->async.send_fn(req);
 }
 
 
+/*
+  called when we close a file that might have pending locks
+*/
+void pvfs_lock_close_pending(struct pvfs_state *pvfs, struct pvfs_file *f)
+{
+       struct pvfs_pending_lock *p, *next;
+       NTSTATUS status;
+
+       /* walk the file's pending lock list, abandoning each waiter.
+          'next' is saved up front because the entry is removed from the
+          list inside the loop (and is talloc'd off the request —
+          NOTE(review): assumes send_fn may release it; confirm) */
+       for (p=f->pending_list;p;p=next) {
+               next = p->next;
+               DLIST_REMOVE(f->pending_list, p);
+               /* drop the pending entry from the byte-range lock database */
+               status = brl_remove_pending(pvfs->brl_context, &f->locking_key, p);
+               if (!NT_STATUS_IS_OK(status)) {
+                       /* log and carry on — we still must answer the client */
+                       DEBUG(0,("pvfs_lock_close_pending: failed to remove pending lock - %s\n", 
+                                nt_errstr(status)));
+               }
+               /* cancel the timeout/retry wait for this blocked request */
+               talloc_free(p->wait_handle);
+               /* complete the blocked lock request with an error */
+               p->req->async.status = NT_STATUS_RANGE_NOT_LOCKED;
+               p->req->async.send_fn(p->req);
+       }
+
+}
+
+
+/*
+  cancel a set of locks
+*/
+static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request *req, union smb_lock *lck,
+                                struct pvfs_file *f)
+{
+       struct pvfs_pending_lock *p;
+
+       for (p=f->pending_list;p;p=p->next) {
+               /* check if the lock request matches exactly - you can only cancel with exact matches */
+               if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
+                   p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
+                   p->lck->lockx.in.fnum      == lck->lockx.in.fnum &&
+                   p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
+                       int i;
+
+                       /* compare every unlock and lock entry; stop at the
+                          first mismatch */
+                       for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
+                               if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
+                                   p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
+                                   p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
+                                       break;
+                               }
+                       }
+                       /* a mismatch anywhere in the combined entry list means
+                          this pending lock is not the one being cancelled
+                          (checking only ulock_cnt here would wrongly accept a
+                          mismatch in the lock entries) */
+                       if (i < lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt) continue;
+
+                       /* an exact match! we can cancel it, which is equivalent
+                          to triggering the timeout early */
+                       pvfs_pending_lock_continue(p, True);
+                       return NT_STATUS_OK;
+               }
+       }
+
+       return NT_STATUS_UNSUCCESSFUL;
+}
+
+
 /*
   lock or unlock a byte range
 */
@@ -202,7 +261,7 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
        struct smb_lock_entry *locks;
        int i;
        enum brl_type rw;
-       struct pending_state *pending = NULL;
+       struct pvfs_pending_lock *pending = NULL;
 
        f = pvfs_find_fd(pvfs, req, lck->generic.in.fnum);
        if (!f) {
@@ -236,8 +295,9 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
        }
 
        /* now the lockingX case, most common and also most complex */
-       if (lck->lockx.in.timeout != 0) {
-               pending = talloc_p(req, struct pending_state);
+       if (lck->lockx.in.timeout != 0 && 
+           req->async.send_fn) {
+               pending = talloc_p(req, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }
@@ -257,11 +317,18 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
        }
 
-       if (lck->lockx.in.mode & 
-           (LOCKING_ANDX_OPLOCK_RELEASE |
-            LOCKING_ANDX_CHANGE_LOCKTYPE |
-            LOCKING_ANDX_CANCEL_LOCK)) {
-               /* todo: need to add support for these */
+       if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
+               return pvfs_lock_cancel(pvfs, req, lck, f);
+       }
+
+       if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
+               /* this seems to not be supported by any windows server,
+                  or used by any clients */
+               return NT_STATUS_UNSUCCESSFUL;
+       }
+
+       if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
+               DEBUG(0,("received unexpected oplock break\n"));
                return NT_STATUS_NOT_IMPLEMENTED;
        }
 
@@ -309,6 +376,7 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                                if (pending->wait_handle == NULL) {
                                        return NT_STATUS_NO_MEMORY;
                                }
+                               DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }
                        /* undo the locks we just did */