don't emulate broken SMB2 locking behaviour from windows
[kai/samba.git] / source / ntvfs / posix / pvfs_lock.c
index e32fcb2e3a44e2b1ef64456081766b4c527aca9f..822b28246ad6f5a7a36cb8c490b78db7f0ccd26a 100644 (file)
@@ -7,7 +7,7 @@
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
+   the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.
    
    This program is distributed in the hope that it will be useful,
    GNU General Public License for more details.
    
    You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
 
-#include "include/includes.h"
+#include "includes.h"
 #include "vfs_posix.h"
+#include "system/time.h"
+#include "lib/util/dlinklist.h"
+#include "messaging/messaging.h"
 
 
 /*
@@ -38,8 +40,7 @@ NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
        }
 
        return brl_locktest(pvfs->brl_context,
-                           &f->locking_key,
-                           f->fnum,
+                           f->brl_handle,
                            smbpid,
                            offset, count, rw);
 }
@@ -50,10 +51,10 @@ struct pvfs_pending_lock {
        struct pvfs_state *pvfs;
        union smb_lock *lck;
        struct pvfs_file *f;
-       struct smbsrv_request *req;
+       struct ntvfs_request *req;
        int pending_lock;
-       void *wait_handle;
-       time_t end_time;
+       struct pvfs_wait *wait_handle;
+       struct timeval end_time;
 };
 
 /*
@@ -61,43 +62,46 @@ struct pvfs_pending_lock {
   the locks we did get and send an error
 */
 static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
-                                  struct smbsrv_request *req,
+                                  struct ntvfs_request *req,
                                   struct pvfs_file *f,
                                   struct smb_lock_entry *locks,
                                   int i,
                                   NTSTATUS status)
 {
        /* undo the locks we just did */
-       for (i=i-1;i>=0;i--) {
+       for (i--;i>=0;i--) {
                brl_unlock(pvfs->brl_context,
-                          &f->locking_key,
+                          f->brl_handle,
                           locks[i].pid,
-                          f->fnum,
                           locks[i].offset,
                           locks[i].count);
+               f->lock_count--;
        }
-       req->async.status = status;
-       req->async.send_fn(req);
+       req->async_states->status = status;
+       req->async_states->send_fn(req);
 }
 
 
 /*
   called when we receive a pending lock notification. It means that
-  either our lock timed out or somoene else has unlocked a overlapping
+  either our lock timed out or someone else has unlocked an overlapping
   range, so we should try the lock again. Note that on timeout we
   do retry the lock, giving it a last chance.
 */
-static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
+static void pvfs_pending_lock_continue(void *private, enum pvfs_wait_notice reason)
 {
        struct pvfs_pending_lock *pending = private;
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
-       struct smbsrv_request *req = pending->req;
+       struct ntvfs_request *req = pending->req;
        union smb_lock *lck = pending->lck;
        struct smb_lock_entry *locks;
        enum brl_type rw;
        NTSTATUS status;
        int i;
+       bool timed_out;
+
+       timed_out = (reason != PVFS_WAIT_EVENT);
 
        locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;
 
@@ -109,19 +113,33 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
 
        DLIST_REMOVE(f->pending_list, pending);
 
-       status = brl_lock(pvfs->brl_context,
-                         &f->locking_key,
-                         req->smbpid,
-                         f->fnum,
-                         locks[pending->pending_lock].offset,
-                         locks[pending->pending_lock].count,
-                         rw, NULL);
+       /* we don't retry on a cancel */
+       if (reason == PVFS_WAIT_CANCEL) {
+               status = NT_STATUS_FILE_LOCK_CONFLICT;
+       } else {
+               /* 
+                * here it's important to pass the pending pointer
+                * because with this we'll get the correct error code
+                * FILE_LOCK_CONFLICT in the error case
+                */
+               status = brl_lock(pvfs->brl_context,
+                                 f->brl_handle,
+                                 locks[pending->pending_lock].pid,
+                                 locks[pending->pending_lock].offset,
+                                 locks[pending->pending_lock].count,
+                                 rw, pending);
+       }
+       if (NT_STATUS_IS_OK(status)) {
+               f->lock_count++;
+               timed_out = false;
+       }
 
        /* if we have failed and timed out, or succeeded, then we
           don't need the pending lock any more */
        if (NT_STATUS_IS_OK(status) || timed_out) {
                NTSTATUS status2;
-               status2 = brl_remove_pending(pvfs->brl_context, &f->locking_key, pending);
+               status2 = brl_remove_pending(pvfs->brl_context, 
+                                            f->brl_handle, pending);
                if (!NT_STATUS_IS_OK(status2)) {
                        DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
                }
@@ -132,6 +150,7 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
+                       talloc_free(pending);
                } else {
                        /* we can try again */
                        DLIST_ADD(f->pending_list, pending);
@@ -140,14 +159,10 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
        }
 
        /* if we haven't timed out yet, then we can do more pending locks */
-       if (timed_out) {
-               pending = NULL;
+       if (rw == READ_LOCK) {
+               rw = PENDING_READ_LOCK;
        } else {
-               if (rw == READ_LOCK) {
-                       rw = PENDING_READ_LOCK;
-               } else {
-                       rw = PENDING_WRITE_LOCK;
-               }
+               rw = PENDING_WRITE_LOCK;
        }
 
        /* we've now got the pending lock. try and get the rest, which might
@@ -158,9 +173,8 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
                }
 
                status = brl_lock(pvfs->brl_context,
-                                 &f->locking_key,
-                                 req->smbpid,
-                                 f->fnum,
+                                 f->brl_handle,
+                                 locks[i].pid,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
@@ -174,50 +188,57 @@ static void pvfs_pending_lock_continue(void *private, BOOL timed_out)
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
+                                       talloc_free(pending);
                                } else {
+                                       talloc_steal(pending, pending->wait_handle);
                                        DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
                        pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
+                       talloc_free(pending);
                        return;
                }
+
+               f->lock_count++;
        }
 
        /* we've managed to get all the locks. Tell the client */
-       req->async.status = NT_STATUS_OK;
-       req->async.send_fn(req);
+       req->async_states->status = NT_STATUS_OK;
+       req->async_states->send_fn(req);
+       talloc_free(pending);
 }
 
 
 /*
-  called when we close a file that might have pending locks
+  called when we close a file that might have locks
 */
-void pvfs_lock_close_pending(struct pvfs_state *pvfs, struct pvfs_file *f)
+void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
 {
        struct pvfs_pending_lock *p, *next;
-       NTSTATUS status;
 
+       if (f->lock_count || f->pending_list) {
+               DEBUG(5,("pvfs_lock: removing %.0f locks on close\n", 
+                        (double)f->lock_count));
+               brl_close(f->pvfs->brl_context, f->brl_handle);
+               f->lock_count = 0;
+       }
+
+       /* reply to all the pending lock requests, telling them the 
+          lock failed */
        for (p=f->pending_list;p;p=next) {
                next = p->next;
                DLIST_REMOVE(f->pending_list, p);
-               status = brl_remove_pending(pvfs->brl_context, &f->locking_key, p);
-               if (!NT_STATUS_IS_OK(status)) {
-                       DEBUG(0,("pvfs_lock_close_pending: failed to remove pending lock - %s\n", 
-                                nt_errstr(status)));
-               }
-               talloc_free(p->wait_handle);
-               p->req->async.status = NT_STATUS_RANGE_NOT_LOCKED;
-               p->req->async.send_fn(p->req);
+               p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
+               p->req->async_states->send_fn(p->req);
        }
-
 }
 
 
 /*
   cancel a set of locks
 */
-static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request *req, union smb_lock *lck,
+static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
                                 struct pvfs_file *f)
 {
        struct pvfs_pending_lock *p;
@@ -226,7 +247,7 @@ static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request
                /* check if the lock request matches exactly - you can only cancel with exact matches */
                if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
                    p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
-                   p->lck->lockx.in.fnum      == lck->lockx.in.fnum &&
+                   p->lck->lockx.in.file.ntvfs== lck->lockx.in.file.ntvfs &&
                    p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
                        int i;
 
@@ -241,12 +262,12 @@ static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request
 
                        /* an exact match! we can cancel it, which is equivalent
                           to triggering the timeout early */
-                       pvfs_pending_lock_continue(p ,True);
+                       pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
                        return NT_STATUS_OK;
                }
        }
 
-       return NT_STATUS_UNSUCCESSFUL;
+       return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
 }
 
 
@@ -254,7 +275,7 @@ static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct smbsrv_request
   lock or unlock a byte range
 */
 NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
-                  struct smbsrv_request *req, union smb_lock *lck)
+                  struct ntvfs_request *req, union smb_lock *lck)
 {
        struct pvfs_state *pvfs = ntvfs->private_data;
        struct pvfs_file *f;
@@ -262,42 +283,31 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
        int i;
        enum brl_type rw;
        struct pvfs_pending_lock *pending = NULL;
+       NTSTATUS status;
+
+       if (lck->generic.level != RAW_LOCK_GENERIC) {
+               return ntvfs_map_lock(ntvfs, req, lck);
+       }
 
-       f = pvfs_find_fd(pvfs, req, lck->generic.in.fnum);
+       if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
+               return pvfs_oplock_release(ntvfs, req, lck);
+       }
+
+       f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
        if (!f) {
                return NT_STATUS_INVALID_HANDLE;
        }
 
-       switch (lck->generic.level) {
-       case RAW_LOCK_LOCK:
-               return brl_lock(pvfs->brl_context,
-                               &f->locking_key,
-                               req->smbpid,
-                               f->fnum,
-                               lck->lock.in.offset,
-                               lck->lock.in.count,
-                               WRITE_LOCK, NULL);
-                               
-       case RAW_LOCK_UNLOCK:
-               return brl_unlock(pvfs->brl_context,
-                                 &f->locking_key,
-                                 req->smbpid,
-                                 f->fnum,
-                                 lck->lock.in.offset,
-                                 lck->lock.in.count);
-
-       case RAW_LOCK_GENERIC:
-               return NT_STATUS_INVALID_LEVEL;
-
-       case RAW_LOCK_LOCKX:
-               /* fall through to the most complex case */
-               break;
+       if (f->handle->fd == -1) {
+               return NT_STATUS_FILE_IS_A_DIRECTORY;
        }
 
-       /* now the lockingX case, most common and also most complex */
+       status = pvfs_break_level2_oplocks(f);
+       NT_STATUS_NOT_OK_RETURN(status);
+
        if (lck->lockx.in.timeout != 0 && 
-           req->async.send_fn) {
-               pending = talloc_p(req, struct pvfs_pending_lock);
+           (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
+               pending = talloc(f, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }
@@ -307,8 +317,9 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                pending->f = f;
                pending->req = req;
 
-               /* round up to the nearest second */
-               pending->end_time = time(NULL) + ((lck->lockx.in.timeout+999)/1000);
+               pending->end_time = 
+                       timeval_current_ofs(lck->lockx.in.timeout/1000,
+                                           1000*(lck->lockx.in.timeout%1000));
        }
 
        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
@@ -318,50 +329,43 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
        }
 
        if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
+               talloc_free(pending);
                return pvfs_lock_cancel(pvfs, req, lck, f);
        }
 
        if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
                /* this seems to not be supported by any windows server,
                   or used by any clients */
-               return NT_STATUS_UNSUCCESSFUL;
+               talloc_free(pending);
+               return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
        }
 
-       if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
-               DEBUG(0,("received unexpected oplock break\n"));
-               return NT_STATUS_NOT_IMPLEMENTED;
-       }
-
-
        /* the unlocks happen first */
        locks = lck->lockx.in.locks;
 
        for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
-               NTSTATUS status;
                status = brl_unlock(pvfs->brl_context,
-                                   &f->locking_key,
+                                   f->brl_handle,
                                    locks[i].pid,
-                                   f->fnum,
                                    locks[i].offset,
                                    locks[i].count);
                if (!NT_STATUS_IS_OK(status)) {
+                       talloc_free(pending);
                        return status;
                }
+               f->lock_count--;
        }
 
        locks += i;
 
        for (i=0;i<lck->lockx.in.lock_cnt;i++) {
-               NTSTATUS status;
-
                if (pending) {
                        pending->pending_lock = i;
                }
 
                status = brl_lock(pvfs->brl_context,
-                                 &f->locking_key,
+                                 f->brl_handle,
                                  locks[i].pid,
-                                 f->fnum,
                                  locks[i].offset,
                                  locks[i].count,
                                  rw, pending);
@@ -374,24 +378,29 @@ NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
+                                       talloc_free(pending);
                                        return NT_STATUS_NO_MEMORY;
                                }
+                               talloc_steal(pending, pending->wait_handle);
                                DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }
+
                        /* undo the locks we just did */
-                       for (i=i-1;i>=0;i--) {
+                       for (i--;i>=0;i--) {
                                brl_unlock(pvfs->brl_context,
-                                          &f->locking_key,
+                                          f->brl_handle,
                                           locks[i].pid,
-                                          f->fnum,
                                           locks[i].offset,
                                           locks[i].count);
+                               f->lock_count--;
                        }
+                       talloc_free(pending);
                        return status;
                }
+               f->lock_count++;
        }
 
+       talloc_free(pending);
        return NT_STATUS_OK;
 }
-