r3539: much nicer async open delay code.
authorAndrew Tridgell <tridge@samba.org>
Fri, 5 Nov 2004 01:14:06 +0000 (01:14 +0000)
committerGerald (Jerry) Carter <jerry@samba.org>
Wed, 10 Oct 2007 18:05:29 +0000 (13:05 -0500)
The previous code didn't handle the case where the file got renamed or
deleted while waiting for the sharing violation delay. To handle this
we need to make the 2nd open a full open call, including the name
resolve call etc. Luckily this simplifies the logic.

I also expanded the RAW-MUX test to include the case where we do
open/open/open/close/close, with the 3rd open async, and that open
gets retried after both the first close and the 2nd close, with the
first retry failing and the 2nd retry working. This tests the "async
reply after an async reply" logic in pvfs_open().
(This used to be commit eded2ad9c91f5ba587ef4f7f5f5a6dceb4b51ff3)

source4/ntvfs/common/opendb.c
source4/ntvfs/posix/pvfs_open.c
source4/smb_server/smb_server.c
source4/torture/raw/mux.c

index 39d4f37ec2e144d96aefa6760e2bd89866e614ec..bed4910be4659b4148fa97e668c76e607353891e 100644 (file)
@@ -331,12 +331,14 @@ NTSTATUS odb_close_file(struct odb_lock *lck, uint16_t fnum)
        elist = (struct odb_entry *)dbuf.dptr;
        count = dbuf.dsize / sizeof(struct odb_entry);
 
-       /* send any pending notifications */
+       /* send any pending notifications, removing them once sent */
        for (i=0;i<count;i++) {
                if (elist[i].pending) {
                        messaging_send_ptr(odb->messaging_ctx, elist[i].server, 
                                           MSG_PVFS_RETRY_OPEN, elist[i].notify_ptr);
-                       
+                       memmove(&elist[i], &elist[i+1], sizeof(struct odb_entry)*(count-(i+1)));
+                       i--;
+                       count--;
                }
        }
 
index 06191cee0a2f7f84ea2b0d37afe7f6c5088e836c..73b1949acbdcba07cc9102d09901e8043a45c570 100644 (file)
@@ -401,71 +401,28 @@ static NTSTATUS pvfs_create_file(struct pvfs_state *pvfs,
 }
 
 
-/*
-  open am existing file - called from both the open retry code
-  and the main open code
-*/
-NTSTATUS pvfs_open_existing(struct pvfs_file *f,
-                           union smb_open *io,
-                           int open_flags)
-{
-       int fd;
-       NTSTATUS status;
-
-       /* do the actual open */
-       fd = open(f->name->full_name, open_flags);
-       if (fd == -1) {
-               return pvfs_map_errno(f->pvfs, errno);
-       }
-
-       f->fd = fd;
-
-       /* re-resolve the open fd */
-       status = pvfs_resolve_name_fd(f->pvfs, fd, f->name);
-       if (!NT_STATUS_IS_OK(status)) {
-               return status;
-       }
-
-       io->generic.out.oplock_level  = NO_OPLOCK;
-       io->generic.out.fnum          = f->fnum;
-       io->generic.out.create_action = NTCREATEX_ACTION_EXISTED;
-       io->generic.out.create_time   = f->name->dos.create_time;
-       io->generic.out.access_time   = f->name->dos.access_time;
-       io->generic.out.write_time    = f->name->dos.write_time;
-       io->generic.out.change_time   = f->name->dos.change_time;
-       io->generic.out.attrib        = f->name->dos.attrib;
-       io->generic.out.alloc_size    = f->name->dos.alloc_size;
-       io->generic.out.size          = f->name->st.st_size;
-       io->generic.out.file_type     = FILE_TYPE_DISK;
-       io->generic.out.ipc_state     = 0;
-       io->generic.out.is_directory  = 0;
-
-       /* success - keep the file handle */
-       talloc_steal(f->pvfs, f);
-
-       return NT_STATUS_OK;
-}
-
 /*
   state of a pending open retry
 */
 struct pvfs_open_retry {
-       union smb_open *io;
-       struct pvfs_file *f;
+       struct ntvfs_module_context *ntvfs;
        struct smbsrv_request *req;
+       union smb_open *io;
        void *wait_handle;
-       struct timeval end_time;
-       int open_flags;
+       DATA_BLOB locking_key;
 };
 
 /* destroy a pending open request */
 static int pvfs_retry_destructor(void *ptr)
 {
        struct pvfs_open_retry *r = ptr;
-       struct odb_lock *lck;
-       lck = odb_lock(r->req, r->f->pvfs->odb_context, &r->f->locking_key);
-       if (lck != NULL) {
-               odb_remove_pending(lck, r);
+       struct pvfs_state *pvfs = r->ntvfs->private_data;
+       if (r->locking_key.data) {
+               struct odb_lock *lck;
+               lck = odb_lock(r->req, pvfs->odb_context, &r->locking_key);
+               if (lck != NULL) {
+                       odb_remove_pending(lck, r);
+               }
        }
        return 0;
 }
@@ -476,70 +433,70 @@ static int pvfs_retry_destructor(void *ptr)
 static void pvfs_open_retry(void *private, BOOL timed_out)
 {
        struct pvfs_open_retry *r = private;
-       struct odb_lock *lck;
-       struct pvfs_file *f = r->f;
+       struct ntvfs_module_context *ntvfs = r->ntvfs;
        struct smbsrv_request *req = r->req;
+       union smb_open *io = r->io;
        NTSTATUS status;
-       
-       lck = odb_lock(req, f->pvfs->odb_context, &f->locking_key);
-       if (lck == NULL) {
-               req->async_states->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
-               req->async_states->send_fn(req);
-               return;
-       }
-
-       /* see if we are allowed to open at the same time as existing opens */
-       status = odb_open_file(lck, f->fnum, f->share_access, 
-                              f->create_options, f->access_mask);
-       if (NT_STATUS_EQUAL(status, NT_STATUS_SHARING_VIOLATION) && !timed_out) {
-               talloc_free(lck);
-               return;
-       }
 
        talloc_free(r->wait_handle);
 
-       if (!NT_STATUS_IS_OK(status)) {
-               req->async_states->status = status;
+       if (timed_out) {
+               /* if it timed out, then give the failure
+                  immediately */
+               talloc_free(r);
+               req->async_states->status = NT_STATUS_SHARING_VIOLATION;
                req->async_states->send_fn(req);
                return;
        }
 
-       f->have_opendb_entry = True;
+       /* the pending odb entry is already removed. We use a null locking
+          key to indicate this */
+       data_blob_free(&r->locking_key);
+       talloc_free(r);
 
-       /* do the rest of the open work */
-       status = pvfs_open_existing(f, r->io, r->open_flags);
+       /* try the open again, which could trigger another retry setup
+          if it wants to, so we have to unmark the async flag so we
+          will know if it does a second async reply */
+       req->async_states->state &= ~NTVFS_ASYNC_STATE_ASYNC;
 
-       if (NT_STATUS_IS_OK(status)) {
-               talloc_steal(f->pvfs, f);
+       status = pvfs_open(ntvfs, req, io);
+       if (req->async_states->state & NTVFS_ASYNC_STATE_ASYNC) {
+               /* the 2nd try also replied async, so we don't send
+                  the reply yet */
+               return;
        }
 
+       /* send the reply up the chain */
        req->async_states->status = status;
        req->async_states->send_fn(req);
 }
 
+
 /*
   setup for a open retry after a sharing violation
 */
-static NTSTATUS pvfs_open_setup_retry(struct smbsrv_request *req, 
+static NTSTATUS pvfs_open_setup_retry(struct ntvfs_module_context *ntvfs,
+                                     struct smbsrv_request *req, 
                                      union smb_open *io,
-                                     struct pvfs_file *f, 
-                                     struct odb_lock *lck,
-                                     int open_flags)
+                                     struct pvfs_file *f,
+                                     struct odb_lock *lck)
 {
+       struct pvfs_state *pvfs = ntvfs->private_data;
        struct pvfs_open_retry *r;
-       struct pvfs_state *pvfs = f->pvfs;
        NTSTATUS status;
+       struct timeval end_time;
 
        r = talloc_p(req, struct pvfs_open_retry);
        if (r == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
 
-       r->io = io;
-       r->f = f;
+       r->ntvfs = ntvfs;
        r->req = req;
-       r->end_time = timeval_current_ofs(0, pvfs->sharing_violation_delay);
-       r->open_flags = open_flags;
+       r->io = io;
+       r->locking_key = data_blob_talloc(r, f->locking_key.data, f->locking_key.length);
+
+       end_time = timeval_add(&req->request_time, 0, pvfs->sharing_violation_delay);
 
        /* setup a pending lock */
        status = odb_open_file_pending(lck, r);
@@ -547,16 +504,17 @@ static NTSTATUS pvfs_open_setup_retry(struct smbsrv_request *req,
                return status;
        }
 
-       r->wait_handle = pvfs_wait_message(pvfs, req, MSG_PVFS_RETRY_OPEN, r->end_time, 
+       talloc_free(lck);
+       talloc_free(f);
+
+       talloc_set_destructor(r, pvfs_retry_destructor);
+
+       r->wait_handle = pvfs_wait_message(pvfs, req, MSG_PVFS_RETRY_OPEN, end_time, 
                                           pvfs_open_retry, r);
        if (r->wait_handle == NULL) {
                return NT_STATUS_NO_MEMORY;
        }
 
-       talloc_free(lck);
-
-       talloc_set_destructor(r, pvfs_retry_destructor);
-
        return NT_STATUS_OK;
 }
 
@@ -571,7 +529,7 @@ NTSTATUS pvfs_open(struct ntvfs_module_context *ntvfs,
        struct pvfs_filename *name;
        struct pvfs_file *f;
        NTSTATUS status;
-       int fnum;
+       int fnum, fd;
        struct odb_lock *lck;
        uint32_t create_options;
        uint32_t share_access;
@@ -749,7 +707,7 @@ NTSTATUS pvfs_open(struct ntvfs_module_context *ntvfs,
           the other user, or after 1 second */
        if (NT_STATUS_EQUAL(status, NT_STATUS_SHARING_VIOLATION) &&
            (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
-               return pvfs_open_setup_retry(req, io, f, lck, flags);
+               return pvfs_open_setup_retry(ntvfs, req, io, f, lck);
        }
 
        if (!NT_STATUS_IS_OK(status)) {
@@ -758,8 +716,38 @@ NTSTATUS pvfs_open(struct ntvfs_module_context *ntvfs,
 
        f->have_opendb_entry = True;
 
-       /* do the rest of the open work */
-       return pvfs_open_existing(f, io, flags);
+       /* do the actual open */
+       fd = open(f->name->full_name, flags);
+       if (fd == -1) {
+               return pvfs_map_errno(f->pvfs, errno);
+       }
+
+       f->fd = fd;
+
+       /* re-resolve the open fd */
+       status = pvfs_resolve_name_fd(f->pvfs, fd, f->name);
+       if (!NT_STATUS_IS_OK(status)) {
+               return status;
+       }
+
+       io->generic.out.oplock_level  = NO_OPLOCK;
+       io->generic.out.fnum          = f->fnum;
+       io->generic.out.create_action = NTCREATEX_ACTION_EXISTED;
+       io->generic.out.create_time   = f->name->dos.create_time;
+       io->generic.out.access_time   = f->name->dos.access_time;
+       io->generic.out.write_time    = f->name->dos.write_time;
+       io->generic.out.change_time   = f->name->dos.change_time;
+       io->generic.out.attrib        = f->name->dos.attrib;
+       io->generic.out.alloc_size    = f->name->dos.alloc_size;
+       io->generic.out.size          = f->name->st.st_size;
+       io->generic.out.file_type     = FILE_TYPE_DISK;
+       io->generic.out.ipc_state     = 0;
+       io->generic.out.is_directory  = 0;
+
+       /* success - keep the file handle */
+       talloc_steal(f->pvfs, f);
+
+       return NT_STATUS_OK;
 }
 
 
index 3c17abe6fbb3359bbc183cd1f02cddf0efe74900..24d1c7eeb628e78e95b39cb955386cd28681f10f 100644 (file)
@@ -66,7 +66,7 @@ static void construct_reply(struct smbsrv_request *req);
 receive a SMB request header from the wire, forming a request_context
 from the result
 ****************************************************************************/
-static NTSTATUS receive_smb_request(struct smbsrv_connection *smb_conn)
+static NTSTATUS receive_smb_request(struct smbsrv_connection *smb_conn, struct timeval t)
 {
        NTSTATUS status;
        ssize_t len;
@@ -138,7 +138,7 @@ static NTSTATUS receive_smb_request(struct smbsrv_connection *smb_conn)
        }
 
        /* we have a full packet */
-       GetTimeOfDay(&req->request_time);
+       req->request_time = t;
        req->chained_fnum = -1;
        req->in.allocated = req->in.size;
        req->in.hdr = req->in.buffer + NBT_HDR_SIZE;
@@ -721,7 +721,7 @@ static void smbsrv_recv(struct server_connection *conn, struct timeval t, uint16
 
        DEBUG(10,("smbsrv_recv\n"));
 
-       status = receive_smb_request(smb_conn);
+       status = receive_smb_request(smb_conn, t);
        if (NT_STATUS_IS_ERR(status)) {
                conn->event.fde->flags = 0;
                smbsrv_terminate_connection(smb_conn, nt_errstr(status));
@@ -808,7 +808,7 @@ void smbd_process_async(struct smbsrv_connection *smb_conn)
 {
        NTSTATUS status;
        
-       status = receive_smb_request(smb_conn);
+       status = receive_smb_request(smb_conn, timeval_current());
        if (NT_STATUS_IS_ERR(status)) {
                smbsrv_terminate_connection(smb_conn, nt_errstr(status));
        }
index 7dddd0602c04b338d720a2ed4b666c9bfa54feb1..6f9426490eb8c9ba9cc871071142e37f3370ddf0 100644 (file)
@@ -39,7 +39,7 @@ static BOOL test_mux_open(struct smbcli_state *cli, TALLOC_CTX *mem_ctx)
 {
        union smb_open io;
        NTSTATUS status;
-       int fnum;
+       int fnum1, fnum2;
        BOOL ret = True;
        struct smbcli_request *req;
        struct timeval tv;
@@ -53,10 +53,10 @@ static BOOL test_mux_open(struct smbcli_state *cli, TALLOC_CTX *mem_ctx)
        io.generic.level = RAW_OPEN_NTCREATEX;
        io.ntcreatex.in.root_fid = 0;
        io.ntcreatex.in.flags = 0;
-       io.ntcreatex.in.access_mask = SEC_RIGHT_MAXIMUM_ALLOWED;
+       io.ntcreatex.in.access_mask = SA_RIGHT_FILE_READ_DATA;
        io.ntcreatex.in.create_options = 0;
        io.ntcreatex.in.file_attr = FILE_ATTRIBUTE_NORMAL;
-       io.ntcreatex.in.share_access = 0;
+       io.ntcreatex.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
        io.ntcreatex.in.alloc_size = 0;
        io.ntcreatex.in.open_disposition = NTCREATEX_DISP_CREATE;
        io.ntcreatex.in.impersonation = NTCREATEX_IMPERSONATION_ANONYMOUS;
@@ -64,12 +64,18 @@ static BOOL test_mux_open(struct smbcli_state *cli, TALLOC_CTX *mem_ctx)
        io.ntcreatex.in.fname = BASEDIR "\\open.dat";
        status = smb_raw_open(cli->tree, mem_ctx, &io);
        CHECK_STATUS(status, NT_STATUS_OK);
-       fnum = io.ntcreatex.out.fnum;
+       fnum1 = io.ntcreatex.out.fnum;
+
+       /* and a 2nd open, this will not conflict */
+       io.ntcreatex.in.open_disposition = NTCREATEX_DISP_OPEN;
+       status = smb_raw_open(cli->tree, mem_ctx, &io);
+       CHECK_STATUS(status, NT_STATUS_OK);
+       fnum2 = io.ntcreatex.out.fnum;
 
        tv = timeval_current();
 
        /* send an open that will conflict */
-       io.ntcreatex.in.open_disposition = NTCREATEX_DISP_OPEN;
+       io.ntcreatex.in.share_access = 0;
        status = smb_raw_open(cli->tree, mem_ctx, &io);
        CHECK_STATUS(status, NT_STATUS_SHARING_VIOLATION);
 
@@ -87,8 +93,11 @@ static BOOL test_mux_open(struct smbcli_state *cli, TALLOC_CTX *mem_ctx)
        tv = timeval_current();
        req = smb_raw_open_send(cli->tree, &io);
        
-       /* and close the file */
-       smbcli_close(cli->tree, fnum);
+       /* and close the first file */
+       smbcli_close(cli->tree, fnum1);
+
+       /* then the 2nd file */
+       smbcli_close(cli->tree, fnum2);
 
        /* see if the async open succeeded */
        status = smb_raw_open_recv(req, mem_ctx, &io);