/* a set of flags to control handling of request structures */
#define REQ_CONTROL_LARGE (1<<1) /* allow replies larger than max_xmit */
#define REQ_CONTROL_ASYNC (1<<2) /* the backend will answer this one later */
+#define REQ_CONTROL_MAY_ASYNC (1<<3) /* the backend is allowed to answer async */
/* passed to br lock code */
enum brl_type {READ_LOCK, WRITE_LOCK, PENDING_READ_LOCK, PENDING_WRITE_LOCK};
-enum smb_read_level {RAW_READ_GENERIC, RAW_READ_READBRAW, RAW_READ_LOCKREAD, RAW_READ_READ, RAW_READ_READX};
+enum smb_read_level {RAW_READ_READBRAW, RAW_READ_LOCKREAD, RAW_READ_READ, RAW_READ_READX};
+
+/* the generic read interface is defined to be equal to the READX interface */
+#define RAW_READ_GENERIC RAW_READ_READX
/* union for read() backend call
called. It will be big enough to hold the maximum size asked for
*/
union smb_read {
- /* generic interface */
+ /* SMBreadX (and generic) interface */
struct {
enum smb_read_level level;
struct {
uint16_t fnum;
uint64_t offset;
- uint32_t size;
+ uint16_t mincnt;
+ uint16_t maxcnt;
+ uint16_t remaining;
} in;
struct {
char *data;
- uint32_t nread;
+ uint16_t remaining;
+ uint16_t compaction_mode;
+ uint16_t nread;
} out;
- } generic;
-
+ } readx, generic;
/* SMBreadbraw interface */
struct {
uint16_t nread;
} out;
} read;
-
- /* SMBreadX interface */
- struct {
- enum smb_read_level level;
-
- struct {
- uint16_t fnum;
- uint64_t offset;
- uint16_t mincnt;
- uint16_t maxcnt;
- uint16_t remaining;
- } in;
- struct {
- char *data;
- uint16_t remaining;
- uint16_t compaction_mode;
- uint16_t nread;
- } out;
- } readx;
};
-enum smb_write_level {
-	RAW_WRITE_GENERIC, RAW_WRITE_WRITEUNLOCK, RAW_WRITE_WRITE,
-	RAW_WRITE_WRITEX, RAW_WRITE_WRITECLOSE, RAW_WRITE_SPLWRITE};
+enum smb_write_level {RAW_WRITE_WRITEUNLOCK, RAW_WRITE_WRITE,
+		      RAW_WRITE_WRITEX, RAW_WRITE_WRITECLOSE, RAW_WRITE_SPLWRITE};
+
+/* the generic write interface is defined to be equal to the WRITEX interface */
+#define RAW_WRITE_GENERIC RAW_WRITE_WRITEX
/* union for write() backend call
*/
union smb_write {
- /* generic interface */
+ /* SMBwriteX interface */
struct {
enum smb_write_level level;
struct {
uint16_t fnum;
uint64_t offset;
- uint32_t count;
+ uint16_t wmode;
+ uint16_t remaining;
+ uint32_t count;
const char *data;
} in;
struct {
uint32_t nwritten;
+ uint16_t remaining;
} out;
- } generic;
-
+ } writex, generic;
/* SMBwriteunlock interface */
struct {
} out;
} write;
- /* SMBwriteX interface */
- struct {
- enum smb_write_level level;
-
- struct {
- uint16_t fnum;
- uint64_t offset;
- uint16_t wmode;
- uint16_t remaining;
- uint32_t count;
- const char *data;
- } in;
- struct {
- uint32_t nwritten;
- uint16_t remaining;
- } out;
- } writex;
-
/* SMBwriteclose interface */
struct {
enum smb_write_level level;
};
-enum smb_lock_level {RAW_LOCK_GENERIC, RAW_LOCK_LOCK, RAW_LOCK_UNLOCK, RAW_LOCK_LOCKX};
+enum smb_lock_level {RAW_LOCK_LOCK, RAW_LOCK_UNLOCK, RAW_LOCK_LOCKX};
+
+/* the generic interface is defined to be equal to the lockingX interface */
+#define RAW_LOCK_GENERIC RAW_LOCK_LOCKX
/* union for lock() backend call
*/
union smb_lock {
- /* generic interface */
- struct {
- enum smb_lock_level level;
- struct {
- uint16_t fnum;
- } in;
- } generic;
-
- /* SMBlock interface */
- struct {
- enum smb_lock_level level;
-
- struct {
- uint16_t fnum;
- uint32_t count;
- uint32_t offset;
- } in;
- } lock;
-
- /* SMBunlock interface */
- struct {
- enum smb_lock_level level;
-
- struct {
- uint16_t fnum;
- uint32_t count;
- uint32_t offset;
- } in;
- } unlock;
-
- /* SMBlockingX interface */
+ /* SMBlockingX (and generic) interface */
struct {
enum smb_lock_level level;
uint64_t count;
} *locks; /* unlocks are first in the arrray */
} in;
- } lockx;
+ } lockx, generic;
+
+ /* SMBlock and SMBunlock interface */
+ struct {
+ enum smb_lock_level level;
+
+ struct {
+ uint16_t fnum;
+ uint32_t count;
+ uint32_t offset;
+ } in;
+ } lock, unlock;
};
-enum smb_close_level {RAW_CLOSE_GENERIC, RAW_CLOSE_CLOSE, RAW_CLOSE_SPLCLOSE};
+enum smb_close_level {RAW_CLOSE_CLOSE, RAW_CLOSE_SPLCLOSE};
+
+/* the generic close interface is defined to be equal to the CLOSE interface */
+#define RAW_CLOSE_GENERIC RAW_CLOSE_CLOSE
/*
union for close() backend call
*/
union smb_close {
- /* generic interface */
- struct {
- enum smb_close_level level;
-
- struct {
- uint16_t fnum;
- } in;
- } generic;
-
- /* SMBclose interface */
+ /* SMBclose (and generic) interface */
struct {
enum smb_close_level level;
uint16_t fnum;
time_t write_time;
} in;
- } close;
+ } close, generic;
/* SMBsplclose interface - empty! */
struct {
struct smbcli_request *req = NULL;
switch (parms->generic.level) {
- case RAW_CLOSE_GENERIC:
- return NULL;
-
case RAW_CLOSE_CLOSE:
SETUP_REQUEST(SMBclose, 3, 0);
SSVAL(req->out.vwv, VWV(0), parms->close.in.fnum);
struct smbcli_request *req = NULL;
switch (parms->generic.level) {
- case RAW_LOCK_GENERIC:
- return NULL;
-
case RAW_LOCK_LOCK:
SETUP_REQUEST(SMBlock, 5, 0);
SSVAL(req->out.vwv, VWV(0), parms->lock.in.fnum);
struct smbcli_request *req = NULL;
switch (parms->generic.level) {
- case RAW_READ_GENERIC:
- return NULL;
-
case RAW_READ_READBRAW:
if (tree->session->transport->negotiate.capabilities & CAP_LARGE_FILES) {
bigoffset = True;
}
switch (parms->generic.level) {
- case RAW_READ_GENERIC:
- /* handled in _send() */
- break;
-
case RAW_READ_READBRAW:
parms->readbraw.out.nread = req->in.size - NBT_HDR_SIZE;
if (parms->readbraw.out.nread >
struct smbcli_request *req = NULL;
switch (parms->generic.level) {
- case RAW_WRITE_GENERIC:
- return NULL;
-
case RAW_WRITE_WRITEUNLOCK:
SETUP_REQUEST(SMBwriteunlock, 5, 3 + parms->writeunlock.in.count);
SSVAL(req->out.vwv, VWV(0), parms->writeunlock.in.fnum);
}
switch (parms->generic.level) {
- case RAW_WRITE_GENERIC:
- break;
case RAW_WRITE_WRITEUNLOCK:
SMBCLI_CHECK_WCT(req, 1);
parms->writeunlock.out.nwritten = SVAL(req->in.vwv, VWV(0));
/* see if the front end will allow us to perform this
function asynchronously. */
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_unlink(private->tree, unl);
}
/* see if the front end will allow us to perform this
function asynchronously. */
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_ioctl(private->tree, req, io);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_chkpath(private->tree, cp);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_pathinfo(private->tree, req, info);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_fileinfo(private->tree, req, info);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_setpathinfo(private->tree, st);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_open(private->tree, req, io);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_mkdir(private->tree, md);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_rmdir(private->tree, rd);
}
c_req = smb_raw_rmdir_send(private->tree, rd);
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_rename(private->tree, ren);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_read(private->tree, rd);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_write(private->tree, wr);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_close(private->tree, io);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_exit(private->tree->session);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_lock(private->tree, lck);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_setfileinfo(private->tree, info);
}
c_req = smb_raw_setfileinfo_send(private->tree, info);
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_fsinfo(private->tree, req, fs);
}
struct cvfs_private *private = ntvfs->private_data;
struct smbcli_request *c_req;
- if (!req->async.send_fn) {
+ if (!(req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
return smb_raw_trans2(private->tree, req, trans2);
}
ipc$ connection. It needs to keep information about all open
pipes */
struct ipc_private {
-
- uint16_t next_fnum;
- uint16_t num_open;
+ void *idtree_fnum;
/* a list of open pipes */
struct pipe_state {
struct pipe_state *next, *prev;
+ struct ipc_private *private;
const char *pipe_name;
uint16_t fnum;
struct dcesrv_connection *dce_conn;
};
-/*
- find the next fnum available on this connection
-*/
-static uint16_t find_next_fnum(struct ipc_private *ipc)
-{
- struct pipe_state *p;
- uint32_t ret;
-
- if (ipc->num_open == 0xFFFF) {
- return 0;
- }
-
-again:
- ret = ipc->next_fnum++;
-
- for (p=ipc->pipe_list; p; p=p->next) {
- if (p->fnum == ret) {
- goto again;
- }
- }
-
- return ret;
-}
-
-
-/*
- shutdown a single pipe. Called on a close or disconnect
-*/
-static void pipe_shutdown(struct ipc_private *private, struct pipe_state *p)
-{
- talloc_free(p->dce_conn);
- DLIST_REMOVE(private->pipe_list, p);
- talloc_destroy(p);
-}
-
-
/*
  find a open pipe give a file descriptor
*/
static struct pipe_state *pipe_state_find(struct ipc_private *private, uint16_t fnum)
{
-	struct pipe_state *p;
-
-	for (p=private->pipe_list; p; p=p->next) {
-		if (p->fnum == fnum) {
-			return p;
-		}
-	}
-
-	return NULL;
+	/* the fnum was handed out by idr_get_new() at open time, so a
+	   direct idtree lookup replaces the old linear list scan */
+	return idr_find(private->idtree_fnum, fnum);
}
ntvfs->private_data = private;
private->pipe_list = NULL;
- private->next_fnum = 1;
- private->num_open = 0;
+
+ private->idtree_fnum = idr_init(private);
+ if (private->idtree_fnum == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
return NT_STATUS_OK;
}
/* close any pipes that are open. Discard any unread data */
while (private->pipe_list) {
- pipe_shutdown(private, private->pipe_list);
+ talloc_free(private->pipe_list);
}
return NT_STATUS_OK;
}
+/*
+  destroy a open pipe structure
+
+  installed as the talloc destructor on each pipe_state, so every free
+  path (ipc_close, ipc_exit, ipc_logoff, disconnect) releases the fnum
+  and list entry consistently
+*/
+static int ipc_fd_destructor(void *ptr)
+{
+	struct pipe_state *p = ptr;
+	/* give the fnum back to the idtree allocator for reuse */
+	idr_remove(p->private->idtree_fnum, p->fnum);
+	/* unlink from the connection's list of open pipes */
+	DLIST_REMOVE(p->private->pipe_list, p);
+	/* tear down the dcerpc server connection behind this pipe */
+	talloc_free(p->dce_conn);
+	return 0;
+}
+
/*
open a file backend - used for MSRPC pipes
struct dcesrv_ep_description ep_description;
struct auth_session_info *session_info = NULL;
struct ipc_private *private = ntvfs->private_data;
+ int fnum;
- p = talloc_p(private, struct pipe_state);
+ p = talloc_p(req, struct pipe_state);
if (!p) {
return NT_STATUS_NO_MEMORY;
}
p->pipe_name = talloc_asprintf(p, "\\pipe\\%s", fname);
if (!p->pipe_name) {
- talloc_free(p);
return NT_STATUS_NO_MEMORY;
}
- p->fnum = find_next_fnum(private);
- if (p->fnum == 0) {
- talloc_free(p);
+ fnum = idr_get_new(private->idtree_fnum, p, UINT16_MAX);
+ if (fnum == -1) {
return NT_STATUS_TOO_MANY_OPENED_FILES;
}
+ p->fnum = fnum;
p->ipc_state = 0x5ff;
/*
will need to do that once the credentials infrastructure is
finalised for Samba4
*/
-
- printf("FINDING: %s\n", p->pipe_name);
ep_description.type = ENDPOINT_SMB;
ep_description.info.smb_pipe = p->pipe_name;
if (req->session) {
/* The session info is refcount-increased in the
dcesrv_endpoint_search_connect() function */
-
session_info = req->session->session_info;
}
session_info,
&p->dce_conn);
if (!NT_STATUS_IS_OK(status)) {
- talloc_free(p);
+ idr_remove(private->idtree_fnum, p->fnum);
return status;
}
- private->num_open++;
-
DLIST_ADD(private->pipe_list, p);
p->smbpid = req->smbpid;
p->session = req->session;
+ p->private = private;
*ps = p;
+ talloc_steal(private, p);
+
+ talloc_set_destructor(p, ipc_fd_destructor);
+
return NT_STATUS_OK;
}
struct pipe_state *p;
NTSTATUS status;
- switch (rd->generic.level) {
- case RAW_READ_READ:
- fnum = rd->read.in.fnum;
- data.length = rd->read.in.count;
- data.data = rd->read.out.data;
- break;
- case RAW_READ_READX:
- fnum = rd->readx.in.fnum;
- data.length = rd->readx.in.maxcnt;
- data.data = rd->readx.out.data;
- break;
- default:
- return NT_STATUS_NOT_SUPPORTED;
+ if (rd->generic.level != RAW_READ_GENERIC) {
+ return ntvfs_map_read(req, rd, ntvfs);
}
+ fnum = rd->readx.in.fnum;
+ data.length = rd->readx.in.maxcnt;
+ data.data = rd->readx.out.data;
+
p = pipe_state_find(private, fnum);
if (!p) {
return NT_STATUS_INVALID_HANDLE;
return status;
}
- switch (rd->generic.level) {
- case RAW_READ_READ:
- rd->read.out.nread = data.length;
- break;
- case RAW_READ_READX:
- rd->readx.out.remaining = 0;
- rd->readx.out.compaction_mode = 0;
- rd->readx.out.nread = data.length;
- break;
- default:
- return NT_STATUS_NOT_SUPPORTED;
- }
+ rd->readx.out.remaining = 0;
+ rd->readx.out.compaction_mode = 0;
+ rd->readx.out.nread = data.length;
return status;
}
struct pipe_state *p;
NTSTATUS status;
- switch (wr->generic.level) {
- case RAW_WRITE_WRITE:
- fnum = wr->write.in.fnum;
- data.data = discard_const_p(void, wr->write.in.data);
- data.length = wr->write.in.count;
- break;
-
- case RAW_WRITE_WRITEX:
- fnum = wr->writex.in.fnum;
- data.data = discard_const_p(void, wr->writex.in.data);
- data.length = wr->writex.in.count;
- break;
-
- default:
- return NT_STATUS_NOT_SUPPORTED;
+ if (wr->generic.level != RAW_WRITE_GENERIC) {
+ return ntvfs_map_write(req, wr, ntvfs);
}
+ fnum = wr->writex.in.fnum;
+ data.data = discard_const_p(void, wr->writex.in.data);
+ data.length = wr->writex.in.count;
+
p = pipe_state_find(private, fnum);
if (!p) {
return NT_STATUS_INVALID_HANDLE;
return status;
}
- switch (wr->generic.level) {
- case RAW_WRITE_WRITE:
- wr->write.out.nwritten = data.length;
- break;
- case RAW_WRITE_WRITEX:
- wr->writex.out.nwritten = data.length;
- wr->writex.out.remaining = 0;
- break;
- default:
- return NT_STATUS_NOT_SUPPORTED;
- }
+ wr->writex.out.nwritten = data.length;
+ wr->writex.out.remaining = 0;
return NT_STATUS_OK;
}
struct pipe_state *p;
if (io->generic.level != RAW_CLOSE_CLOSE) {
- return NT_STATUS_ACCESS_DENIED;
+ return ntvfs_map_close(req, io, ntvfs);
}
p = pipe_state_find(private, io->close.in.fnum);
return NT_STATUS_INVALID_HANDLE;
}
- pipe_shutdown(private, p);
- private->num_open--;
+ talloc_free(p);
return NT_STATUS_OK;
}
for (p=private->pipe_list; p; p=next) {
next = p->next;
if (p->smbpid == req->smbpid) {
- pipe_shutdown(private, p);
+ talloc_free(p);
}
}
for (p=private->pipe_list; p; p=next) {
next = p->next;
if (p->session == req->session) {
- pipe_shutdown(private, p);
+ talloc_free(p);
}
}
	status code and any result parameters much harder.
*/
+/* NOTE(review): the old code saved and restored req->async.send_fn
+   around the call, while the new code clears REQ_CONTROL_MAY_ASYNC and
+   never restores it - confirm no later operation on this request is
+   expected to be allowed to go async */
#define PASS_THRU_REQ(ntvfs, req, op, args) do { \
-	void *send_fn_saved = req->async.send_fn; \
-	req->async.send_fn = NULL; \
+	req->control_flags &= ~REQ_CONTROL_MAY_ASYNC; \
	status = ntvfs_next_##op args; \
-	req->async.send_fn = send_fn_saved; \
} while (0)
NTVFS generic level mapping code
- Copyright (C) Andrew Tridgell 2003
+ Copyright (C) Andrew Tridgell 2003-2004
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include "includes.h"
/*
- see if a filename ends in EXE COM DLL or SYM. This is needed for the DENY_DOS mapping for OpenX
+ see if a filename ends in EXE COM DLL or SYM. This is needed for the
+ DENY_DOS mapping for OpenX
*/
static BOOL is_exe_file(const char *fname)
{
struct ntvfs_module_context *ntvfs)
{
NTSTATUS status;
- union smb_open io2;
+ union smb_open *io2;
- if (io->generic.level == RAW_OPEN_GENERIC) {
- return NT_STATUS_INVALID_LEVEL;
+ io2 = talloc_p(req, union smb_open);
+ if (io2 == NULL) {
+ return NT_STATUS_NO_MEMORY;
}
+ /* must be synchronous, or we won't be called to do the
+ translation */
+ req->control_flags &= ~REQ_CONTROL_MAY_ASYNC;
+
switch (io->generic.level) {
+ case RAW_OPEN_GENERIC:
+ return NT_STATUS_INVALID_LEVEL;
+
case RAW_OPEN_OPENX:
- ZERO_STRUCT(io2.generic.in);
- io2.generic.level = RAW_OPEN_GENERIC;
+ ZERO_STRUCT(io2->generic.in);
+ io2->generic.level = RAW_OPEN_GENERIC;
if (io->openx.in.flags & OPENX_FLAGS_REQUEST_OPLOCK) {
- io2.generic.in.flags |= NTCREATEX_FLAGS_REQUEST_OPLOCK;
+ io2->generic.in.flags |= NTCREATEX_FLAGS_REQUEST_OPLOCK;
}
if (io->openx.in.flags & OPENX_FLAGS_REQUEST_BATCH_OPLOCK) {
- io2.generic.in.flags |= NTCREATEX_FLAGS_REQUEST_BATCH_OPLOCK;
+ io2->generic.in.flags |= NTCREATEX_FLAGS_REQUEST_BATCH_OPLOCK;
}
switch (io->openx.in.open_mode & OPENX_MODE_ACCESS_MASK) {
case OPENX_MODE_ACCESS_READ:
- io2.generic.in.access_mask = GENERIC_RIGHTS_FILE_READ;
+ io2->generic.in.access_mask = GENERIC_RIGHTS_FILE_READ;
break;
case OPENX_MODE_ACCESS_WRITE:
- io2.generic.in.access_mask = GENERIC_RIGHTS_FILE_WRITE;
+ io2->generic.in.access_mask = GENERIC_RIGHTS_FILE_WRITE;
break;
case OPENX_MODE_ACCESS_RDWR:
case OPENX_MODE_ACCESS_FCB:
- io2.generic.in.access_mask = GENERIC_RIGHTS_FILE_ALL_ACCESS;
+ io2->generic.in.access_mask = GENERIC_RIGHTS_FILE_ALL_ACCESS;
break;
}
switch (io->openx.in.open_mode & OPENX_MODE_DENY_MASK) {
case OPENX_MODE_DENY_READ:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_WRITE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_WRITE;
break;
case OPENX_MODE_DENY_WRITE:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
break;
case OPENX_MODE_DENY_ALL:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
break;
case OPENX_MODE_DENY_NONE:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ | NTCREATEX_SHARE_ACCESS_WRITE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ | NTCREATEX_SHARE_ACCESS_WRITE;
break;
case OPENX_MODE_DENY_DOS:
/* DENY_DOS is quite strange - it depends on the filename! */
if (is_exe_file(io->openx.in.fname)) {
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ | NTCREATEX_SHARE_ACCESS_WRITE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ | NTCREATEX_SHARE_ACCESS_WRITE;
} else {
if ((io->openx.in.open_mode & OPENX_MODE_ACCESS_MASK) ==
OPENX_MODE_ACCESS_READ) {
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
} else {
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
}
}
break;
case OPENX_MODE_DENY_FCB:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
break;
}
switch (io->openx.in.open_func) {
case (OPENX_OPEN_FUNC_FAIL):
- io2.generic.in.open_disposition = NTCREATEX_DISP_CREATE;
+ io2->generic.in.open_disposition = NTCREATEX_DISP_CREATE;
break;
case (OPENX_OPEN_FUNC_OPEN):
- io2.generic.in.open_disposition = NTCREATEX_DISP_OPEN;
+ io2->generic.in.open_disposition = NTCREATEX_DISP_OPEN;
break;
case (OPENX_OPEN_FUNC_TRUNC):
- io2.generic.in.open_disposition = NTCREATEX_DISP_OVERWRITE;
+ io2->generic.in.open_disposition = NTCREATEX_DISP_OVERWRITE;
break;
case (OPENX_OPEN_FUNC_FAIL | OPENX_OPEN_FUNC_CREATE):
- io2.generic.in.open_disposition = NTCREATEX_DISP_CREATE;
+ io2->generic.in.open_disposition = NTCREATEX_DISP_CREATE;
break;
case (OPENX_OPEN_FUNC_OPEN | OPENX_OPEN_FUNC_CREATE):
- io2.generic.in.open_disposition = NTCREATEX_DISP_OPEN_IF;
+ io2->generic.in.open_disposition = NTCREATEX_DISP_OPEN_IF;
break;
case (OPENX_OPEN_FUNC_TRUNC | OPENX_OPEN_FUNC_CREATE):
- io2.generic.in.open_disposition = NTCREATEX_DISP_OVERWRITE_IF;
+ io2->generic.in.open_disposition = NTCREATEX_DISP_OVERWRITE_IF;
break;
}
- io2.generic.in.alloc_size = io->openx.in.size;
- io2.generic.in.file_attr = io->openx.in.file_attrs;
- io2.generic.in.fname = io->openx.in.fname;
+ io2->generic.in.alloc_size = io->openx.in.size;
+ io2->generic.in.file_attr = io->openx.in.file_attrs;
+ io2->generic.in.fname = io->openx.in.fname;
- status = ntvfs->ops->open(ntvfs, req, &io2);
+ status = ntvfs->ops->open(ntvfs, req, io2);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
ZERO_STRUCT(io->openx.out);
- io->openx.out.fnum = io2.generic.out.fnum;
- io->openx.out.attrib = io2.generic.out.attrib;
- io->openx.out.write_time = nt_time_to_unix(io2.generic.out.write_time);
- io->openx.out.size = io2.generic.out.size;
+ io->openx.out.fnum = io2->generic.out.fnum;
+ io->openx.out.attrib = io2->generic.out.attrib;
+ io->openx.out.write_time = nt_time_to_unix(io2->generic.out.write_time);
+ io->openx.out.size = io2->generic.out.size;
return NT_STATUS_OK;
case RAW_OPEN_OPEN:
- ZERO_STRUCT(io2.generic.in);
- io2.generic.level = RAW_OPEN_GENERIC;
- io2.generic.in.file_attr = io->open.in.search_attrs;
- io2.generic.in.fname = io->open.in.fname;
- io2.generic.in.open_disposition = NTCREATEX_DISP_OPEN;
+ ZERO_STRUCT(io2->generic.in);
+ io2->generic.level = RAW_OPEN_GENERIC;
+ io2->generic.in.file_attr = io->open.in.search_attrs;
+ io2->generic.in.fname = io->open.in.fname;
+ io2->generic.in.open_disposition = NTCREATEX_DISP_OPEN;
DEBUG(9,("ntvfs_map_open(OPEN): mapping flags=0x%x\n",
io->open.in.flags));
switch (io->open.in.flags & OPEN_FLAGS_MODE_MASK) {
case OPEN_FLAGS_OPEN_READ:
- io2.generic.in.access_mask = GENERIC_RIGHTS_FILE_READ;
+ io2->generic.in.access_mask = GENERIC_RIGHTS_FILE_READ;
io->open.out.rmode = DOS_OPEN_RDONLY;
break;
case OPEN_FLAGS_OPEN_WRITE:
- io2.generic.in.access_mask = GENERIC_RIGHTS_FILE_WRITE;
+ io2->generic.in.access_mask = GENERIC_RIGHTS_FILE_WRITE;
io->open.out.rmode = DOS_OPEN_WRONLY;
break;
case OPEN_FLAGS_OPEN_RDWR:
case 0xf: /* FCB mode */
- io2.generic.in.access_mask = GENERIC_RIGHTS_FILE_READ |
+ io2->generic.in.access_mask = GENERIC_RIGHTS_FILE_READ |
GENERIC_RIGHTS_FILE_WRITE;
io->open.out.rmode = DOS_OPEN_RDWR; /* assume we got r/w */
break;
/* DENY_DOS is quite strange - it depends on the filename! */
/* REWRITE: is this necessary for OPEN? */
if (is_exe_file(io->open.in.fname)) {
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ | NTCREATEX_SHARE_ACCESS_WRITE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ | NTCREATEX_SHARE_ACCESS_WRITE;
} else {
if ((io->open.in.flags & OPEN_FLAGS_MODE_MASK) ==
OPEN_FLAGS_OPEN_READ) {
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
} else {
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
}
}
break;
case OPEN_FLAGS_DENY_ALL:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
break;
case OPEN_FLAGS_DENY_WRITE:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_READ;
break;
case OPEN_FLAGS_DENY_READ:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_WRITE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_WRITE;
break;
case OPEN_FLAGS_DENY_NONE:
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_WRITE |
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_WRITE |
NTCREATEX_SHARE_ACCESS_READ | NTCREATEX_SHARE_ACCESS_DELETE;
break;
case 0x70: /* FCB mode */
- io2.generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
+ io2->generic.in.share_access = NTCREATEX_SHARE_ACCESS_NONE;
break;
default:
DEBUG(2,("ntvfs_map_open(OPEN): invalid DENY 0x%x\n",
return NT_STATUS_INVALID_PARAMETER;
}
DEBUG(9,("ntvfs_map_open(OPEN): mapped flags=0x%x to access_mask=0x%x and share_access=0x%x\n",
- io->open.in.flags, io2.generic.in.access_mask, io2.generic.in.share_access));
+ io->open.in.flags, io2->generic.in.access_mask, io2->generic.in.share_access));
- status = ntvfs->ops->open(ntvfs, req, &io2);
+ status = ntvfs->ops->open(ntvfs, req, io2);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
ZERO_STRUCT(io->openx.out);
- io->open.out.fnum = io2.generic.out.fnum;
- io->open.out.attrib = io2.generic.out.attrib;
- io->open.out.write_time = nt_time_to_unix(io2.generic.out.write_time);
- io->open.out.size = io2.generic.out.size;
+ io->open.out.fnum = io2->generic.out.fnum;
+ io->open.out.attrib = io2->generic.out.attrib;
+ io->open.out.write_time = nt_time_to_unix(io2->generic.out.write_time);
+ io->open.out.size = io2->generic.out.size;
io->open.out.rmode = DOS_OPEN_RDWR;
return NT_STATUS_OK;
struct ntvfs_module_context *ntvfs)
{
NTSTATUS status;
- union smb_fsinfo fs2;
+ union smb_fsinfo *fs2;
+
+ fs2 = talloc_p(req, union smb_fsinfo);
+ if (fs2 == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
if (fs->generic.level == RAW_QFS_GENERIC) {
return NT_STATUS_INVALID_LEVEL;
}
/* ask the backend for the generic info */
- fs2.generic.level = RAW_QFS_GENERIC;
+ fs2->generic.level = RAW_QFS_GENERIC;
- status = ntvfs->ops->fsinfo(ntvfs, req, &fs2);
+ status = ntvfs->ops->fsinfo(ntvfs, req, fs2);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
/* we need to scale the sizes to fit */
for (bpunit=64; bpunit<0x10000; bpunit *= 2) {
- if (fs2.generic.out.blocks_total * (double)fs2.generic.out.block_size < bpunit * 512 * 65535.0) {
+ if (fs2->generic.out.blocks_total * (double)fs2->generic.out.block_size < bpunit * 512 * 65535.0) {
break;
}
}
fs->dskattr.out.blocks_per_unit = bpunit;
fs->dskattr.out.block_size = 512;
fs->dskattr.out.units_total =
- (fs2.generic.out.blocks_total * (double)fs2.generic.out.block_size) / (bpunit * 512);
+ (fs2->generic.out.blocks_total * (double)fs2->generic.out.block_size) / (bpunit * 512);
fs->dskattr.out.units_free =
- (fs2.generic.out.blocks_free * (double)fs2.generic.out.block_size) / (bpunit * 512);
+ (fs2->generic.out.blocks_free * (double)fs2->generic.out.block_size) / (bpunit * 512);
/* we must return a maximum of 2G to old DOS systems, or they get very confused */
if (bpunit > 64 && req->smb_conn->negotiate.protocol <= PROTOCOL_LANMAN2) {
}
case RAW_QFS_ALLOCATION:
- fs->allocation.out.fs_id = fs2.generic.out.fs_id;
- fs->allocation.out.total_alloc_units = fs2.generic.out.blocks_total;
- fs->allocation.out.avail_alloc_units = fs2.generic.out.blocks_free;
+ fs->allocation.out.fs_id = fs2->generic.out.fs_id;
+ fs->allocation.out.total_alloc_units = fs2->generic.out.blocks_total;
+ fs->allocation.out.avail_alloc_units = fs2->generic.out.blocks_free;
fs->allocation.out.sectors_per_unit = 1;
- fs->allocation.out.bytes_per_sector = fs2.generic.out.block_size;
+ fs->allocation.out.bytes_per_sector = fs2->generic.out.block_size;
return NT_STATUS_OK;
case RAW_QFS_VOLUME:
- fs->volume.out.serial_number = fs2.generic.out.serial_number;
- fs->volume.out.volume_name.s = fs2.generic.out.volume_name;
+ fs->volume.out.serial_number = fs2->generic.out.serial_number;
+ fs->volume.out.volume_name.s = fs2->generic.out.volume_name;
return NT_STATUS_OK;
case RAW_QFS_VOLUME_INFO:
case RAW_QFS_VOLUME_INFORMATION:
- fs->volume_info.out.create_time = fs2.generic.out.create_time;
- fs->volume_info.out.serial_number = fs2.generic.out.serial_number;
- fs->volume_info.out.volume_name.s = fs2.generic.out.volume_name;
+ fs->volume_info.out.create_time = fs2->generic.out.create_time;
+ fs->volume_info.out.serial_number = fs2->generic.out.serial_number;
+ fs->volume_info.out.volume_name.s = fs2->generic.out.volume_name;
return NT_STATUS_OK;
case RAW_QFS_SIZE_INFO:
case RAW_QFS_SIZE_INFORMATION:
- fs->size_info.out.total_alloc_units = fs2.generic.out.blocks_total;
- fs->size_info.out.avail_alloc_units = fs2.generic.out.blocks_free;
+ fs->size_info.out.total_alloc_units = fs2->generic.out.blocks_total;
+ fs->size_info.out.avail_alloc_units = fs2->generic.out.blocks_free;
fs->size_info.out.sectors_per_unit = 1;
- fs->size_info.out.bytes_per_sector = fs2.generic.out.block_size;
+ fs->size_info.out.bytes_per_sector = fs2->generic.out.block_size;
return NT_STATUS_OK;
case RAW_QFS_DEVICE_INFO:
case RAW_QFS_DEVICE_INFORMATION:
- fs->device_info.out.device_type = fs2.generic.out.device_type;
- fs->device_info.out.characteristics = fs2.generic.out.device_characteristics;
+ fs->device_info.out.device_type = fs2->generic.out.device_type;
+ fs->device_info.out.characteristics = fs2->generic.out.device_characteristics;
return NT_STATUS_OK;
case RAW_QFS_ATTRIBUTE_INFO:
case RAW_QFS_ATTRIBUTE_INFORMATION:
- fs->attribute_info.out.fs_attr = fs2.generic.out.fs_attr;
- fs->attribute_info.out.max_file_component_length = fs2.generic.out.max_file_component_length;
- fs->attribute_info.out.fs_type.s = fs2.generic.out.fs_type;
+ fs->attribute_info.out.fs_attr = fs2->generic.out.fs_attr;
+ fs->attribute_info.out.max_file_component_length = fs2->generic.out.max_file_component_length;
+ fs->attribute_info.out.fs_type.s = fs2->generic.out.fs_type;
return NT_STATUS_OK;
case RAW_QFS_QUOTA_INFORMATION:
ZERO_STRUCT(fs->quota_information.out.unknown);
- fs->quota_information.out.quota_soft = fs2.generic.out.quota_soft;
- fs->quota_information.out.quota_hard = fs2.generic.out.quota_hard;
- fs->quota_information.out.quota_flags = fs2.generic.out.quota_flags;
+ fs->quota_information.out.quota_soft = fs2->generic.out.quota_soft;
+ fs->quota_information.out.quota_hard = fs2->generic.out.quota_hard;
+ fs->quota_information.out.quota_flags = fs2->generic.out.quota_flags;
return NT_STATUS_OK;
case RAW_QFS_FULL_SIZE_INFORMATION:
- fs->full_size_information.out.total_alloc_units = fs2.generic.out.blocks_total;
- fs->full_size_information.out.call_avail_alloc_units = fs2.generic.out.blocks_free;
- fs->full_size_information.out.actual_avail_alloc_units = fs2.generic.out.blocks_free;
+ fs->full_size_information.out.total_alloc_units = fs2->generic.out.blocks_total;
+ fs->full_size_information.out.call_avail_alloc_units = fs2->generic.out.blocks_free;
+ fs->full_size_information.out.actual_avail_alloc_units = fs2->generic.out.blocks_free;
fs->full_size_information.out.sectors_per_unit = 1;
- fs->full_size_information.out.bytes_per_sector = fs2.generic.out.block_size;
+ fs->full_size_information.out.bytes_per_sector = fs2->generic.out.block_size;
return NT_STATUS_OK;
case RAW_QFS_OBJECTID_INFORMATION:
- fs->objectid_information.out.guid = fs2.generic.out.guid;
+ fs->objectid_information.out.guid = fs2->generic.out.guid;
ZERO_STRUCT(fs->objectid_information.out.unknown);
return NT_STATUS_OK;
}
/*
NTVFS fileinfo generic to any mapper
*/
-NTSTATUS ntvfs_map_fileinfo(struct smbsrv_request *req, union smb_fileinfo *info, union smb_fileinfo *info2)
+NTSTATUS ntvfs_map_fileinfo(struct smbsrv_request *req, union smb_fileinfo *info,
+ union smb_fileinfo *info2)
{
int i;
/* and convert it to the required level using results in info2 */
struct ntvfs_module_context *ntvfs)
{
NTSTATUS status;
- union smb_fileinfo info2;
+ union smb_fileinfo *info2;
+
+ info2 = talloc_p(req, union smb_fileinfo);
+ if (info2 == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
if (info->generic.level == RAW_FILEINFO_GENERIC) {
return NT_STATUS_INVALID_LEVEL;
}
/* ask the backend for the generic info */
- info2.generic.level = RAW_FILEINFO_GENERIC;
- info2.generic.in.fnum = info->generic.in.fnum;
+ info2->generic.level = RAW_FILEINFO_GENERIC;
+ info2->generic.in.fnum = info->generic.in.fnum;
- status = ntvfs->ops->qfileinfo(ntvfs, req, &info2);
+ status = ntvfs->ops->qfileinfo(ntvfs, req, info2);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
- return ntvfs_map_fileinfo(req, info, &info2);
+ return ntvfs_map_fileinfo(req, info, info2);
}
/*
struct ntvfs_module_context *ntvfs)
{
NTSTATUS status;
- union smb_fileinfo info2;
+ union smb_fileinfo *info2;
+
+ info2 = talloc_p(req, union smb_fileinfo);
+ if (info2 == NULL) {
+ return NT_STATUS_NO_MEMORY;
+ }
if (info->generic.level == RAW_FILEINFO_GENERIC) {
return NT_STATUS_INVALID_LEVEL;
}
/* ask the backend for the generic info */
- info2.generic.level = RAW_FILEINFO_GENERIC;
- info2.generic.in.fname = info->generic.in.fname;
+ info2->generic.level = RAW_FILEINFO_GENERIC;
+ info2->generic.in.fname = info->generic.in.fname;
+
+ /* must be synchronous, or we won't be called to do the
+ translation */
+ req->control_flags &= ~REQ_CONTROL_MAY_ASYNC;
- status = ntvfs->ops->qpathinfo(ntvfs, req, &info2);
+ status = ntvfs->ops->qpathinfo(ntvfs, req, info2);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
- return ntvfs_map_fileinfo(req, info, &info2);
+ return ntvfs_map_fileinfo(req, info, info2);
+}
+
+
+/*
+  NTVFS lock generic to any mapper
+
+  Maps the single-range SMBlock/SMBunlock calls onto the generic
+  (lockingX-style) lock interface of the backend. The request must not
+  be answered asynchronously by the backend, since the caller relies on
+  the mapping completing in-line.
+*/
+NTSTATUS ntvfs_map_lock(struct smbsrv_request *req, union smb_lock *lck,
+			struct ntvfs_module_context *ntvfs)
+{
+	union smb_lock *lck2;
+	struct smb_lock_entry *locks;
+
+	lck2 = talloc_p(req, union smb_lock);
+	if (lck2 == NULL) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	/* a single lock entry is enough for both SMBlock and SMBunlock */
+	locks = talloc_array_p(lck2, struct smb_lock_entry, 1);
+	if (locks == NULL) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	switch (lck->generic.level) {
+	case RAW_LOCK_LOCKX:
+		/* the generic level is the mapping target - it can't be mapped */
+		return NT_STATUS_INVALID_LEVEL;
+
+	case RAW_LOCK_LOCK:
+		lck2->generic.in.ulock_cnt = 0;
+		lck2->generic.in.lock_cnt = 1;
+		break;
+
+	case RAW_LOCK_UNLOCK:
+		lck2->generic.in.ulock_cnt = 1;
+		lck2->generic.in.lock_cnt = 0;
+		break;
+
+	default:
+		/* don't fall through with uninitialised lock counts */
+		return NT_STATUS_INVALID_LEVEL;
+	}
+
+	/* SMBlock and SMBunlock share the same 'in' layout, so using
+	   lck->lock here covers both cases */
+	lck2->generic.level = RAW_LOCK_GENERIC;
+	lck2->generic.in.fnum = lck->lock.in.fnum;
+	lck2->generic.in.mode = 0;
+	lck2->generic.in.timeout = 0;
+	lck2->generic.in.locks = locks;
+	locks->pid = req->smbpid;
+	locks->offset = lck->lock.in.offset;
+	locks->count = lck->lock.in.count;
+
+	return ntvfs->ops->lock(ntvfs, req, lck2);
+}
+
+
+/*
+  NTVFS write generic to any mapper
+
+  Maps SMBwrite, SMBwriteunlock, SMBwriteclose and SMBsplwrite onto the
+  generic (SMBwriteX-style) write interface of the backend. The unlock
+  and close variants also issue the follow-up lock/close call on success.
+*/
+NTSTATUS ntvfs_map_write(struct smbsrv_request *req, union smb_write *wr,
+			 struct ntvfs_module_context *ntvfs)
+{
+	union smb_write *wr2;
+	union smb_lock *lck;
+	union smb_close *cl;
+	NTSTATUS status;
+
+	wr2 = talloc_p(req, union smb_write);
+	if (wr2 == NULL) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	wr2->generic.level = RAW_WRITE_GENERIC;
+
+	/* we can't map asynchronously - the mapping code below must run
+	   after the backend call returns */
+	req->control_flags &= ~REQ_CONTROL_MAY_ASYNC;
+
+	switch (wr->generic.level) {
+	case RAW_WRITE_WRITEX:
+		/* the generic level is the mapping target - it can't be mapped */
+		status = NT_STATUS_INVALID_LEVEL;
+		break;
+
+	case RAW_WRITE_WRITE:
+		wr2->generic.in.fnum      = wr->write.in.fnum;
+		wr2->generic.in.offset    = wr->write.in.offset;
+		wr2->generic.in.wmode     = 0;
+		wr2->generic.in.remaining = wr->write.in.remaining;
+		wr2->generic.in.count     = wr->write.in.count;
+		wr2->generic.in.data      = wr->write.in.data;
+		status = ntvfs->ops->write(ntvfs, req, wr2);
+		wr->write.out.nwritten    = wr2->generic.out.nwritten;
+		break;
+
+	case RAW_WRITE_WRITEUNLOCK:
+		lck = talloc_p(wr2, union smb_lock);
+		if (lck == NULL) {
+			return NT_STATUS_NO_MEMORY;
+		}
+
+		wr2->generic.in.fnum      = wr->writeunlock.in.fnum;
+		wr2->generic.in.offset    = wr->writeunlock.in.offset;
+		wr2->generic.in.wmode     = 0;
+		wr2->generic.in.remaining = wr->writeunlock.in.remaining;
+		wr2->generic.in.count     = wr->writeunlock.in.count;
+		wr2->generic.in.data      = wr->writeunlock.in.data;
+
+		lck->unlock.level     = RAW_LOCK_UNLOCK;
+		lck->unlock.in.fnum   = wr->writeunlock.in.fnum;
+		lck->unlock.in.count  = wr->writeunlock.in.count;
+		lck->unlock.in.offset = wr->writeunlock.in.offset;
+
+		status = ntvfs->ops->write(ntvfs, req, wr2);
+
+		wr->writeunlock.out.nwritten = wr2->generic.out.nwritten;
+
+		/* only release the lock if the write succeeded */
+		if (NT_STATUS_IS_OK(status)) {
+			status = ntvfs->ops->lock(ntvfs, req, lck);
+		}
+		break;
+
+	case RAW_WRITE_WRITECLOSE:
+		cl = talloc_p(wr2, union smb_close);
+		if (cl == NULL) {
+			return NT_STATUS_NO_MEMORY;
+		}
+
+		wr2->generic.in.fnum      = wr->writeclose.in.fnum;
+		wr2->generic.in.offset    = wr->writeclose.in.offset;
+		wr2->generic.in.wmode     = 0;
+		wr2->generic.in.remaining = 0;
+		wr2->generic.in.count     = wr->writeclose.in.count;
+		wr2->generic.in.data      = wr->writeclose.in.data;
+
+		cl->close.level         = RAW_CLOSE_CLOSE;
+		cl->close.in.fnum       = wr->writeclose.in.fnum;
+		cl->close.in.write_time = wr->writeclose.in.mtime;
+
+		status = ntvfs->ops->write(ntvfs, req, wr2);
+		wr->writeclose.out.nwritten = wr2->generic.out.nwritten;
+
+		/* only close the file if the write succeeded */
+		if (NT_STATUS_IS_OK(status)) {
+			status = ntvfs->ops->close(ntvfs, req, cl);
+		}
+		break;
+
+	case RAW_WRITE_SPLWRITE:
+		wr2->generic.in.fnum      = wr->splwrite.in.fnum;
+		wr2->generic.in.offset    = 0;
+		wr2->generic.in.wmode     = 0;
+		wr2->generic.in.remaining = 0;
+		wr2->generic.in.count     = wr->splwrite.in.count;
+		wr2->generic.in.data      = wr->splwrite.in.data;
+		status = ntvfs->ops->write(ntvfs, req, wr2);
+		break;
+
+	default:
+		/* don't return an uninitialised status for unknown levels */
+		status = NT_STATUS_INVALID_LEVEL;
+		break;
+	}
+
+	return status;
+}
+
+
+/*
+  NTVFS read generic to any mapper
+
+  Maps SMBread, SMBreadbraw and SMBlockread onto the generic
+  (SMBreadX-style) read interface of the backend. The lockread variant
+  takes the lock first and only reads if the lock was granted.
+*/
+NTSTATUS ntvfs_map_read(struct smbsrv_request *req, union smb_read *rd,
+			struct ntvfs_module_context *ntvfs)
+{
+	union smb_read *rd2;
+	union smb_lock *lck;
+	NTSTATUS status;
+
+	rd2 = talloc_p(req, union smb_read);
+	if (rd2 == NULL) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	rd2->generic.level = RAW_READ_GENERIC;
+
+	/* we can't map asynchronously - the mapping code below must run
+	   after the backend call returns */
+	req->control_flags &= ~REQ_CONTROL_MAY_ASYNC;
+
+	switch (rd->generic.level) {
+	case RAW_READ_READX:
+		/* the generic level is the mapping target - it can't be mapped */
+		status = NT_STATUS_INVALID_LEVEL;
+		break;
+
+	case RAW_READ_READ:
+		rd2->generic.in.fnum      = rd->read.in.fnum;
+		rd2->generic.in.offset    = rd->read.in.offset;
+		rd2->generic.in.mincnt    = rd->read.in.count;
+		rd2->generic.in.maxcnt    = rd->read.in.count;
+		rd2->generic.in.remaining = rd->read.in.remaining;
+		rd2->generic.out.data     = rd->read.out.data;
+		status = ntvfs->ops->read(ntvfs, req, rd2);
+		rd->read.out.nread        = rd2->generic.out.nread;
+		break;
+
+	case RAW_READ_READBRAW:
+		rd2->generic.in.fnum      = rd->readbraw.in.fnum;
+		rd2->generic.in.offset    = rd->readbraw.in.offset;
+		rd2->generic.in.mincnt    = rd->readbraw.in.mincnt;
+		rd2->generic.in.maxcnt    = rd->readbraw.in.maxcnt;
+		rd2->generic.in.remaining = 0;
+		rd2->generic.out.data     = rd->readbraw.out.data;
+		status = ntvfs->ops->read(ntvfs, req, rd2);
+		rd->readbraw.out.nread    = rd2->generic.out.nread;
+		break;
+
+	case RAW_READ_LOCKREAD:
+		lck = talloc_p(rd2, union smb_lock);
+		if (lck == NULL) {
+			return NT_STATUS_NO_MEMORY;
+		}
+
+		rd2->generic.in.fnum      = rd->lockread.in.fnum;
+		rd2->generic.in.offset    = rd->lockread.in.offset;
+		rd2->generic.in.mincnt    = rd->lockread.in.count;
+		rd2->generic.in.maxcnt    = rd->lockread.in.count;
+		rd2->generic.in.remaining = rd->lockread.in.remaining;
+		rd2->generic.out.data     = rd->lockread.out.data;
+
+		lck->lock.level     = RAW_LOCK_LOCK;
+		lck->lock.in.fnum   = rd->lockread.in.fnum;
+		lck->lock.in.count  = rd->lockread.in.count;
+		lck->lock.in.offset = rd->lockread.in.offset;
+
+		/* take the lock first; only read if it was granted */
+		status = ntvfs->ops->lock(ntvfs, req, lck);
+
+		if (NT_STATUS_IS_OK(status)) {
+			status = ntvfs->ops->read(ntvfs, req, rd2);
+			rd->lockread.out.nread = rd2->generic.out.nread;
+		}
+		break;
+
+	default:
+		/* don't return an uninitialised status for unknown levels */
+		status = NT_STATUS_INVALID_LEVEL;
+		break;
+	}
+
+	return status;
+}
+
+
+/*
+  NTVFS close generic to any mapper
+
+  Maps SMBsplclose onto the plain SMBclose interface of the backend.
+*/
+NTSTATUS ntvfs_map_close(struct smbsrv_request *req, union smb_close *cl,
+			 struct ntvfs_module_context *ntvfs)
+{
+	union smb_close *cl2;
+
+	cl2 = talloc_p(req, union smb_close);
+	if (cl2 == NULL) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	/* switch on the caller's level - cl2 is still uninitialised here
+	   (the original switched on cl2->generic.level, reading
+	   uninitialised memory) */
+	switch (cl->generic.level) {
+	case RAW_CLOSE_CLOSE:
+		/* the generic level is the mapping target - it can't be mapped */
+		return NT_STATUS_INVALID_LEVEL;
+
+	case RAW_CLOSE_SPLCLOSE:
+		cl2->close.level   = RAW_CLOSE_CLOSE;
+		cl2->close.in.fnum = cl->splclose.in.fnum;
+		break;
+
+	default:
+		/* don't pass an uninitialised request to the backend */
+		return NT_STATUS_INVALID_LEVEL;
+	}
+
+	return ntvfs->ops->close(ntvfs, req, cl2);
}
struct pvfs_pending_lock *pending = NULL;
NTSTATUS status;
- f = pvfs_find_fd(pvfs, req, lck->generic.in.fnum);
+ if (lck->generic.level != RAW_LOCK_GENERIC) {
+ return ntvfs_map_lock(req, lck, ntvfs);
+ }
+
+ f = pvfs_find_fd(pvfs, req, lck->lockx.in.fnum);
if (!f) {
return NT_STATUS_INVALID_HANDLE;
}
- switch (lck->generic.level) {
- case RAW_LOCK_LOCK:
- status = brl_lock(pvfs->brl_context,
- &f->locking_key,
- req->smbpid,
- f->fnum,
- lck->lock.in.offset,
- lck->lock.in.count,
- WRITE_LOCK, NULL);
- if (NT_STATUS_IS_OK(status)) {
- f->lock_count++;
- }
- return status;
-
- case RAW_LOCK_UNLOCK:
- status = brl_unlock(pvfs->brl_context,
- &f->locking_key,
- req->smbpid,
- f->fnum,
- lck->lock.in.offset,
- lck->lock.in.count);
- if (NT_STATUS_IS_OK(status)) {
- f->lock_count--;
- }
- return status;
-
- case RAW_LOCK_GENERIC:
- return NT_STATUS_INVALID_LEVEL;
-
- case RAW_LOCK_LOCKX:
- /* fall through to the most complex case */
- break;
+ if (f->name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY) {
+ return NT_STATUS_FILE_IS_A_DIRECTORY;
}
- /* now the lockingX case, most common and also most complex */
if (lck->lockx.in.timeout != 0 &&
- req->async.send_fn) {
+ (req->control_flags & REQ_CONTROL_MAY_ASYNC)) {
pending = talloc_p(req, struct pvfs_pending_lock);
if (pending == NULL) {
return NT_STATUS_NO_MEMORY;
return f;
}
+
+/*
+  talloc destructor for an open directory handle - unhooks the handle
+  from the open files list and gives the fnum back to the idtree
+*/
+static int pvfs_dir_fd_destructor(void *ptr)
+{
+	struct pvfs_file *dir_f = ptr;
+
+	idr_remove(dir_f->pvfs->idtree_fnum, dir_f->fnum);
+	DLIST_REMOVE(dir_f->pvfs->open_files, dir_f);
+
+	return 0;
+}
+
+
+/*
+  open a directory
+
+  Handles the directory case of ntcreatex-style opens. Only the OPEN,
+  OPEN_IF and CREATE dispositions make sense for directories; the
+  overwrite/supersede dispositions are rejected.
+*/
+static NTSTATUS pvfs_open_directory(struct pvfs_state *pvfs,
+				    struct smbsrv_request *req,
+				    struct pvfs_filename *name,
+				    union smb_open *io)
+{
+	struct pvfs_file *f;
+	int fnum;
+	NTSTATUS status;
+
+	/* if the client says it must be a directory, and it isn't,
+	   then fail */
+	if (name->exists && !(name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY)) {
+		return NT_STATUS_NOT_A_DIRECTORY;
+	}
+
+	f = talloc_p(req, struct pvfs_file);
+	if (f == NULL) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	fnum = idr_get_new(pvfs->idtree_fnum, f, UINT16_MAX);
+	if (fnum == -1) {
+		talloc_free(f);
+		return NT_STATUS_TOO_MANY_OPENED_FILES;
+	}
+
+	f->fnum = fnum;
+	f->fd = -1;
+	f->name = talloc_steal(f, name);
+	f->session = req->session;
+	f->smbpid = req->smbpid;
+	f->pvfs = pvfs;
+	f->pending_list = NULL;
+	f->lock_count = 0;
+	f->locking_key = data_blob(NULL, 0);
+
+	/* setup a destructor to avoid leaks on abnormal termination.
+	   The handle must be on the open_files list before the destructor
+	   is armed - the destructor does a DLIST_REMOVE, which would
+	   dereference uninitialised prev/next pointers if an error return
+	   below freed an un-linked handle */
+	DLIST_ADD(pvfs->open_files, f);
+	talloc_set_destructor(f, pvfs_dir_fd_destructor);
+
+	switch (io->generic.in.open_disposition) {
+	case NTCREATEX_DISP_OPEN_IF:
+		break;
+
+	case NTCREATEX_DISP_OPEN:
+		if (!name->exists) {
+			return NT_STATUS_OBJECT_NAME_NOT_FOUND;
+		}
+		break;
+
+	case NTCREATEX_DISP_CREATE:
+		if (name->exists) {
+			return NT_STATUS_OBJECT_NAME_COLLISION;
+		}
+		break;
+
+	case NTCREATEX_DISP_OVERWRITE_IF:
+	case NTCREATEX_DISP_OVERWRITE:
+	case NTCREATEX_DISP_SUPERSEDE:
+	default:
+		return NT_STATUS_INVALID_PARAMETER;
+	}
+
+	if (!name->exists) {
+		if (mkdir(name->full_name, 0755) == -1) {
+			return pvfs_map_errno(pvfs,errno);
+		}
+		status = pvfs_resolve_name(pvfs, req, io->ntcreatex.in.fname,
+					   PVFS_RESOLVE_NO_WILDCARD, &name);
+		if (!NT_STATUS_IS_OK(status)) {
+			return status;
+		}
+		/* re-point the handle at the re-resolved name, otherwise
+		   f->name keeps describing the pre-mkdir (non-existent)
+		   path */
+		f->name = talloc_steal(f, name);
+	}
+
+	if (!name->exists) {
+		return NT_STATUS_OBJECT_NAME_NOT_FOUND;
+	}
+
+	/* the open succeeded, keep this handle permanently */
+	talloc_steal(pvfs, f);
+
+	ZERO_STRUCT(io->generic.out);
+
+	io->generic.out.create_time = name->dos.create_time;
+	io->generic.out.access_time = name->dos.access_time;
+	io->generic.out.write_time  = name->dos.write_time;
+	io->generic.out.change_time = name->dos.change_time;
+	io->generic.out.fnum        = f->fnum;
+	io->generic.out.alloc_size  = 0;
+	io->generic.out.size        = 0;
+	io->generic.out.attrib      = name->dos.attrib;
+	io->generic.out.is_directory = 1;
+
+	return NT_STATUS_OK;
+}
+
+
/*
by using a destructor we make sure that abnormal cleanup will not
leak file descriptors (assuming at least the top level pointer is freed, which
{
struct pvfs_file *f = p;
+ DLIST_REMOVE(f->pvfs->open_files, f);
+
pvfs_lock_close(f->pvfs, f);
if (f->fd != -1) {
return status;
}
+ /* directory opens are handled separately */
+ if ((name->exists && (name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY)) ||
+ (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY)) {
+ return pvfs_open_directory(pvfs, req, name, io);
+ }
+
+
switch (io->generic.in.open_disposition) {
case NTCREATEX_DISP_SUPERSEDE:
+ if (!name->exists) {
+ return NT_STATUS_OBJECT_NAME_NOT_FOUND;
+ }
+ flags = O_TRUNC;
+ break;
+
case NTCREATEX_DISP_OVERWRITE_IF:
flags = O_CREAT | O_TRUNC;
break;
+
case NTCREATEX_DISP_OPEN:
+ if (!name->exists) {
+ return NT_STATUS_OBJECT_NAME_NOT_FOUND;
+ }
flags = 0;
break;
+
case NTCREATEX_DISP_OVERWRITE:
+ if (!name->exists) {
+ return NT_STATUS_OBJECT_NAME_NOT_FOUND;
+ }
flags = O_TRUNC;
break;
+
case NTCREATEX_DISP_CREATE:
+ if (name->exists) {
+ return NT_STATUS_OBJECT_NAME_COLLISION;
+ }
flags = O_CREAT | O_EXCL;
break;
+
case NTCREATEX_DISP_OPEN_IF:
flags = O_CREAT;
break;
- default:
- flags = 0;
- break;
- }
-
- flags |= O_RDWR;
-
-/* we need to do this differently to support systems without O_DIRECTORY */
-#ifndef O_DIRECTORY
-#define O_DIRECTORY 0
-#endif
- if (name->exists &&
- (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY) &&
- !(name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY)) {
- return NT_STATUS_NOT_A_DIRECTORY;
+ default:
+ return NT_STATUS_INVALID_PARAMETER;
}
- if ((name->exists && name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY) ||
- (io->generic.in.create_options & NTCREATEX_OPTIONS_DIRECTORY)) {
- flags = O_RDONLY | O_DIRECTORY;
- if (pvfs->flags & PVFS_FLAG_READONLY) {
- goto do_open;
- }
- switch (io->generic.in.open_disposition) {
- case NTCREATEX_DISP_CREATE:
- if (mkdir(name->full_name, 0755) == -1) {
- return pvfs_map_errno(pvfs,errno);
- }
- break;
- case NTCREATEX_DISP_OPEN_IF:
- if (mkdir(name->full_name, 0755) == -1 && errno != EEXIST) {
- return pvfs_map_errno(pvfs,errno);
- }
- break;
- }
- }
+ flags |= O_RDWR;
- f = talloc_p(pvfs, struct pvfs_file);
+ f = talloc_p(req, struct pvfs_file);
if (f == NULL) {
return NT_STATUS_NO_MEMORY;
}
fnum = idr_get_new(pvfs->idtree_fnum, f, UINT16_MAX);
if (fnum == -1) {
- talloc_free(f);
return NT_STATUS_TOO_MANY_OPENED_FILES;
}
-do_open:
fd = open(name->full_name, flags, 0644);
if (fd == -1) {
- if (errno == 0)
+ if (errno == 0) {
errno = ENOENT;
- return pvfs_map_errno(pvfs,errno);
+ }
+ idr_remove(pvfs->idtree_fnum, fnum);
+ return pvfs_map_errno(pvfs, errno);
}
/* re-resolve the open fd */
status = pvfs_resolve_name_fd(pvfs, fd, name);
if (!NT_STATUS_IS_OK(status)) {
+ idr_remove(pvfs->idtree_fnum, fnum);
return status;
}
io->generic.out.attrib = name->dos.attrib;
io->generic.out.is_directory = (name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY)?1:0;
+ /* success - keep the file handle */
+ talloc_steal(pvfs, f);
+
return NT_STATUS_OK;
}
NTSTATUS status;
if (io->generic.level != RAW_CLOSE_CLOSE) {
- /* we need a mapping function */
- return NT_STATUS_INVALID_LEVEL;
+ return ntvfs_map_close(req, io, ntvfs);
}
f = pvfs_find_fd(pvfs, req, io->close.in.fnum);
return NT_STATUS_INVALID_HANDLE;
}
- if (close(f->fd) != 0) {
+ if (f->fd != -1 &&
+ close(f->fd) != 0) {
status = pvfs_map_errno(pvfs, errno);
} else {
status = NT_STATUS_OK;
}
f->fd = -1;
- DLIST_REMOVE(pvfs->open_files, f);
-
/* the destructor takes care of the rest */
talloc_free(f);
NTSTATUS status;
if (rd->generic.level != RAW_READ_READX) {
- return NT_STATUS_NOT_SUPPORTED;
+ return ntvfs_map_read(req, rd, ntvfs);
}
-
f = pvfs_find_fd(pvfs, req, rd->readx.in.fnum);
if (!f) {
return NT_STATUS_INVALID_HANDLE;
}
+ if (f->name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY) {
+ return NT_STATUS_FILE_IS_A_DIRECTORY;
+ }
+
status = pvfs_check_lock(pvfs, f, req->smbpid,
rd->readx.in.offset,
rd->readx.in.maxcnt,
struct pvfs_file *f;
NTSTATUS status;
- switch (wr->generic.level) {
- case RAW_WRITE_WRITEX:
- f = pvfs_find_fd(pvfs, req, wr->writex.in.fnum);
- if (!f) {
- return NT_STATUS_INVALID_HANDLE;
- }
- status = pvfs_check_lock(pvfs, f, req->smbpid,
- wr->writex.in.offset,
- wr->writex.in.count,
- WRITE_LOCK);
- if (!NT_STATUS_IS_OK(status)) {
- return status;
- }
-
- ret = pwrite(f->fd,
- wr->writex.in.data,
- wr->writex.in.count,
- wr->writex.in.offset);
- if (ret == -1) {
- return map_nt_error_from_unix(errno);
- }
-
- wr->writex.out.nwritten = ret;
- wr->writex.out.remaining = 0; /* should fill this in? */
-
- return NT_STATUS_OK;
-
- case RAW_WRITE_WRITE:
- f = pvfs_find_fd(pvfs, req, wr->write.in.fnum);
- if (!f) {
- return NT_STATUS_INVALID_HANDLE;
- }
- if (wr->write.in.count == 0) {
- /* a truncate! */
- ret = ftruncate(f->fd, wr->write.in.offset);
- } else {
- status = pvfs_check_lock(pvfs, f, req->smbpid,
- wr->write.in.offset,
- wr->write.in.count,
- WRITE_LOCK);
- if (!NT_STATUS_IS_OK(status)) {
- return status;
- }
+ if (wr->generic.level != RAW_WRITE_WRITEX) {
+ return ntvfs_map_write(req, wr, ntvfs);
+ }
- ret = pwrite(f->fd,
- wr->write.in.data,
- wr->write.in.count,
- wr->write.in.offset);
- }
- if (ret == -1) {
- return pvfs_map_errno(pvfs, errno);
- }
-
- wr->write.out.nwritten = ret;
+ f = pvfs_find_fd(pvfs, req, wr->writex.in.fnum);
+ if (!f) {
+ return NT_STATUS_INVALID_HANDLE;
+ }
- return NT_STATUS_OK;
+ if (f->name->dos.attrib & FILE_ATTRIBUTE_DIRECTORY) {
+ return NT_STATUS_FILE_IS_A_DIRECTORY;
}
- return NT_STATUS_NOT_SUPPORTED;
+ status = pvfs_check_lock(pvfs, f, req->smbpid,
+ wr->writex.in.offset,
+ wr->writex.in.count,
+ WRITE_LOCK);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
+
+ ret = pwrite(f->fd,
+ wr->writex.in.data,
+ wr->writex.in.count,
+ wr->writex.in.offset);
+ if (ret == -1) {
+ return map_nt_error_from_unix(errno);
+ }
+
+ wr->writex.out.nwritten = ret;
+ wr->writex.out.remaining = 0; /* should fill this in? */
+
+ return NT_STATUS_OK;
}
io->ioctl.in.fnum = req_fnum(req, req->in.vwv, VWV(0));
io->ioctl.in.request = IVAL(req->in.vwv, VWV(1));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_ioctl_send;
req->async.private = io;
req_pull_ascii4(req, &io->in.path, req->in.data, STR_TERMINATE);
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
req->async.status = ntvfs_chkpath(req, io);
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_getatr_send;
req->async.private = st;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
fs->dskattr.level = RAW_QFS_DSKATTR;
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_dskattr_send;
req->async.private = fs;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_open_send;
req->async.private = oi;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_open_and_X_send;
req->async.private = oi;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_mknew_send;
req->async.private = oi;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_ctemp_send;
req->async.private = oi;
req_pull_ascii4(req, &unl->in.pattern, req->in.data, STR_TERMINATE);
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
io.readbraw.level = RAW_READ_READBRAW;
/* there are two variants, one with 10 and one with 8 command words */
- if (req->in.wct != 10) {
- REQ_CHECK_WCT(req, 8);
+ if (req->in.wct < 8) {
+ goto failed;
}
io.readbraw.in.fnum = req_fnum(req, req->in.vwv, VWV(0));
req->out.size = io.readbraw.out.nread + NBT_HDR_SIZE;
- req_send_reply(req);
+ req_send_reply_nosign(req);
return;
failed:
/* tell the backend where to put the data */
io->lockread.out.data = req->out.data + 3;
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_lockread_send;
req->async.private = io;
/* tell the backend where to put the data */
io->read.out.data = req->out.data + 3;
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_read_send;
req->async.private = io;
/* tell the backend where to put the data. Notice the pad byte. */
io->readx.out.data = req->out.data + 1;
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_read_and_X_send;
req->async.private = io;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_writeunlock_send;
req->async.private = io;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_write_send;
req->async.private = io;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_write_and_X_send;
req->async.private = io;
io->in.mode = SVAL(req->in.vwv, VWV(1));
io->in.offset = IVALS(req->in.vwv, VWV(2));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_lseek_send;
req->async.private = io;
io->in.fnum = req_fnum(req, req->in.vwv, VWV(0));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
io->close.in.fnum = req_fnum(req, req->in.vwv, VWV(0));
io->close.in.write_time = srv_pull_dos_date3(req->smb_conn, req->in.vwv + VWV(1));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_writeclose_send;
req->async.private = io;
lck->lock.in.count = IVAL(req->in.vwv, VWV(1));
lck->lock.in.offset = IVAL(req->in.vwv, VWV(3));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
lck->unlock.in.count = IVAL(req->in.vwv, VWV(1));
lck->unlock.in.offset = IVAL(req->in.vwv, VWV(3));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
req_pull_ascii4(req, &oi->splopen.in.ident, req->in.data, STR_TERMINATE);
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_printopen_send;
req->async.private = oi;
io->splclose.level = RAW_CLOSE_SPLCLOSE;
io->splclose.in.fnum = req_fnum(req, req->in.vwv, VWV(0));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
lpq->retq.in.maxcount = SVAL(req->in.vwv, VWV(0));
lpq->retq.in.startidx = SVAL(req->in.vwv, VWV(1));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_printqueue_send;
req->async.private = lpq;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
io->generic.level = RAW_MKDIR_MKDIR;
req_pull_ascii4(req, &io->mkdir.in.path, req->in.data, STR_TERMINATE);
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
req_pull_ascii4(req, &io->in.path, req->in.data, STR_TERMINATE);
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_copy_send;
req->async.private = cp;
p += lck_size;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_lockingX_send;
req->async.private = lck;
info->setattre.in.access_time = srv_pull_dos_date2(req->smb_conn, req->in.vwv + VWV(3));
info->setattre.in.write_time = srv_pull_dos_date2(req->smb_conn, req->in.vwv + VWV(5));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_simple_send;
/* call backend */
info->getattr.level = RAW_FILEINFO_GETATTRE;
info->getattr.in.fnum = req_fnum(req, req->in.vwv, VWV(0));
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_getattrE_send;
req->async.private = info;
return;
}
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_ntcreate_and_X_send;
req->async.private = io;
sc->fclose.in.id.client_cookie = IVAL(p, 17);
/* do a search close operation */
+ req->control_flags |= REQ_CONTROL_MAY_ASYNC;
req->async.send_fn = reply_fclose_send;
req->async.private = sc;
control_flag on the request to indicate that it wishes to
delay the reply
- If async.send_fn is NULL then the backend cannot ask for a
- delayed reply for this request
+ If REQ_CONTROL_MAY_ASYNC is not set then the backend cannot
+ ask for a delayed reply for this request
note that the async.private pointer is private to the front
end not the backend. The backend must not change it.