return glfs_pread(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle, fsp), data, n, offset, 0);
}
+struct glusterfs_aio_state;
+
+/*
+ * Private data attached to the tevent_req: only a pointer to the
+ * glusterfs_aio_state, which is allocated outside the req so that it
+ * can outlive the req if the request gets cancelled.
+ */
+struct glusterfs_aio_wrapper {
+ struct glusterfs_aio_state *state;
+};
+
struct glusterfs_aio_state {
ssize_t ret;
int err;
+ /* req to be completed from the smbd main thread via the pipe */
+ struct tevent_req *req;
+ /* set (by the wrapper's destructor) when the req was freed early */
+ bool cancelled;
};
+/*
+ * Talloc destructor for the wrapper living inside the tevent_req.  If
+ * the req is freed while glusterfs still has the IO in flight, flag
+ * the detached state as cancelled so the completion path cleans it up
+ * instead of touching the dead req.
+ *
+ * Typed parameter (not void *): talloc_set_destructor() type-checks
+ * the destructor against the pointer it is attached to.
+ */
+static int aio_wrapper_destructor(struct glusterfs_aio_wrapper *wrap)
+{
+ wrap->state->cancelled = true;
+
+ return 0;
+}
+
/*
* This function is the callback that will be called on glusterfs
* threads once the async IO submitted is complete. To notify
*/
static void aio_glusterfs_done(glfs_fd_t *fd, ssize_t ret, void *data)
{
- struct tevent_req *req = NULL;
struct glusterfs_aio_state *state = NULL;
int sts = 0;
- req = talloc_get_type_abort(data, struct tevent_req);
- state = tevent_req_data(req, struct glusterfs_aio_state);
+ state = (struct glusterfs_aio_state *)data;
if (ret < 0) {
state->ret = -1;
}
/*
- * Write the pointer to each req that needs to be completed
- * by calling tevent_req_done(). tevent_req_done() cannot
- * be called here, as it is not designed to be executed
- * in the multithread environment, tevent_req_done() must be
+ * Write the pointer to the glusterfs_aio_state into the pipe,
+ * so that the request can be completed from the smbd main
+ * thread: tevent_req_done() is not designed to be executed in a
+ * multithreaded environment, and therefore it must be
* executed from the smbd main thread.
*
* write(2) on pipes with sizes under _POSIX_PIPE_BUF
* that we can trust it here.
*/
- sts = sys_write(write_fd, &req, sizeof(struct tevent_req *));
+ sts = sys_write(write_fd, &state, sizeof(struct glusterfs_aio_state *));
if (sts < 0) {
DEBUG(0,("\nWrite to pipe failed (%s)", strerror(errno)));
}
uint16_t flags, void *data)
{
struct tevent_req *req = NULL;
+ struct glusterfs_aio_state *state = NULL;
int sts = 0;
/*
* can trust it here.
*/
- sts = sys_read(read_fd, &req, sizeof(struct tevent_req *));
+ sts = sys_read(read_fd, &state, sizeof(struct glusterfs_aio_state *));
+
if (sts < 0) {
DEBUG(0,("\nRead from pipe failed (%s)", strerror(errno)));
}
+ /* if we've cancelled the op, there is no req, so just clean up. */
+ if (state->cancelled == true) {
+ TALLOC_FREE(state);
+ return;
+ }
+
+ req = state->req;
+
if (req) {
tevent_req_done(req);
}
return false;
}
-static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
- *handle, TALLOC_CTX *mem_ctx,
- struct tevent_context *ev,
- files_struct *fsp, void *data,
- size_t n, off_t offset)
+/*
+ * Allocate a tevent_req plus the glusterfs_aio_state that carries the
+ * result of an async glusterfs call.  The state is allocated in the
+ * NULL talloc context, deliberately detached from the req, so the
+ * glusterfs callback thread may still write to it after the req has
+ * been freed; the wrapper's destructor only marks it cancelled.
+ * Returns NULL on allocation failure.
+ */
+static struct glusterfs_aio_state *aio_state_create(TALLOC_CTX *mem_ctx)
{
struct tevent_req *req = NULL;
struct glusterfs_aio_state *state = NULL;
- int ret = 0;
+ struct glusterfs_aio_wrapper *wrapper = NULL;
+
- req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
+ req = tevent_req_create(mem_ctx, &wrapper, struct glusterfs_aio_wrapper);
if (req == NULL) {
return NULL;
}
+ state = talloc(NULL, struct glusterfs_aio_state);
+
+ if (state == NULL) {
+ TALLOC_FREE(req);
+ return NULL;
+ }
+
+ /*
+ * Without registering the destructor, freeing the req would never
+ * set state->cancelled, and the pipe handler would then complete
+ * a freed req (use-after-free).
+ */
+ talloc_set_destructor(wrapper, aio_wrapper_destructor);
+
+ state->cancelled = false;
+ state->ret = 0;
+ state->err = 0;
+ state->req = req;
+
+ wrapper->state = state;
+
+ return state;
+}
+
+static struct tevent_req *vfs_gluster_pread_send(struct vfs_handle_struct
+ *handle, TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ files_struct *fsp,
+ void *data, size_t n,
+ off_t offset)
+{
+ struct glusterfs_aio_state *state = NULL;
+ struct tevent_req *req = NULL;
+ int ret = 0;
+
+ state = aio_state_create(mem_ctx);
+
+ if (state == NULL) {
+ return NULL;
+ }
+
+ req = state->req;
+
if (!init_gluster_aio(handle)) {
tevent_req_error(req, EIO);
return tevent_req_post(req, ev);
}
ret = glfs_pread_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
fsp), data, n, offset, 0, aio_glusterfs_done,
- req);
+ state);
+
if (ret < 0) {
tevent_req_error(req, -ret);
return tevent_req_post(req, ev);
const void *data, size_t n,
off_t offset)
{
- struct tevent_req *req = NULL;
struct glusterfs_aio_state *state = NULL;
+ struct tevent_req *req = NULL;
int ret = 0;
- req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
- if (req == NULL) {
+ state = aio_state_create(mem_ctx);
+
+ if (state == NULL) {
return NULL;
}
+
+ req = state->req;
+
if (!init_gluster_aio(handle)) {
tevent_req_error(req, EIO);
return tevent_req_post(req, ev);
}
ret = glfs_pwrite_async(*(glfs_fd_t **)VFS_FETCH_FSP_EXTENSION(handle,
fsp), data, n, offset, 0, aio_glusterfs_done,
- req);
+ state);
if (ret < 0) {
tevent_req_error(req, -ret);
return tevent_req_post(req, ev);
+/*
+ * Collect the result of an async glusterfs request and free the
+ * detached glusterfs_aio_state.  Returns the saved ssize_t result;
+ * on -1, *err holds the saved errno.
+ */
static ssize_t vfs_gluster_recv(struct tevent_req *req, int *err)
{
struct glusterfs_aio_state *state = NULL;
- state = tevent_req_data(req, struct glusterfs_aio_state);
+ struct glusterfs_aio_wrapper *wrapper = NULL;
+ int ret = 0;
+
+ wrapper = tevent_req_data(req, struct glusterfs_aio_wrapper);
+
+ if (wrapper == NULL) {
+ return -1;
+ }
+
+ state = wrapper->state;
if (state == NULL) {
return -1;
}
if (state->ret == -1) {
*err = state->err;
}
- return state->ret;
+
+ ret = state->ret;
+
+ /* Clean up the state, it is in a NULL context. */
+
+ TALLOC_FREE(state);
+
+ return ret;
}
static off_t vfs_gluster_lseek(struct vfs_handle_struct *handle,
struct glusterfs_aio_state *state = NULL;
int ret = 0;
- req = tevent_req_create(mem_ctx, &state, struct glusterfs_aio_state);
- if (req == NULL) {
+ state = aio_state_create(mem_ctx);
+
+ if (state == NULL) {
return NULL;
}
+
+ req = state->req;
+
if (!init_gluster_aio(handle)) {
tevent_req_error(req, EIO);
return tevent_req_post(req, ev);