+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ struct aio_fork_pread_state *state = tevent_req_data(
+ req, struct aio_fork_pread_state);
+ ssize_t nread;
+ uint8_t *buf;
+ int err;
+ struct rw_ret *retbuf;
+
+ nread = read_packet_recv(subreq, talloc_tos(), &buf, &err);
+ TALLOC_FREE(subreq);
+ if (nread == -1) {
+ TALLOC_FREE(state->child);
+ tevent_req_error(req, err);
+ return;
+ }
+
+ retbuf = (struct rw_ret *)buf;
+ state->ret = retbuf->size;
+ state->vfs_aio_state.error = retbuf->ret_errno;
+ state->vfs_aio_state.duration = retbuf->duration;
+
+ if ((size_t)state->ret > state->n) {
+ tevent_req_error(req, EIO);
+ state->child->busy = false;
+ return;
+ }
+ memcpy(state->data, state->child->map->ptr, state->ret);
+
+ state->child->busy = false;
+
+ tevent_req_done(req);
+}
+
+/*
+ * Receive the result of an async pread that was serviced by a helper
+ * child process.
+ *
+ * @param req           The tevent request completed by the _done callback.
+ * @param vfs_aio_state Filled with the errno/duration the child reported
+ *                      (or with the unix error on internal failure).
+ *
+ * @return Number of bytes read on success, -1 on failure with
+ *         vfs_aio_state->error set.
+ */
+static ssize_t aio_fork_pread_recv(struct tevent_req *req,
+				   struct vfs_aio_state *vfs_aio_state)
+{
+	struct aio_fork_pread_state *state = tevent_req_data(
+		req, struct aio_fork_pread_state);
+
+	/* Internal/tevent errors are mapped to a unix errno here. */
+	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+		return -1;
+	}
+	/* Hand back errno + duration as recorded by the completion path. */
+	*vfs_aio_state = state->vfs_aio_state;
+	return state->ret;
+}
+
+struct aio_fork_pwrite_state {