+/*
+ * Per-request state attached to the tevent_req: the return value of
+ * the async glfs call and the errno captured in the callback thread.
+ */
+struct glusterfs_aio_state {
+ ssize_t ret;
+ int err;
+};
+
+/*
+ * This function is the callback that will be called on glusterfs
+ * threads once the async IO submitted is complete. To notify
+ * Samba of the completion we use eventfd mechanism.
+ */
+static void aio_glusterfs_done(glfs_fd_t *fd, ssize_t ret, void *data)
+{
+#ifdef HAVE_EVENTFD
+ struct tevent_req *req = NULL;
+ struct glusterfs_aio_state *state = NULL;
+ int i, sts = 0;
+ uint64_t u = 1;
+
+ req = talloc_get_type_abort(data, struct tevent_req);
+ state = tevent_req_data(req, struct glusterfs_aio_state);
+
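+ /*
+ * Preserve the result and the thread-local errno so the main
+ * thread can report them when it completes the request.
+ */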
+ if (ret < 0) {
+ state->ret = -1;
+ state->err = errno;
+ } else {
+ state->ret = ret;
+ state->err = 0;
+ }
+
+ /*
+ * Store the reqs that need to be completed by calling
+ * tevent_req_done(). tevent_req_done() cannot be called
+ * here, as it is not designed to run in a multithreaded
+ * environment; it must be called from the smbd main
+ * thread.
+ */
+ pthread_mutex_lock(&lock_req_list);
+ {
+ for (i = 0; i < aio_pending_size; i++) {
+ if (req_producer_list[i] == NULL) {
+ req_producer_list[i] = req;
+ req_counter += 1;
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock(&lock_req_list);
+
+ /*
+ * Notify the main thread only once for a whole batch of
+ * completed fops.
+ */
+ if (req_counter == 1) {
+ sts = write(event_fd, &u, sizeof(uint64_t));
+ if (sts < 0 && errno == EAGAIN) {
+ DEBUG(0, ("WRITE: eventfd counter reached max value\n"));
+ }
+ }
+ return;
+#endif
+}
+
+#ifdef HAVE_EVENTFD
+static void aio_tevent_fd_done(struct tevent_context *event_ctx,
+ struct tevent_fd *fde,
+ uint16_t flags, void *data)
+{
+ struct tevent_req *req = NULL;
+ struct tevent_req **temp = NULL;
+ int i = 0, sts = 0;
+ uint64_t u = 0;
+
+ sts = read(event_fd, &u, sizeof(uint64_t));
+ if (sts < 0 && errno == EAGAIN) {
+ DEBUG(0, ("READ: eventfd read failed (%s)\n", strerror(errno)));
+ }
+
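+ /*
+ * Swap the producer and consumer lists under the lock so the
+ * glusterfs threads can keep queueing completions while we
+ * drain the batch that was just signalled.
+ */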
+ pthread_mutex_lock(&lock_req_list);
+ {
+ temp = req_producer_list;
+ req_producer_list = req_consumer_list;
+ req_consumer_list = temp;
+ req_counter = 0;
+ }
+ pthread_mutex_unlock(&lock_req_list);
+
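+ /*
+ * We are on the smbd main thread now, so it is safe to complete
+ * the drained requests with tevent_req_done().
+ */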
+ for (i = 0; i < aio_pending_size; i++) {
+ req = req_consumer_list[i];
+ if (req != NULL) {
+ tevent_req_done(req);
+ req_consumer_list[i] = NULL;
+ }
+ }
+ return;
+}
+#endif
+
+static bool init_gluster_aio(struct vfs_handle_struct *handle)
+{
+#ifdef HAVE_EVENTFD
+ if (event_fd != -1) {
+ /*
+ * Already initialized.
+ */
+ return true;
+ }
+
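+ /*
+ * Non-blocking eventfd used by the glusterfs callback threads to
+ * wake up the smbd main event loop when async fops complete.
+ */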
+ event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (event_fd == -1) {
+ goto fail;
+ }
+
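+ /*
+ * Watch the eventfd from the main tevent loop;
+ * aio_tevent_fd_done() drains and completes the queued requests.
+ */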
+ aio_read_event = tevent_add_fd(handle->conn->sconn->ev_ctx,
+ NULL,
+ event_fd,
+ TEVENT_FD_READ,
+ aio_tevent_fd_done,
+ NULL);
+ if (aio_read_event == NULL) {
+ goto fail;
+ }
+
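+ /*
+ * Producer/consumer lists that hand completed requests over from
+ * the glusterfs threads to the main thread.
+ */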
+ req_producer_list = talloc_zero_array(NULL, struct tevent_req *,
+ aio_pending_size);
+ req_consumer_list = talloc_zero_array(NULL, struct tevent_req *,
+ aio_pending_size);
+ if (req_producer_list == NULL || req_consumer_list == NULL) {
+ goto fail;
+ }
+
+ return true;
+fail:
+ TALLOC_FREE(req_producer_list);
+ TALLOC_FREE(req_consumer_list);
+ TALLOC_FREE(aio_read_event);
+ if (event_fd != -1) {
+ close(event_fd);
+ event_fd = -1;
+ }
+#endif
+ return false;
+}
+