2 Unix SMB/CIFS implementation.
3 Infrastructure for async requests
4 Copyright (C) Volker Lendecke 2008
5 Copyright (C) Stefan Metzmacher 2009
7 ** NOTE! The following LGPL license applies to the tevent
8 ** library. This does NOT imply that all of Samba is released
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Lesser General Public
13 License as published by the Free Software Foundation; either
14 version 3 of the License, or (at your option) any later version.
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Lesser General Public License for more details.
21 You should have received a copy of the GNU Lesser General Public
22 License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 #include "tevent_internal.h"
28 #include "tevent_util.h"
/*
 * Queue entry bookkeeping.
 * NOTE(review): this listing is a gappy extraction — each line still
 * carries its original file line number, and several members (plus
 * the closing braces) are missing; confirm against the complete file.
 */
30 struct tevent_queue_entry {
/* DLIST linkage, manipulated via DLIST_ADD_END()/DLIST_REMOVE() below */
31 struct tevent_queue_entry *prev, *next;
/* back-pointer to the owning queue */
32 struct tevent_queue *queue;
/* the request waiting in the queue */
36 struct tevent_req *req;
/* event context, used when scheduling the trigger via an immediate */
37 struct tevent_context *ev;
/* callback fired when this entry reaches the head; NULL = pure blocker */
39 tevent_queue_trigger_fn_t trigger;
/*
 * NOTE(review): the two members below appear to belong to
 * struct tevent_queue — its opening line is missing from this
 * extraction:
 */
/* immediate event used to defer trigger invocation */
48 struct tevent_immediate *immediate;
/* head of the doubly-linked list of queued entries */
51 struct tevent_queue_entry *list;
/*
 * Forward declaration of the deferred-trigger immediate handler
 * (defined further down).  NOTE(review): the private_data parameter
 * line and the terminating ');' are missing from this extraction.
 */
54 static void tevent_queue_immediate_trigger(struct tevent_context *ev,
55 struct tevent_immediate *im,
/*
 * talloc destructor for a queue entry: unlink it from its queue and,
 * if the (new) head entry has not fired yet, schedule the immediate
 * so the next trigger runs.
 * NOTE(review): gappy extraction — NULL/length checks, the immediate's
 * remaining arguments and the return statement are missing; confirm
 * against the complete file.
 */
58 static int tevent_queue_entry_destructor(struct tevent_queue_entry *e)
60 struct tevent_queue *q = e->queue;
/* remove ourselves from the queue's list */
66 DLIST_REMOVE(q->list, e);
/* if the new head has already triggered, nothing more to kick */
77 if (q->list->triggered) {
/* defer the next entry's trigger to an immediate event */
81 tevent_schedule_immediate(q->immediate,
83 tevent_queue_immediate_trigger,
/*
 * talloc destructor for the queue itself.
 * NOTE(review): body truncated in this extraction — presumably it
 * walks q->list and detaches/frees the remaining entries before the
 * queue memory goes away; confirm against the complete file.
 */
89 static int tevent_queue_destructor(struct tevent_queue *q)
94 struct tevent_queue_entry *e = q->list;
/*
 * Create a new queue (internal entry point; callers normally go
 * through a macro that supplies the 'location' string).
 * The queue starts in the running state.
 * NOTE(review): gappy extraction — the 'name' parameter line, the
 * NULL checks after the allocations and the return statements are
 * missing here.
 */
101 struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx,
103 const char *location)
105 struct tevent_queue *queue;
107 queue = talloc_zero(mem_ctx, struct tevent_queue);
/* keep our own copy of the name, owned by the queue */
112 queue->name = talloc_strdup(queue, name);
/* immediate event used to defer trigger callbacks */
117 queue->immediate = tevent_create_immediate(queue);
118 if (!queue->immediate) {
123 queue->location = location;
125 /* queue is running by default */
126 queue->running = true;
/* ensure pending entries are cleaned up when the queue goes away */
128 talloc_set_destructor(queue, tevent_queue_destructor);
/*
 * Immediate handler: fire the trigger of the current head entry.
 * private_data carries the queue, type-checked via
 * talloc_get_type_abort().
 * NOTE(review): gappy extraction — the trailing signature parameters
 * and any guard checks before the trigger call are missing here.
 */
132 static void tevent_queue_immediate_trigger(struct tevent_context *ev,
133 struct tevent_immediate *im,
136 struct tevent_queue *q =
137 talloc_get_type_abort(private_data,
138 struct tevent_queue);
/* mark the head as fired, then invoke its trigger callback */
148 q->list->triggered = true;
149 q->list->trigger(q->list->req, q->list->private_data);
/*
 * Core add routine: allocate a queue entry as a talloc child of
 * 'req', append it to the queue and arrange for its trigger to run
 * once it reaches the head.  When allow_direct is permitted the
 * trigger may be invoked synchronously (empty-queue optimization);
 * otherwise it is always deferred via an immediate event.
 * NOTE(review): gappy extraction — trailing parameter lines, NULL
 * checks, early returns and closing braces are missing; confirm
 * details against the complete file.
 */
152 static struct tevent_queue_entry *tevent_queue_add_internal(
153 struct tevent_queue *queue,
154 struct tevent_context *ev,
155 struct tevent_req *req,
156 tevent_queue_trigger_fn_t trigger,
160 struct tevent_queue_entry *e;
/* entry lifetime is tied to the request */
162 e = talloc_zero(req, struct tevent_queue_entry);
170 e->trigger = trigger;
171 e->private_data = private_data;
174 * if there is no trigger, it is just a blocker
176 if (trigger == NULL) {
/* direct invocation is only valid when the queue was empty */
180 if (queue->length > 0) {
182 * if there are already entries in the
183 * queue do not optimize.
185 allow_direct = false;
188 if (req->async.fn != NULL) {
190 * If the caller wants to optimize for the
191 * empty queue case, call the trigger only
192 * if there is no callback defined for the
195 allow_direct = false;
/* append and arm the destructor that unlinks on free */
198 DLIST_ADD_END(queue->list, e);
200 talloc_set_destructor(e, tevent_queue_entry_destructor);
/* stopped queue: entry stays pending until tevent_queue_start() */
202 if (!queue->running) {
206 if (queue->list->triggered) {
211 * If allowed we directly call the trigger
212 * avoiding possible delays caused by
213 * an immediate event.
216 queue->list->triggered = true;
217 queue->list->trigger(queue->list->req,
218 queue->list->private_data);
/* otherwise defer the head's trigger to an immediate event */
222 tevent_schedule_immediate(queue->immediate,
224 tevent_queue_immediate_trigger,
230 bool tevent_queue_add(struct tevent_queue *queue,
231 struct tevent_context *ev,
232 struct tevent_req *req,
233 tevent_queue_trigger_fn_t trigger,
236 struct tevent_queue_entry *e;
238 e = tevent_queue_add_internal(queue, ev, req,
239 trigger, private_data, false);
247 struct tevent_queue_entry *tevent_queue_add_entry(
248 struct tevent_queue *queue,
249 struct tevent_context *ev,
250 struct tevent_req *req,
251 tevent_queue_trigger_fn_t trigger,
254 return tevent_queue_add_internal(queue, ev, req,
255 trigger, private_data, false);
258 struct tevent_queue_entry *tevent_queue_add_optimize_empty(
259 struct tevent_queue *queue,
260 struct tevent_context *ev,
261 struct tevent_req *req,
262 tevent_queue_trigger_fn_t trigger,
265 return tevent_queue_add_internal(queue, ev, req,
266 trigger, private_data, true);
269 void tevent_queue_entry_untrigger(struct tevent_queue_entry *entry)
271 if (entry->queue->running) {
275 if (entry->queue->list != entry) {
279 entry->triggered = false;
/*
 * (Re)start a stopped queue.  If entries are pending and the head has
 * not fired yet, schedule the immediate so processing resumes.
 * NOTE(review): gappy extraction — the early returns, an apparent
 * empty-list check and the immediate's remaining argument lines are
 * missing here.
 */
282 void tevent_queue_start(struct tevent_queue *queue)
284 if (queue->running) {
285 /* already started */
289 queue->running = true;
/* if the head entry already triggered there is nothing to kick */
295 if (queue->list->triggered) {
/* defer the head's trigger to an immediate event */
299 tevent_schedule_immediate(queue->immediate,
301 tevent_queue_immediate_trigger,
305 void tevent_queue_stop(struct tevent_queue *queue)
307 queue->running = false;
310 size_t tevent_queue_length(struct tevent_queue *queue)
312 return queue->length;
315 bool tevent_queue_running(struct tevent_queue *queue)
317 return queue->running;
/*
 * Completion state for tevent_queue_wait_send()/_recv().
 * NOTE(review): the struct body and closing brace are missing in this
 * extraction; the forward declaration below is also truncated
 * (missing its private_data parameter line and ';').
 */
320 struct tevent_queue_wait_state {
324 static void tevent_queue_wait_trigger(struct tevent_req *req,
/*
 * Async request that completes once every entry queued before it has
 * been processed: it queues a trigger that simply marks this request
 * done when it reaches the head.
 * NOTE(review): gappy extraction — the NULL check after
 * tevent_req_create(), the private_data argument to
 * tevent_queue_add(), the success return and the closing brace are
 * missing; the tevent_req_post() call is presumably on the !ok error
 * path — confirm against the complete file.
 */
327 struct tevent_req *tevent_queue_wait_send(TALLOC_CTX *mem_ctx,
328 struct tevent_context *ev,
329 struct tevent_queue *queue)
331 struct tevent_req *req;
332 struct tevent_queue_wait_state *state;
335 req = tevent_req_create(mem_ctx, &state,
336 struct tevent_queue_wait_state);
/* queue ourselves; the trigger fires once we reach the head */
341 ok = tevent_queue_add(queue, ev, req,
342 tevent_queue_wait_trigger,
346 return tevent_req_post(req, ev);
/**
 * Trigger callback for tevent_queue_wait_send(): the wait entry
 * reached the head of the queue, so the request is complete.
 *
 * NOTE(review): reconstructed — the extraction had dropped the
 * private_data parameter line and the braces.
 */
static void tevent_queue_wait_trigger(struct tevent_req *req,
				      void *private_data)
{
	tevent_req_done(req);
}
358 bool tevent_queue_wait_recv(struct tevent_req *req)
360 enum tevent_req_state state;
363 if (tevent_req_is_error(req, &state, &err)) {
364 tevent_req_received(req);
368 tevent_req_received(req);