2 Unix SMB/CIFS implementation.
4 Copyright (C) Andrew Tridgell 1992-1998
5 Copyright (C) Volker Lendecke 2005
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
/* Pending-timer record (fields only: the "struct timed_event {" header,
 * the "when"/"private_data" members and the closing brace are not visible
 * in this chunk).  Events live on event_ctx->timed_events, which is kept
 * sorted by firing time (see add_event_by_time). */
24 struct timed_event *next, *prev;
/* Back pointer to the owning context; cleared by event_context_destructor
 * so the per-event destructor becomes a no-op after teardown. */
25 struct event_context *event_ctx;
/* Human-readable name, used only for debug logging. */
27 const char *event_name;
/* Callback invoked when the timer fires. */
28 void (*handler)(struct event_context *event_ctx,
29 struct timed_event *te,
/* File-descriptor watch record (fields only: the struct header line, the
 * fd/private_data members and the closing brace are not visible in this
 * chunk).  Events live on event_ctx->fd_events (unordered list). */
36 struct fd_event *prev, *next;
/* Back pointer to the owning context; NULL once the context is destroyed. */
37 struct event_context *event_ctx;
39 uint16_t flags; /* see EVENT_FD_* flags */
/* Callback invoked with the EVENT_FD_* bits that became ready. */
40 void (*handler)(struct event_context *event_ctx,
41 struct fd_event *event,
/* The event context: two doubly-linked lists of pending events.
 * timed_events is kept sorted by firing time so its head is always the
 * next timer to fire; fd_events is unordered.
 * NOTE(review): the closing brace of this struct is outside this chunk. */
47 struct event_context {
/* Sorted list; head = earliest expiry. */
48 struct timed_event *timed_events;
/* Unordered list of fd watches. */
49 struct fd_event *fd_events;
/* Talloc destructor: unlink the timed event from its context's list (if
 * still attached), making TALLOC_FREE(te) the cancellation API.
 * NOTE(review): interior lines (opening brace, remaining DEBUG arguments,
 * return statement, closing brace) are missing from this chunk. */
static int timed_event_destructor(struct timed_event *te)
54 DEBUG(10, ("Destroying timed event %lx \"%s\"\n", (unsigned long)te,
56 if (te->event_ctx != NULL) {
57 DLIST_REMOVE(te->event_ctx->timed_events, te);
62 /****************************************************************************
/* Insert te into ctx->timed_events keeping the list sorted by firing
 * time, so the list head is always the next timer to fire. */
64 ****************************************************************************/
/* NOTE(review): interior lines (opening brace, loop tail that advances
 * last_te, break/closing braces) are missing from this chunk. */
66 static void add_event_by_time(struct timed_event *te)
68 struct event_context *ctx = te->event_ctx;
69 struct timed_event *last_te, *cur_te;
71 /* Keep the list ordered by time. We must preserve this. */
73 for (cur_te = ctx->timed_events; cur_te; cur_te = cur_te->next) {
74 /* if the new event comes before the current one break */
/* A zero "when" apparently means "fire immediately" and such entries
 * stay at the front of the list — TODO confirm against the full file. */
75 if (!timeval_is_zero(&cur_te->when) &&
76 timeval_compare(&te->when, &cur_te->when) < 0) {
/* Insert after the last event that fires no later than te. */
82 DLIST_ADD_AFTER(ctx->timed_events, te, last_te);
85 /****************************************************************************
86 Schedule a function for future calling, cancel with TALLOC_FREE().
87 It's the responsibility of the handler to call TALLOC_FREE() on the event
89 ****************************************************************************/
/* Allocate a timed event on mem_ctx, link it (sorted) into event_ctx and
 * arm a destructor so TALLOC_FREE() cancels it.  Returns the event, or
 * NULL on allocation failure (per the DEBUG(0) below — TODO confirm the
 * missing return lines).
 * NOTE(review): several lines are missing from this chunk: the mem_ctx /
 * when / private_data parameters, braces, te->when assignment and the
 * final return. */
91 struct timed_event *_event_add_timed(struct event_context *event_ctx,
94 const char *event_name,
95 void (*handler)(struct event_context *event_ctx,
96 struct timed_event *te,
101 struct timed_event *te;
103 te = TALLOC_P(mem_ctx, struct timed_event);
105 DEBUG(0, ("talloc failed\n"));
109 te->event_ctx = event_ctx;
111 te->event_name = event_name;
112 te->handler = handler;
113 te->private_data = private_data;
/* Keep ctx->timed_events sorted so the head is the next expiry. */
115 add_event_by_time(te);
/* The destructor unlinks the event: TALLOC_FREE(te) is the cancel API. */
117 talloc_set_destructor(te, timed_event_destructor);
119 DEBUG(10, ("Added timed event \"%s\": %lx\n", event_name,
/* Talloc destructor: unlink the fd event from its context (if any) so
 * TALLOC_FREE(fde) removes the watch.
 * NOTE(review): opening/closing braces and the return statement are
 * missing from this chunk. */
124 static int fd_event_destructor(struct fd_event *fde)
126 if (fde->event_ctx != NULL) {
127 DLIST_REMOVE(fde->event_ctx->fd_events, fde);
/* Register a file-descriptor watch: allocate on mem_ctx, record the
 * handler, link into event_ctx->fd_events and arm the destructor so
 * TALLOC_FREE(fde) removes the watch.  Returns NULL on allocation
 * failure.
 * NOTE(review): missing from this chunk: the mem_ctx parameter line,
 * braces, the fde->fd / fde->flags assignments and the return of fde. */
132 struct fd_event *event_add_fd(struct event_context *event_ctx,
134 int fd, uint16_t flags,
135 void (*handler)(struct event_context *event_ctx,
136 struct fd_event *event,
141 struct fd_event *fde;
143 if (!(fde = TALLOC_P(mem_ctx, struct fd_event))) {
147 fde->event_ctx = event_ctx;
150 fde->handler = handler;
151 fde->private_data = private_data;
/* The fd list is unordered, so a plain head insert suffices. */
153 DLIST_ADD(event_ctx->fd_events, fde);
155 talloc_set_destructor(fde, fd_event_destructor);
159 void event_fd_set_writeable(struct fd_event *fde)
161 fde->flags |= EVENT_FD_WRITE;
164 void event_fd_set_not_writeable(struct fd_event *fde)
166 fde->flags &= ~EVENT_FD_WRITE;
169 void event_fd_set_readable(struct fd_event *fde)
171 fde->flags |= EVENT_FD_READ;
174 void event_fd_set_not_readable(struct fd_event *fde)
176 fde->flags &= ~EVENT_FD_READ;
180 * Return if there's something in the queue
/* Populate the select() fd sets from every registered fd event, raise
 * *maxfd to the highest watched descriptor, and clamp *timeout to the
 * interval until the earliest timer expiry.  Per the comment fragment
 * above, the bool result reports whether there is anything queued.
 * NOTE(review): missing from this chunk: braces, the declaration of
 * "diff" and both return statements. */
183 bool event_add_to_select_args(struct event_context *event_ctx,
184 const struct timeval *now,
185 fd_set *read_fds, fd_set *write_fds,
186 struct timeval *timeout, int *maxfd)
188 struct fd_event *fde;
192 for (fde = event_ctx->fd_events; fde; fde = fde->next) {
193 if (fde->flags & EVENT_FD_READ) {
194 FD_SET(fde->fd, read_fds);
197 if (fde->flags & EVENT_FD_WRITE) {
198 FD_SET(fde->fd, write_fds);
/* Track the highest watched fd for select()'s nfds argument. */
202 if ((fde->flags & (EVENT_FD_READ|EVENT_FD_WRITE))
203 && (fde->fd > *maxfd)) {
/* No timers pending: leave *timeout untouched. */
208 if (event_ctx->timed_events == NULL) {
/* Head of the sorted timer list is the earliest expiry. */
212 diff = timeval_until(now, &event_ctx->timed_events->when);
213 *timeout = timeval_min(timeout, &diff);
/* Dispatch all expired timed events, then every fd event whose readiness
 * bits (as reported by select() in read_fds/write_fds) intersect its
 * interest flags.
 * NOTE(review): many interior lines are missing from this chunk: braces,
 * the "now"/"flags" declarations, the early returns and the loop tails. */
218 bool run_events(struct event_context *event_ctx,
219 int selrtn, fd_set *read_fds, fd_set *write_fds)
222 struct fd_event *fde, *next;
224 /* Run all events that are pending, not just one (as we
/* Handlers may TALLOC_FREE() the head event, so the head is re-read on
 * every iteration rather than cached. */
227 while (event_ctx->timed_events) {
232 &now, &event_ctx->timed_events->when) < 0) {
233 /* Nothing to do yet */
234 DEBUG(11, ("run_events: Nothing to do\n"));
238 DEBUG(10, ("Running event \"%s\" %lx\n",
239 event_ctx->timed_events->event_name,
240 (unsigned long)event_ctx->timed_events))_
242 event_ctx->timed_events->handler(
244 event_ctx->timed_events, now,
245 event_ctx->timed_events->private_data);
252 * We might have changed the socket status during the timed
253 * events, return to run select again.
/* "next" is captured before calling the handler because the handler may
 * TALLOC_FREE(fde). */
265 for (fde = event_ctx->fd_events; fde; fde = next) {
269 if (FD_ISSET(fde->fd, read_fds)) flags |= EVENT_FD_READ;
270 if (FD_ISSET(fde->fd, write_fds)) flags |= EVENT_FD_WRITE;
272 if (flags & fde->flags) {
273 fde->handler(event_ctx, fde, flags, fde->private_data);
/* Compute the interval from now until the next timer expiry into
 * *to_ret.  Presumably returns to_ret (or NULL when no timers are
 * pending) — the return lines are missing from this chunk, TODO confirm.
 * NOTE(review): braces and the "now" declaration are also missing. */
282 struct timeval *get_timed_events_timeout(struct event_context *event_ctx,
283 struct timeval *to_ret)
287 if (event_ctx->timed_events == NULL) {
291 now = timeval_current();
/* Head of the sorted timer list is the earliest expiry. */
292 *to_ret = timeval_until(&now, &event_ctx->timed_events->when);
294 DEBUG(10, ("timed_events_timeout: %d/%d\n", (int)to_ret->tv_sec,
295 (int)to_ret->tv_usec));
/* Run one iteration of the event loop: build the select() arguments,
 * wait (bounded by the next timer), then dispatch via run_events().
 * NOTE(review): missing from this chunk: braces, the fd_set/maxfd/ret
 * declarations and zeroing, the timeval_current() call and all return
 * statements. */
300 int event_loop_once(struct event_context *ev)
302 struct timeval now, to;
310 to.tv_sec = 9999; /* Max timeout */
315 if (!event_add_to_select_args(ev, &now, &r_fds, &w_fds, &to, &maxfd)) {
/* A zero timeout means a timer is already due — dispatch immediately
 * without entering select(). */
319 if (timeval_is_zero(&to)) {
320 run_events(ev, 0, NULL, NULL);
324 ret = sys_select(maxfd+1, &r_fds, &w_fds, NULL, &to);
/* EINTR is benign: run_events below re-checks the timers anyway. */
326 if (ret == -1 && errno != EINTR) {
330 run_events(ev, ret, &r_fds, &w_fds);
/* Talloc destructor for the context: detach every fd and timed event —
 * clearing each event's back pointer first so the per-event destructors
 * become no-ops — and empty both lists.  The events themselves remain
 * owned by their own talloc parents.
 * NOTE(review): braces and the return statement are missing from this
 * chunk. */
334 static int event_context_destructor(struct event_context *ev)
336 while (ev->fd_events != NULL) {
337 ev->fd_events->event_ctx = NULL;
338 DLIST_REMOVE(ev->fd_events, ev->fd_events);
340 while (ev->timed_events != NULL) {
341 ev->timed_events->event_ctx = NULL;
342 DLIST_REMOVE(ev->timed_events, ev->timed_events);
/* Reset a context to the empty state by running the destructor logic in
 * place: all events are detached, the context itself is not freed.
 * NOTE(review): braces (and possibly further statements) are missing
 * from this chunk. */
347 void event_context_reinit(struct event_context *ev)
349 event_context_destructor(ev);
/* Allocate a zero-initialized event context on mem_ctx and arm its
 * destructor (which detaches any remaining events on free).  Returns
 * NULL on allocation failure.
 * NOTE(review): braces and the return statements are missing from this
 * chunk. */
353 struct event_context *event_context_init(TALLOC_CTX *mem_ctx)
355 struct event_context *result;
357 result = TALLOC_ZERO_P(mem_ctx, struct event_context);
358 if (result == NULL) {
362 talloc_set_destructor(result, event_context_destructor);
/* Debug helper: log every pending timed event (with seconds until it
 * fires) and every fd event at debug level 10.
 * NOTE(review): this definition continues past the end of the visible
 * chunk — the fd-event loop body, several declarations (fe, NULL check)
 * and the closing brace are not shown. */
366 void dump_event_list(struct event_context *event_ctx)
368 struct timed_event *te;
370 struct timeval evt, now;
376 now = timeval_current();
378 DEBUG(10,("dump_event_list:\n"));
380 for (te = event_ctx->timed_events; te; te = te->next) {
/* Time remaining until this event fires, relative to "now". */
382 evt = timeval_until(&now, &te->when);
384 DEBUGADD(10,("Timed Event \"%s\" %lx handled in %d seconds (at %s)\n",
388 http_timestring(talloc_tos(), te->when.tv_sec)));
391 for (fe = event_ctx->fd_events; fe; fe = fe->next) {
393 DEBUGADD(10,("FD Event %d %lx, flags: 0x%04x\n",