tevent: Make talloc_free safe when threaded_contexts exist
author: Volker Lendecke <vl@samba.org>
Wed, 7 Sep 2016 18:25:36 +0000 (20:25 +0200)
committer: Jeremy Allison <jra@samba.org>
Tue, 4 Oct 2016 22:06:21 +0000 (00:06 +0200)
I did not find a way to do this safely without a mutex per threaded_context.

Signed-off-by: Volker Lendecke <vl@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>
lib/tevent/tevent.c
lib/tevent/tevent_internal.h
lib/tevent/tevent_threads.c

index 575337d41092efd7efdfa861992fb353dc340ef8..65b101f28e5715d8b576b89c655e2d2662962c5b 100644 (file)
@@ -200,6 +200,16 @@ static void tevent_atfork_prepare(void)
        }
 
        for (ev = tevent_contexts; ev != NULL; ev = ev->next) {
+               struct tevent_threaded_context *tctx;
+
+               for (tctx = ev->threaded_contexts; tctx != NULL;
+                    tctx = tctx->next) {
+                       ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
+                       if (ret != 0) {
+                               tevent_abort(ev, "pthread_mutex_lock failed");
+                       }
+               }
+
                ret = pthread_mutex_lock(&ev->scheduled_mutex);
                if (ret != 0) {
                        tevent_abort(ev, "pthread_mutex_lock failed");
@@ -214,10 +224,21 @@ static void tevent_atfork_parent(void)
 
        for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
             ev = DLIST_PREV(ev)) {
+               struct tevent_threaded_context *tctx;
+
                ret = pthread_mutex_unlock(&ev->scheduled_mutex);
                if (ret != 0) {
                        tevent_abort(ev, "pthread_mutex_unlock failed");
                }
+
+               for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
+                    tctx = DLIST_PREV(tctx)) {
+                       ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
+                       if (ret != 0) {
+                               tevent_abort(
+                                       ev, "pthread_mutex_unlock failed");
+                       }
+               }
        }
 
        ret = pthread_mutex_unlock(&tevent_contexts_mutex);
@@ -235,9 +256,15 @@ static void tevent_atfork_child(void)
             ev = DLIST_PREV(ev)) {
                struct tevent_threaded_context *tctx;
 
-               for (tctx = ev->threaded_contexts; tctx != NULL;
-                    tctx = tctx->next) {
+               for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
+                    tctx = DLIST_PREV(tctx)) {
                        tctx->event_ctx = NULL;
+
+                       ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
+                       if (ret != 0) {
+                               tevent_abort(
+                                       ev, "pthread_mutex_unlock failed");
+                       }
                }
 
                ev->threaded_contexts = NULL;
@@ -289,18 +316,32 @@ int tevent_common_context_destructor(struct tevent_context *ev)
        if (ret != 0) {
                abort();
        }
-#endif
 
-       if (ev->threaded_contexts != NULL) {
+       while (ev->threaded_contexts != NULL) {
+               struct tevent_threaded_context *tctx = ev->threaded_contexts;
+
+               ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
+               if (ret != 0) {
+                       abort();
+               }
+
                /*
-                * Threaded contexts are indicators that threads are
-                * about to send us immediates via
-                * tevent_threaded_schedule_immediate. The caller
-                * needs to make sure that the tevent context lives
-                * long enough to receive immediates from all threads.
+                * Indicate to the thread that the tevent_context is
+                * gone. The counterpart of this is in
+                * _tevent_threaded_schedule_immediate, there we read
+                * this under the threaded_context's mutex.
                 */
-               tevent_abort(ev, "threaded contexts exist");
+
+               tctx->event_ctx = NULL;
+
+               ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
+               if (ret != 0) {
+                       abort();
+               }
+
+               DLIST_REMOVE(ev->threaded_contexts, tctx);
        }
+#endif
 
        tevent_common_wakeup_fini(ev);
 
index a4af79e85ac4c200f7887dd74b5f81c2e2666ebb..a5f1ebdefd53db76c3ba730b7c6b82b7bee0acf4 100644 (file)
@@ -230,7 +230,12 @@ struct tevent_signal {
 
 struct tevent_threaded_context {
        struct tevent_threaded_context *next, *prev;
+
+#ifdef HAVE_PTHREAD
+       pthread_mutex_t event_ctx_mutex;
+#endif
        struct tevent_context *event_ctx;
+       int wakeup_fd;
 };
 
 struct tevent_debug_ops {
index e42759efd83a9064941e40cdfc8dd558db6d679d..8197323af020e43444415da0f3d42191e8590452 100644 (file)
@@ -375,9 +375,17 @@ void tevent_thread_proxy_schedule(struct tevent_thread_proxy *tp,
 static int tevent_threaded_context_destructor(
        struct tevent_threaded_context *tctx)
 {
+       int ret;
+
        if (tctx->event_ctx != NULL) {
                DLIST_REMOVE(tctx->event_ctx->threaded_contexts, tctx);
        }
+
+       ret = pthread_mutex_destroy(&tctx->event_ctx_mutex);
+       if (ret != 0) {
+               abort();
+       }
+
        return 0;
 }
 
@@ -399,6 +407,13 @@ struct tevent_threaded_context *tevent_threaded_context_create(
                return NULL;
        }
        tctx->event_ctx = ev;
+       tctx->wakeup_fd = ev->wakeup_fd;
+
+       ret = pthread_mutex_init(&tctx->event_ctx_mutex, NULL);
+       if (ret != 0) {
+               TALLOC_FREE(tctx);
+               return NULL;
+       }
 
        DLIST_ADD(ev->threaded_contexts, tctx);
        talloc_set_destructor(tctx, tevent_threaded_context_destructor);
@@ -418,9 +433,28 @@ void _tevent_threaded_schedule_immediate(struct tevent_threaded_context *tctx,
                                         const char *location)
 {
 #ifdef HAVE_PTHREAD
-       struct tevent_context *ev = tctx->event_ctx;
+       struct tevent_context *ev;
        int ret;
 
+       ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
+       if (ret != 0) {
+               abort();
+       }
+
+       ev = tctx->event_ctx;
+
+       ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
+       if (ret != 0) {
+               abort();
+       }
+
+       if (ev == NULL) {
+               /*
+                * Our event context is already gone.
+                */
+               return;
+       }
+
        if ((im->event_ctx != NULL) || (handler == NULL)) {
                abort();
        }
@@ -455,7 +489,7 @@ void _tevent_threaded_schedule_immediate(struct tevent_threaded_context *tctx,
         * than a noncontended one. So I'd opt for the lower footprint
         * initially. Maybe we have to change that later.
         */
-       tevent_common_wakeup(ev);
+       tevent_common_wakeup_fd(tctx->wakeup_fd);
 #else
        /*
         * tevent_threaded_context_create() returned NULL with ENOSYS...