pthreadpool: add some lockless coordination between the main and job threads
author    Stefan Metzmacher <metze@samba.org>
          Wed, 20 Jun 2018 11:38:19 +0000 (13:38 +0200)
committer Ralph Boehme <slow@samba.org>
          Tue, 24 Jul 2018 15:38:26 +0000 (17:38 +0200)
In the direction from the main process to the job thread, we have:

- 'maycancel', which is set when tevent_req_cancel() is called,
- 'orphaned', which is set when talloc_free() was called on the job
  request, tevent_context or pthreadpool_tevent.

The job function can check these using:

   /*
    * return true - if tevent_req_cancel() was called.
    */
   bool pthreadpool_tevent_current_job_canceled(void);

   /*
    * return true - if talloc_free() was called on the job request,
    * tevent_context or pthreadpool_tevent.
    */
   bool pthreadpool_tevent_current_job_orphaned(void);

   /*
    * return true if canceled and orphaned are both false.
    */
   bool pthreadpool_tevent_current_job_continue(void);
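
A long-running job function might poll these between work items.
A minimal sketch (my_job_state and process_chunk are hypothetical):

   static void my_job_fn(void *private_data)
   {
           struct my_job_state *state = private_data;
           size_t i;

           for (i = 0; i < state->num_chunks; i++) {
                   if (!pthreadpool_tevent_current_job_continue()) {
                           /* canceled or orphaned => stop early */
                           return;
                   }
                   process_chunk(state, i);
           }
   }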

In the other direction we remember the following points
in the job execution:

- 'started'  - set when the job is picked up by a worker thread.
- 'executed' - set once the job function returned.
- 'finished' - set when pthreadpool_tevent_job_signal() is entered.
- 'dropped'  - set when pthreadpool_tevent_job_signal() leaves with
               'orphaned' already set.
- 'signaled' - set when pthreadpool_tevent_job_signal() leaves normally.
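
On the main side these points are consumed to decide what cleanup is
still needed. For example, once 'dropped' is seen, the destructor knows
the immediate event will never fire and can free it (condensed from the
pthreadpool_tevent_job_destructor() hunk below):

   PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
   if (job->needs_fence.dropped) {
           /* the job thread saw 'orphaned' and skipped signaling */
           TALLOC_FREE(job->im);
   }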

There's only one side writing each element,
either the main process or the job thread.

This means we can do the coordination with a full memory
barrier using atomic_thread_fence(memory_order_seq_cst).
lib/replace provides fallbacks if C11 stdatomic.h is not available.
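
The pattern is: the writer publishes a flag and then issues the fence,
the reader issues the fence before loading. Condensed from
pthreadpool_tevent_job_fn() and pthreadpool_tevent_job_done() below
(the reader-side fence placement is illustrative):

   /* job thread (writer side) */
   job->needs_fence.started = true;
   PTHREAD_TEVENT_JOB_THREAD_FENCE(job);

   job->fn(job->private_data);

   job->needs_fence.executed = true;
   PTHREAD_TEVENT_JOB_THREAD_FENCE(job);

   /* main thread (reader side) */
   PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
   if (job->needs_fence.executed) {
           tevent_req_done(state->req);
   }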

A real pthreadpool requires pthread and atomic_thread_fence() (or a
replacement) to be available, otherwise we only have pthreadpool_sync.c.
But this should not make a real difference, as at least
__sync_synchronize() has been available in GCC since 2005.
We also require __thread, which has been available since 2002.

Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Ralph Boehme <slow@samba.org>
lib/pthreadpool/pthreadpool_tevent.c
lib/pthreadpool/pthreadpool_tevent.h
wscript

diff --git a/lib/pthreadpool/pthreadpool_tevent.c b/lib/pthreadpool/pthreadpool_tevent.c
index e7e17d3bf0f7ce3804a653634039ec987c892a13..fbf9c0e835a6420f851193921fd0ca2a74855969 100644
--- a/lib/pthreadpool/pthreadpool_tevent.c
+++ b/lib/pthreadpool/pthreadpool_tevent.c
  */
 
 #include "replace.h"
+#include "system/threads.h"
 #include "pthreadpool_tevent.h"
 #include "pthreadpool.h"
 #include "lib/util/tevent_unix.h"
 #include "lib/util/dlinklist.h"
+#include "lib/util/attr.h"
+
+#define PTHREAD_TEVENT_JOB_THREAD_FENCE_INIT(__job) do { \
+       _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
+} while(0);
+
+#ifdef WITH_PTHREADPOOL
+/*
+ * configure checked we have pthread and atomic_thread_fence() available
+ */
+#define __PTHREAD_TEVENT_JOB_THREAD_FENCE(__order) do { \
+       atomic_thread_fence(__order); \
+} while(0)
+#else
+/*
+ * we're using lib/pthreadpool/pthreadpool_sync.c ...
+ */
+#define __PTHREAD_TEVENT_JOB_THREAD_FENCE(__order) do { } while(0)
+#ifndef HAVE___THREAD
+#define __thread
+#endif
+#endif
+
+#define PTHREAD_TEVENT_JOB_THREAD_FENCE(__job) do { \
+       _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
+       __PTHREAD_TEVENT_JOB_THREAD_FENCE(memory_order_seq_cst); \
+} while(0);
+
+#define PTHREAD_TEVENT_JOB_THREAD_FENCE_FINI(__job) do { \
+       _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
+} while(0);
 
 struct pthreadpool_tevent_job_state;
 
@@ -75,6 +107,70 @@ struct pthreadpool_tevent_job {
 
        void (*fn)(void *private_data);
        void *private_data;
+
+       /*
+        * Coordination between threads
+        *
+        * There's only one side writing each element,
+        * either the main process or the job thread.
+        *
+        * The coordination is done by a full memory
+        * barrier using atomic_thread_fence(memory_order_seq_cst)
+        * wrapped in PTHREAD_TEVENT_JOB_THREAD_FENCE()
+        */
+       struct {
+               /*
+                * 'maycancel'
+                * set when tevent_req_cancel() is called.
+                * (only written by main thread!)
+                */
+               bool maycancel;
+
+               /*
+                * 'orphaned'
+                * set when talloc_free is called on the job request,
+                * tevent_context or pthreadpool_tevent.
+                * (only written by main thread!)
+                */
+               bool orphaned;
+
+               /*
+                * 'started'
+                * set when the job is picked up by a worker thread
+                * (only written by job thread!)
+                */
+               bool started;
+
+               /*
+                * 'executed'
+                * set once the job function returned.
+                * (only written by job thread!)
+                */
+               bool executed;
+
+               /*
+                * 'finished'
+                * set when pthreadpool_tevent_job_signal() is entered
+                * (only written by job thread!)
+                */
+               bool finished;
+
+               /*
+                * 'dropped'
+                * set when pthreadpool_tevent_job_signal() leaves with
+                * orphaned already set.
+                * (only written by job thread!)
+                */
+               bool dropped;
+
+               /*
+                * 'signaled'
+                * set when pthreadpool_tevent_job_signal() leaves normally
+                * and the immediate event was scheduled.
+                * (only written by job thread!)
+                */
+               bool signaled;
+       } needs_fence;
 };
 
 static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool);
@@ -299,11 +395,11 @@ static bool pthreadpool_tevent_job_cancel(struct tevent_req *req);
 static int pthreadpool_tevent_job_destructor(struct pthreadpool_tevent_job *job)
 {
        /*
-        * We should never be called with state->state != NULL.
+        * We should never be called with needs_fence.orphaned == false.
         * Only pthreadpool_tevent_job_orphan() will call TALLOC_FREE(job)
         * after detaching from the request state and pool list.
         */
-       if (job->state != NULL) {
+       if (!job->needs_fence.orphaned) {
                abort();
        }
 
@@ -328,6 +424,17 @@ static int pthreadpool_tevent_job_destructor(struct pthreadpool_tevent_job *job)
                }
        }
 
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
+       if (job->needs_fence.dropped) {
+               /*
+                * The signal function saw job->needs_fence.orphaned
+                * before it started the signaling via the immediate
+                * event. So we'll never get triggered and can
+                * remove job->im and let the whole job go...
+                */
+               TALLOC_FREE(job->im);
+       }
+
        /*
         * pthreadpool_tevent_job_orphan() already removed
         * it from pool->jobs. And we don't need try
@@ -351,11 +458,15 @@ static int pthreadpool_tevent_job_destructor(struct pthreadpool_tevent_job *job)
         */
        DLIST_REMOVE(orphaned_jobs, job);
 
+       PTHREAD_TEVENT_JOB_THREAD_FENCE_FINI(job);
        return 0;
 }
 
 static void pthreadpool_tevent_job_orphan(struct pthreadpool_tevent_job *job)
 {
+       job->needs_fence.orphaned = true;
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
+
        /*
         * We're the only function that sets
         * job->state = NULL;
@@ -476,6 +587,7 @@ struct tevent_req *pthreadpool_tevent_job_send(
        if (tevent_req_nomem(job->im, req)) {
                return tevent_req_post(req, ev);
        }
+       PTHREAD_TEVENT_JOB_THREAD_FENCE_INIT(job);
        talloc_set_destructor(job, pthreadpool_tevent_job_destructor);
        DLIST_ADD_END(job->pool->jobs, job);
        job->state = state;
@@ -492,13 +604,76 @@ struct tevent_req *pthreadpool_tevent_job_send(
        return req;
 }
 
+static __thread struct pthreadpool_tevent_job *current_job;
+
+bool pthreadpool_tevent_current_job_canceled(void)
+{
+       if (current_job == NULL) {
+               /*
+                * Should only be called from within
+                * the job function.
+                */
+               abort();
+               return false;
+       }
+
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
+       return current_job->needs_fence.maycancel;
+}
+
+bool pthreadpool_tevent_current_job_orphaned(void)
+{
+       if (current_job == NULL) {
+               /*
+                * Should only be called from within
+                * the job function.
+                */
+               abort();
+               return false;
+       }
+
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
+       return current_job->needs_fence.orphaned;
+}
+
+bool pthreadpool_tevent_current_job_continue(void)
+{
+       if (current_job == NULL) {
+               /*
+                * Should only be called from within
+                * the job function.
+                */
+               abort();
+               return false;
+       }
+
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
+       if (current_job->needs_fence.maycancel) {
+               return false;
+       }
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
+       if (current_job->needs_fence.orphaned) {
+               return false;
+       }
+
+       return true;
+}
+
 static void pthreadpool_tevent_job_fn(void *private_data)
 {
        struct pthreadpool_tevent_job *job =
                talloc_get_type_abort(private_data,
                struct pthreadpool_tevent_job);
 
+       current_job = job;
+       job->needs_fence.started = true;
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
+
        job->fn(job->private_data);
+
+       job->needs_fence.executed = true;
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
+       current_job = NULL;
 }
 
 static int pthreadpool_tevent_job_signal(int jobid,
@@ -513,8 +688,12 @@ static int pthreadpool_tevent_job_signal(int jobid,
        struct tevent_threaded_context *tctx = NULL;
        struct pthreadpool_tevent_glue *g = NULL;
 
-       if (state == NULL) {
+       job->needs_fence.finished = true;
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
+       if (job->needs_fence.orphaned) {
                /* Request already gone */
+               job->needs_fence.dropped = true;
+               PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
                return 0;
        }
 
@@ -543,6 +722,8 @@ static int pthreadpool_tevent_job_signal(int jobid,
                                          job);
        }
 
+       job->needs_fence.signaled = true;
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        return 0;
 }
 
@@ -565,9 +746,17 @@ static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
 
        /*
         * pthreadpool_tevent_job_cleanup()
-        * will destroy the job.
+        * (called by tevent_req_done() or
+        * tevent_req_error()) will destroy the job.
         */
-       tevent_req_done(state->req);
+
+       if (job->needs_fence.executed) {
+               tevent_req_done(state->req);
+               return;
+       }
+
+       tevent_req_error(state->req, ENOEXEC);
+       return;
 }
 
 static bool pthreadpool_tevent_job_cancel(struct tevent_req *req)
@@ -582,6 +771,19 @@ static bool pthreadpool_tevent_job_cancel(struct tevent_req *req)
                return false;
        }
 
+       job->needs_fence.maycancel = true;
+       PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
+       if (job->needs_fence.started) {
+               /*
+                * It was too late to cancel the request.
+                *
+                * The job still has the chance to look
+                * at pthreadpool_tevent_current_job_canceled()
+                * or pthreadpool_tevent_current_job_continue()
+                */
+               return false;
+       }
+
        num = pthreadpool_cancel_job(job->pool->pool, 0,
                                     pthreadpool_tevent_job_fn,
                                     job);
diff --git a/lib/pthreadpool/pthreadpool_tevent.h b/lib/pthreadpool/pthreadpool_tevent.h
index fdb86e23757af78836b117736b0f83dca2ad6d84..37e491e17c47ab8f8ebb4e69e0461ef7b9176619 100644
--- a/lib/pthreadpool/pthreadpool_tevent.h
+++ b/lib/pthreadpool/pthreadpool_tevent.h
@@ -32,6 +32,20 @@ int pthreadpool_tevent_init(TALLOC_CTX *mem_ctx, unsigned max_threads,
 size_t pthreadpool_tevent_max_threads(struct pthreadpool_tevent *pool);
 size_t pthreadpool_tevent_queued_jobs(struct pthreadpool_tevent *pool);
 
+/*
+ * return true - if tevent_req_cancel() was called.
+ */
+bool pthreadpool_tevent_current_job_canceled(void);
+/*
+ * return true - if talloc_free() was called on the job request,
+ * tevent_context or pthreadpool_tevent.
+ */
+bool pthreadpool_tevent_current_job_orphaned(void);
+/*
+ * return true if canceled and orphaned are both false.
+ */
+bool pthreadpool_tevent_current_job_continue(void);
+
 struct tevent_req *pthreadpool_tevent_job_send(
        TALLOC_CTX *mem_ctx, struct tevent_context *ev,
        struct pthreadpool_tevent *pool,
diff --git a/wscript b/wscript
index 19fc6d12118e8b83e875530382b2305be916e816..f11c49dde676e428d914f173cf5306889835a996 100644
--- a/wscript
+++ b/wscript
@@ -259,10 +259,18 @@ def configure(conf):
         conf.DEFINE('WITH_NTVFS_FILESERVER', 1)
 
     if Options.options.with_pthreadpool:
-        if conf.CONFIG_SET('HAVE_PTHREAD'):
+        if conf.CONFIG_SET('HAVE_PTHREAD') and \
+           conf.CONFIG_SET('HAVE___THREAD') and \
+           conf.CONFIG_SET('HAVE_ATOMIC_THREAD_FENCE_SUPPORT'):
             conf.DEFINE('WITH_PTHREADPOOL', '1')
         else:
-            Logs.warn("pthreadpool support cannot be enabled when pthread support was not found")
+            if not conf.CONFIG_SET('HAVE_PTHREAD'):
+                Logs.warn("pthreadpool support cannot be enabled when pthread support was not found")
+            if not conf.CONFIG_SET('HAVE_ATOMIC_THREAD_FENCE_SUPPORT'):
+                Logs.warn("""pthreadpool support cannot be enabled when there is
+                          no support for atomic_thead_fence()""")
+            if not conf.CONFIG_SET('HAVE___THREAD'):
+                Logs.warn("pthreadpool support cannot be enabled when __thread support was not found")
             conf.undefine('WITH_PTHREADPOOL')
 
     conf.RECURSE('source3')