/*
 * Unix SMB/CIFS implementation.
 * threadpool implementation based on pthreads
 * Copyright (C) Volker Lendecke 2009,2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "replace.h"
#include "system/select.h"
#include "system/threads.h"
#include "system/filesys.h"
#include "pthreadpool_tevent.h"
#include "pthreadpool.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/dlinklist.h"
#include "lib/util/attr.h"

/*
 * We try to give some hints to helgrind/drd.
 *
 * Note that ANNOTATE_BENIGN_RACE_SIZED(address, size, description)
 * takes a memory address range that is ignored by helgrind/drd;
 * the 'description' is just ignored...
 *
 * Note that ANNOTATE_HAPPENS_*(unique_uintptr)
 * just takes a DWORD/(void *) as a unique key
 * for the barrier.
 */
#ifdef HAVE_VALGRIND_HELGRIND_H
#include <valgrind/helgrind.h>
#endif
#ifndef ANNOTATE_BENIGN_RACE_SIZED
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description)
#endif
#ifndef ANNOTATE_HAPPENS_BEFORE
#define ANNOTATE_HAPPENS_BEFORE(unique_uintptr)
#endif
#ifndef ANNOTATE_HAPPENS_AFTER
#define ANNOTATE_HAPPENS_AFTER(unique_uintptr)
#endif
#ifndef ANNOTATE_HAPPENS_BEFORE_FORGET_ALL
#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(unique_uintptr)
#endif

#define PTHREAD_TEVENT_JOB_THREAD_FENCE_INIT(__job) do { \
        _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
        ANNOTATE_BENIGN_RACE_SIZED(&__j->needs_fence, \
                                   sizeof(__j->needs_fence), \
                                   "race by design, protected by fence"); \
} while(0);

#ifdef WITH_PTHREADPOOL
/*
 * configure checked we have pthread and atomic_thread_fence() available
 */
#define __PTHREAD_TEVENT_JOB_THREAD_FENCE(__order) do { \
        atomic_thread_fence(__order); \
} while(0)
#else
/*
 * we're using lib/pthreadpool/pthreadpool_sync.c ...
 */
#define __PTHREAD_TEVENT_JOB_THREAD_FENCE(__order) do { } while(0)
#ifndef HAVE___THREAD
#define __thread
#endif
#endif

#define PTHREAD_TEVENT_JOB_THREAD_FENCE(__job) do { \
        _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
        ANNOTATE_HAPPENS_BEFORE(&__j->needs_fence); \
        __PTHREAD_TEVENT_JOB_THREAD_FENCE(memory_order_seq_cst); \
        ANNOTATE_HAPPENS_AFTER(&__j->needs_fence); \
} while(0);

#define PTHREAD_TEVENT_JOB_THREAD_FENCE_FINI(__job) do { \
        _UNUSED_ const struct pthreadpool_tevent_job *__j = __job; \
        ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&__j->needs_fence); \
} while(0);

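/*
 * Illustration (not part of the API): how these fence macros are meant
 * to pair up. Each needs_fence member has exactly one writer; the
 * writer stores its flag and then issues the fence, the reader issues
 * the fence and then loads the flag. A minimal sketch, assuming a
 * hypothetical job pointer 'job':
 *
 *   // job thread:
 *   job->needs_fence.executed = true;
 *   PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
 *
 *   // main thread:
 *   PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
 *   if (job->needs_fence.executed) { ... }
 */
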
struct pthreadpool_tevent_job_state;

/*
 * We need one pthreadpool_tevent_glue object per unique combination of tevent
 * contexts and pthreadpool_tevent objects. Maintain a list of used tevent
 * contexts in a pthreadpool_tevent.
 */
struct pthreadpool_tevent_glue {
        struct pthreadpool_tevent_glue *prev, *next;
        struct pthreadpool_tevent *pool; /* back-pointer to owning object. */
        /* Tuple we are keeping track of in this list. */
        struct tevent_context *ev;
        struct tevent_threaded_context *tctx;
        /* Pointer to link object owned by *ev. */
        struct pthreadpool_tevent_glue_ev_link *ev_link;
        /* active jobs */
        struct pthreadpool_tevent_job_state *states;
};

/*
 * The pthreadpool_tevent_glue_ev_link and its destructor ensure we remove the
 * tevent context from our list of active event contexts if the event context
 * is destroyed.
 * This structure is talloc()'ed from the struct tevent_context *, and is a
 * back-pointer allowing the related struct pthreadpool_tevent_glue object
 * to be removed from the struct pthreadpool_tevent glue list if the owning
 * tevent_context is talloc_free()'ed.
 */
struct pthreadpool_tevent_glue_ev_link {
        struct pthreadpool_tevent_glue *glue;
};

struct pthreadpool_tevent {
        struct pthreadpool *pool;
        struct pthreadpool_tevent_glue *glue_list;

        struct pthreadpool_tevent_job *jobs;
};

struct pthreadpool_tevent_job_state {
        struct pthreadpool_tevent_job_state *prev, *next;
        struct pthreadpool_tevent_glue *glue;
        struct tevent_context *ev;
        struct tevent_req *req;
        struct pthreadpool_tevent_job *job;
};

struct pthreadpool_tevent_job {
        struct pthreadpool_tevent_job *prev, *next;

        struct pthreadpool_tevent *pool;
        struct pthreadpool_tevent_job_state *state;
        struct tevent_immediate *im;

        void (*fn)(void *private_data);
        void *private_data;

        /*
         * Coordination between threads
         *
         * There is only one side writing each element,
         * either the main process or the job thread.
         *
         * The coordination is done by a full memory
         * barrier using atomic_thread_fence(memory_order_seq_cst)
         * wrapped in PTHREAD_TEVENT_JOB_THREAD_FENCE()
         */
        struct {
                /*
                 * 'maycancel'
                 * set when tevent_req_cancel() is called.
                 * (only written by main thread!)
                 */
                bool maycancel;

                /*
                 * 'orphaned'
                 * set when talloc_free is called on the job request,
                 * tevent_context or pthreadpool_tevent.
                 * (only written by main thread!)
                 */
                bool orphaned;

                /*
                 * 'started'
                 * set when the job is picked up by a worker thread
                 * (only written by job thread!)
                 */
                bool started;

                /*
                 * 'executed'
                 * set once the job function returned.
                 * (only written by job thread!)
                 */
                bool executed;

                /*
                 * 'finished'
                 * set when pthreadpool_tevent_job_signal() is entered
                 * (only written by job thread!)
                 */
                bool finished;

                /*
                 * 'dropped'
                 * set when pthreadpool_tevent_job_signal() leaves with
                 * orphaned already set.
                 * (only written by job thread!)
                 */
                bool dropped;

                /*
                 * 'signaled'
                 * set when pthreadpool_tevent_job_signal() leaves normally
                 * and the immediate event was scheduled.
                 * (only written by job thread!)
                 */
                bool signaled;
        } needs_fence;

        bool per_thread_cwd;
};

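/*
 * Summary of the needs_fence flag lifecycle described above: the job
 * thread sets 'started', then 'executed', then 'finished'; depending on
 * whether the main thread has already set 'orphaned', the signal
 * function finally sets either 'dropped' or 'signaled'. 'maycancel'
 * and 'orphaned' are only ever written by the main thread.
 */
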
static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool);

static void pthreadpool_tevent_job_orphan(struct pthreadpool_tevent_job *job);

static struct pthreadpool_tevent_job *orphaned_jobs;

void pthreadpool_tevent_cleanup_orphaned_jobs(void)
{
        struct pthreadpool_tevent_job *job = NULL;
        struct pthreadpool_tevent_job *njob = NULL;

        for (job = orphaned_jobs; job != NULL; job = njob) {
                njob = job->next;

                /*
                 * The job destructor keeps the job alive
                 * (and in the list) or removes it from the list.
                 */
                TALLOC_FREE(job);
        }
}

static int pthreadpool_tevent_job_signal(int jobid,
                                         void (*job_fn)(void *private_data),
                                         void *job_private_data,
                                         void *private_data);

int pthreadpool_tevent_init(TALLOC_CTX *mem_ctx, unsigned max_threads,
                            struct pthreadpool_tevent **presult)
{
        struct pthreadpool_tevent *pool;
        int ret;

        pthreadpool_tevent_cleanup_orphaned_jobs();

        pool = talloc_zero(mem_ctx, struct pthreadpool_tevent);
        if (pool == NULL) {
                return ENOMEM;
        }

        ret = pthreadpool_init(max_threads, &pool->pool,
                               pthreadpool_tevent_job_signal, pool);
        if (ret != 0) {
                TALLOC_FREE(pool);
                return ret;
        }

        talloc_set_destructor(pool, pthreadpool_tevent_destructor);

        *presult = pool;
        return 0;
}

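/*
 * Typical usage, a minimal sketch (assuming 'mem_ctx' exists in the
 * caller; error handling trimmed):
 *
 *   struct pthreadpool_tevent *pool = NULL;
 *   int ret;
 *
 *   ret = pthreadpool_tevent_init(mem_ctx, 4, &pool);
 *   if (ret != 0) {
 *           // ret is an errno-style error code, e.g. ENOMEM
 *   }
 *
 *   // ... schedule jobs with pthreadpool_tevent_job_send() ...
 *   // TALLOC_FREE(pool) stops the pool and orphans pending jobs.
 */
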
size_t pthreadpool_tevent_max_threads(struct pthreadpool_tevent *pool)
{
        if (pool->pool == NULL) {
                return 0;
        }

        return pthreadpool_max_threads(pool->pool);
}

size_t pthreadpool_tevent_queued_jobs(struct pthreadpool_tevent *pool)
{
        if (pool->pool == NULL) {
                return 0;
        }

        return pthreadpool_queued_jobs(pool->pool);
}

bool pthreadpool_tevent_per_thread_cwd(struct pthreadpool_tevent *pool)
{
        if (pool->pool == NULL) {
                return false;
        }

        return pthreadpool_per_thread_cwd(pool->pool);
}

static int pthreadpool_tevent_destructor(struct pthreadpool_tevent *pool)
{
        struct pthreadpool_tevent_job *job = NULL;
        struct pthreadpool_tevent_job *njob = NULL;
        struct pthreadpool_tevent_glue *glue = NULL;
        int ret;

        ret = pthreadpool_stop(pool->pool);
        if (ret != 0) {
                return ret;
        }

        for (job = pool->jobs; job != NULL; job = njob) {
                njob = job->next;

                /* This removes the job from the pool->jobs list */
                pthreadpool_tevent_job_orphan(job);
        }

        /*
         * Delete all the registered
         * tevent_context/tevent_threaded_context
         * pairs.
         */
        for (glue = pool->glue_list; glue != NULL; glue = pool->glue_list) {
                /* The glue destructor removes it from the list */
                TALLOC_FREE(glue);
        }
        pool->glue_list = NULL;

        ret = pthreadpool_destroy(pool->pool);
        if (ret != 0) {
                return ret;
        }
        pool->pool = NULL;

        pthreadpool_tevent_cleanup_orphaned_jobs();

        return 0;
}

static int pthreadpool_tevent_glue_destructor(
        struct pthreadpool_tevent_glue *glue)
{
        struct pthreadpool_tevent_job_state *state = NULL;
        struct pthreadpool_tevent_job_state *nstate = NULL;

        for (state = glue->states; state != NULL; state = nstate) {
                nstate = state->next;

                /* This removes the state from the glue->states list */
                pthreadpool_tevent_job_orphan(state->job);
        }

        if (glue->pool->glue_list != NULL) {
                DLIST_REMOVE(glue->pool->glue_list, glue);
        }

        /* Ensure the ev_link destructor knows we're gone */
        glue->ev_link->glue = NULL;

        TALLOC_FREE(glue->ev_link);
        TALLOC_FREE(glue->tctx);

        return 0;
}

/*
 * Destructor called either explicitly from
 * pthreadpool_tevent_glue_destructor(), or indirectly
 * when the owning tevent_context is destroyed.
 *
 * When called from pthreadpool_tevent_glue_destructor()
 * ev_link->glue is already NULL, so this does nothing.
 *
 * When called from talloc_free() of the owning
 * tevent_context we must ensure we also remove the
 * linked glue object from the list inside
 * struct pthreadpool_tevent.
 */
static int pthreadpool_tevent_glue_link_destructor(
        struct pthreadpool_tevent_glue_ev_link *ev_link)
{
        TALLOC_FREE(ev_link->glue);
        return 0;
}

static int pthreadpool_tevent_register_ev(
                                struct pthreadpool_tevent *pool,
                                struct pthreadpool_tevent_job_state *state)
{
        struct tevent_context *ev = state->ev;
        struct pthreadpool_tevent_glue *glue = NULL;
        struct pthreadpool_tevent_glue_ev_link *ev_link = NULL;

        /*
         * See if this tevent_context was already registered by
         * searching the glue object list. If so we have nothing
         * to do here - we already have a tevent_context/tevent_threaded_context
         * pair.
         */
        for (glue = pool->glue_list; glue != NULL; glue = glue->next) {
                if (glue->ev == state->ev) {
                        state->glue = glue;
                        DLIST_ADD_END(glue->states, state);
                        return 0;
                }
        }

        /*
         * Event context not yet registered - create a new glue
         * object containing a tevent_context/tevent_threaded_context
         * pair and put it on the list to remember this registration.
         * We also need a link object to ensure the event context
         * can't go away without us knowing about it.
         */
        glue = talloc_zero(pool, struct pthreadpool_tevent_glue);
        if (glue == NULL) {
                return ENOMEM;
        }
        *glue = (struct pthreadpool_tevent_glue) {
                .pool = pool,
                .ev = ev,
        };
        talloc_set_destructor(glue, pthreadpool_tevent_glue_destructor);

        /*
         * Now allocate the link object to the event context. Note this
         * is allocated OFF THE EVENT CONTEXT ITSELF, so if the event
         * context is freed we are able to cleanup the glue object
         * in the link object destructor.
         */

        ev_link = talloc_zero(ev, struct pthreadpool_tevent_glue_ev_link);
        if (ev_link == NULL) {
                TALLOC_FREE(glue);
                return ENOMEM;
        }
        ev_link->glue = glue;
        talloc_set_destructor(ev_link, pthreadpool_tevent_glue_link_destructor);

        glue->ev_link = ev_link;

#ifdef HAVE_PTHREAD
        glue->tctx = tevent_threaded_context_create(glue, ev);
        if (glue->tctx == NULL) {
                TALLOC_FREE(ev_link);
                TALLOC_FREE(glue);
                return ENOMEM;
        }
#endif

        state->glue = glue;
        DLIST_ADD_END(glue->states, state);

        DLIST_ADD(pool->glue_list, glue);
        return 0;
}

static void pthreadpool_tevent_job_fn(void *private_data);
static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
                                        struct tevent_immediate *im,
                                        void *private_data);
static bool pthreadpool_tevent_job_cancel(struct tevent_req *req);

static int pthreadpool_tevent_job_destructor(struct pthreadpool_tevent_job *job)
{
        /*
         * We should never be called with needs_fence.orphaned == false.
         * Only pthreadpool_tevent_job_orphan() will call TALLOC_FREE(job)
         * after detaching from the request state, glue and pool list.
         */
        if (!job->needs_fence.orphaned) {
                abort();
        }

        /*
         * If the job is not finished (job->im still there)
         * and it's still attached to the pool,
         * we try to cancel it (before it has started).
         */
        if (job->im != NULL && job->pool != NULL) {
                size_t num;

                num = pthreadpool_cancel_job(job->pool->pool, 0,
                                             pthreadpool_tevent_job_fn,
                                             job);
                if (num != 0) {
                        /*
                         * It was not too late to cancel the request.
                         *
                         * We can remove job->im, as it will never be used.
                         */
                        TALLOC_FREE(job->im);
                }
        }

        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        if (job->needs_fence.dropped) {
                /*
                 * The signal function saw job->needs_fence.orphaned
                 * before it started the signaling via the immediate
                 * event. So we'll never get triggered and can
                 * remove job->im and let the whole job go...
                 */
                TALLOC_FREE(job->im);
        }

        /*
         * TODO?: We could further improve this by adjusting
         * tevent_threaded_schedule_immediate_destructor()
         * and allow TALLOC_FREE() during its time
         * in the main_ev->scheduled_immediates list.
         *
         * PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
         * if (state->needs_fence.signaled) {
         *       *
         *       * The signal function is completed
         *       * in future we may be allowed
         *       * to call TALLOC_FREE(job->im).
         *       *
         *      TALLOC_FREE(job->im);
         * }
         */

        /*
         * pthreadpool_tevent_job_orphan() already removed
         * it from pool->jobs. And we don't need to try
         * pthreadpool_cancel_job() again.
         */
        job->pool = NULL;

        if (job->im != NULL) {
                /*
                 * job->im still being there means we need to wait for
                 * the immediate event to be triggered or just leak the
                 * memory.
                 *
                 * Keep it on the orphaned list; it may be freed later
                 * by pthreadpool_tevent_cleanup_orphaned_jobs().
                 */
                return -1;
        }

        /*
         * Finally remove from the orphaned_jobs list
         * and let talloc destroy us.
         */
        DLIST_REMOVE(orphaned_jobs, job);

        PTHREAD_TEVENT_JOB_THREAD_FENCE_FINI(job);
        return 0;
}

static void pthreadpool_tevent_job_orphan(struct pthreadpool_tevent_job *job)
{
        job->needs_fence.orphaned = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);

        /*
         * We're the only function that sets
         * job->state = NULL;
         */
        if (job->state == NULL) {
                abort();
        }

        /*
         * Once we've marked the request as 'orphaned',
         * we spin/loop if it's already marked
         * as 'finished' (which means that
         * pthreadpool_tevent_job_signal() was entered).
         * If it saw 'orphaned' it will exit after setting
         * 'dropped', otherwise it dereferences
         * job->state->glue->{tctx,ev} until it exits
         * after setting 'signaled'.
         *
         * We need to close this potential gap before
         * we can set job->state = NULL.
         *
         * This is some kind of spinlock, but with
         * 1 millisecond sleeps in between, in order
         * to give the thread more cpu time to finish.
         */
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        while (job->needs_fence.finished) {
                if (job->needs_fence.dropped) {
                        break;
                }
                if (job->needs_fence.signaled) {
                        break;
                }
                poll(NULL, 0, 1);
                PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        }

        /*
         * Once the gap is closed, we can remove
         * the glue link.
         */
        DLIST_REMOVE(job->state->glue->states, job->state);
        job->state->glue = NULL;

        /*
         * We need to reparent to a long term context
         * and detach from the request state.
         * Maybe the destructor will keep the memory
         * and leak it for now.
         */
        (void)talloc_reparent(job->state, NULL, job);
        job->state->job = NULL;
        job->state = NULL;

        /*
         * job->pool will only be set to NULL
         * in the first destructor run.
         */
        if (job->pool == NULL) {
                abort();
        }

        /*
         * Detach it from the pool.
         *
         * The job might still be running,
         * so we keep job->pool.
         * The destructor will set it to NULL
         * after trying pthreadpool_cancel_job()
         */
        DLIST_REMOVE(job->pool->jobs, job);

        /*
         * Add it to the list of orphaned jobs,
         * which may be cleaned up later.
         *
         * The destructor removes it from the list
         * when possible, or it denies the free
         * and keeps it in the list.
         */
        DLIST_ADD_END(orphaned_jobs, job);
        TALLOC_FREE(job);
}

static void pthreadpool_tevent_job_cleanup(struct tevent_req *req,
                                           enum tevent_req_state req_state)
{
        struct pthreadpool_tevent_job_state *state =
                tevent_req_data(req,
                struct pthreadpool_tevent_job_state);

        if (state->job == NULL) {
                /*
                 * The job request is not scheduled in the pool
                 * yet or anymore.
                 */
                if (state->glue != NULL) {
                        DLIST_REMOVE(state->glue->states, state);
                        state->glue = NULL;
                }
                return;
        }

        /*
         * pthreadpool_tevent_job_orphan() will reparent the job
         * to a long term context. Maybe the destructor will keep
         * the memory and leak it for now.
         */
        pthreadpool_tevent_job_orphan(state->job);
        state->job = NULL; /* not needed but looks better */
        return;
}

struct tevent_req *pthreadpool_tevent_job_send(
        TALLOC_CTX *mem_ctx, struct tevent_context *ev,
        struct pthreadpool_tevent *pool,
        void (*fn)(void *private_data), void *private_data)
{
        struct tevent_req *req = NULL;
        struct pthreadpool_tevent_job_state *state = NULL;
        struct pthreadpool_tevent_job *job = NULL;
        int ret;

        pthreadpool_tevent_cleanup_orphaned_jobs();

        req = tevent_req_create(mem_ctx, &state,
                                struct pthreadpool_tevent_job_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->req = req;

        tevent_req_set_cleanup_fn(req, pthreadpool_tevent_job_cleanup);

        if (pool == NULL) {
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }
        if (pool->pool == NULL) {
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }

        ret = pthreadpool_tevent_register_ev(pool, state);
        if (tevent_req_error(req, ret)) {
                return tevent_req_post(req, ev);
        }

        job = talloc_zero(state, struct pthreadpool_tevent_job);
        if (tevent_req_nomem(job, req)) {
                return tevent_req_post(req, ev);
        }
        job->pool = pool;
        job->fn = fn;
        job->private_data = private_data;
        job->im = tevent_create_immediate(state->job);
        if (tevent_req_nomem(job->im, req)) {
                return tevent_req_post(req, ev);
        }
        PTHREAD_TEVENT_JOB_THREAD_FENCE_INIT(job);
        job->per_thread_cwd = pthreadpool_tevent_per_thread_cwd(pool);
        talloc_set_destructor(job, pthreadpool_tevent_job_destructor);
        DLIST_ADD_END(job->pool->jobs, job);
        job->state = state;
        state->job = job;

        ret = pthreadpool_add_job(job->pool->pool, 0,
                                  pthreadpool_tevent_job_fn,
                                  job);
        if (tevent_req_error(req, ret)) {
                return tevent_req_post(req, ev);
        }

        tevent_req_set_cancel_fn(req, pthreadpool_tevent_job_cancel);
        return req;
}

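/*
 * Typical caller pattern, a minimal sketch. 'my_job_fn', 'my_done' and
 * 'my_data' are hypothetical names; the fn callback runs in a worker
 * thread, so it must not touch tevent or talloc state shared with the
 * main thread:
 *
 *   req = pthreadpool_tevent_job_send(mem_ctx, ev, pool,
 *                                     my_job_fn, my_data);
 *   if (req == NULL) { ... }
 *   tevent_req_set_callback(req, my_done, my_data);
 *
 *   static void my_done(struct tevent_req *req)
 *   {
 *           int ret = pthreadpool_tevent_job_recv(req);
 *           TALLOC_FREE(req);
 *           // ret == 0 on success, an errno (e.g. ECANCELED) otherwise
 *   }
 */
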
static __thread struct pthreadpool_tevent_job *current_job;

bool pthreadpool_tevent_current_job_canceled(void)
{
        if (current_job == NULL) {
                /*
                 * Should only be called from within
                 * the job function.
                 */
                abort();
                return false;
        }

        PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
        return current_job->needs_fence.maycancel;
}

bool pthreadpool_tevent_current_job_orphaned(void)
{
        if (current_job == NULL) {
                /*
                 * Should only be called from within
                 * the job function.
                 */
                abort();
                return false;
        }

        PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
        return current_job->needs_fence.orphaned;
}

bool pthreadpool_tevent_current_job_continue(void)
{
        if (current_job == NULL) {
                /*
                 * Should only be called from within
                 * the job function.
                 */
                abort();
                return false;
        }

        PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
        if (current_job->needs_fence.maycancel) {
                return false;
        }
        PTHREAD_TEVENT_JOB_THREAD_FENCE(current_job);
        if (current_job->needs_fence.orphaned) {
                return false;
        }

        return true;
}

bool pthreadpool_tevent_current_job_per_thread_cwd(void)
{
        if (current_job == NULL) {
                /*
                 * Should only be called from within
                 * the job function.
                 */
                abort();
                return false;
        }

        return current_job->per_thread_cwd;
}

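/*
 * A long-running job function can poll the helpers above to stop
 * early. A minimal sketch with hypothetical 'my_job_fn',
 * 'have_more_work' and 'do_one_chunk':
 *
 *   static void my_job_fn(void *private_data)
 *   {
 *           while (have_more_work(private_data)) {
 *                   if (!pthreadpool_tevent_current_job_continue()) {
 *                           return;
 *                   }
 *                   do_one_chunk(private_data);
 *           }
 *   }
 */
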
static void pthreadpool_tevent_job_fn(void *private_data)
{
        struct pthreadpool_tevent_job *job =
                talloc_get_type_abort(private_data,
                struct pthreadpool_tevent_job);

        current_job = job;
        job->needs_fence.started = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);

        job->fn(job->private_data);

        job->needs_fence.executed = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        current_job = NULL;
}

static int pthreadpool_tevent_job_signal(int jobid,
                                         void (*job_fn)(void *private_data),
                                         void *job_private_data,
                                         void *private_data)
{
        struct pthreadpool_tevent_job *job =
                talloc_get_type_abort(job_private_data,
                struct pthreadpool_tevent_job);

        job->needs_fence.finished = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        if (job->needs_fence.orphaned) {
                /* Request already gone */
                job->needs_fence.dropped = true;
                PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
                return 0;
        }

        /*
         * state and state->glue are valid,
         * see the job->needs_fence.finished
         * "spinlock" loop in
         * pthreadpool_tevent_job_orphan()
         */
        if (job->state->glue->tctx != NULL) {
                /* with HAVE_PTHREAD */
                tevent_threaded_schedule_immediate(job->state->glue->tctx,
                                                   job->im,
                                                   pthreadpool_tevent_job_done,
                                                   job);
        } else {
                /* without HAVE_PTHREAD */
                tevent_schedule_immediate(job->im,
                                          job->state->glue->ev,
                                          pthreadpool_tevent_job_done,
                                          job);
        }

        job->needs_fence.signaled = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        return 0;
}

static void pthreadpool_tevent_job_done(struct tevent_context *ctx,
                                        struct tevent_immediate *im,
                                        void *private_data)
{
        struct pthreadpool_tevent_job *job =
                talloc_get_type_abort(private_data,
                struct pthreadpool_tevent_job);
        struct pthreadpool_tevent_job_state *state = job->state;

        TALLOC_FREE(job->im);

        if (state == NULL) {
                /* Request already gone */
                TALLOC_FREE(job);
                return;
        }

        /*
         * pthreadpool_tevent_job_cleanup()
         * (called by tevent_req_done() or
         * tevent_req_error()) will destroy the job.
         */

        if (job->needs_fence.executed) {
                tevent_req_done(state->req);
                return;
        }

        tevent_req_error(state->req, ENOEXEC);
        return;
}

static bool pthreadpool_tevent_job_cancel(struct tevent_req *req)
{
        struct pthreadpool_tevent_job_state *state =
                tevent_req_data(req,
                struct pthreadpool_tevent_job_state);
        struct pthreadpool_tevent_job *job = state->job;
        size_t num;

        if (job == NULL) {
                return false;
        }

        job->needs_fence.maycancel = true;
        PTHREAD_TEVENT_JOB_THREAD_FENCE(job);
        if (job->needs_fence.started) {
                /*
                 * It was too late to cancel the request.
                 *
                 * The job still has the chance to look
                 * at pthreadpool_tevent_current_job_canceled()
                 * or pthreadpool_tevent_current_job_continue()
                 */
                return false;
        }

        num = pthreadpool_cancel_job(job->pool->pool, 0,
                                     pthreadpool_tevent_job_fn,
                                     job);
        if (num == 0) {
                /*
                 * It was too late to cancel the request.
                 */
                return false;
        }

        /*
         * It was not too late to cancel the request.
         *
         * We can remove job->im, as it will never be used.
         */
        TALLOC_FREE(job->im);

        /*
         * pthreadpool_tevent_job_cleanup()
         * will destroy the job.
         */
        tevent_req_defer_callback(req, state->ev);
        tevent_req_error(req, ECANCELED);
        return true;
}

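/*
 * Cancellation is requested via the generic tevent API. A minimal
 * sketch; if the job has already started, tevent_req_cancel() returns
 * false and the job can only stop itself cooperatively via the
 * pthreadpool_tevent_current_job_*() helpers above:
 *
 *   bool ok = tevent_req_cancel(req);
 *   if (ok) {
 *           // the request's callback will see ECANCELED from
 *           // pthreadpool_tevent_job_recv()
 *   }
 */
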
int pthreadpool_tevent_job_recv(struct tevent_req *req)
{
        return tevent_req_simple_recv_unix(req);
}