static bool tdb_have_mutex_chainlocks(struct tdb_context *tdb)
{
- size_t i;
+ int i;
for (i=0; i < tdb->num_lockrecs; i++) {
bool ret;
/*
* Our only caller tdb_allrecord_upgrade()
- * garantees that we already own the allrecord lock.
+ * guarantees that we already own the allrecord lock.
*
* Which means m->allrecord_mutex is still locked by us.
*/
/*
* Our only caller tdb_allrecord_upgrade() (in the error case)
- * garantees that we already own the allrecord lock.
+ * guarantees that we already own the allrecord lock.
*
* Which means m->allrecord_mutex is still locked by us.
*/
/*
* Our only callers tdb_allrecord_unlock() and
* tdb_allrecord_lock() (in the error path)
- * garantee that we already own the allrecord lock.
+ * guarantee that we already own the allrecord lock.
*
* Which means m->allrecord_mutex is still locked by us.
*/
{
struct tdb_mutexes *m;
pthread_mutexattr_t ma;
- int i, ret;
+ uint32_t i;
+ int ret;
ret = tdb_mutex_mmap(tdb);
if (ret == -1) {
fail:
pthread_mutexattr_destroy(&ma);
fail_munmap:
- tdb_mutex_munmap(tdb);
if (ret == 0) {
return 0;
}
+ tdb_mutex_munmap(tdb);
+
errno = ret;
return -1;
}
return 0;
}
+ if (tdb->mutexes != NULL) {
+ return 0;
+ }
+
ptr = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FILE,
tdb->fd, 0);
if (ptr == MAP_FAILED) {
+/*
+ * Unmap the mutex region of a tdb.
+ *
+ * Returns 0 on success (or when there is nothing mapped to unmap)
+ * and -1 on munmap() failure.  On success tdb->mutexes is reset to
+ * NULL, so a later error/cleanup path cannot unmap the region twice.
+ */
int tdb_mutex_munmap(struct tdb_context *tdb)
{
size_t len;
+ int ret;
len = tdb_mutex_size(tdb);
if (len == 0) {
return 0;
}
- return munmap(tdb->mutexes, len);
+ ret = munmap(tdb->mutexes, len);
+ if (ret == -1) {
+ return -1;
+ }
+ tdb->mutexes = NULL;
+
+ return 0;
}
static bool tdb_mutex_locking_cached;
static void tdb_robust_mutex_handler(int sig)
{
- if (tdb_robust_mutex_pid != -1) {
+ pid_t child_pid = tdb_robust_mutex_pid;
+
+ if (child_pid != -1) {
pid_t pid;
- int status;
- pid = waitpid(tdb_robust_mutex_pid, &status, WNOHANG);
- if (pid == tdb_robust_mutex_pid) {
+ pid = waitpid(child_pid, NULL, WNOHANG);
+ if (pid == -1) {
+ switch (errno) {
+ case ECHILD:
+ tdb_robust_mutex_pid = -1;
+ return;
+
+ default:
+ return;
+ }
+ }
+ if (pid == child_pid) {
tdb_robust_mutex_pid = -1;
return;
}
tdb_robust_mutext_old_handler(sig);
}
+/*
+ * Reap the test child forked by tdb_runtime_check_for_robust_mutexes().
+ *
+ * Does nothing if *child_pid is already -1.  Otherwise it polls with
+ * waitpid(WNOHANG); if the child has not exited yet it sends SIGKILL
+ * and then blocks until the child is reaped (or until waitpid reports
+ * ECHILD, meaning someone else — e.g. a SIGCHLD handler — already
+ * reaped it).  On return both tdb_robust_mutex_pid and *child_pid
+ * are reset to -1.
+ */
+static void tdb_robust_mutex_wait_for_child(pid_t *child_pid)
+{
+ int options = WNOHANG;
+
+ if (*child_pid == -1) {
+ return;
+ }
+
+ while (tdb_robust_mutex_pid > 0) {
+ pid_t pid;
+
+ /*
+ * First we try with WNOHANG, as the process might not exist
+ * anymore. Once we've sent SIGKILL we block waiting for the
+ * exit.
+ */
+ pid = waitpid(*child_pid, NULL, options);
+ if (pid == -1) {
+ if (errno == EINTR) {
+ continue;
+ } else if (errno == ECHILD) {
+ /* Child already reaped elsewhere. */
+ break;
+ } else {
+ abort();
+ }
+ }
+ if (pid == *child_pid) {
+ break;
+ }
+
+ kill(*child_pid, SIGKILL);
+ options = 0;
+ }
+
+ tdb_robust_mutex_pid = -1;
+ *child_pid = -1;
+}
+
_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
{
void *ptr = NULL;
char c = 0;
bool ok;
static bool initialized;
- sigset_t mask, old_mask, suspend_mask;
+ pid_t saved_child_pid = -1;
bool cleanup_ma = false;
- bool cleanup_sigmask = false;
if (initialized) {
return tdb_mutex_locking_cached;
initialized = true;
- sigemptyset(&suspend_mask);
-
ok = tdb_mutex_locking_supported();
if (!ok) {
return false;
}
m = (pthread_mutex_t *)ptr;
- /*
- * Block SIGCHLD so we can atomically wait for it later with
- * sigsuspend()
- */
- sigemptyset(&mask);
- sigaddset(&mask, SIGCHLD);
- ret = pthread_sigmask(SIG_BLOCK, &mask, &old_mask);
- if (ret != 0) {
- goto cleanup;
- }
- cleanup_sigmask = true;
- suspend_mask = old_mask;
- sigdelset(&suspend_mask, SIGCHLD);
-
if (tdb_robust_mutex_setup_sigchild(tdb_robust_mutex_handler,
&tdb_robust_mutext_old_handler) == false) {
goto cleanup;
}
tdb_robust_mutex_pid = fork();
+ saved_child_pid = tdb_robust_mutex_pid;
if (tdb_robust_mutex_pid == 0) {
size_t nwritten;
close(pipe_down[1]);
goto cleanup;
}
- while (tdb_robust_mutex_pid > 0) {
- ret = sigsuspend(&suspend_mask);
- if (ret != -1 || errno != EINTR) {
- abort();
- }
- }
- tdb_robust_mutex_setup_sigchild(tdb_robust_mutext_old_handler, NULL);
- tdb_robust_mutext_old_handler = SIG_ERR;
+ tdb_robust_mutex_wait_for_child(&saved_child_pid);
ret = pthread_mutex_trylock(m);
if (ret != EOWNERDEAD) {
}
ret = pthread_mutex_trylock(m);
- if (ret != EDEADLK) {
+ if (ret != EDEADLK && ret != EBUSY) {
pthread_mutex_unlock(m);
goto cleanup;
}
tdb_mutex_locking_cached = true;
cleanup:
- while (tdb_robust_mutex_pid > 0) {
- kill(tdb_robust_mutex_pid, SIGKILL);
- ret = sigsuspend(&suspend_mask);
- if (ret != -1 || errno != EINTR) {
- abort();
- }
- }
+ /*
+ * Note that we don't reset the signal handler; we just reset
+ * tdb_robust_mutex_pid to -1. This is ok as this code path is only
+ * called once per process.
+ *
+ * Leaving our signal handler in place avoids races with other threads
+ * potentially setting up their own SIGCHLD handlers.
+ *
+ * The worst that can happen is that another, newer signal
+ * handler will get the SIGCHLD signal for our child and/or reap the
+ * child with a wait() function. tdb_robust_mutex_wait_for_child()
+ * handles the case where waitpid returns ECHILD.
+ */
+ tdb_robust_mutex_wait_for_child(&saved_child_pid);
- if (tdb_robust_mutext_old_handler != SIG_ERR) {
- tdb_robust_mutex_setup_sigchild(tdb_robust_mutext_old_handler, NULL);
- }
- if (cleanup_sigmask) {
- ret = pthread_sigmask(SIG_SETMASK, &old_mask, NULL);
- if (ret != 0) {
- abort();
- }
- }
if (m != NULL) {
pthread_mutex_destroy(m);
}