diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index ee39f208203272d54708f2e49437d7ffa84738ae..1c3ee4fe25555d7815a3cd4aa4d8c72a8191ddcd 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -19,6 +19,9 @@
 
 #include <assert.h>
 #include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <not-cancel.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
 
@@ -26,6 +29,7 @@
 #ifndef LLL_MUTEX_LOCK
 # define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
 # define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
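+/* The robust variant stores the owner's TID in the futex word so the
+   kernel can mark the lock with FUTEX_OWNER_DIED if the owner dies
+   without unlocking.  */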
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
 #endif
 
 
@@ -35,8 +39,10 @@ __pthread_mutex_lock (mutex)
 {
   assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
 
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
+  int retval = 0;
   switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
     {
       /* Recursive mutex.  */
@@ -57,23 +63,23 @@ __pthread_mutex_lock (mutex)
       /* We have to get the mutex.  */
       LLL_MUTEX_LOCK (mutex->__data.__lock);
 
+      assert (mutex->__data.__owner == 0);
       mutex->__data.__count = 1;
       break;
 
       /* Error checking mutex.  */
     case PTHREAD_MUTEX_ERRORCHECK_NP:
       /* Check whether we already hold the mutex.  */
-      if (mutex->__data.__owner == id)
+      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
 
       /* FALLTHROUGH */
 
-    default:
-      /* Correct code cannot set any other type.  */
     case PTHREAD_MUTEX_TIMED_NP:
     simple:
       /* Normal mutex.  */
       LLL_MUTEX_LOCK (mutex->__data.__lock);
+      assert (mutex->__data.__owner == 0);
       break;
 
     case PTHREAD_MUTEX_ADAPTIVE_NP:
@@ -101,17 +107,332 @@ __pthread_mutex_lock (mutex)
 
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
+      assert (mutex->__data.__owner == 0);
       break;
+
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
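+      /* Publish the lock we are about to take in list_op_pending so
+         the kernel can still recover it if this thread dies before
+         the mutex is enqueued on the robust list.  */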
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                    &mutex->__data.__list.__next);
+
+      oldval = mutex->__data.__lock;
+      do
+       {
+       again:
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval = id;
+#ifdef NO_INCR
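+             /* NO_INCR is defined when this file is reused for the
+                condvar internal lock; presumably FUTEX_WAITERS is set
+                unconditionally here so the unlock always goes through
+                the kernel.  */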
+             newval |= FUTEX_WAITERS;
+#else
+             newval |= (oldval & FUTEX_WAITERS);
+#endif
+
+             newval
+               = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                      newval, oldval);
+
+             if (newval != oldval)
+               {
+                 oldval = newval;
+                 goto again;
+               }
+
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+             ENQUEUE_MUTEX (mutex);
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  If we are not supposed
+                to increment __nusers we actually have to decrement
+                it here.  */
+#ifdef NO_INCR
+             --mutex->__data.__nusers;
+#endif
+
+             return EOWNERDEAD;
+           }
+
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+           {
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+               {
+                 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                NULL);
+                 return EDEADLK;
+               }
+
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
+               {
+                 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                NULL);
+
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
+
+                 ++mutex->__data.__count;
+
+                 return 0;
+               }
+           }
+
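+         /* Block until we own the futex.  The result is the futex
+            word observed: zero after a clean acquire, or a value with
+            FUTEX_OWNER_DIED set if the owner died meanwhile, which
+            sends us back through the recovery path above.  */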
+         oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);
+
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             lll_mutex_unlock (mutex->__data.__lock);
+             THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+             return ENOTRECOVERABLE;
+           }
+       }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
+
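+      /* Clean acquisition: link the mutex into the robust list and
+         clear the pending-operation marker.  */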
+      mutex->__data.__count = 1;
+      ENQUEUE_MUTEX (mutex);
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+      break;
+
+    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
+      {
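+       /* Split the stored type into the base mutex kind and the
+          robust flag; both vary across the PI cases handled here.  */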
+       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+       int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+
+       if (robust)
+         /* Note: robust PI futexes are signaled by setting bit 0.  */
+         THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                        (void *) (((uintptr_t) &mutex->__data.__list.__next)
+                                  | 1));
+
+       oldval = mutex->__data.__lock;
+
+       /* Check whether we already hold the mutex.  */
+       if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+         {
+           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+               return EDEADLK;
+             }
+
+           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+             {
+               THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+               /* Just bump the counter.  */
+               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 /* Overflow of the counter.  */
+                 return EAGAIN;
+
+               ++mutex->__data.__count;
+
+               return 0;
+             }
+         }
+
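+       /* Fast path: try to take an uncontended PI mutex entirely in
+          user space with a single CAS; fall into the kernel only if
+          it is already locked.  */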
+       int newval = id;
+#ifdef NO_INCR
+       newval |= FUTEX_WAITERS;
+#endif
+       oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                     newval, 0);
+
+       if (oldval != 0)
+         {
+           /* The mutex is locked.  The kernel will now take care of
+              everything.  */
+           INTERNAL_SYSCALL_DECL (__err);
+           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                                     FUTEX_LOCK_PI, 1, 0);
+
+           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
+               && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
+                   || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
+             {
+               assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+                       || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+                           && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+               /* ESRCH can happen only for non-robust PI mutexes where
+                  the owner of the lock died.  */
+               assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);
+
+               /* Delay the thread indefinitely.  */
+               while (1)
+                 pause_not_cancel ();
+             }
+
+           oldval = mutex->__data.__lock;
+
+           assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
+         }
+
+       if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+         {
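+           /* The kernel handed us a mutex whose owner died; we hold
+              it now, so clear the owner-died flag from the futex
+              word.  */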
+           atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+
+           /* We got the mutex.  */
+           mutex->__data.__count = 1;
+           /* But it is inconsistent unless marked otherwise.  */
+           mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+           ENQUEUE_MUTEX_PI (mutex);
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+           /* Note that we deliberately exit here.  If we fall
+              through to the end of the function __nusers would be
+              incremented which is not correct because the old owner
+              has to be discounted.  If we are not supposed to
+              increment __nusers we actually have to decrement it here.  */
+#ifdef NO_INCR
+           --mutex->__data.__nusers;
+#endif
+
+           return EOWNERDEAD;
+         }
+
+       if (robust
+           && __builtin_expect (mutex->__data.__owner
+                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+         {
+           /* This mutex is now not recoverable.  */
+           mutex->__data.__count = 0;
+
+           INTERNAL_SYSCALL_DECL (__err);
+           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
+                             FUTEX_UNLOCK_PI, 0, 0);
+
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+           return ENOTRECOVERABLE;
+         }
+
+       mutex->__data.__count = 1;
+       if (robust)
+         {
+           ENQUEUE_MUTEX_PI (mutex);
+           THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+         }
+      }
+      break;
+
+    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
+    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_PP_NORMAL_NP:
+    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
+      {
+       int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+
+       oldval = mutex->__data.__lock;
+
+       /* Check whether we already hold the mutex.  */
+       if (mutex->__data.__owner == id)
+         {
+           if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
+             return EDEADLK;
+
+           if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
+             {
+               /* Just bump the counter.  */
+               if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                 /* Overflow of the counter.  */
+                 return EAGAIN;
+
+               ++mutex->__data.__count;
+
+               return 0;
+             }
+         }
+
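+       /* Priority protection: boost this thread to the mutex's
+          priority ceiling before acquiring, retrying if the ceiling
+          is changed concurrently.  */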
+       int oldprio = -1, ceilval;
+       do
+         {
+           int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
+                         >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+
+           if (__pthread_current_priority () > ceiling)
+             {
+               if (oldprio != -1)
+                 __pthread_tpp_change_priority (oldprio, -1);
+               return EINVAL;
+             }
+
+           retval = __pthread_tpp_change_priority (oldprio, ceiling);
+           if (retval)
+             return retval;
+
+           ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
+           oldprio = ceiling;
+
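+           /* The futex word keeps the ceiling in the high bits; the
+              low bits are 0 = unlocked, 1 = locked, 2 = locked with
+              waiters (NO_INCR takes the contended form directly).  */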
+           oldval
+             = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+#ifdef NO_INCR
+                                                    ceilval | 2,
+#else
+                                                    ceilval | 1,
+#endif
+                                                    ceilval);
+
+           if (oldval == ceilval)
+             break;
+
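+           /* Contended: advertise a waiter by moving the word to
+              ceilval | 2, then sleep on the futex until the holder
+              releases it at this ceiling.  */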
+           do
+             {
+               oldval
+                 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                        ceilval | 2,
+                                                        ceilval | 1);
+
+               if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
+                 break;
+
+               if (oldval != ceilval)
+                 lll_futex_wait (&mutex->__data.__lock, ceilval | 2);
+             }
+           while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                       ceilval | 2, ceilval)
+                  != ceilval);
+         }
+       while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
+
+       assert (mutex->__data.__owner == 0);
+       mutex->__data.__count = 1;
+      }
+      break;
+
+    default:
+      /* Correct code cannot set any other type.  */
+      return EINVAL;
     }
 
   /* Record the ownership.  */
-  assert (mutex->__data.__owner == 0);
   mutex->__data.__owner = id;
 #ifndef NO_INCR
   ++mutex->__data.__nusers;
 #endif
 
-  return 0;
+  return retval;
 }
 #ifndef __pthread_mutex_lock
 strong_alias (__pthread_mutex_lock, pthread_mutex_lock)