[PATCH] lightweight robust futexes updates
author     Ingo Molnar <mingo@elte.hu>
           Mon, 27 Mar 2006 09:16:27 +0000 (01:16 -0800)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Mon, 27 Mar 2006 16:44:49 +0000 (08:44 -0800)
- fix: initialize the robust list(s) to NULL in copy_process.

- doc update

- cleanup: rename _inuser to _inatomic

- __user cleanups and other small cleanups
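
For context, a minimal userspace sketch of how a thread registers the robust list whose
kernel-side pointer the copy_process() hunk below now clears for a new child. The lock
layout, the helper name and the direct syscall() use are assumptions for illustration,
not part of this patch:

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>                /* struct robust_list, robust_list_head */

/* assumed per-lock layout: a list entry followed by the lock word */
struct my_robust_lock {
        struct robust_list      list;   /* links the lock into the thread's list */
        int                     futex;  /* 32-bit lock word inspected by the kernel */
};

/*
 * One head per thread; the kernel remembers only a pointer to it
 * (tsk->robust_list) - the pointer that copy_process() now
 * initializes to NULL for a forked child.
 */
static struct robust_list_head head = {
        .list            = { .next = &head.list },     /* empty, head-terminated list */
        .futex_offset    = offsetof(struct my_robust_lock, futex)
                           - offsetof(struct my_robust_lock, list),
        .list_op_pending = NULL,
};

static long register_robust_list(void)
{
        /* no glibc wrapper for the new syscall yet */
        return syscall(__NR_set_robust_list, &head, sizeof(head));
}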

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Ulrich Drepper <drepper@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
12 files changed:
Documentation/robust-futex-ABI.txt
Documentation/robust-futexes.txt
include/asm-frv/futex.h
include/asm-generic/futex.h
include/asm-i386/futex.h
include/asm-mips/futex.h
include/asm-powerpc/futex.h
include/asm-x86_64/futex.h
include/linux/futex.h
kernel/fork.c
kernel/futex.c
kernel/futex_compat.c

Documentation/robust-futex-ABI.txt
index def5d8735286ba200e08ef5efcc0f6d2ed142424..8529a17ffaa16a17060d38232085f99e5e8a3624 100644
@@ -142,8 +142,6 @@ On insertion:
     of the 'lock word', to the linked list starting at 'head', and
  4) clear the 'list_op_pending' word.
 
-       XXX I am particularly unsure of the following -pj XXX
-
 On removal:
  1) set the 'list_op_pending' word to the address of the 'lock word'
     to be removed,
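
For illustration, a hedged userspace sketch of the removal steps above. The list is
assumed to use the kernel's head-terminated layout; 'entry' is the lock's list node and
'lock_word' the futex word it describes (the kernel locates that word by adding
'futex_offset' to the entry address). The helper name is made up and waking of waiters
is omitted:

#include <linux/futex.h>        /* struct robust_list, robust_list_head */

/*
 * Illustration only: the four removal steps, with the list assumed
 * to terminate back at '&head->list' as the kernel-side walk expects.
 */
static void robust_unlock(struct robust_list_head *head,
                          struct robust_list *entry,
                          int *lock_word, int my_tid)
{
        struct robust_list **pp;

        /* 1) publish the pending operation, so the kernel can still
         *    find this lock if the thread dies mid-update */
        head->list_op_pending = entry;

        /* 2) unlink 'entry' from the singly linked list at 'head' */
        for (pp = &head->list.next; *pp != &head->list; pp = &(*pp)->next) {
                if (*pp == entry) {
                        *pp = entry->next;
                        break;
                }
        }

        /* 3) release the futex word itself (waking waiters omitted) */
        __sync_bool_compare_and_swap(lock_word, my_tid, 0);

        /* 4) clear the pending marker */
        head->list_op_pending = NULL;
}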
Documentation/robust-futexes.txt
index 7aecc67b13618d6ad5f86b9f7241d26d271107d3..df82d75245a01b5055c6793fa822efe949982a2e 100644
@@ -213,6 +213,6 @@ robust-mutex testcases.
 All other architectures should build just fine too - but they wont have
 the new syscalls yet.
 
-Architectures need to implement the new futex_atomic_cmpxchg_inuser()
+Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
 inline function before writing up the syscalls (that function returns
 -ENOSYS right now).
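
Roughly speaking (a userspace analogue, not the in-kernel code), the helper an
architecture has to supply is a compare-and-exchange on a 32-bit user-space word that
reports the value it actually found, plus -EFAULT handling for bad user addresses as in
the i386/x86_64 hunks below:

/* userspace analogue of the required semantics - illustration only */
static inline int cmpxchg_futex_value(int *uaddr, int oldval, int newval)
{
        /*
         * atomically: if (*uaddr == oldval) *uaddr = newval;
         * in all cases return the value found at *uaddr beforehand
         */
        return __sync_val_compare_and_swap(uaddr, oldval, newval);
}

handle_futex_death() further down relies on exactly this return convention: it retries
its OWNER_DIED update until the value read back matches the one the update was based on.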
include/asm-frv/futex.h
index 9a0e9026ba5ea5c4abc2f449e1441c1fa6b1fbfe..08b3d1da358398e111ccf01f50227ef8f60a7ffc 100644
@@ -10,7 +10,7 @@
 extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
        return -ENOSYS;
 }
include/asm-generic/futex.h
index 514bd401cd7e07fe50bc42a28f6c257ce2b0f973..df893c160318a6ac84ae076bc7ac5b98c33722ac 100644
@@ -50,7 +50,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
        return -ENOSYS;
 }
include/asm-i386/futex.h
index 41184a31885c2d573676e55e03d1b47a9505f51d..7b8ceefd010f0cba225231990e775a4d9d506759 100644
@@ -105,7 +105,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
include/asm-mips/futex.h
index c5fb2d6d918ae52d3e8734c27ced258a98076b6d..a554089991f26ce3407da2e1d26b9904dd025800 100644
@@ -100,7 +100,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
        return -ENOSYS;
 }
include/asm-powerpc/futex.h
index 80ed9854e42bd12720c0713f166c0ba8eb97fa0a..f1b3c00bc1ce8d8ca0254f57cc76ac6d648820ae 100644
@@ -82,7 +82,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
        return -ENOSYS;
 }
include/asm-x86_64/futex.h
index 7d9eb1a845467f17654b940c6c36b46df61f5672..9804bf07b092f4bb154f19284047a9a9711bc989 100644
@@ -95,7 +95,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 {
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
include/linux/futex.h
index 20face6b798d9c9087c131d3237ad61688397f23..55fff96ae85914c7d6138428a471bb8ef4a2a369 100644
@@ -100,7 +100,7 @@ long do_futex(unsigned long uaddr, int op, int val,
                unsigned long timeout, unsigned long uaddr2, int val2,
                int val3);
 
-extern int handle_futex_death(unsigned int *uaddr, struct task_struct *curr);
+extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
 
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
kernel/fork.c
index e0a2b449dea64ea1b0d79bdc7124a74cb5c4cbdf..c49bd193b058a2ff7e3e398c9c42fdc04baeef4c 100644
@@ -1061,7 +1061,10 @@ static task_t *copy_process(unsigned long clone_flags,
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
-
+       p->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+       p->compat_robust_list = NULL;
+#endif
        /*
         * sigaltstack should be cleared when sharing the same VM
         */
kernel/futex.c
index feb724b2554efe19a9d9b830c7b957162203e890..9c9b2b6b22dd692ec14784331d4d3197503b86fe 100644
@@ -913,15 +913,15 @@ err_unlock:
  * Process a futex-list entry, check whether it's owned by the
  * dying task, and do notification if so:
  */
-int handle_futex_death(unsigned int *uaddr, struct task_struct *curr)
+int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
 {
-       unsigned int futex_val;
+       u32 uval;
 
-repeat:
-       if (get_user(futex_val, uaddr))
+retry:
+       if (get_user(uval, uaddr))
                return -1;
 
-       if ((futex_val & FUTEX_TID_MASK) == curr->pid) {
+       if ((uval & FUTEX_TID_MASK) == curr->pid) {
                /*
                 * Ok, this dying thread is truly holding a futex
                 * of interest. Set the OWNER_DIED bit atomically
@@ -932,12 +932,11 @@ repeat:
                 * thread-death.) The rest of the cleanup is done in
                 * userspace.
                 */
-               if (futex_atomic_cmpxchg_inuser(uaddr, futex_val,
-                                        futex_val | FUTEX_OWNER_DIED) !=
-                                                                  futex_val)
-                       goto repeat;
+               if (futex_atomic_cmpxchg_inatomic(uaddr, uval,
+                                        uval | FUTEX_OWNER_DIED) != uval)
+                       goto retry;
 
-               if (futex_val & FUTEX_WAITERS)
+               if (uval & FUTEX_WAITERS)
                        futex_wake((unsigned long)uaddr, 1);
        }
        return 0;
@@ -985,7 +984,6 @@ void exit_robust_list(struct task_struct *curr)
                        if (handle_futex_death((void *)entry + futex_offset,
                                                curr))
                                return;
-
                /*
                 * Fetch the next entry in the list:
                 */
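
To connect this with userspace: a hedged sketch of how the next locker can notice that
the previous holder died. The TID-in-the-low-bits convention matches what
handle_futex_death() checks above; the helper name and return values are made up for
illustration:

#include <linux/futex.h>        /* FUTEX_OWNER_DIED, FUTEX_WAITERS */

/*
 * Returns 0 on a clean acquisition, 1 when the lock was taken over
 * from a dead owner (caller should run its recovery code), -1 when
 * it is simply contended (a real locker would set FUTEX_WAITERS and
 * call FUTEX_WAIT here). Illustration only.
 */
static int robust_trylock(int *lock_word, int my_tid)
{
        int old = __sync_val_compare_and_swap(lock_word, 0, my_tid);

        if (old == 0)
                return 0;

        if (old & FUTEX_OWNER_DIED) {
                /* handle_futex_death() set this bit when the holder
                 * exited; take the lock over, keeping any waiter bit */
                if (__sync_bool_compare_and_swap(lock_word, old,
                                my_tid | (old & FUTEX_WAITERS)))
                        return 1;
        }
        return -1;
}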
kernel/futex_compat.c
index c153559ef289d88b86e39ae6f533e3f4b3714b3f..9c077cf9aa84b81c0da8d87450b6fb491b3a231f 100644
@@ -121,9 +121,9 @@ err_unlock:
        return ret;
 }
 
-asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
+asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
                struct compat_timespec __user *utime, u32 __user *uaddr2,
-               int val3)
+               u32 val3)
 {
        struct timespec t;
        unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
@@ -137,6 +137,5 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
        if (op >= FUTEX_REQUEUE)
                val2 = (int) (unsigned long) utime;
 
-       return do_futex((unsigned long)uaddr, op, val, timeout,
-                       (unsigned long)uaddr2, val2, val3);
+       return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
 }