Give futex init a proper name
diff --git a/kernel/futex.c b/kernel/futex.c
index 221f2128a43760ae1accb35bce8daaeb7c17affb..87a6428cb5b67363a23252628c6a03d8b2a789c8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -60,6 +60,8 @@
 
 #include "rtmutex_common.h"
 
+int __read_mostly futex_cmpxchg_enabled;
+
 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
 
 /*
@@ -469,6 +471,8 @@ void exit_pi_state_list(struct task_struct *curr)
        struct futex_hash_bucket *hb;
        union futex_key key;
 
+       if (!futex_cmpxchg_enabled)
+               return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
@@ -1870,6 +1874,8 @@ asmlinkage long
 sys_set_robust_list(struct robust_list_head __user *head,
                    size_t len)
 {
+       if (!futex_cmpxchg_enabled)
+               return -ENOSYS;
        /*
         * The kernel knows only one size for now:
         */
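
With the guard above, a kernel whose futex_atomic_cmpxchg_inatomic() is non-functional refuses to register a robust list at all, instead of accepting one it could never walk on exit. A minimal userspace sketch of the visible effect; the probe program is illustrative only and not part of the patch (set_robust_list usually has no libc wrapper, hence the raw syscall):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/futex.h>        /* struct robust_list_head */

    int main(void)
    {
        /* An empty robust list: the head points back at itself. */
        struct robust_list_head head = {
            .list            = { &head.list },
            .futex_offset    = 0,
            .list_op_pending = NULL,
        };

        if (syscall(SYS_set_robust_list, &head, sizeof(head)) == -1) {
            /* Without a working futex cmpxchg this now reports ENOSYS. */
            printf("set_robust_list failed: %s\n", strerror(errno));
            return 1;
        }
        printf("robust list registered\n");
        return 0;
    }
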
@@ -1894,6 +1900,9 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
        struct robust_list_head __user *head;
        unsigned long ret;
 
+       if (!futex_cmpxchg_enabled)
+               return -ENOSYS;
+
        if (!pid)
                head = current->robust_list;
        else {
@@ -1997,6 +2006,9 @@ void exit_robust_list(struct task_struct *curr)
        unsigned long futex_offset;
        int rc;
 
+       if (!futex_cmpxchg_enabled)
+               return;
+
        /*
         * Fetch the list head (which was registered earlier, via
         * sys_set_robust_list()):
@@ -2051,7 +2063,7 @@ void exit_robust_list(struct task_struct *curr)
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                u32 __user *uaddr2, u32 val2, u32 val3)
 {
-       int ret;
+       int ret = -ENOSYS;
        int cmd = op & FUTEX_CMD_MASK;
        struct rw_semaphore *fshared = NULL;
 
@@ -2083,13 +2095,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
                break;
        case FUTEX_LOCK_PI:
-               ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
+               if (futex_cmpxchg_enabled)
+                       ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
                break;
        case FUTEX_UNLOCK_PI:
-               ret = futex_unlock_pi(uaddr, fshared);
+               if (futex_cmpxchg_enabled)
+                       ret = futex_unlock_pi(uaddr, fshared);
                break;
        case FUTEX_TRYLOCK_PI:
-               ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
+               if (futex_cmpxchg_enabled)
+                       ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
                break;
        default:
                ret = -ENOSYS;
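
Since ret now starts out as -ENOSYS and the PI opcodes are only attempted when futex_cmpxchg_enabled is set, userspace can detect PI futex support with a single call. A hedged sketch of such a probe (the choice of FUTEX_UNLOCK_PI and the exact error handling are illustrative, not mandated by the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/futex.h>        /* FUTEX_UNLOCK_PI */

    int main(void)
    {
        unsigned int uval = 0;      /* an unlocked, unowned futex word */
        long ret;

        /* Unlocking a PI futex we do not own normally fails with EPERM;
         * if the kernel lacks a working atomic cmpxchg, the whole opcode
         * is rejected with ENOSYS instead. */
        ret = syscall(SYS_futex, &uval, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
        if (ret == -1 && errno == ENOSYS)
            printf("PI futexes are unavailable on this kernel/arch\n");
        else
            printf("PI futexes are supported\n");
        return 0;
    }
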
@@ -2143,10 +2158,31 @@ static struct file_system_type futex_fs_type = {
        .kill_sb        = kill_anon_super,
 };
 
-static int __init init(void)
+static int __init futex_init(void)
 {
-       int i = register_filesystem(&futex_fs_type);
+       u32 curval;
+       int i;
+
+       /*
+        * This will fail and we want it. Some arch implementations do
+        * runtime detection of the futex_atomic_cmpxchg_inatomic()
+        * functionality. We want to know that before we call in any
+        * of the complex code paths. Also we want to prevent
+        * registration of robust lists in that case. NULL is
+        * guaranteed to fault and we get -EFAULT on a functional
+        * implementation, while the non-functional ones return
+        * -ENOSYS.
+        */
+       curval = cmpxchg_futex_value_locked(NULL, 0, 0);
+       if (curval == -EFAULT)
+               futex_cmpxchg_enabled = 1;
 
+       for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+               plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
+               spin_lock_init(&futex_queues[i].lock);
+       }
+
+       i = register_filesystem(&futex_fs_type);
        if (i)
                return i;
 
@@ -2156,10 +2192,6 @@ static int __init init(void)
                return PTR_ERR(futex_mnt);
        }
 
-       for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
-               plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
-               spin_lock_init(&futex_queues[i].lock);
-       }
        return 0;
 }
-__initcall(init);
+__initcall(futex_init);
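
One subtlety in the detection code above: curval is a u32, yet it is compared against the negative constant -EFAULT. This works because C's usual arithmetic conversions turn -EFAULT into the same unsigned bit pattern before the comparison (assuming the two's-complement representation Linux relies on). A standalone illustration, with EFAULT defined locally only for the demo:

    #include <stdio.h>

    #define EFAULT 14               /* local stand-in for the demo */

    int main(void)
    {
        /* Mirrors futex_init(): the error value comes back through an
         * unsigned 32-bit variable, but the comparison still matches
         * because -EFAULT is converted to unsigned before comparing. */
        unsigned int curval = (unsigned int)-EFAULT;

        if (curval == -EFAULT)
            printf("u32 curval compares equal to -EFAULT\n");
        return 0;
    }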