Update.
author     Ulrich Drepper <drepper@redhat.com>
           Wed, 8 Sep 2004 06:09:02 +0000 (06:09 +0000)
committer  Ulrich Drepper <drepper@redhat.com>
           Wed, 8 Sep 2004 06:09:02 +0000 (06:09 +0000)
* sysdeps/powerpc/bits/atomic.h (atomic_increment): Define.
(atomic_decrement): Define.

* sysdeps/powerpc/bits/atomic.h: Implement atomic_increment_val and
atomic_decrement_val.
* sysdeps/powerpc/powerpc32/bits/atomic.h: Likewise.
* sysdeps/powerpc/powerpc64/bits/atomic.h: Likewise.

* csu/tst-atomic.c (do_test): Add tests of atomic_increment_val
and atomic_decrement_val.
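
For context: unlike atomic_exchange_and_add, which returns the value before the
addition, atomic_increment_val and atomic_decrement_val return the value after the
update, and atomic_increment/atomic_decrement discard the result.  A minimal sketch
of these semantics, using GCC's __sync builtins as stand-ins for the powerpc
implementation added below (the my_* names are invented for illustration only):

    /* Return the post-increment / post-decrement value, like the *_val
       macros added in this commit.  */
    #define my_atomic_increment_val(mem) __sync_add_and_fetch ((mem), 1)
    #define my_atomic_decrement_val(mem) __sync_sub_and_fetch ((mem), 1)

    /* The plain forms simply throw the result away.  */
    #define my_atomic_increment(mem) ((void) my_atomic_increment_val (mem))
    #define my_atomic_decrement(mem) ((void) my_atomic_decrement_val (mem))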

ChangeLog
csu/tst-atomic.c
nptl/allocatestack.c
nptl/sysdeps/pthread/pthread_barrier_wait.c
nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
nptl/sysdeps/unix/sysv/linux/sem_post.c
sysdeps/powerpc/bits/atomic.h
sysdeps/powerpc/powerpc32/bits/atomic.h
sysdeps/powerpc/powerpc64/bits/atomic.h

index d9e415ff3a687b4fc36d3d7fac781323f9169a14..94a848796f72a513825feb3cc54a9d09f104e19e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,16 @@
 2004-09-07  Ulrich Drepper  <drepper@redhat.com>
 
+       * sysdeps/powerpc/bits/atomic.h (atomic_increment): Define.
+       (atomic_decrement): Define.
+
+       * sysdeps/powerpc/bits/atomic.h: Implement atomic_increment_val and
+       atomic_decrement_val.
+       * sysdeps/powerpc/powerpc32/bits/atomic.h: Likewise.
+       * sysdeps/powerpc/powerpc64/bits/atomic.h: Likewise.
+
+       * csu/tst-atomic.c (do_test): Add tests of atomic_increment_val
+       and atomic_decrement_val.
+
        * include/atomic.h: Define atomic_increment_val, atomic_decrement_val,
        and atomic_delay if not already defined.
        * sysdeps/i386/i486/bits/atomic.h: Define atomic_delay.
index 6104466739f3ccf7685f4af9c1ee8d4f7f0967a3..cb6b6ba3d4b33ff6383203aee5b6aba466262e14 100644
--- a/csu/tst-atomic.c
+++ b/csu/tst-atomic.c
@@ -1,5 +1,5 @@
 /* Tests for atomic.h macros.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
@@ -130,6 +130,12 @@ do_test (void)
       ret = 1;
     }
 
+  if (atomic_increment_val (&mem) != 1)
+    {
+      puts ("atomic_increment_val test failed");
+      ret = 1;
+    }
+
   mem = 0;
   if (atomic_increment_and_test (&mem)
       || mem != 1)
@@ -162,6 +168,12 @@ do_test (void)
       ret = 1;
     }
 
+  if (atomic_decrement_val (&mem) != 15)
+    {
+      puts ("atomic_decrement_val test failed");
+      ret = 1;
+    }
+
   mem = 0;
   if (atomic_decrement_and_test (&mem)
       || mem != -1)
index 33fbbaa850a200f9d0135bf7daf4b3d7b1555146..59f00d9231cdae84fba17b216dd8b3b524cfee1e 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -445,8 +445,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 
 #if COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
-         unsigned int ncreated = (atomic_exchange_and_add (&nptl_ncreated, 1)
-                                  + 1);
+         unsigned int ncreated = atomic_increment_val (&nptl_ncreated);
 
          /* We chose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset used
index aa5b42d419be3a59d9fd969b33f863340fac226c..c6b563f2420b3d5929987bd27827c92950fba8b7 100644
--- a/nptl/sysdeps/pthread/pthread_barrier_wait.c
+++ b/nptl/sysdeps/pthread/pthread_barrier_wait.c
@@ -69,7 +69,7 @@ pthread_barrier_wait (barrier)
   unsigned int init_count = ibarrier->init_count;
 
   /* If this was the last woken thread, unlock.  */
-  if (atomic_exchange_and_add (&ibarrier->left, 1) == init_count - 1)
+  if (atomic_increment_val (&ibarrier->left) == init_count)
     /* We are done.  */
     lll_unlock (ibarrier->lock);
 
index d9ee5d50b75d6c0796904f042ecf1364bf44d2a6..91b9955181fb11bf574ccee8f22e3c8d23ace77d 100644
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
@@ -1,5 +1,5 @@
 /* sem_post -- post to a POSIX semaphore.  Powerpc version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -30,11 +30,10 @@ int
 __new_sem_post (sem_t *sem)
 {
   int *futex = (int *) sem;
-  int err, nr;
 
   __asm __volatile (__lll_rel_instr ::: "memory");
-  nr = atomic_exchange_and_add (futex, 1);
-  err = lll_futex_wake (futex, nr + 1);
+  int nr = atomic_increment_val (futex);
+  int err = lll_futex_wake (futex, nr);
   if (__builtin_expect (err, 0) < 0)
     {
       __set_errno (-err);
index b4ee4cfc8ab1d93e617e6097744a0a39c72f572c..671b43f7f79fff5a518b3554fc695c9b45dde76e 100644
--- a/nptl/sysdeps/unix/sysv/linux/sem_post.c
+++ b/nptl/sysdeps/unix/sysv/linux/sem_post.c
@@ -1,5 +1,5 @@
 /* sem_post -- post to a POSIX semaphore.  Generic futex-using version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
@@ -30,10 +30,9 @@ int
 __new_sem_post (sem_t *sem)
 {
   int *futex = (int *) sem;
-  int err, nr;
 
-  nr = atomic_exchange_and_add (futex, 1);
-  err = lll_futex_wake (futex, nr + 1);
+  int nr = atomic_increment_val (futex);
+  int err = lll_futex_wake (futex, nr);
   if (__builtin_expect (err, 0) < 0)
     {
       __set_errno (-err);
index 4b6a761b724d6ef4ef7e35ff69bc5a82e391b56b..31f27e9e104b5bd21c95c3a79fbb96cce0f07ceb 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -147,6 +147,32 @@ typedef uintmax_t uatomic_max_t;
     __val;                                                                   \
   })
 
+#define __arch_atomic_increment_val_32(mem) \
+  ({                                                                         \
+    __typeof (*(mem)) __val;                                                 \
+    __asm __volatile ("1:      lwarx   %0,0,%2\n"                            \
+                     "         addi    %0,%0,1\n"                            \
+                     "         stwcx.  %0,0,%2\n"                            \
+                     "         bne-    1b"                                   \
+                     : "=&b" (__val), "=m" (*mem)                            \
+                     : "b" (mem), "m" (*mem)                                 \
+                     : "cr0", "memory");                                     \
+    __val;                                                                   \
+  })
+
+#define __arch_atomic_decrement_val_32(mem) \
+  ({                                                                         \
+    __typeof (*(mem)) __val;                                                 \
+    __asm __volatile ("1:      lwarx   %0,0,%2\n"                            \
+                     "         subi    %0,%0,1\n"                            \
+                     "         stwcx.  %0,0,%2\n"                            \
+                     "         bne-    1b"                                   \
+                     : "=&b" (__val), "=m" (*mem)                            \
+                     : "b" (mem), "m" (*mem)                                 \
+                     : "cr0", "memory");                                     \
+    __val;                                                                   \
+  })
+
 #define __arch_atomic_decrement_if_positive_32(mem) \
   ({ int __val, __tmp;                                                       \
      __asm __volatile ("1:     lwarx   %0,0,%3\n"                            \
@@ -222,6 +248,34 @@ typedef uintmax_t uatomic_max_t;
     __result;                                                                \
   })
 
+#define atomic_increment_val(mem) \
+  ({                                                                         \
+    __typeof (*(mem)) __result;                                                      \
+    if (sizeof (*(mem)) == 4)                                                \
+      __result = __arch_atomic_increment_val_32 (mem);                       \
+    else if (sizeof (*(mem)) == 8)                                           \
+      __result = __arch_atomic_increment_val_64 (mem);                       \
+    else                                                                     \
+       abort ();                                                             \
+    __result;                                                                \
+  })
+
+#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })
+
+#define atomic_decrement_val(mem) \
+  ({                                                                         \
+    __typeof (*(mem)) __result;                                                      \
+    if (sizeof (*(mem)) == 4)                                                \
+      __result = __arch_atomic_decrement_val_32 (mem);                       \
+    else if (sizeof (*(mem)) == 8)                                           \
+      __result = __arch_atomic_decrement_val_64 (mem);                       \
+    else                                                                     \
+       abort ();                                                             \
+    __result;                                                                \
+  })
+
+#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
+
 
 /* Decrement *MEM if it is > 0, and return the old value.  */
 #define atomic_decrement_if_positive(mem) \
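
For readers not fluent in PowerPC assembly: the __arch_atomic_increment_val_32 and
__arch_atomic_decrement_val_32 macros above are standard load-reserve/store-conditional
retry loops (lwarx loads and reserves, addi/subi adjusts the value, stwcx. stores only
if the reservation still holds, and bne- retries otherwise).  A rough C-level
equivalent, using a compare-and-swap loop as a stand-in for the reservation (a sketch
with an invented name; the real macros use the inline assembly shown above):

    #include <stdint.h>

    /* Approximate behavior of __arch_atomic_increment_val_32: keep
       retrying until the update lands without interference, then
       return the incremented value.  */
    static int32_t
    sketch_increment_val_32 (volatile int32_t *mem)
    {
      int32_t oldval, newval;
      do
        {
          oldval = *mem;          /* lwarx: load and reserve          */
          newval = oldval + 1;    /* addi:  add one                   */
        }
      /* stwcx. stores only if the reservation survived; bne- retries.  */
      while (!__sync_bool_compare_and_swap (mem, oldval, newval));
      return newval;
    }
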
index 4e2e24335d86252fbce54ff9a290088528ce8a10..0f1a72335f34fdd1ec4054414a7260a8f118eb5e 100644
--- a/sysdeps/powerpc/powerpc32/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc32/bits/atomic.h
@@ -1,5 +1,5 @@
 /* Atomic operations.  PowerPC32 version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
   __tmp != 0;                                                                \
 })
 
-/* 
- * Powerpc32 processors don't implement the 64-bit (doubleword) forms of
- * load and reserve (ldarx) and store conditional (stdcx.) instructions.  
- * So for powerpc32 we stub out the 64-bit forms.
- */
+/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
+   load and reserve (ldarx) and store conditional (stdcx.) instructions.
+   So for powerpc32 we stub out the 64-bit forms.  */
 # define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
   (abort (), 0)
 
 # define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   (abort (), (__typeof (*mem)) 0)
-  
+
 # define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
   (abort (), 0)
 
 # define __arch_atomic_exchange_and_add_64(mem, value) \
     ({ abort (); (*mem) = (value); })
 
+# define __arch_atomic_increment_val_64(mem) \
+    ({ abort (); (*mem)++; })
+
+# define __arch_atomic_decrement_val_64(mem) \
+    ({ abort (); (*mem)--; })
+
 # define __arch_atomic_decrement_if_positive_64(mem) \
     ({ abort (); (*mem)--; })
-    
-/* 
- * Older powerpc32 processors don't support the new "light weight" 
- * sync (lwsync).  So the only safe option is to use normal sync 
- * for all powerpc32 applications. 
+
+/*
+ * Older powerpc32 processors don't support the new "light weight"
+ * sync (lwsync).  So the only safe option is to use normal sync
+ * for all powerpc32 applications.
  */
 # define atomic_read_barrier() __asm ("sync" ::: "memory")
 
 /*
  * Include the rest of the atomic ops macros which are common to both
- * powerpc32 and powerpc64. 
+ * powerpc32 and powerpc64.
  */
 #include_next <bits/atomic.h>
-
index fa256784106e9317f1efed19737d63020822126e..e46dc1e4d752af80d9da8595a18445058b42340b 100644
--- a/sysdeps/powerpc/powerpc64/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc64/bits/atomic.h
       __val;                                                                 \
     })
 
+# define __arch_atomic_increment_val_64(mem) \
+    ({                                                                       \
+      __typeof (*(mem)) __val;                                               \
+      __asm __volatile ("1:    ldarx   %0,0,%2\n"                            \
+                       "       addi    %0,%0,1\n"                            \
+                       "       stdcx.  %0,0,%2\n"                            \
+                       "       bne-    1b"                                   \
+                       : "=&b" (__val), "=m" (*mem)                          \
+                       : "b" (mem), "m" (*mem)                               \
+                       : "cr0", "memory");                                   \
+      __val;                                                                 \
+    })
+
+# define __arch_atomic_decrement_val_64(mem) \
+    ({                                                                       \
+      __typeof (*(mem)) __val;                                               \
+      __asm __volatile ("1:    ldarx   %0,0,%2\n"                            \
+                       "       subi    %0,%0,1\n"                            \
+                       "       stdcx.  %0,0,%2\n"                            \
+                       "       bne-    1b"                                   \
+                       : "=&b" (__val), "=m" (*mem)                          \
+                       : "b" (mem), "m" (*mem)                               \
+                       : "cr0", "memory");                                   \
+      __val;                                                                 \
+    })
+
 # define __arch_atomic_decrement_if_positive_64(mem) \
   ({ int __val, __tmp;                                                       \
      __asm __volatile ("1:     ldarx   %0,0,%3\n"                            \