+2006-10-11 Ulrich Drepper <drepper@redhat.com>
+
+ * include/atomic.h: Define catomic_* operations.
+ * sysdeps/x86_64/bits/atomic.h: Likewise. Fix a few minor problems.
+ * stdlib/cxa_finalize.c: Use catomic_* operations instead of atomic_*.
+ * malloc/memusage.c: Likewise.
+ * gmon/mcount.c: Likewise.
+ * elf/dl-close.c: Likewise.
+ * elf/dl-open.c: Likewise.
+ * elf/dl-profile.c: Likewise.
+ * elf/dl-sym.c: Likewise.
+ * elf/dl-runtime.c: Likewise.
+ * elf/dl-fptr.c: Likewise.
+ * resolv/res_libc.c: Likewise.
+
2006-10-10 Ulrich Drepper <drepper@redhat.com>
* nis/nis_subr.c (nis_getnames): Add trailing dot to NIS_PATH
imap->l_scoperec = newp;
__rtld_mrlock_done (imap->l_scoperec_lock);
- if (atomic_increment_val (&old->nusers) != 1)
+ if (catomic_increment_val (&old->nusers) != 1)
{
old->remove_after_use = true;
old->notify = true;
- if (atomic_decrement_val (&old->nusers) != 0)
+ if (catomic_decrement_val (&old->nusers) != 0)
__rtld_waitzero (old->nusers);
}
/* Manage function descriptors. Generic version.
- Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
+ Copyright (C) 1999-2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
#ifndef COMPARE_AND_SWAP
# define COMPARE_AND_SWAP(ptr, old, new) \
- (atomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
+ (catomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
#endif
ElfW(Addr) _dl_boot_fptr_table [ELF_MACHINE_BOOT_FPTR_TABLE_LEN];
imap->l_scoperec = newp;
__rtld_mrlock_done (imap->l_scoperec_lock);
- atomic_increment (&old->nusers);
+ catomic_increment (&old->nusers);
old->remove_after_use = true;
- if (atomic_decrement_val (&old->nusers) == 0)
+ if (catomic_decrement_val (&old->nusers) == 0)
/* No user, we can free it here and now. */
free (old);
}
/* Profiling of shared libraries.
- Copyright (C) 1997-2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1997-2002, 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
Based on the BSD mcount implementation.
size_t newfromidx;
to_index = (data[narcs].self_pc
/ (HASHFRACTION * sizeof (*tos)));
- newfromidx = atomic_exchange_and_add (&fromidx, 1) + 1;
+ newfromidx = catomic_exchange_and_add (&fromidx, 1) + 1;
froms[newfromidx].here = &data[narcs];
froms[newfromidx].link = tos[to_index];
tos[to_index] = newfromidx;
- atomic_increment (&narcs);
+ catomic_increment (&narcs);
}
/* If we still have no entry stop searching and insert. */
if (*topcindex == 0)
{
- uint_fast32_t newarc = atomic_exchange_and_add (narcsp, 1);
+ uint_fast32_t newarc = catomic_exchange_and_add (narcsp, 1);
/* In rare cases it could happen that all entries in FROMS are
occupied. So we cannot count this anymore. */
if (newarc >= fromlimit)
goto done;
- *topcindex = atomic_exchange_and_add (&fromidx, 1) + 1;
+ *topcindex = catomic_exchange_and_add (&fromidx, 1) + 1;
fromp = &froms[*topcindex];
fromp->here = &data[newarc];
data[newarc].self_pc = selfpc;
data[newarc].count = 0;
fromp->link = 0;
- atomic_increment (&narcs);
+ catomic_increment (&narcs);
break;
}
}
/* Increment the counter. */
- atomic_increment (&fromp->here->count);
+ catomic_increment (&fromp->here->count);
done:
;
{
__rtld_mrlock_lock (l->l_scoperec_lock);
scoperec = l->l_scoperec;
- atomic_increment (&scoperec->nusers);
+ catomic_increment (&scoperec->nusers);
__rtld_mrlock_unlock (l->l_scoperec_lock);
}
DL_LOOKUP_ADD_DEPENDENCY, NULL);
if (l->l_type == lt_loaded
- && atomic_decrement_val (&scoperec->nusers) == 0
+ && catomic_decrement_val (&scoperec->nusers) == 0
&& __builtin_expect (scoperec->remove_after_use, 0))
{
if (scoperec->notify)
{
__rtld_mrlock_lock (l->l_scoperec_lock);
scoperec = l->l_scoperec;
- atomic_increment (&scoperec->nusers);
+ catomic_increment (&scoperec->nusers);
__rtld_mrlock_unlock (l->l_scoperec_lock);
}
DL_LOOKUP_ADD_DEPENDENCY, NULL);
if (l->l_type == lt_loaded
- && atomic_decrement_val (&scoperec->nusers) == 0
+ && catomic_decrement_val (&scoperec->nusers) == 0
&& __builtin_expect (scoperec->remove_after_use, 0))
{
if (scoperec->notify)
{
__rtld_mrlock_lock (match->l_scoperec_lock);
struct r_scoperec *scoperec = match->l_scoperec;
- atomic_increment (&scoperec->nusers);
+ catomic_increment (&scoperec->nusers);
__rtld_mrlock_unlock (match->l_scoperec_lock);
struct call_dl_lookup_args args;
int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced,
call_dl_lookup, &args);
- if (atomic_decrement_val (&scoperec->nusers) == 0
+ if (catomic_decrement_val (&scoperec->nusers) == 0
&& __builtin_expect (scoperec->remove_after_use, 0))
{
if (scoperec->notify)
* check that we are profiling
* and that we aren't recursively invoked.
*/
- if (atomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
- GMON_PROF_ON))
+ if (catomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
+ GMON_PROF_ON))
return;
/*
#ifndef _ATOMIC_H
#define _ATOMIC_H 1
+/* This header defines three types of macros:
+
+ - atomic arithmetic and logic operations on memory. They all
+ have the prefix "atomic_".
+
+ - conditionally atomic operations of the same kinds. These
+ always behave identically but can be faster when atomicity
+ is not really needed since only one thread has access to
+ the memory location. The price is slightly slower code in
+ the multi-thread case. The interfaces have the prefix
+ "catomic_".
+
+ - support functions like barriers. They also have the prefix
+ "atomic_".
+
+ Architectures must provide a few lowlevel macros (the compare
+ and exchange definitions). All others are optional. They
+ should only be provided if the architecture has specific
+ support for the operation. */
+
#include <stdlib.h>
#include <bits/atomic.h>
#endif
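A minimal usage sketch (not part of the patch), illustrating the intent described in the comment above: the catomic_ spellings are meant for memory that is normally touched by only one thread at a time (allocator statistics, ld.so bookkeeping). They remain correct if more threads appear; they merely trade a runtime check for skipping the locked instruction in the common single-threaded case. The counter below is hypothetical and assumes the glibc-internal <atomic.h> is in scope.

#include <atomic.h>

/* Illustrative counter; not a real glibc variable.  */
static unsigned long nmallocs;

static void
note_malloc_call (void)
{
  /* No lock prefix while the process is single-threaded, still
     correct once more threads exist.  */
  catomic_increment (&nmallocs);
}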
+#if !defined catomic_compare_and_exchange_val_acq \
+ && defined __arch_c_compare_and_exchange_val_32_acq
+# define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+ __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq, \
+ mem, newval, oldval)
+#else
+# define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+ atomic_compare_and_exchange_val_acq (mem, newval, oldval)
+#endif
+
+
#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif
+#ifndef catomic_compare_and_exchange_val_rel
+# define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
+ atomic_compare_and_exchange_val_acq (mem, newval, oldval)
+#endif
+
+
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
Return zero if *MEM was changed or non-zero if no exchange happened. */
#ifndef atomic_compare_and_exchange_bool_acq
#endif
+#ifndef catomic_compare_and_exchange_bool_acq
+# ifdef __arch_c_compare_and_exchange_bool_32_acq
+# define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+ __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq, \
+ mem, newval, oldval)
+# else
+# define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+ ({ /* Cannot use __oldval here, because macros later in this file might \
+ call this macro with __oldval argument. */ \
+ __typeof (oldval) __old = (oldval); \
+ catomic_compare_and_exchange_val_acq (mem, newval, __old) != __old; \
+ })
+# endif
+#endif
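One detail worth keeping in mind when reading the bool_acq macros: they return zero when the exchange happened and non-zero when it did not, the opposite of GCC's __sync_bool_compare_and_swap convention. A minimal sketch (not part of the patch) of the usual one-shot initialization idiom built on the catomic variant defined above; the function name is illustrative only.

/* Returns non-zero if this caller flipped *oncep from 0 to 1 and
   should therefore perform the one-time work.  */
static int
mark_initialized (int *oncep)
{
  return catomic_compare_and_exchange_bool_acq (oncep, 1, 0) == 0;
}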
+
+
#ifndef atomic_compare_and_exchange_bool_rel
# define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
atomic_compare_and_exchange_bool_acq (mem, newval, oldval)
#endif
+#ifndef catomic_compare_and_exchange_bool_rel
+# define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
+ catomic_compare_and_exchange_bool_acq (mem, newval, oldval)
+#endif
+
+
/* Store NEWVALUE in *MEM and return the old value. */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
#endif
+#ifndef catomic_exchange_and_add
+# define catomic_exchange_and_add(mem, value) \
+ ({ __typeof (*(mem)) __oldv; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __value = (value); \
+ \
+ do \
+ __oldv = *__memp; \
+ while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp, \
+ __oldv \
+ + __value,\
+ __oldv), \
+ 0)); \
+ \
+ __oldv; })
+#endif
+
#ifndef atomic_max
# define atomic_max(mem, value) \
} while (0)
#endif
+
+#ifndef catomic_max
+# define catomic_max(mem, value) \
+ do { \
+ __typeof (*(mem)) __oldv; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __value = (value); \
+ do { \
+ __oldv = *__memp; \
+ if (__oldv >= __value) \
+ break; \
+ } while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp, \
+ __value,\
+ __oldv),\
+ 0)); \
+ } while (0)
+#endif
+
+
#ifndef atomic_min
# define atomic_min(mem, value) \
do { \
} while (0)
#endif
+
#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif
+#ifndef catomic_add
+# define catomic_add(mem, value) \
+ (void) catomic_exchange_and_add ((mem), (value))
+#endif
+
+
#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif
+#ifndef catomic_increment
+# define catomic_increment(mem) catomic_add ((mem), 1)
+#endif
+
+
#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif
+#ifndef catomic_increment_val
+# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
+#endif
+
+
/* Add one to *MEM and return true iff it's now zero. */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
#endif
+#ifndef catomic_decrement
+# define catomic_decrement(mem) catomic_add ((mem), -1)
+#endif
+
+
#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif
+#ifndef catomic_decrement_val
+# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
+#endif
+
+
/* Subtract 1 from *MEM and return true iff it's now zero. */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
} while (0)
#endif
+#ifndef catomic_or
+# define catomic_or(mem, mask) \
+ do { \
+ __typeof (*(mem)) __oldval; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __mask = (mask); \
+ \
+ do \
+ __oldval = (*__memp); \
+ while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp, \
+ __oldval \
+ | __mask, \
+ __oldval),\
+ 0)); \
+ } while (0)
+#endif
+
/* Atomically *mem |= mask and return the old value of *mem. */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
/* Profile heap and stack memory usage of running program.
- Copyright (C) 1998-2002, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 1998-2002, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
/* Compute current heap usage and compare it with the maximum value. */
memusage_size_t heap
- = atomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
- atomic_max (&peak_heap, heap);
+ = catomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
+ catomic_max (&peak_heap, heap);
/* Compute current stack usage and compare it with the maximum
value. The base stack pointer might not be set if this is not
start_sp = sp;
size_t current_stack = start_sp - sp;
#endif
- atomic_max (&peak_stack, current_stack);
+ catomic_max (&peak_stack, current_stack);
/* Add up heap and stack usage and compare it with the maximum value. */
- atomic_max (&peak_total, heap + current_stack);
+ catomic_max (&peak_total, heap + current_stack);
/* Store the value only if we are writing to a file. */
if (fd != -1)
{
- uatomic32_t idx = atomic_exchange_and_add (&buffer_cnt, 1);
+ uatomic32_t idx = catomic_exchange_and_add (&buffer_cnt, 1);
if (idx >= 2 * buffer_size)
{
/* We try to reset the counter to the correct range. If
this fails because of another thread increasing the
counter it does not matter since that thread will take
care of the correction. */
unsigned int reset = idx - 2 * buffer_size;
- atomic_compare_and_exchange_val_acq (&buffer_size, reset, idx);
+ catomic_compare_and_exchange_val_acq (&buffer_size, reset, idx);
idx = reset;
}
return (*mallocp) (len);
/* Keep track of number of calls. */
- atomic_increment (&calls[idx_malloc]);
+ catomic_increment (&calls[idx_malloc]);
/* Keep track of total memory consumption for `malloc'. */
- atomic_add (&total[idx_malloc], len);
+ catomic_add (&total[idx_malloc], len);
/* Keep track of total memory requirement. */
- atomic_add (&grand_total, len);
+ catomic_add (&grand_total, len);
/* Remember the size of the request. */
if (len < 65536)
- atomic_increment (&histogram[len / 16]);
+ catomic_increment (&histogram[len / 16]);
else
- atomic_increment (&large);
+ catomic_increment (&large);
/* Total number of calls of any of the functions. */
- atomic_increment (&calls_total);
+ catomic_increment (&calls_total);
/* Do the real work. */
result = (struct header *) (*mallocp) (len + sizeof (struct header));
if (result == NULL)
{
- atomic_increment (&failed[idx_malloc]);
+ catomic_increment (&failed[idx_malloc]);
return NULL;
}
}
/* Keep track of number of calls. */
- atomic_increment (&calls[idx_realloc]);
+ catomic_increment (&calls[idx_realloc]);
if (len > old_len)
{
/* Keep track of total memory consumption for `realloc'. */
- atomic_add (&total[idx_realloc], len - old_len);
+ catomic_add (&total[idx_realloc], len - old_len);
/* Keep track of total memory requirement. */
- atomic_add (&grand_total, len - old_len);
+ catomic_add (&grand_total, len - old_len);
}
/* Remember the size of the request. */
if (len < 65536)
- atomic_increment (&histogram[len / 16]);
+ catomic_increment (&histogram[len / 16]);
else
- atomic_increment (&large);
+ catomic_increment (&large);
/* Total number of calls of any of the functions. */
- atomic_increment (&calls_total);
+ catomic_increment (&calls_total);
/* Do the real work. */
result = (struct header *) (*reallocp) (real, len + sizeof (struct header));
if (result == NULL)
{
- atomic_increment (&failed[idx_realloc]);
+ catomic_increment (&failed[idx_realloc]);
return NULL;
}
/* Record whether the reduction/increase happened in place. */
if (real == result)
- atomic_increment (&inplace);
+ catomic_increment (&inplace);
/* Was the buffer decreased? */
if (old_len > len)
- atomic_increment (&decreasing);
+ catomic_increment (&decreasing);
/* Update the allocation data and write out the records if necessary. */
update_data (result, len, old_len);
return (*callocp) (n, len);
/* Keep track of number of calls. */
- atomic_increment (&calls[idx_calloc]);
+ catomic_increment (&calls[idx_calloc]);
/* Keep track of total memory consumption for `calloc'. */
- atomic_add (&total[idx_calloc], size);
+ catomic_add (&total[idx_calloc], size);
/* Keep track of total memory requirement. */
- atomic_add (&grand_total, size);
+ catomic_add (&grand_total, size);
/* Remember the size of the request. */
if (size < 65536)
- atomic_increment (&histogram[size / 16]);
+ catomic_increment (&histogram[size / 16]);
else
- atomic_increment (&large);
+ catomic_increment (&large);
/* Total number of calls of any of the functions. */
++calls_total;
result = (struct header *) (*mallocp) (size + sizeof (struct header));
if (result == NULL)
{
- atomic_increment (&failed[idx_calloc]);
+ catomic_increment (&failed[idx_calloc]);
return NULL;
}
/* `free (NULL)' has no effect. */
if (ptr == NULL)
{
- atomic_increment (&calls[idx_free]);
+ catomic_increment (&calls[idx_free]);
return;
}
}
/* Keep track of number of calls. */
- atomic_increment (&calls[idx_free]);
+ catomic_increment (&calls[idx_free]);
/* Keep track of total memory freed using `free'. */
- atomic_add (&total[idx_free], real->length);
+ catomic_add (&total[idx_free], real->length);
/* Update the allocation data and write out the records if necessary. */
update_data (NULL, 0, real->length);
? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
/* Keep track of number of calls. */
- atomic_increment (&calls[idx]);
+ catomic_increment (&calls[idx]);
/* Keep track of total memory consumption for `malloc'. */
- atomic_add (&total[idx], len);
+ catomic_add (&total[idx], len);
/* Keep track of total memory requirement. */
- atomic_add (&grand_total, len);
+ catomic_add (&grand_total, len);
/* Remember the size of the request. */
if (len < 65536)
- atomic_increment (&histogram[len / 16]);
+ catomic_increment (&histogram[len / 16]);
else
- atomic_increment (&large);
+ catomic_increment (&large);
/* Total number of calls of any of the functions. */
- atomic_increment (&calls_total);
+ catomic_increment (&calls_total);
/* Check for failures. */
if (result == NULL)
- atomic_increment (&failed[idx]);
+ catomic_increment (&failed[idx]);
else if (idx == idx_mmap_w)
/* Update the allocation data and write out the records if
necessary. Note the first parameter is NULL which means
? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
/* Keep track of number of calls. */
- atomic_increment (&calls[idx]);
+ catomic_increment (&calls[idx]);
/* Keep track of total memory consumption for `malloc'. */
- atomic_add (&total[idx], len);
+ catomic_add (&total[idx], len);
/* Keep track of total memory requirement. */
- atomic_add (&grand_total, len);
+ catomic_add (&grand_total, len);
/* Remember the size of the request. */
if (len < 65536)
- atomic_increment (&histogram[len / 16]);
+ catomic_increment (&histogram[len / 16]);
else
- atomic_increment (&large);
+ catomic_increment (&large);
/* Total number of calls of any of the functions. */
- atomic_increment (&calls_total);
+ catomic_increment (&calls_total);
/* Check for failures. */
if (result == NULL)
- atomic_increment (&failed[idx]);
+ catomic_increment (&failed[idx]);
else if (idx == idx_mmap_w)
/* Update the allocation data and write out the records if
necessary. Note the first parameter is NULL which means
if (!not_me && trace_mmap)
{
/* Keep track of number of calls. */
- atomic_increment (&calls[idx_mremap]);
+ catomic_increment (&calls[idx_mremap]);
if (len > old_len)
{
/* Keep track of total memory consumption for `malloc'. */
- atomic_add (&total[idx_mremap], len - old_len);
+ catomic_add (&total[idx_mremap], len - old_len);
/* Keep track of total memory requirement. */
- atomic_add (&grand_total, len - old_len);
+ catomic_add (&grand_total, len - old_len);
}
/* Remember the size of the request. */
if (len < 65536)
- atomic_increment (&histogram[len / 16]);
+ catomic_increment (&histogram[len / 16]);
else
- atomic_increment (&large);
+ catomic_increment (&large);
/* Total number of calls of any of the functions. */
- atomic_increment (&calls_total);
+ catomic_increment (&calls_total);
/* Check for failures. */
if (result == NULL)
- atomic_increment (&failed[idx_mremap]);
+ catomic_increment (&failed[idx_mremap]);
else
{
/* Record whether the reduction/increase happened in place. */
if (start == result)
- atomic_increment (&inplace_mremap);
+ catomic_increment (&inplace_mremap);
/* Was the buffer decreased? */
if (old_len > len)
- atomic_increment (&decreasing_mremap);
+ catomic_increment (&decreasing_mremap);
/* Update the allocation data and write out the records if
necessary. Note the first parameter is NULL which means
if (!not_me && trace_mmap)
{
/* Keep track of number of calls. */
- atomic_increment (&calls[idx_munmap]);
+ catomic_increment (&calls[idx_munmap]);
if (__builtin_expect (result == 0, 1))
{
/* Keep track of total memory freed using `free'. */
- atomic_add (&total[idx_munmap], len);
+ catomic_add (&total[idx_munmap], len);
/* Update the allocation data and write out the records if
necessary. */
update_data (NULL, 0, len);
}
else
- atomic_increment (&failed[idx_munmap]);
+ catomic_increment (&failed[idx_munmap]);
}
return result;
+2006-10-11 Ulrich Drepper <drepper@redhat.com>
+
+ * sysdeps/unix/sysv/linux/rtld-lowlevel.h: Use catomic_*
+ operations instead of atomic_*.
+
2006-10-09 Ulrich Drepper <drepper@redhat.com>
* sysdeps/unix/sysv/linux/rtld-lowlevel.h: New file.
{ \
int newval = ((oldval & __RTLD_MRLOCK_RBITS) \
+ __RTLD_MRLOCK_INC); \
- int ret = atomic_compare_and_exchange_val_acq (&(lock), \
- newval, \
- oldval); \
+ int ret = catomic_compare_and_exchange_val_acq (&(lock), \
+ newval, \
+ oldval); \
if (__builtin_expect (ret == oldval, 1)) \
goto out; \
} \
} \
if ((oldval & __RTLD_MRLOCK_RWAIT) == 0) \
{ \
- atomic_or (&(lock), __RTLD_MRLOCK_RWAIT); \
+ catomic_or (&(lock), __RTLD_MRLOCK_RWAIT); \
oldval |= __RTLD_MRLOCK_RWAIT; \
} \
lll_futex_wait (lock, oldval); \
#define __rtld_mrlock_unlock(lock) \
do { \
- int oldval = atomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_INC); \
+ int oldval = catomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_INC); \
if (__builtin_expect ((oldval \
& (__RTLD_MRLOCK_RBITS | __RTLD_MRLOCK_WWAIT)) \
- == __RTLD_MRLOCK_INC | __RTLD_MRLOCK_WWAIT, 0)) \
+ == (__RTLD_MRLOCK_INC | __RTLD_MRLOCK_WWAIT), 0)) \
/* We have to wake all threads since there might be some queued \
readers already. */ \
lll_futex_wake (&(lock), 0x7fffffff); \
{ \
int newval = ((oldval & __RTLD_MRLOCK_RWAIT) \
+ __RTLD_MRLOCK_WRITER); \
- int ret = atomic_compare_and_exchange_val_acq (&(lock), \
+ int ret = catomic_compare_and_exchange_val_acq (&(lock), \
newval, \
oldval); \
if (__builtin_expect (ret == oldval, 1)) \
} \
atomic_delay (); \
} \
- atomic_or (&(lock), __RTLD_MRLOCK_WWAIT); \
+ catomic_or (&(lock), __RTLD_MRLOCK_WWAIT); \
oldval |= __RTLD_MRLOCK_WWAIT; \
lll_futex_wait (lock, oldval); \
} \
#define __rtld_mrlock_done(lock) \
do { \
- int oldval = atomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_WRITER); \
+ int oldval = catomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_WRITER); \
if (__builtin_expect ((oldval & __RTLD_MRLOCK_RWAIT) != 0, 0)) \
lll_futex_wake (&(lock), 0x7fffffff); \
} while (0)
#if __WORDSIZE == 64
# define atomicinclock(lock) (void) 0
# define atomicincunlock(lock) (void) 0
-# define atomicinc(var) atomic_increment (&(var))
+# define atomicinc(var) catomic_increment (&(var))
#else
__libc_lock_define_initialized (static, lock);
# define atomicinclock(lock) __libc_lock_lock (lock)
/* We don't want to run this cleanup more than once. */
&& (cxafn = f->func.cxa.fn,
cxaarg = f->func.cxa.arg,
- ! atomic_compare_and_exchange_bool_acq (&f->flavor, ef_free,
- ef_cxa)))
+ ! catomic_compare_and_exchange_bool_acq (&f->flavor, ef_free,
+ ef_cxa)))
{
uint64_t check = __new_exitfn_called;
02111-1307 USA. */
#include <stdint.h>
+#include <tls.h> /* For tcbhead_t. */
typedef int8_t atomic8_t;
ret; })
+#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgb %b2, %1" \
+ : "=a" (ret), "=m" (*mem) \
+ : "q" (newval), "m" (*mem), "0" (oldval), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
+ ret; })
+
+#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgw %w2, %1" \
+ : "=a" (ret), "=m" (*mem) \
+ : "q" (newval), "m" (*mem), "0" (oldval), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
+ ret; })
+
+#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgl %2, %1" \
+ : "=a" (ret), "=m" (*mem) \
+ : "q" (newval), "m" (*mem), "0" (oldval), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
+ ret; })
+
+#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+ __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgq %q2, %1" \
+ : "=a" (ret), "=m" (*mem) \
+ : "q" (newval), "m" (*mem), "0" (oldval), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
+ ret; })
+
+
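What the conditional lock prefix above buys, sketched in plain C (not part of the patch): the cmpl against tcbhead_t.multiple_threads in %fs skips the bus-locked cmpxchg while the process is single-threaded. Below, single_threaded stands in for that flag and GCC's __sync builtin stands in for the locked cmpxchg; the names are illustrative only.

#include <stdbool.h>

static int
catomic_cas_sketch (int *mem, int newval, int oldval, bool single_threaded)
{
  if (single_threaded)
    {
      /* No other thread can observe *mem, so a plain
         compare-and-store is enough and avoids the locked
         instruction.  */
      int ret = *mem;
      if (ret == oldval)
        *mem = newval;
      return ret;
    }
  /* Multi-threaded: fall back to a real atomic CAS.  */
  return __sync_val_compare_and_swap (mem, oldval, newval);
}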
/* Note that we need no lock prefix. */
#define atomic_exchange_acq(mem, newvalue) \
({ __typeof (*mem) result; \
result; })
-#define atomic_exchange_and_add(mem, value) \
+#define __arch_exchange_and_add_body(lock, mem, value) \
({ __typeof (*mem) result; \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "xaddb %b0, %1" \
+ __asm __volatile (lock "xaddb %b0, %1" \
: "=r" (result), "=m" (*mem) \
- : "0" (value), "m" (*mem)); \
+ : "0" (value), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "xaddw %w0, %1" \
+ __asm __volatile (lock "xaddw %w0, %1" \
: "=r" (result), "=m" (*mem) \
- : "0" (value), "m" (*mem)); \
+ : "0" (value), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "xaddl %0, %1" \
+ __asm __volatile (lock "xaddl %0, %1" \
: "=r" (result), "=m" (*mem) \
- : "0" (value), "m" (*mem)); \
+ : "0" (value), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else \
- __asm __volatile (LOCK_PREFIX "xaddq %q0, %1" \
+ __asm __volatile (lock "xaddq %q0, %1" \
: "=r" (result), "=m" (*mem) \
- : "0" ((long) (value)), "m" (*mem)); \
+ : "0" ((long) (value)), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
result; })
+#define atomic_exchange_and_add(mem, value) \
+ __arch_exchange_and_add_body (LOCK_PREFIX, mem, value)
+
+#define __arch_exchange_and_add_cprefix \
+ "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_exchange_and_add(mem, value) \
+ __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
+
+
+#define __arch_add_body(lock, pfx, mem, value) \
+ do { \
+ if (__builtin_constant_p (value) && (value) == 1) \
+ pfx##_increment (mem); \
+ else if (__builtin_constant_p (value) && (value) == -1) \
+ pfx##_decrement (mem); \
+ else if (sizeof (*mem) == 1) \
+ __asm __volatile (lock "addb %b1, %0" \
+ : "=m" (*mem) \
+ : "ir" (value), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (lock "addw %w1, %0" \
+ : "=m" (*mem) \
+ : "ir" (value), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (lock "addl %1, %0" \
+ : "=m" (*mem) \
+ : "ir" (value), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
+ else \
+ __asm __volatile (lock "addq %q1, %0" \
+ : "=m" (*mem) \
+ : "ir" ((long) (value)), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
+ } while (0)
#define atomic_add(mem, value) \
- (void) ({ if (__builtin_constant_p (value) && (value) == 1) \
- atomic_increment (mem); \
- else if (__builtin_constant_p (value) && (value) == 1) \
- atomic_decrement (mem); \
- else if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "addb %b1, %0" \
- : "=m" (*mem) \
- : "ir" (value), "m" (*mem)); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "addw %w1, %0" \
- : "=m" (*mem) \
- : "ir" (value), "m" (*mem)); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "addl %1, %0" \
- : "=m" (*mem) \
- : "ir" (value), "m" (*mem)); \
- else \
- __asm __volatile (LOCK_PREFIX "addq %q1, %0" \
- : "=m" (*mem) \
- : "ir" ((long) (value)), "m" (*mem)); \
- })
+ __arch_add_body (LOCK_PREFIX, atomic, mem, value)
+
+#define __arch_add_cprefix \
+ "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_add(mem, value) \
+ __arch_add_body (__arch_add_cprefix, catomic, mem, value)
#define atomic_add_negative(mem, value) \
__result; })
-#define atomic_increment(mem) \
+#define __arch_increment_body(lock, mem) \
do { \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "incb %b0" \
+ __asm __volatile (lock "incb %b0" \
: "=m" (*mem) \
- : "m" (*mem)); \
+ : "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "incw %w0" \
+ __asm __volatile (lock "incw %w0" \
: "=m" (*mem) \
- : "m" (*mem)); \
+ : "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "incl %0" \
+ __asm __volatile (lock "incl %0" \
: "=m" (*mem) \
- : "m" (*mem)); \
+ : "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else \
- __asm __volatile (LOCK_PREFIX "incq %q0" \
+ __asm __volatile (lock "incq %q0" \
: "=m" (*mem) \
- : "m" (*mem)); \
+ : "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
} while (0)
+#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)
+
+#define __arch_increment_cprefix \
+ "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_increment(mem) \
+ __arch_increment_body (__arch_increment_cprefix, mem)
+
#define atomic_increment_and_test(mem) \
({ unsigned char __result; \
__result; })
-#define atomic_decrement(mem) \
+#define __arch_decrement_body(lock, mem) \
do { \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "decb %b0" \
+ __asm __volatile (lock "decb %b0" \
: "=m" (*mem) \
- : "m" (*mem)); \
+ : "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "decw %w0" \
+ __asm __volatile (lock "decw %w0" \
: "=m" (*mem) \
- : "m" (*mem)); \
+ : "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "decl %0" \
+ __asm __volatile (lock "decl %0" \
: "=m" (*mem) \
- : "m" (*mem)); \
+ : "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else \
- __asm __volatile (LOCK_PREFIX "decq %q0" \
+ __asm __volatile (lock "decq %q0" \
: "=m" (*mem) \
- : "m" (*mem)); \
+ : "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
} while (0)
+#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)
+
+#define __arch_decrement_cprefix \
+ "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_decrement(mem) \
+ __arch_decrement_body (__arch_decrement_cprefix, mem)
+
#define atomic_decrement_and_test(mem) \
({ unsigned char __result; \
} while (0)
-#define atomic_or(mem, mask) \
+#define __arch_or_body(lock, mem, mask) \
do { \
if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "orb %1, %b0" \
+ __asm __volatile (lock "orb %1, %b0" \
: "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
+ : "ir" (mask), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "orw %1, %w0" \
+ __asm __volatile (lock "orw %1, %w0" \
: "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
+ : "ir" (mask), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "orl %1, %0" \
+ __asm __volatile (lock "orl %1, %0" \
: "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
+ : "ir" (mask), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
else \
- __asm __volatile (LOCK_PREFIX "orq %1, %q0" \
+ __asm __volatile (lock "orq %1, %q0" \
: "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
+ : "ir" (mask), "m" (*mem), \
+ "i" (offsetof (tcbhead_t, multiple_threads))); \
} while (0)
+
+#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
+
+#define __arch_or_cprefix \
+ "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)