Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c5714de719888a5ed12de83c97bc87..f171a6600fbcac6424376d65aeb35424d57e2b6e 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
        return *(volatile signed char *)(&(x)->slock) <= 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        __asm__ volatile ("move.d %1,%0" \
                          : "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
                          : "memory");
 }
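
The slock word here appears to follow the usual byte-semaphore convention; this is an inference from the "<= 0" test above rather than anything the hunk states: a positive value (1) means free, zero or negative means held, and unlock simply stores the free value back. A minimal C model under that assumption:

/* Model of the CRIS slock convention, inferred from the "<= 0" test
 * above; the authoritative acquire/release sequences live in the
 * cris_spin_lock()/cris_spin_unlock() assembly, which is not shown here. */
typedef struct { volatile signed char byte; } model_lock_t;

static inline int model_is_locked(model_lock_t *x)
{
        return x->byte <= 0;            /* non-positive => some CPU holds it */
}

static inline void model_unlock(model_lock_t *x)
{
        x->byte = 1;                    /* store the "free" value, as the move.d above does */
}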
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-       while (__raw_spin_is_locked(lock))
+       while (arch_spin_is_locked(lock))
                cpu_relax();
 }
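
arch_spin_unlock_wait() never takes the lock; it only spins, with cpu_relax() easing the loop, until any current holder releases it. A hypothetical caller that must let an in-flight critical section drain before proceeding:

/* Hypothetical drain pattern: wait out the current holder without
 * acquiring; anything read afterwards must tolerate a new holder. */
static void drain_then_proceed(arch_spinlock_t *l)
{
        arch_spin_unlock_wait(l);
        /* ... proceed, e.g. tear down state no new holder can reach ... */
}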
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
        return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        cris_spin_lock((void *)&lock->slock);
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-       __raw_spin_lock(lock);
+       arch_spin_lock(lock);
 }
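
arch_spin_lock_flags() ignores flags entirely, which suggests this port never re-enables interrupts while it spins: the IRQ-saving variant degenerates to a plain acquire. A small usage sketch of the three entry points (the caller is hypothetical):

/* Hypothetical caller exercising the three acquire paths. */
static void spin_entry_points(arch_spinlock_t *l, unsigned long flags)
{
        if (arch_spin_trylock(l)) {     /* opportunistic, never spins */
                arch_spin_unlock(l);
                return;
        }
        arch_spin_lock_flags(l, flags); /* same as arch_spin_lock() on this port */
        arch_spin_unlock(l);
}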
 
 /*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
  *
  */
 
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
 {
        return (int)(x)->lock > 0;
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
 {
        return (x)->lock == RW_LOCK_BIAS;
 }
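
The tests above assume the conventional RW_LOCK_BIAS counter protocol (the BIAS value itself is defined elsewhere in the arch headers): the word starts at RW_LOCK_BIAS when the lock is free, each reader subtracts one, and a writer claims everything by dropping it to zero. A model with an assumed bias value:

#define MODEL_BIAS 0x01000000           /* assumed; the arch defines the real RW_LOCK_BIAS */

struct model_rwlock { unsigned int lock; };

static int model_read_can_lock(struct model_rwlock *x)
{
        return (int)x->lock > 0;        /* > 0: no writer, reader slots remain */
}

static int model_write_can_lock(struct model_rwlock *x)
{
        return x->lock == MODEL_BIAS;   /* untouched counter: no readers, no writer */
}

static int model_readers_held(struct model_rwlock *x)
{
        return MODEL_BIAS - x->lock;    /* each reader took one unit of the bias */
}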
 
-static  inline void __raw_read_lock(raw_rwlock_t *rw)
+static  inline void arch_read_lock(arch_rwlock_t *rw)
 {
-       __raw_spin_lock(&rw->slock);
+       arch_spin_lock(&rw->slock);
        while (rw->lock == 0);
        rw->lock--;
-       __raw_spin_unlock(&rw->slock);
+       arch_spin_unlock(&rw->slock);
 }
 
-static  inline void __raw_write_lock(raw_rwlock_t *rw)
+static  inline void arch_write_lock(arch_rwlock_t *rw)
 {
-       __raw_spin_lock(&rw->slock);
+       arch_spin_lock(&rw->slock);
        while (rw->lock != RW_LOCK_BIAS);
        rw->lock = 0;
-       __raw_spin_unlock(&rw->slock);
+       arch_spin_unlock(&rw->slock);
 }
 
-static  inline void __raw_read_unlock(raw_rwlock_t *rw)
+static  inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-       __raw_spin_lock(&rw->slock);
+       arch_spin_lock(&rw->slock);
        rw->lock++;
-       __raw_spin_unlock(&rw->slock);
+       arch_spin_unlock(&rw->slock);
 }
 
-static  inline void __raw_write_unlock(raw_rwlock_t *rw)
+static  inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-       __raw_spin_lock(&rw->slock);
+       arch_spin_lock(&rw->slock);
        while (rw->lock != RW_LOCK_BIAS);
        rw->lock = RW_LOCK_BIAS;
-       __raw_spin_unlock(&rw->slock);
+       arch_spin_unlock(&rw->slock);
 }
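
One caveat worth flagging: every wait loop in this group spins with slock still held, while the only code that can change rw->lock to satisfy the loop, the matching unlock path, must itself take slock first, so a contended reader or writer is never handed the lock. (The loop in arch_write_unlock() is doubly odd, since it waits for the count to reach RW_LOCK_BIAS before storing RW_LOCK_BIAS.) A defensive variant, a sketch rather than anything this commit introduces, would drop slock around each wait:

/* Sketch only: a retry loop that releases slock while waiting, so the
 * holder's unlock path can make progress. */
static inline void sketch_read_lock(arch_rwlock_t *rw)
{
        for (;;) {
                arch_spin_lock(&rw->slock);
                if (rw->lock != 0) {            /* no writer: take one reader slot */
                        rw->lock--;
                        arch_spin_unlock(&rw->slock);
                        return;
                }
                arch_spin_unlock(&rw->slock);   /* writer active: back off and retry */
                cpu_relax();
        }
}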
 
-static  inline int __raw_read_trylock(raw_rwlock_t *rw)
+static  inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        int ret = 0;
-       __raw_spin_lock(&rw->slock);
+       arch_spin_lock(&rw->slock);
        if (rw->lock != 0) {
                rw->lock--;
                ret = 1;
        }
-       __raw_spin_unlock(&rw->slock);
+       arch_spin_unlock(&rw->slock);
        return ret;
 }
 
-static  inline int __raw_write_trylock(raw_rwlock_t *rw)
+static  inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        int ret = 0;
-       __raw_spin_lock(&rw->slock);
+       arch_spin_lock(&rw->slock);
        if (rw->lock == RW_LOCK_BIAS) {
                rw->lock = 0;
                ret = 1;
        }
-       __raw_spin_unlock(&rw->slock);
+       arch_spin_unlock(&rw->slock);
        return ret;
 }
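
Both trylock variants follow the same shape: take slock, test the counter, claim the lock only if the test passes, and return nonzero only when they actually took it. A hypothetical fast-path caller that falls back on contention:

/* Hypothetical fast path: attempt exclusive access, fall back if contended. */
static int write_fast_path(arch_rwlock_t *rw)
{
        if (!arch_write_trylock(rw))
                return 0;               /* readers or a writer in the way */
        /* ... exclusive work ... */
        arch_write_unlock(rw);
        return 1;
}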
 
 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)  cpu_relax()
-#define _raw_read_relax(lock)  cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock)  cpu_relax()
+#define arch_read_relax(lock)  cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* __ASM_ARCH_SPINLOCK_H */