locking/rwsem: Remove arch specific rwsem files
author: Waiman Long <longman@redhat.com>
Fri, 22 Mar 2019 14:30:06 +0000 (10:30 -0400)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 3 Apr 2019 12:50:50 +0000 (14:50 +0200)
As the generic rwsem-xadd code is using the appropriate acquire and
release versions of the atomic operations, the arch specific rwsem.h
files will not be that much faster than the generic code as long as the
atomic functions are properly implemented. So we can remove those arch
specific rwsem.h files and stop building asm/rwsem.h to reduce
maintenance effort.

Currently, only x86, alpha and ia64 have implemented architecture
specific fast paths. I don't have access to alpha and ia64 systems for
testing, but they are legacy systems that are not likely to be updated
to the latest kernel anyway.

By using a rwsem microbenchmark, the total locking rates on a 4-socket
56-core 112-thread x86-64 system before and after the patch were as
follows (mixed means equal # of read and write locks):

                      Before Patch              After Patch
   # of Threads  wlock   rlock   mixed     wlock   rlock   mixed
   ------------  -----   -----   -----     -----   -----   -----
        1        29,201  30,143  29,458    28,615  30,172  29,201
        2         6,807  13,299   1,171     7,725  15,025   1,804
        4         6,504  12,755   1,520     7,127  14,286   1,345
        8         6,762  13,412     764     6,826  13,652     726
       16         6,693  15,408     662     6,599  15,938     626
       32         6,145  15,286     496     5,549  15,487     511
       64         5,812  15,495      60     5,858  15,572      60

There were some run-to-run variations for the multi-thread tests. For
x86-64, using the generic C code fast path seems to be a little bit
faster than the assembly version with low lock contention.  Looking at
the assembly version of the fast paths, there are assembly to/from C
code wrappers that save and restore all the callee-clobbered registers
(7 registers on x86-64). The assembly generated from the generic C
code doesn't need to do that. That may explain the slight performance
gain here.

The generic asm rwsem.h can also be merged into kernel/locking/rwsem.h
with no code change, as no code other than that under kernel/locking
needs to access the internal rwsem macros and functions.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-c6x-dev@linux-c6x.org
Cc: linux-m68k@lists.linux-m68k.org
Cc: linux-riscv@lists.infradead.org
Cc: linux-um@lists.infradead.org
Cc: linux-xtensa@linux-xtensa.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: nios2-dev@lists.rocketboards.org
Cc: openrisc@lists.librecores.org
Cc: uclinux-h8-devel@lists.sourceforge.jp
Link: https://lkml.kernel.org/r/20190322143008.21313-2-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
19 files changed:
MAINTAINERS
arch/alpha/include/asm/rwsem.h [deleted file]
arch/arm/include/asm/Kbuild
arch/arm64/include/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/ia64/include/asm/rwsem.h [deleted file]
arch/powerpc/include/asm/Kbuild
arch/s390/include/asm/Kbuild
arch/sh/include/asm/Kbuild
arch/sparc/include/asm/Kbuild
arch/x86/include/asm/rwsem.h [deleted file]
arch/x86/lib/Makefile
arch/x86/lib/rwsem.S [deleted file]
arch/x86/um/Makefile
arch/xtensa/include/asm/Kbuild
include/asm-generic/rwsem.h [deleted file]
include/linux/rwsem.h
kernel/locking/percpu-rwsem.c
kernel/locking/rwsem.h

index 43b36dbed48e77bffcc94ea79b8a2e6d79ad0aae..8876f5de7738601bb8b6793249c4a6a636286598 100644 (file)
@@ -9098,7 +9098,6 @@ F:        arch/*/include/asm/spinlock*.h
 F:     include/linux/rwlock*.h
 F:     include/linux/mutex*.h
 F:     include/linux/rwsem*.h
-F:     arch/*/include/asm/rwsem.h
 F:     include/linux/seqlock.h
 F:     lib/locking*.[ch]
 F:     kernel/locking/
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
deleted file mode 100644 (file)
index cf8fc8f..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_RWSEM_H
-#define _ALPHA_RWSEM_H
-
-/*
- * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
- */
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-
-#define RWSEM_UNLOCKED_VALUE           0x0000000000000000L
-#define RWSEM_ACTIVE_BIAS              0x0000000000000001L
-#define RWSEM_ACTIVE_MASK              0x00000000ffffffffL
-#define RWSEM_WAITING_BIAS             (-0x0000000100000000L)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-static inline int ___down_read(struct rw_semaphore *sem)
-{
-       long oldcount;
-#ifndef        CONFIG_SMP
-       oldcount = sem->count.counter;
-       sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       "       mb\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
-       return (oldcount < 0);
-}
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       if (unlikely(___down_read(sem)))
-               rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-       if (unlikely(___down_read(sem)))
-               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-                       return -EINTR;
-
-       return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-       long old, new, res;
-
-       res = atomic_long_read(&sem->count);
-       do {
-               new = res + RWSEM_ACTIVE_READ_BIAS;
-               if (new <= 0)
-                       break;
-               old = res;
-               res = atomic_long_cmpxchg(&sem->count, old, new);
-       } while (res != old);
-       return res >= 0 ? 1 : 0;
-}
-
-static inline long ___down_write(struct rw_semaphore *sem)
-{
-       long oldcount;
-#ifndef        CONFIG_SMP
-       oldcount = sem->count.counter;
-       sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       "       mb\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
-       return oldcount;
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       if (unlikely(___down_write(sem)))
-               rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-       if (unlikely(___down_write(sem))) {
-               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-                       return -EINTR;
-       }
-
-       return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-       long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-                          RWSEM_ACTIVE_WRITE_BIAS);
-       if (ret == RWSEM_UNLOCKED_VALUE)
-               return 1;
-       return 0;
-}
-
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       long oldcount;
-#ifndef        CONFIG_SMP
-       oldcount = sem->count.counter;
-       sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "       mb\n"
-       "1:     ldq_l   %0,%1\n"
-       "       subq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
-       if (unlikely(oldcount < 0))
-               if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
-                       rwsem_wake(sem);
-}
-
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       long count;
-#ifndef        CONFIG_SMP
-       sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
-       count = sem->count.counter;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "       mb\n"
-       "1:     ldq_l   %0,%1\n"
-       "       subq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       "       subq    %0,%3,%0\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (count), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
-       if (unlikely(count))
-               if ((int)count == 0)
-                       rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       long oldcount;
-#ifndef        CONFIG_SMP
-       oldcount = sem->count.counter;
-       sem->count.counter -= RWSEM_WAITING_BIAS;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       "       mb\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
-#endif
-       if (unlikely(oldcount < 0))
-               rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ALPHA_RWSEM_H */
index a8a4eb7f6dae0371940a9cc70c077e2194fb02bc..8fb51b7bf1d587499f2f782d1f699c47adc91b7d 100644 (file)
@@ -12,7 +12,6 @@ generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += seccomp.h
 generic-y += segment.h
 generic-y += serial.h
index 1e17ea5c372b2782cb11bde1a8cfb9162bb4e9e8..60a933b070019a9d8cd43a9d779d92e802b1c08b 100644 (file)
@@ -16,7 +16,6 @@ generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += rwsem.h
 generic-y += segment.h
 generic-y += serial.h
 generic-y += set_memory.h
index d046e8ccdf786be5029237ad722d819de88d6124..3ff5f297acda7783d4d1494098636ad8e017fb1a 100644 (file)
@@ -27,7 +27,6 @@ generic-y += mm-arch-hooks.h
 generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += serial.h
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
deleted file mode 100644 (file)
index 9179106..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * R/W semaphores for ia64
- *
- * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
- * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 2005 Christoph Lameter <cl@linux.com>
- *
- * Based on asm-i386/rwsem.h and other architecture implementation.
- *
- * The MSW of the count is the negated number of active writers and
- * waiting lockers, and the LSW is the total number of active locks.
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
- * the case of an uncontended lock. Readers increment by 1 and see a positive
- * value when uncontended, negative if there are writers (and maybe) readers
- * waiting (in which case it goes to sleep).
- */
-
-#ifndef _ASM_IA64_RWSEM_H
-#define _ASM_IA64_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#include <asm/intrinsics.h>
-
-#define RWSEM_UNLOCKED_VALUE           __IA64_UL_CONST(0x0000000000000000)
-#define RWSEM_ACTIVE_BIAS              (1L)
-#define RWSEM_ACTIVE_MASK              (0xffffffffL)
-#define RWSEM_WAITING_BIAS             (-0x100000000L)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline int
-___down_read (struct rw_semaphore *sem)
-{
-       long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
-
-       return (result < 0);
-}
-
-static inline void
-__down_read (struct rw_semaphore *sem)
-{
-       if (___down_read(sem))
-               rwsem_down_read_failed(sem);
-}
-
-static inline int
-__down_read_killable (struct rw_semaphore *sem)
-{
-       if (___down_read(sem))
-               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-                       return -EINTR;
-
-       return 0;
-}
-
-/*
- * lock for writing
- */
-static inline long
-___down_write (struct rw_semaphore *sem)
-{
-       long old, new;
-
-       do {
-               old = atomic_long_read(&sem->count);
-               new = old + RWSEM_ACTIVE_WRITE_BIAS;
-       } while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
-
-       return old;
-}
-
-static inline void
-__down_write (struct rw_semaphore *sem)
-{
-       if (___down_write(sem))
-               rwsem_down_write_failed(sem);
-}
-
-static inline int
-__down_write_killable (struct rw_semaphore *sem)
-{
-       if (___down_write(sem)) {
-               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-                       return -EINTR;
-       }
-
-       return 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void
-__up_read (struct rw_semaphore *sem)
-{
-       long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
-
-       if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
-               rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void
-__up_write (struct rw_semaphore *sem)
-{
-       long old, new;
-
-       do {
-               old = atomic_long_read(&sem->count);
-               new = old - RWSEM_ACTIVE_WRITE_BIAS;
-       } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
-       if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
-               rwsem_wake(sem);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_read_trylock (struct rw_semaphore *sem)
-{
-       long tmp;
-       while ((tmp = atomic_long_read(&sem->count)) >= 0) {
-               if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_write_trylock (struct rw_semaphore *sem)
-{
-       long tmp = atomic_long_cmpxchg_acquire(&sem->count,
-                       RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
-       return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void
-__downgrade_write (struct rw_semaphore *sem)
-{
-       long old, new;
-
-       do {
-               old = atomic_long_read(&sem->count);
-               new = old - RWSEM_WAITING_BIAS;
-       } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
-       if (old < 0)
-               rwsem_downgrade_wake(sem);
-}
-
-#endif /* _ASM_IA64_RWSEM_H */
index a0c132bedfae86965c2f7c850098b65420c2c5fc..36bda391e549f87dc477a9c0997e93b855e556b9 100644 (file)
@@ -8,6 +8,5 @@ generic-y += irq_regs.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += vtime.h
 generic-y += msi.h
index 12d77cb11fe5a96269a7bde5fe6b8c6f11a23ea8..d5fadefea33ca862c3074e070669e6ff65d95a8f 100644 (file)
@@ -20,7 +20,6 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
-generic-y += rwsem.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += word-at-a-time.h
index 7bf2cb680d328462c4e621eae24005f1c9f35afc..73fff39a0122f0405f0940036a09cc4283946d44 100644 (file)
@@ -17,7 +17,6 @@ generic-y += mm-arch-hooks.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += serial.h
 generic-y += sizes.h
 generic-y += trace_clock.h
index a22cfd5c0ee8665d96f40dcdfacd2c784a2fad62..2ca3200d3616abb950e87c7570937f46426df311 100644 (file)
@@ -18,7 +18,6 @@ generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += msi.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += word-at-a-time.h
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
deleted file mode 100644 (file)
index 4c25cf6..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
- *
- * Written by David Howells (dhowells@redhat.com).
- *
- * Derived from asm-x86/semaphore.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _ASM_X86_RWSEM_H
-#define _ASM_X86_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-#include <asm/asm.h>
-
-/*
- * The bias values and the counter type limits the number of
- * potential readers/writers to 32767 for 32 bits and 2147483647
- * for 64 bits.
- */
-
-#ifdef CONFIG_X86_64
-# define RWSEM_ACTIVE_MASK             0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK             0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000L
-#define RWSEM_ACTIVE_BIAS              0x00000001L
-#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-#define ____down_read(sem, slow_path)                                  \
-({                                                                     \
-       struct rw_semaphore* ret;                                       \
-       asm volatile("# beginning down_read\n\t"                        \
-                    LOCK_PREFIX _ASM_INC "(%[sem])\n\t"                \
-                    /* adds 0x00000001 */                              \
-                    "  jns        1f\n"                                \
-                    "  call " slow_path "\n"                           \
-                    "1:\n\t"                                           \
-                    "# ending down_read\n\t"                           \
-                    : "+m" (sem->count), "=a" (ret),                   \
-                       ASM_CALL_CONSTRAINT                             \
-                    : [sem] "a" (sem)                                  \
-                    : "memory", "cc");                                 \
-       ret;                                                            \
-})
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       ____down_read(sem, "call_rwsem_down_read_failed");
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-       if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
-               return -EINTR;
-       return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_read_trylock(struct rw_semaphore *sem)
-{
-       long result, tmp;
-       asm volatile("# beginning __down_read_trylock\n\t"
-                    "  mov          %[count],%[result]\n\t"
-                    "1:\n\t"
-                    "  mov          %[result],%[tmp]\n\t"
-                    "  add          %[inc],%[tmp]\n\t"
-                    "  jle          2f\n\t"
-                    LOCK_PREFIX "  cmpxchg  %[tmp],%[count]\n\t"
-                    "  jnz          1b\n\t"
-                    "2:\n\t"
-                    "# ending __down_read_trylock\n\t"
-                    : [count] "+m" (sem->count), [result] "=&a" (result),
-                      [tmp] "=&r" (tmp)
-                    : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
-                    : "memory", "cc");
-       return result >= 0;
-}
-
-/*
- * lock for writing
- */
-#define ____down_write(sem, slow_path)                 \
-({                                                     \
-       long tmp;                                       \
-       struct rw_semaphore* ret;                       \
-                                                       \
-       asm volatile("# beginning down_write\n\t"       \
-                    LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"      \
-                    /* adds 0xffff0001, returns the old value */ \
-                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
-                    /* was the active mask 0 before? */\
-                    "  jz        1f\n"                 \
-                    "  call " slow_path "\n"           \
-                    "1:\n"                             \
-                    "# ending down_write"              \
-                    : "+m" (sem->count), [tmp] "=d" (tmp),     \
-                      "=a" (ret), ASM_CALL_CONSTRAINT  \
-                    : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
-                    : "memory", "cc");                 \
-       ret;                                            \
-})
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       ____down_write(sem, "call_rwsem_down_write_failed");
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-       if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
-               return -EINTR;
-
-       return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_write_trylock(struct rw_semaphore *sem)
-{
-       bool result;
-       long tmp0, tmp1;
-       asm volatile("# beginning __down_write_trylock\n\t"
-                    "  mov          %[count],%[tmp0]\n\t"
-                    "1:\n\t"
-                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
-                    /* was the active mask 0 before? */
-                    "  jnz          2f\n\t"
-                    "  mov          %[tmp0],%[tmp1]\n\t"
-                    "  add          %[inc],%[tmp1]\n\t"
-                    LOCK_PREFIX "  cmpxchg  %[tmp1],%[count]\n\t"
-                    "  jnz          1b\n\t"
-                    "2:\n\t"
-                    CC_SET(e)
-                    "# ending __down_write_trylock\n\t"
-                    : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
-                      [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
-                    : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
-                    : "memory");
-       return result;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       long tmp;
-       asm volatile("# beginning __up_read\n\t"
-                    LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"
-                    /* subtracts 1, returns the old value */
-                    "  jns        1f\n\t"
-                    "  call call_rwsem_wake\n" /* expects old value in %edx */
-                    "1:\n"
-                    "# ending __up_read\n"
-                    : "+m" (sem->count), [tmp] "=d" (tmp)
-                    : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
-                    : "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       long tmp;
-       asm volatile("# beginning __up_write\n\t"
-                    LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"
-                    /* subtracts 0xffff0001, returns the old value */
-                    "  jns        1f\n\t"
-                    "  call call_rwsem_wake\n" /* expects old value in %edx */
-                    "1:\n\t"
-                    "# ending __up_write\n"
-                    : "+m" (sem->count), [tmp] "=d" (tmp)
-                    : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
-                    : "memory", "cc");
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       asm volatile("# beginning __downgrade_write\n\t"
-                    LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
-                    /*
-                     * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
-                     *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
-                     */
-                    "  jns       1f\n\t"
-                    "  call call_rwsem_downgrade_wake\n"
-                    "1:\n\t"
-                    "# ending __downgrade_write\n"
-                    : "+m" (sem->count)
-                    : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
-                    : "memory", "cc");
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_X86_RWSEM_H */
index 140e61843a079e3da471783455414c6480574ac8..986652064b151e8e3cc265763f75fcf69d32651c 100644 (file)
@@ -23,7 +23,6 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 lib-y := delay.o misc.o cmdline.o cpu.o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
deleted file mode 100644 (file)
index dc2ab6e..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * x86 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-
-#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-
-#define __ASM_HALF_REG(reg)    __ASM_SEL(reg, e##reg)
-#define __ASM_HALF_SIZE(inst)  __ASM_SEL(inst##w, inst##l)
-
-#ifdef CONFIG_X86_32
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax which is either a return
- * value or just gets clobbered. Same is true for %edx so make sure GCC
- * reloads it after the slow path, by making it hold a temporary, for
- * example see ____down_write().
- */
-
-#define save_common_regs \
-       pushl %ecx
-
-#define restore_common_regs \
-       popl %ecx
-
-       /* Avoid uglifying the argument copying x86-64 needs to do. */
-       .macro movq src, dst
-       .endm
-
-#else
-
-/*
- * x86-64 rwsem wrappers
- *
- * This interfaces the inline asm code to the slow-path
- * C routines. We need to save the call-clobbered regs
- * that the asm does not mark as clobbered, and move the
- * argument from %rax to %rdi.
- *
- * NOTE! We don't need to save %rax, because the functions
- * will always return the semaphore pointer in %rax (which
- * is also the input argument to these helpers)
- *
- * The following can clobber %rdx because the asm clobbers it:
- *   call_rwsem_down_write_failed
- *   call_rwsem_wake
- * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
- */
-
-#define save_common_regs \
-       pushq %rdi; \
-       pushq %rsi; \
-       pushq %rcx; \
-       pushq %r8;  \
-       pushq %r9;  \
-       pushq %r10; \
-       pushq %r11
-
-#define restore_common_regs \
-       popq %r11; \
-       popq %r10; \
-       popq %r9; \
-       popq %r8; \
-       popq %rcx; \
-       popq %rsi; \
-       popq %rdi
-
-#endif
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_down_read_failed)
-       FRAME_BEGIN
-       save_common_regs
-       __ASM_SIZE(push,) %__ASM_REG(dx)
-       movq %rax,%rdi
-       call rwsem_down_read_failed
-       __ASM_SIZE(pop,) %__ASM_REG(dx)
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_down_read_failed)
-
-ENTRY(call_rwsem_down_read_failed_killable)
-       FRAME_BEGIN
-       save_common_regs
-       __ASM_SIZE(push,) %__ASM_REG(dx)
-       movq %rax,%rdi
-       call rwsem_down_read_failed_killable
-       __ASM_SIZE(pop,) %__ASM_REG(dx)
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_down_read_failed_killable)
-
-ENTRY(call_rwsem_down_write_failed)
-       FRAME_BEGIN
-       save_common_regs
-       movq %rax,%rdi
-       call rwsem_down_write_failed
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_down_write_failed)
-
-ENTRY(call_rwsem_down_write_failed_killable)
-       FRAME_BEGIN
-       save_common_regs
-       movq %rax,%rdi
-       call rwsem_down_write_failed_killable
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_down_write_failed_killable)
-
-ENTRY(call_rwsem_wake)
-       FRAME_BEGIN
-       /* do nothing if still outstanding active readers */
-       __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
-       jnz 1f
-       save_common_regs
-       movq %rax,%rdi
-       call rwsem_wake
-       restore_common_regs
-1:     FRAME_END
-       ret
-ENDPROC(call_rwsem_wake)
-
-ENTRY(call_rwsem_downgrade_wake)
-       FRAME_BEGIN
-       save_common_regs
-       __ASM_SIZE(push,) %__ASM_REG(dx)
-       movq %rax,%rdi
-       call rwsem_downgrade_wake
-       __ASM_SIZE(pop,) %__ASM_REG(dx)
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_downgrade_wake)
index 2d686ae54681d5a8991d22b69b1673f70e8522d1..33c51c064c77e83242e6dc62e3254af438b626d4 100644 (file)
@@ -21,14 +21,12 @@ obj-y += checksum_32.o syscalls_32.o
 obj-$(CONFIG_ELF_CORE) += elfcore.o
 
 subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
-subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o
 
 else
 
 obj-y += syscalls_64.o vdso/
 
-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
-               ../lib/rwsem.o
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o
 
 endif
 
index 3843198e03d4ba1b8772caeada4897d2cc1fbad1..4148090cafb00a05a6fe5c7158190eba803a79db 100644 (file)
@@ -25,7 +25,6 @@ generic-y += percpu.h
 generic-y += preempt.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += rwsem.h
 generic-y += sections.h
 generic-y += socket.h
 generic-y += topology.h
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
deleted file mode 100644 (file)
index 93e67a0..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_RWSEM_H
-#define _ASM_GENERIC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_64BIT
-# define RWSEM_ACTIVE_MASK             0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK             0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000L
-#define RWSEM_ACTIVE_BIAS              0x00000001L
-#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
-               rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
-               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-                       return -EINTR;
-       }
-
-       return 0;
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       while ((tmp = atomic_long_read(&sem->count)) >= 0) {
-               if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
-                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-                                            &sem->count);
-       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-               rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-                                            &sem->count);
-       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-                       return -EINTR;
-       return 0;
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
-                     RWSEM_ACTIVE_WRITE_BIAS);
-       return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic_long_dec_return_release(&sem->count);
-       if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
-               rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
-                                                   &sem->count) < 0))
-               rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       /*
-        * When downgrading from exclusive to shared ownership,
-        * anything inside the write-locked region cannot leak
-        * into the read side. In contrast, anything in the
-        * read-locked region is ok to be re-ordered into the
-        * write side. As such, rely on RELEASE semantics.
-        */
-       tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
-       if (tmp < 0)
-               rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_RWSEM_H */
index 67dbb57508b1f2824338b3169da8887a19cc551a..6e56006b2cb6b7017c712cc76fe6e2cbeb61e247 100644 (file)
@@ -57,15 +57,13 @@ extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
-/* Include the arch specific part */
-#include <asm/rwsem.h>
-
 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
        return atomic_long_read(&sem->count) != 0;
 }
 
+#define RWSEM_UNLOCKED_VALUE           0L
 #define __RWSEM_INIT_COUNT(name)       .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
 #endif
 
index 883cf1b92d9084f30a21f699211d6cd2ca3b9362..f17dad99eec8b76ca3e1b83963308ac6a4c10a7d 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 
+#include "rwsem.h"
+
 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
                        const char *name, struct lock_class_key *rwsem_key)
 {
index bad2bca0268b13f295c44201d15b3d34e25c1364..067e265fa5c1df5da7efef44554247c7d2260282 100644 (file)
 # define DEBUG_RWSEMS_WARN_ON(c)
 #endif
 
+/*
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
+ * Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+/*
+ * the semaphore definition
+ */
+#ifdef CONFIG_64BIT
+# define RWSEM_ACTIVE_MASK             0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK             0x0000ffffL
+#endif
+
+#define RWSEM_ACTIVE_BIAS              0x00000001L
+#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * All writes to owner are protected by WRITE_ONCE() to make sure that
@@ -132,3 +152,113 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
 {
 }
 #endif
+
+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
+               rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+                       return -EINTR;
+       }
+
+       return 0;
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+               if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
+                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+                                            &sem->count);
+       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+               rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+                                            &sem->count);
+       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+                       return -EINTR;
+       return 0;
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
+                     RWSEM_ACTIVE_WRITE_BIAS);
+       return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic_long_dec_return_release(&sem->count);
+       if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+               rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
+                                                   &sem->count) < 0))
+               rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       /*
+        * When downgrading from exclusive to shared ownership,
+        * anything inside the write-locked region cannot leak
+        * into the read side. In contrast, anything in the
+        * read-locked region is ok to be re-ordered into the
+        * write side. As such, rely on RELEASE semantics.
+        */
+       tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
+       if (tmp < 0)
+               rwsem_downgrade_wake(sem);
+}
+
+#endif /* CONFIG_RWSEM_XCHGADD_ALGORITHM */