arch/x86/include/asm/qspinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
        smp_store_release((u8 *)lock, 0);
}

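/*
 * With CONFIG_PARAVIRT_SPINLOCKS the slow lock path and the unlock are
 * routed through the paravirt ops below, so a hypervisor-aware
 * implementation (e.g. one that sleeps and kicks vCPUs instead of
 * spinning) can be used instead of the native queued code.
 * __raw_callee_save___pv_queued_spin_unlock is the register-preserving
 * (callee-save) form of the pv unlock used at the patched call sites.
 */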
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
        pv_queued_spin_unlock(lock);
}

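/*
 * vcpu_is_preempted() reports whether the vCPU backing @cpu is currently
 * preempted by the hypervisor; lock waiters and the scheduler can use it
 * to avoid busy-waiting on a CPU that is not actually running.
 */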
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
        native_queued_spin_unlock(lock);
}
#endif

#ifdef CONFIG_PARAVIRT
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

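/*
 * virt_spin_lock_key defaults to true; native_pv_lock_init() (implemented
 * outside this header) is expected to disable it on bare metal, so the
 * test-and-set fallback in virt_spin_lock() is only taken when running
 * under a hypervisor.
 */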
void native_pv_lock_init(void) __init;

#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
        if (!static_branch_likely(&virt_spin_lock_key))
                return false;

        /*
         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
         * back to a Test-and-Set spinlock, because fair locks have
         * horrible lock 'holder' preemption issues.
         */

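        /*
         * Spin with plain reads until the lock word looks free and only
         * then attempt the atomic cmpxchg; this keeps waiters from
         * repeatedly dirtying the lock cache line with failed atomics.
         */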
        do {
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;
}
#else
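/* Without CONFIG_PARAVIRT there is nothing to set up; provide an empty stub. */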
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */