/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

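/*
 * x86 glue for the generic queued spinlock (qspinlock): this header
 * supplies the native unlock fast path and, under paravirt, hooks that
 * let a hypervisor-aware implementation replace the slow paths.
 */
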
#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
        smp_store_release((u8 *)lock, 0);
}
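
/*
 * The byte store above is sufficient because the locked byte occupies
 * bits 0-7 of the lock word on (little-endian) x86, so clearing it
 * releases the lock without disturbing the pending and tail fields.
 */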

#ifdef CONFIG_PARAVIRT_SPINLOCKS
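/*
 * Under CONFIG_PARAVIRT_SPINLOCKS the slow paths go through the
 * paravirt ops, so a hypervisor guest (e.g. KVM or Xen) can install
 * implementations that halt a waiting vCPU and kick it on release
 * instead of letting it spin; the native slowpath is the default.
 */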
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
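/*
 * __raw_callee_save___pv_queued_spin_unlock is a register-preserving
 * thunk around __pv_queued_spin_unlock, so the patched-in unlock call
 * does not force callers to spill registers.
 */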

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
        pv_queued_spin_unlock(lock);
}

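/*
 * vcpu_is_preempted() reports whether the given CPU's vCPU has been
 * scheduled out by the hypervisor; optimistic-spinning code (e.g.
 * mutex and rwsem spinners, the scheduler) uses it to avoid wasting
 * cycles spinning on a preempted owner.
 */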
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
        native_queued_spin_unlock(lock);
}
#endif

#ifdef CONFIG_PARAVIRT
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                return false;

        /*
         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
         * back to a Test-and-Set spinlock, because fair locks have
         * horrible lock 'holder' preemption issues.
         */

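        /*
         * Test-and-test-and-set: wait with plain reads (keeping the
         * cacheline shared) and only attempt the atomic cmpxchg once
         * the lock looks free.
         */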
        do {
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;
}
#endif /* CONFIG_PARAVIRT */

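/*
 * The generic header supplies queued_spin_lock() and friends on top of
 * the arch definitions above, honouring the queued_spin_unlock and
 * virt_spin_lock overrides.
 */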
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */