/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
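/*
 * As a rough sketch (not the exact macro expansion; see
 * __PV_CALLEE_SAVE_REGS_THUNK() in asm/paravirt_types.h), the generated
 * 64-bit thunk preserves all caller-clobbered registers except the
 * return register %rax around the call:
 *
 *   __raw_callee_save___pv_queued_spin_unlock_slowpath:
 *        push %rcx; push %rdx; push %rsi; push %rdi
 *        push %r8;  push %r9;  push %r10; push %r11
 *        call __pv_queued_spin_unlock_slowpath
 *        pop  %r11; pop  %r10; pop  %r9;  pop  %r8
 *        pop  %rdi; pop  %rsi; pop  %rdx; pop  %rcx
 *        RET
 *
 * The equivalent i386 thunk only needs to push/pop %ecx.
 */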
#ifdef CONFIG_64BIT

__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
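/*
 * Defining __pv_queued_spin_unlock as a macro naming itself tells the
 * generic kernel/locking/qspinlock_paravirt.h code (which tests it with
 * #ifndef) that an architecture-specific implementation is provided
 * here, so the generic C version is not compiled in.
 */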
#define __pv_queued_spin_unlock __pv_queued_spin_unlock

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register-saving thunk and the body of the following
 * C code.  Note that it puts the code in the .spinlock.text section which
 * is equivalent to adding __lockfunc in the C code:
 *
 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *      u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *      if (likely(lockval == _Q_LOCKED_VAL))
 *              return;
 *      __pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *   rdi = lock              (first argument)
 *   rsi = lockval           (second argument)
 *   rdx = internal variable (set to 0)
 */
#define PV_UNLOCK_ASM                                                   \
        FRAME_BEGIN                                                     \
        "push  %rdx\n\t"                /* save %rdx, clobbered below */ \
        "mov   $0x1,%eax\n\t"           /* %al = _Q_LOCKED_VAL */       \
        "xor   %edx,%edx\n\t"           /* %dl = 0 (unlocked) */        \
        LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t" /* try 1 -> 0, old byte -> %al */ \
        "cmp   $0x1,%al\n\t"            /* was it _Q_LOCKED_VAL? */     \
        "jne   .slowpath\n\t"           /* lockval != _Q_LOCKED_VAL, take the slow path */ \
        "pop   %rdx\n\t"                                                \
        FRAME_END                                                       \
        ASM_RET                                                         \
        ".slowpath:\n\t"                                                \
        "push   %rsi\n\t"               /* save %rsi, needed for arg 2 */ \
        "movzbl %al,%esi\n\t"           /* %esi = lockval (second argument) */ \
        "call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"   \
        "pop    %rsi\n\t"                                               \
        "pop    %rdx\n\t"                                               \
        FRAME_END /* falls through to the ASM_RET emitted by DEFINE_ASM_FUNC() */

DEFINE_ASM_FUNC(__raw_callee_save___pv_queued_spin_unlock,
                PV_UNLOCK_ASM, .spinlock.text);

#else /* CONFIG_64BIT */

extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");

#endif /* CONFIG_64BIT */
#endif /* __ASM_QSPINLOCK_PARAVIRT_H */