/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
#ifdef CONFIG_64BIT

PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
#define __pv_queued_spin_unlock __pv_queued_spin_unlock
#define PV_UNLOCK               "__raw_callee_save___pv_queued_spin_unlock"
#define PV_UNLOCK_SLOWPATH      "__raw_callee_save___pv_queued_spin_unlock_slowpath"

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register saving thunk and the body of the following
 * C code:
 *
 * void __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *      u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *      if (likely(lockval == _Q_LOCKED_VAL))
 *              return;
 *      pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *   rdi = lock              (first argument)
 *   rsi = lockval           (second argument)
 *   rdx = internal variable (set to 0)
 */
asm    (".pushsection .text;"
        ".globl " PV_UNLOCK ";"
        ".type " PV_UNLOCK ", @function;"
        ".align 4,0x90;"
        PV_UNLOCK ": "
        FRAME_BEGIN
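        /*
         * Fast path: %rdx is clobbered below, so preserve it for the
         * caller.  Load _Q_LOCKED_VAL (1) into %eax as the expected old
         * value and 0 into %edx as the new value; cmpxchg then clears
         * lock->locked atomically if it still equals _Q_LOCKED_VAL and
         * leaves the old byte in %al either way.
         */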
        "push  %rdx;"
        "mov   $0x1,%eax;"
        "xor   %edx,%edx;"
        LOCK_PREFIX "cmpxchg %dl,(%rdi);"
        "cmp   $0x1,%al;"
        "jne   .slowpath;"
        "pop   %rdx;"
        FRAME_END
        "ret;"
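        /*
         * Slow path: the old lock value in %al was not _Q_LOCKED_VAL, so a
         * queued waiter may need to be kicked.  Preserve %rsi, pass the old
         * value as the second argument and call the slowpath thunk.
         */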
        ".slowpath: "
        "push   %rsi;"
        "movzbl %al,%esi;"
        "call " PV_UNLOCK_SLOWPATH ";"
        "pop    %rsi;"
        "pop    %rdx;"
        FRAME_END
        "ret;"
        ".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
        ".popsection");

#else /* CONFIG_64BIT */

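/*
 * On 32-bit the generic C implementation of __pv_queued_spin_unlock() is
 * used; only the callee-save register thunk is generated around it.
 */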
extern void __pv_queued_spin_unlock(struct qspinlock *lock);
PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);

#endif /* CONFIG_64BIT */
#endif