x86/spinlocks/paravirt: Fix memory corruption on unlock
arch/x86/include/asm/spinlock.h
#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
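
/*
 * For reference, a rough sketch of the lock layout; the authoritative
 * definitions live in asm/spinlock_types.h.  The lock word is a union
 * of the two tickets and their combined value.  Paravirt builds
 * reserve the low bit of a ticket as TICKET_SLOWPATH_FLAG, so
 * TICKET_LOCK_INC is 2 there and 1 otherwise:
 *
 *      typedef struct arch_spinlock {
 *              union {
 *                      __ticketpair_t head_tail;
 *                      struct __raw_tickets {
 *                              __ticket_t head, tail;
 *                      } tickets;
 *              };
 *      } arch_spinlock_t;
 */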

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD  (1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

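/*
 * A waiter that has spun for SPIN_THRESHOLD iterations sets
 * TICKET_SLOWPATH_FLAG (the low bit of head) before blocking, so that
 * the eventual unlocker knows it has to kick a blocked CPU.
 */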
static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
        set_bit(0, (volatile unsigned long *)&lock->tickets.head);
}

#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
                                                        __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
                                                        __ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */
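
/*
 * Ticket comparison that ignores TICKET_SLOWPATH_FLAG: the flag can be
 * set in head but is never part of a ticket number.
 */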
static inline int  __tickets_equal(__ticket_t one, __ticket_t two)
{
        return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

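/*
 * Called by the new lock holder when the head it just read has the
 * slowpath flag set.  Clear the flag, but only if nobody else is
 * queued: the cmpxchg succeeds only when tail is exactly one ticket
 * (TICKET_LOCK_INC) ahead of the cleared head.  On failure there is
 * still contention, so the flag is left alone.
 */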
static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
                                                        __ticket_t head)
{
        if (head & TICKET_SLOWPATH_FLAG) {
                arch_spinlock_t old, new;

                old.tickets.head = head;
                new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
                old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
                new.tickets.tail = old.tickets.tail;

                /* try to clear slowpath flag when there are no contenders */
                cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
        }
}

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return __tickets_equal(lock.tickets.head, lock.tickets.tail);
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
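/*
 * Fast path: the xadd takes our ticket and samples head in one atomic
 * step.  Slow path: spin for up to SPIN_THRESHOLD iterations, then ask
 * the paravirt backend (via __ticket_lock_spinning()) to halt this CPU
 * until the unlocker kicks it.
 */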
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

        inc = xadd(&lock->tickets, inc);
        if (likely(inc.head == inc.tail))
                goto out;

        for (;;) {
                unsigned count = SPIN_THRESHOLD;

                do {
                        inc.head = READ_ONCE(lock->tickets.head);
                        if (__tickets_equal(inc.head, inc.tail))
                                goto clear_slowpath;
                        cpu_relax();
                } while (--count);
                __ticket_lock_spinning(lock, inc.tail);
        }
clear_slowpath:
        __ticket_check_and_clear_slowpath(lock, inc.head);
out:
        barrier();      /* make sure nothing creeps before the lock is taken */
}

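/*
 * Take the lock only if it is free: one cmpxchg over the combined
 * head_tail word both claims the next ticket (tail sits in the high
 * half, hence TICKET_SHIFT) and clears the slowpath flag.  Any
 * concurrent update makes the cmpxchg fail and the trylock return 0.
 */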
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        arch_spinlock_t old, new;

        old.tickets = READ_ONCE(lock->tickets);
        if (!__tickets_equal(old.tickets.head, old.tickets.tail))
                return 0;

        new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
        new.head_tail &= ~TICKET_SLOWPATH_FLAG;

        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

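/*
 * The paravirt unlock must not touch the lock word after the releasing
 * increment, since the new owner may free the memory containing the
 * lock immediately.  The xadd() bumps head and returns its previous
 * value in one atomic step, so TICKET_SLOWPATH_FLAG is sampled
 * together with the unlock itself - this is the fix for the
 * unlock-time memory corruption.
 */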
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        if (TICKET_SLOWPATH_FLAG &&
                static_key_false(&paravirt_ticketlocks_enabled)) {
                __ticket_t head;

                BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

                head = xadd(&lock->tickets.head, TICKET_LOCK_INC);

                if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
                        head &= ~TICKET_SLOWPATH_FLAG;
                        __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
                }
        } else
                __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);

        return !__tickets_equal(tmp.tail, tmp.head);
}

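/*
 * Contended means someone is queued behind the holder: tail is more
 * than one ticket ahead of head.  Mask the slowpath flag out of head
 * first, since it is not part of the ticket count.
 */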
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);

        tmp.head &= ~TICKET_SLOWPATH_FLAG;
        return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
{
        arch_spin_lock(lock);
}

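/*
 * Spin until the lock has been released at least once: we loop while
 * the originally sampled owner still holds it, and return as soon as
 * the lock is free or head has moved on, even if a new owner took it
 * in the meantime.
 */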
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        __ticket_t head = READ_ONCE(lock->tickets.head);

        for (;;) {
                struct __raw_tickets tmp = READ_ONCE(lock->tickets);
                /*
                 * We need to check "unlocked" in a loop, tmp.head == head
                 * can be false positive because of overflow.
                 */
                if (__tickets_equal(tmp.head, tmp.tail) ||
                                !__tickets_equal(tmp.head, head))
                        break;

                cpu_relax();
        }
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks using the generic qrwlock with
 * x86 specific optimization.
 */
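/*
 * For instance, under that rule a writer takes write_lock_irqsave(),
 * while a reader that may run in interrupt context can still use plain
 * read_lock(): a reader interrupting another reader makes progress, so
 * no self-deadlock is possible.
 */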

#include <asm/qrwlock.h>

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */