#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
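
/*
 * Illustrative sketch (not part of this header, guarded out of the build):
 * the ticket scheme described above, written with C11 atomics and two
 * separate head/tail fields for clarity.  The real code below instead packs
 * both halves into lock->slock so a single locked xadd can take a ticket
 * and read the current head at once.  All example_* names are hypothetical.
 */
#if 0
#include <stdatomic.h>

struct example_ticket_lock {
	_Atomic unsigned short head;	/* ticket currently being served */
	_Atomic unsigned short tail;	/* next ticket to hand out */
};

static void example_ticket_lock_acquire(struct example_ticket_lock *lk)
{
	/* atomically note the tail and advance it: this is our ticket */
	unsigned short me = atomic_fetch_add(&lk->tail, 1);

	/* wait until the head reaches our ticket */
	while (atomic_load(&lk->head) != me)
		;	/* the real code uses rep;nop (cpu_relax) here */
}

static void example_ticket_lock_release(struct example_ticket_lock *lk)
{
	/* only the lock holder advances head, handing off to the next waiter */
	atomic_fetch_add(&lk->head, 1);
}
#endif
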
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}
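
/*
 * Rough C equivalent of the asm above (sketch only): the xadd both bumps
 * the tail (high byte of lock->slock) and returns the old value, whose
 * high byte is our ticket and whose low byte is the current head.
 *
 *	old = xadd(&lock->slock, 0x0100);
 *	my_ticket = old >> 8;
 *	while ((old & 0xff) != my_ticket) {
 *		cpu_relax();
 *		old = lock->slock;	// only the head byte is examined
 *	}
 */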

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
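
/*
 * Rough C equivalent of the trylock above (sketch only): the cmpxchg is
 * attempted only when head == tail, i.e. when the lock looks free, and it
 * can still fail if another CPU grabs a ticket in between.
 *
 *	old = lock->slock;
 *	if ((u8)old != (u8)(old >> 8))
 *		return 0;			// held or has waiters
 *	new = old + 0x0100;			// claim the next ticket
 *	return cmpxchg(&lock->slock, old, new) == old;
 */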

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
#define TICKET_SHIFT 16

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
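
/*
 * Worked example with TICKET_SHIFT == 8 and hypothetical values: if
 * lock->slock == 0x0503, the tail is 5 and the head is 3, so
 * ((tmp >> 8) ^ tmp) & 0xff == 6 (non-zero: locked) and
 * ((tmp >> 8) - tmp) & 0xff == 2 (greater than 1: the holder plus one
 * waiter, hence contended).  With slock == 0x0404 both halves match, so
 * the lock is free and uncontended.
 */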

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
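
/*
 * When CONFIG_PARAVIRT_SPINLOCKS is enabled, the arch_spin_*() wrappers
 * above are instead provided by <asm/paravirt.h>, which dispatches them
 * through pv_lock_ops so a hypervisor can substitute its own spinlock
 * implementation.
 */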

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
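
/*
 * Illustrative sketch (guarded out of the build, all example_* names
 * hypothetical): the biased-counter scheme used below, expressed with C11
 * atomics.  The counter starts at the bias; each reader takes 1 and a
 * writer takes the whole bias, so a non-negative result means "read lock
 * granted" and a zero result means "write lock granted".
 */
#if 0
#include <stdatomic.h>

#define EXAMPLE_RW_BIAS 0x01000000	/* stands in for RW_LOCK_BIAS */

static _Atomic int example_rw_counter = EXAMPLE_RW_BIAS;

static int example_read_trylock(void)
{
	/* take a reader share; going negative means a writer holds the lock */
	if (atomic_fetch_sub(&example_rw_counter, 1) - 1 >= 0)
		return 1;
	atomic_fetch_add(&example_rw_counter, 1);	/* back out and fail */
	return 0;
}

static int example_write_trylock(void)
{
	/* claim the whole bias; succeeds only if no reader or writer held it */
	if (atomic_fetch_sub(&example_rw_counter, EXAMPLE_RW_BIAS) == EXAMPLE_RW_BIAS)
		return 1;
	atomic_fetch_add(&example_rw_counter, EXAMPLE_RW_BIAS);	/* back out */
	return 0;
}
#endif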

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}
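
/*
 * Both lock routines above use the same fast-path/slow-path split: the
 * locked dec/sub succeeds immediately in the uncontended case, and only on
 * contention do they call the out-of-line __read_lock_failed /
 * __write_lock_failed helpers (implemented in arch/x86/lib), which undo
 * the change and spin until the lock can be retaken.
 */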

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */