]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/mips/include/asm/spinlock.h
MIPS: Optimize spinlocks.
[mirror_ubuntu-bionic-kernel.git] / arch / mips / include / asm / spinlock.h
CommitLineData
1da177e4
LT
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
f65e4fa8 6 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
1da177e4
LT
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_SPINLOCK_H
10#define _ASM_SPINLOCK_H
11
2a31b033
RB
12#include <linux/compiler.h>
13
0004a9df 14#include <asm/barrier.h>
1da177e4
LT
15#include <asm/war.h>
16
17/*
18 * Your basic SMP spinlocks, allowing only a single CPU anywhere
2a31b033
RB
19 *
20 * Simple spin lock operations. There are two variants, one clears IRQ's
21 * on the local processor, one does not.
22 *
23 * These are fair FIFO ticket locks
24 *
25 * (the type definitions are in asm/spinlock_types.h)
1da177e4
LT
26 */
27
1da177e4
LT
28
29/*
2a31b033
RB
30 * Ticket locks are conceptually two parts, one indicating the current head of
31 * the queue, and the other indicating the current tail. The lock is acquired
32 * by atomically noting the tail and incrementing it by one (thus adding
33 * ourself to the queue and noting our position), then waiting until the head
34 * becomes equal to the initial value of the tail.
1da177e4
LT
35 */
36
0199c4e6 37static inline int arch_spin_is_locked(arch_spinlock_t *lock)
2a31b033 38{
500c2e1f 39 u32 counters = ACCESS_ONCE(lock->lock);
2a31b033 40
500c2e1f 41 return ((counters >> 16) ^ counters) & 0xffff;
2a31b033
RB
42}
43
0199c4e6
TG
/* Ticket locks ignore the flags argument; simply take the lock. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Busy-wait (with cpu_relax) until the lock is observed unlocked. */
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
2a31b033 47
0199c4e6 48static inline int arch_spin_is_contended(arch_spinlock_t *lock)
2a31b033 49{
500c2e1f 50 u32 counters = ACCESS_ONCE(lock->lock);
2a31b033 51
500c2e1f 52 return (((counters >> 16) - counters) & 0xffff) > 1;
2a31b033 53}
0199c4e6 54#define arch_spin_is_contended arch_spin_is_contended
2a31b033 55
0199c4e6 56static inline void arch_spin_lock(arch_spinlock_t *lock)
1da177e4 57{
2a31b033
RB
58 int my_ticket;
59 int tmp;
500c2e1f 60 int inc = 0x10000;
1da177e4
LT
61
62 if (R10000_LLSC_WAR) {
2a31b033 63 __asm__ __volatile__ (
0199c4e6 64 " .set push # arch_spin_lock \n"
2a31b033
RB
65 " .set noreorder \n"
66 " \n"
67 "1: ll %[ticket], %[ticket_ptr] \n"
500c2e1f 68 " addu %[my_ticket], %[ticket], %[inc] \n"
2a31b033
RB
69 " sc %[my_ticket], %[ticket_ptr] \n"
70 " beqzl %[my_ticket], 1b \n"
1da177e4 71 " nop \n"
500c2e1f
DD
72 " srl %[my_ticket], %[ticket], 16 \n"
73 " andi %[ticket], %[ticket], 0xffff \n"
74 " andi %[my_ticket], %[my_ticket], 0xffff \n"
2a31b033
RB
75 " bne %[ticket], %[my_ticket], 4f \n"
76 " subu %[ticket], %[my_ticket], %[ticket] \n"
77 "2: \n"
78 " .subsection 2 \n"
500c2e1f 79 "4: andi %[ticket], %[ticket], 0xffff \n"
0e6826c7 80 " sll %[ticket], 5 \n"
2a31b033
RB
81 " \n"
82 "6: bnez %[ticket], 6b \n"
83 " subu %[ticket], 1 \n"
84 " \n"
500c2e1f 85 " lhu %[ticket], %[serving_now_ptr] \n"
2a31b033
RB
86 " beq %[ticket], %[my_ticket], 2b \n"
87 " subu %[ticket], %[my_ticket], %[ticket] \n"
0e6826c7 88 " b 4b \n"
2a31b033
RB
89 " subu %[ticket], %[ticket], 1 \n"
90 " .previous \n"
91 " .set pop \n"
92 : [ticket_ptr] "+m" (lock->lock),
500c2e1f 93 [serving_now_ptr] "+m" (lock->h.serving_now),
2a31b033 94 [ticket] "=&r" (tmp),
500c2e1f
DD
95 [my_ticket] "=&r" (my_ticket)
96 : [inc] "r" (inc));
1da177e4 97 } else {
2a31b033 98 __asm__ __volatile__ (
0199c4e6 99 " .set push # arch_spin_lock \n"
2a31b033
RB
100 " .set noreorder \n"
101 " \n"
500c2e1f
DD
102 "1: ll %[ticket], %[ticket_ptr] \n"
103 " addu %[my_ticket], %[ticket], %[inc] \n"
2a31b033 104 " sc %[my_ticket], %[ticket_ptr] \n"
500c2e1f
DD
105 " beqz %[my_ticket], 1b \n"
106 " srl %[my_ticket], %[ticket], 16 \n"
107 " andi %[ticket], %[ticket], 0xffff \n"
108 " andi %[my_ticket], %[my_ticket], 0xffff \n"
2a31b033
RB
109 " bne %[ticket], %[my_ticket], 4f \n"
110 " subu %[ticket], %[my_ticket], %[ticket] \n"
111 "2: \n"
f65e4fa8 112 " .subsection 2 \n"
2a31b033 113 "4: andi %[ticket], %[ticket], 0x1fff \n"
0e6826c7 114 " sll %[ticket], 5 \n"
2a31b033
RB
115 " \n"
116 "6: bnez %[ticket], 6b \n"
117 " subu %[ticket], 1 \n"
118 " \n"
500c2e1f 119 " lhu %[ticket], %[serving_now_ptr] \n"
2a31b033
RB
120 " beq %[ticket], %[my_ticket], 2b \n"
121 " subu %[ticket], %[my_ticket], %[ticket] \n"
0e6826c7 122 " b 4b \n"
2a31b033 123 " subu %[ticket], %[ticket], 1 \n"
f65e4fa8 124 " .previous \n"
2a31b033
RB
125 " .set pop \n"
126 : [ticket_ptr] "+m" (lock->lock),
500c2e1f 127 [serving_now_ptr] "+m" (lock->h.serving_now),
2a31b033 128 [ticket] "=&r" (tmp),
500c2e1f
DD
129 [my_ticket] "=&r" (my_ticket)
130 : [inc] "r" (inc));
1da177e4 131 }
0004a9df 132
17099b11 133 smp_llsc_mb();
1da177e4
LT
134}
135
/*
 * Release a ticket spinlock by advancing serving_now so the next queued
 * ticket holder may proceed.  No atomic op is needed: only the current
 * lock owner ever stores to the serving_now halfword.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	/* Order all critical-section stores before publishing the release. */
	wmb();
	lock->h.serving_now = (u16)serving_now;
	/* Push the store out promptly so spinning waiters see it sooner. */
	nudge_writes();
}
143
/*
 * Try to acquire the ticket lock without queueing.
 *
 * Succeeds only if the lock is currently free, i.e. the next-ticket
 * (high) and serving_now (low) halfwords match; in that case the ticket
 * halfword is bumped with ll/sc.  Returns nonzero on success, 0 if the
 * lock was held (no ticket is taken in that case, so we never queue).
 *
 * The two variants differ only in the sc-retry branch: R10000_LLSC_WAR
 * cores need the branch-likely form (beqzl).
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;	/* adds 1 to the ticket (high) halfword */

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	/* Acquire barrier on the success path (harmless on failure). */
	smp_llsc_mb();

	return tmp;
}
205
206/*
207 * Read-write spinlocks, allowing multiple readers but only one writer.
208 *
209 * NOTE! it is quite common to have readers in interrupts but no interrupt
210 * writers. For those circumstances we can "mix" irq-safe locks - any writer
211 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
212 * read-locks.
213 */
214
e3c48078
RB
/*
 * arch_read_can_lock - would read_trylock() succeed?
 * @rw: the rwlock in question.
 *
 * Readers may enter while the sign bit (the writer flag) is clear.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * arch_write_can_lock - would write_trylock() succeed?
 * @rw: the rwlock in question.
 *
 * A writer needs the whole word to be zero: no readers, no writer.
 */
#define arch_write_can_lock(rw)	(!(rw)->lock)
e3c48078 226
/*
 * Take the rwlock for reading: spin while a writer holds it (lock word
 * negative, i.e. sign bit set), then atomically increment the reader
 * count with ll/sc.  The non-R10000 variant keeps the contended wait
 * loop out of line in .subsection 2.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	 addu	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	/* Acquire barrier after a successful sc. */
	smp_llsc_mb();
}
268
/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	/* Release ordering: critical-section accesses before the ll/sc. */
	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}
305
/*
 * Take the rwlock for writing: spin until the lock word is zero (no
 * readers and no writer), then atomically store the writer flag
 * (0x80000000, built with lui in the delay slot) with ll/sc.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	/* Acquire barrier after a successful sc. */
	smp_llsc_mb();
}
347
/*
 * Release the write lock: a plain word store of zero suffices because
 * the writer holds the lock exclusively (no ll/sc needed).
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/* Full barrier: critical-section accesses before the releasing store. */
	smp_mb();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
359
/*
 * Try to take the rwlock for reading.  Fails immediately (returns 0) if
 * a writer holds the lock (lock word negative); otherwise increments the
 * reader count with ll/sc and returns 1.  An sc failure retries the
 * whole attempt from the ll.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		/*
		 * NOTE(review): .set reorder before the beqzl looks odd but is
		 * kept byte-identical; the assembler then schedules the delay
		 * slot itself.
		 */
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
1da177e4 403
/*
 * Try to take the rwlock for writing.  Fails immediately (returns 0) if
 * the lock word is nonzero (readers or a writer present); otherwise
 * stores the writer flag (0x80000000) with ll/sc and returns 1.  An sc
 * failure retries the whole attempt from the ll.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	 li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
450
e5931943
TG
/* rwlocks on MIPS ignore the flags argument. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Back-off hints used by generic code while spinning on a contended lock. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
ef6edc97 457
1da177e4 458#endif /* _ASM_SPINLOCK_H */