/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif
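/*
 * Size note: "it <cond>" and "wfe<cond>.n" each assemble to one 16-bit
 * halfword, so the SMP alternative above is exactly 4 bytes, matching the
 * 4-byte "nop.w" that the SMP_ON_UP fixup patches in on UP systems.
 */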

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

static inline void dsb_sev(void)
{
	dsb(ishst);
	__asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

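/*
 * The lock word packs two 16-bit ticket counters (see asm/spinlock_types.h):
 * "next" in the half selected by TICKET_SHIFT and "owner" in the other.
 * As an illustrative sketch only, the acquire below behaves like:
 *
 *	lockval = fetch_and_add(&lock->slock, 1 << TICKET_SHIFT);
 *	while (lockval.tickets.next != lockval.tickets.owner)
 *		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
 *
 * i.e. grab a ticket, then spin (sleeping in wfe) until the owner count
 * reaches it.  fetch_and_add() here is a hypothetical stand-in for the
 * ldrex/strex retry loop in the asm.
 */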
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
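	/*
	 * "subs %1, %0, %0, ror #16" compares the two ticket halves by
	 * subtracting the lock word from itself rotated by 16 bits: the
	 * result is zero (eq) only when next == owner, i.e. unlocked, in
	 * which case the next ticket is taken with addeq/strexeq.
	 */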
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

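/*
 * Unlock never fails: bumping tickets.owner publishes the lock to the
 * holder of the next ticket, and dsb_sev() wakes any CPUs parked in
 * wfe() in arch_spin_lock() so they can re-read tickets.owner.
 */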
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

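/*
 * next - owner is the queue depth: 0 when unlocked, 1 when held with no
 * waiters, and greater than 1 when other CPUs are queued on tickets.
 */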
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

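/*
 * Layout, as implied by the code below: bit 31 of rw->lock is the writer
 * bit and bits 0-30 count active readers.  A writer requires the whole
 * word to be zero; readers only require bit 31 clear, i.e. a
 * non-negative value.
 */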
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
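/*
 * An illustrative sketch only, with ldrex()/strex() standing in for the
 * exclusive-access pair used in the asm below:
 *
 *	for (;;) {
 *		tmp = ldrex(&rw->lock) + 1;
 *		if (tmp >= 0 && strex(&rw->lock, tmp) == 0)
 *			break;
 *		if (tmp < 0)
 *			wfe();
 *	}
 *
 * A negative sum means a writer holds bit 31, so we wait in wfe; a failed
 * store means we raced another update and simply retry.
 */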
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"	.syntax unified\n"
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbspl	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

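	/*
	 * tmp holds the post-decrement count: zero means we were the last
	 * reader out, so wake any writer sleeping in WFE in
	 * arch_write_lock().
	 */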
	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

#endif /* __ASM_SPINLOCK_H */