#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif
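
/*
 * Both variants go through __ALT_SMP_ASM() (from <asm/processor.h>, included
 * above): the SMP instruction is emitted inline and the UP replacement is
 * recorded so the SMP_ON_UP fixup can patch it out when the kernel finds
 * itself booting on a uniprocessor system.
 */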

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

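/*
 * dsb_sev() is the wake-up side of the WFE-based waiting used below: the
 * dsb(ishst) makes the unlocking store visible to other CPUs before SEV
 * wakes any waiters parked in WFE.
 */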
static inline void dsb_sev(void)
{
	dsb(ishst);
	__asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
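
/*
 * Layout sketch: the 32-bit slock word is viewed as two 16-bit halves via
 * the tickets union (see <asm/spinlock_types.h>), with "owner" the ticket
 * currently being served and "next" the next ticket to hand out;
 * TICKET_SHIFT selects the "next" half (16, judging by the "ror #16" in
 * arch_spin_trylock() below).
 */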
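/*
 * Wait for any critical section in progress to complete: stop spinning once
 * the lock is free (owner == next) or the owner ticket has moved on from the
 * value sampled at entry.  smp_acquire__after_ctrl_dep() upgrades the control
 * dependency on the final read to acquire ordering.
 */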
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u16 owner = READ_ONCE(lock->tickets.owner);

	for (;;) {
		arch_spinlock_t tmp = READ_ONCE(*lock);

		if (tmp.tickets.owner == tmp.tickets.next ||
		    tmp.tickets.owner != owner)
			break;

		wfe();
	}
	smp_acquire__after_ctrl_dep();
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

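/*
 * Take a ticket: the ldrex/strex loop atomically adds 1 << TICKET_SHIFT to
 * slock (incrementing "next") while keeping the old value in lockval.  We
 * then spin, sleeping in WFE, until "owner" catches up with the ticket we
 * drew; smp_mb() provides the acquire barrier once the lock is held.
 */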
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

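/*
 * Trylock: "subs %1, %0, %0, ror #16" compares the two 16-bit halves of
 * slock by subtracting the rotated value, so "contended" is zero only when
 * owner == next (lock free).  In that case a ticket is taken with
 * addeq/strexeq; the outer loop retries only when the strex itself fails.
 */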
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

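/*
 * Release: only the lock holder ever writes "owner", so a plain increment
 * is sufficient.  smp_mb() orders the critical section before the release,
 * and dsb_sev() wakes any CPUs waiting in WFE.
 */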
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

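/*
 * Encoding sketch: bit 31 of rw->lock marks a writer (hence the 0x80000000
 * constants below) and the low bits count readers, so any value viewed as
 * negative means write-locked and 0 means completely free.
 */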
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)	(ACCESS_ONCE((x)->lock) == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this
 *    location.  If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
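/*
 * In the assembly below those steps are driven by condition codes: after
 * "adds", "mi" (negative) means a writer holds the lock, so WFE("mi") waits;
 * on "pl" the new count is stored with strexpl, and rsbpls turns a failed
 * store into a negative result, so "bmi 1b" retries in either case.
 */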
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

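/*
 * Dropping a read lock just decrements the reader count; dsb_sev() is issued
 * only when the count hits zero (tmp == 0), i.e. by the last reader, since
 * that is the only transition a waiting writer cares about.
 */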
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)	(ACCESS_ONCE((x)->lock) < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */