#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))
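
/*
 * Note: __ALT_SMP_ASM(smp, up) emits the SMP instruction and records
 * its address so that a kernel booted on a uniprocessor machine can
 * patch in the UP alternative.  A sketch of the idea (the real
 * definition lives in asm/processor.h; this is from memory, not
 * authoritative):
 *
 *	#define __ALT_SMP_ASM(smp, up)					\
 *		"9998:	" smp "\n"					\
 *		"	.pushsection \".alt.smp.init\", \"a\"\n"	\
 *		"	.long	9998b\n"				\
 *		"	" up "\n"					\
 *		"	.popsection\n"
 *
 * Each patch site is one fixed-size instruction slot, which is why the
 * Thumb-2 WFE() above must assemble to exactly 4 bytes.
 */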

static inline void dsb_sev(void)
{
	dsb(ishst);
	__asm__(SEV);
}
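
/*
 * dsb_sev() pairs with the wfe()-based waiting in the lock slowpaths:
 * the DSB completes the store that released the lock before the SEV
 * wakes any CPUs sleeping in WFE, so a woken waiter re-reading the
 * lock word can observe it unlocked.
 */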

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}
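
/*
 * The ldrex/strex loop above is an atomic fetch-and-add on the whole
 * lock word: it takes the current 'next' ticket and increments 'next'
 * in one atomic step, then spins (in wfe) until 'owner' reaches that
 * ticket.  A plain-C sketch of the same algorithm (illustrative only;
 * atomic_fetch_add_u32() is a hypothetical atomic RMW helper, not a
 * kernel API):
 *
 *	u32 old = atomic_fetch_add_u32(&lock->slock, 1 << TICKET_SHIFT);
 *	u16 ticket = ((arch_spinlock_t){ .slock = old }).tickets.next;
 *
 *	while (ACCESS_ONCE(lock->tickets.owner) != ticket)
 *		wfe();			// woken by dsb_sev() in unlock
 *	smp_mb();			// acquire ordering
 */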

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
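
/*
 * The "subs %1, %0, %0, ror #16" trick above compares the two 16-bit
 * halves of the lock word: rotating by 16 swaps 'owner' and 'next', so
 * the subtraction is zero exactly when owner == next, i.e. the lock is
 * free.  Equivalent C (a sketch, minus the atomicity that ldrex/strexeq
 * and the do { } while (res) retry loop provide):
 *
 *	arch_spinlock_t old = { .slock = ACCESS_ONCE(lock->slock) };
 *
 *	if (old.tickets.owner != old.tickets.next)
 *		return 0;	// contended: fail without spinning
 *	// otherwise take the next ticket; the strexeq retry handles races
 */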

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}
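
/*
 * The plain owner++ needs no ldrex/strex: only the lock holder ever
 * writes 'owner', and a waiter whose ldrex/strex on the full word races
 * with this halfword store will simply fail its strex and retry, since
 * the store clears the exclusive monitor.  dsb_sev() then wakes any
 * CPUs parked in wfe() in arch_spin_lock().
 */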

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}
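
/*
 * The rwlock word encodes both roles: bit 31 marks a writer, the low
 * bits count readers.  Possible states:
 *
 *	0x00000000	unlocked
 *	0x80000000	locked for write
 *	0x00000001..	locked for read (value == number of readers)
 *
 * arch_write_lock() therefore waits (in wfe) for the word to become
 * zero, then attempts to strex 0x80000000 into it.
 */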

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)	(ACCESS_ONCE((x)->lock) == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
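
/*
 * A plain-C sketch of the read-lock loop above (illustrative only;
 * cmpxchg_u32() stands in for the ldrex/strexpl pair and is not what
 * the kernel actually emits here):
 *
 *	for (;;) {
 *		u32 old = ACCESS_ONCE(rw->lock);
 *
 *		if ((s32)(old + 1) < 0) {	// writer holds bit 31
 *			wfe();			// wait for dsb_sev()
 *			continue;
 *		}
 *		if (cmpxchg_u32(&rw->lock, old, old + 1) == old)
 *			break;			// reader count bumped
 *	}
 *	smp_mb();				// acquire ordering
 */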

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)	(ACCESS_ONCE((x)->lock) < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */