#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
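/*
 * Note: lock_token and paca_index are adjacent u16 fields in the paca,
 * laid out (endian-dependently) so that the 32-bit load above picks up
 * 0x8000 in the upper halfword and this CPU's paca index in the lower
 * halfword, producing the 0x800000yy token with no runtime arithmetic.
 */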
41 | ||
f007cacf PM |
42 | #if defined(CONFIG_PPC64) && defined(CONFIG_SMP) |
43 | #define CLEAR_IO_SYNC (get_paca()->io_sync = 0) | |
44 | #define SYNC_IO do { \ | |
45 | if (unlikely(get_paca()->io_sync)) { \ | |
46 | mb(); \ | |
47 | get_paca()->io_sync = 0; \ | |
48 | } \ | |
49 | } while (0) | |
50 | #else | |
51 | #define CLEAR_IO_SYNC | |
52 | #define SYNC_IO | |
53 | #endif | |
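/*
 * CLEAR_IO_SYNC/SYNC_IO implement the MMIO-vs-spinlock ordering rule:
 * the I/O accessors set paca->io_sync after MMIO stores, and unlock
 * then issues a full mb() so that MMIO writes performed inside the
 * critical section cannot leak past the lock release.
 */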
54 | ||
41946c86 PX |
55 | #ifdef CONFIG_PPC_PSERIES |
56 | #define vcpu_is_preempted vcpu_is_preempted | |
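/*
 * The hypervisor increments lppaca.yield_count each time a virtual CPU
 * is dispatched and again when it is preempted, so an odd value means
 * the vCPU is currently preempted (not running on a physical CPU).
 */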
static inline bool vcpu_is_preempted(int cpu)
{
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
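/*
 * The trailing 1 in PPC_LWARX() sets the EH ("exclusive access") hint
 * on CPUs that support it, telling the core the reservation is part of
 * a lock acquisition; PPC_ACQUIRE_BARRIER on the success path provides
 * the acquire ordering the lock needs.
 */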
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

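/*
 * For example, if virtual CPU 5 holds a contended lock, the lock word
 * reads 0x80000005; __spin_yield() (arch/powerpc/lib/locks.c) extracts
 * the holder from that value and, on shared-processor pSeries, donates
 * the remaining timeslice to it via the H_CONFER hypercall.
 */
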
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

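/*
 * In the spin loops below, HMT_low() drops this hardware thread to low
 * SMT priority so that sibling threads on the same core get more issue
 * bandwidth while we busy-wait; HMT_medium() restores normal priority
 * once the lock looks free again.
 */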
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

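/*
 * Like arch_spin_lock(), but while waiting we restore the interrupt
 * state the caller saved in flags, so interrupts can be serviced
 * during the spin, and we disable them again before retrying the lock.
 */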
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	arch_spinlock_t lock_val;

	smp_mb();

	/*
	 * Atomically load and store back the lock value (unchanged).  This
	 * ensures that our observation of the lock value is ordered with
	 * respect to other lock operations.
	 */
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0, 0, %2, 0) "\n"
"	stwcx. %0, 0, %2\n"
"	bne- 1b\n"
	: "=&r" (lock_val), "+m" (*lock)
	: "r" (lock)
	: "cr0", "xer");

	if (arch_spin_value_unlocked(lock_val))
		goto out;

	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();

out:
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
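/*
 * For example, a writer may take the lock with write_lock_irqsave()
 * while interrupt handlers use plain read_lock(): a reader that
 * interrupts another reader on the same CPU still succeeds (the count
 * just goes up), and a reader can never interrupt the writer because
 * the writer runs with interrupts disabled.
 */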

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

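/*
 * The rwlock word is a signed count: 0 means unlocked, a positive
 * value is the number of active readers, and the negative WRLOCK_TOKEN
 * means write-locked.  On 64-bit the 32-bit word must be sign-extended
 * before the sign tests below work on a 64-bit register.
 */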
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
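/*
 * PPC405_ERR77() expands to a dcbt-based workaround for PPC405
 * erratum #77 ahead of the stwcx. when CONFIG_IBM405_ERR77 is set,
 * and to nothing on other CPUs.
 */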
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

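/*
 * Unlike arch_spin_unlock(), releasing a read lock cannot be a plain
 * store: other readers may be updating the count concurrently, so the
 * decrement must itself be a lwarx/stwcx. atomic sequence.
 */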
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */