#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /* arch_spin_is_contended */
#endif

/*
 * This barrier must provide two things:
 *
 *   - it must guarantee a STORE before the spin_lock() is ordered against a
 *     LOAD after it, see the comments at its two usage sites.
 *
 *   - it must ensure the critical section is RCsc.
 *
 * The latter is important for cases where we observe values written by other
 * CPUs in spin-loops, without barriers, while being subject to scheduling.
 *
 * CPU0			CPU1			CPU2
 *
 *			for (;;) {
 *			  if (READ_ONCE(X))
 *			    break;
 *			}
 * X=1
 *			<sched-out>
 *						<sched-in>
 *						r = X;
 *
 * without transitivity it could be that CPU1 observes X!=0 and breaks the
 * loop, we get migrated and CPU2 sees X==0.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
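
/*
 * Usage sketch (illustrative names, not part of this header): the _nested
 * variant tells lockdep that taking a second lock of the same lock class
 * in a fixed order is intentional, e.g. with SINGLE_DEPTH_NESTING from
 * <linux/lockdep.h>:
 *
 *	raw_spin_lock(&parent->lock);
 *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&child->lock);
 *	raw_spin_unlock(&parent->lock);
 */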
216 | ||
fb1c8f93 | 217 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
b8e6ec86 | 218 | |
c2f21ce2 | 219 | #define raw_spin_lock_irqsave(lock, flags) \ |
3f307891 SR |
220 | do { \ |
221 | typecheck(unsigned long, flags); \ | |
9c1721aa | 222 | flags = _raw_spin_lock_irqsave(lock); \ |
3f307891 | 223 | } while (0) |
cfd3ef23 AV |
224 | |
225 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
c2f21ce2 | 226 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
3f307891 SR |
227 | do { \ |
228 | typecheck(unsigned long, flags); \ | |
9c1721aa | 229 | flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ |
3f307891 | 230 | } while (0) |
cfd3ef23 | 231 | #else |
c2f21ce2 | 232 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
3f307891 SR |
233 | do { \ |
234 | typecheck(unsigned long, flags); \ | |
9c1721aa | 235 | flags = _raw_spin_lock_irqsave(lock); \ |
3f307891 | 236 | } while (0) |
cfd3ef23 AV |
237 | #endif |
238 | ||
1da177e4 | 239 | #else |
b8e6ec86 | 240 | |
c2f21ce2 | 241 | #define raw_spin_lock_irqsave(lock, flags) \ |
3f307891 SR |
242 | do { \ |
243 | typecheck(unsigned long, flags); \ | |
9c1721aa | 244 | _raw_spin_lock_irqsave(lock, flags); \ |
3f307891 | 245 | } while (0) |
ef12f109 | 246 | |
c2f21ce2 TG |
247 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
248 | raw_spin_lock_irqsave(lock, flags) | |
cfd3ef23 | 249 | |
1da177e4 LT |
250 | #endif |
251 | ||
#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

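/*
 * Usage sketch (illustrative lock, not part of this header): 'flags' must
 * be a plain unsigned long in the caller's frame; the typecheck() in the
 * lock/unlock pair above enforces this:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local interrupts disabled ...
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 */
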
#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

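/*
 * Usage sketch (illustrative object, not part of this header): the classic
 * refcount-release pattern, where the final decrement must tear the object
 * down under the lock that protects lookups:
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &obj->list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj->list_lock);
 *		kfree(obj);
 *	}
 */
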
#endif /* __LINUX_SPINLOCK_H */