#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                       \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __raw_spin_lock_init((lock), #lock, &__key);    \
} while (0)

#else
# define raw_spin_lock_init(lock)                       \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
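
/*
 * Example (an illustrative sketch, not part of this header's API): a
 * raw_spinlock_t is initialized once before first use. The structure and
 * function names below are hypothetical.
 *
 *      struct foo {
 *              raw_spinlock_t lock;
 *      };
 *
 *      static void foo_setup(struct foo *f)
 *      {
 *              raw_spin_lock_init(&f->lock);
 *      }
 */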
106 | ||
107 | #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) | |
108 | ||
109 | #ifdef CONFIG_GENERIC_LOCKBREAK | |
110 | #define raw_spin_is_contended(lock) ((lock)->break_lock) | |
111 | #else | |
112 | ||
113 | #ifdef arch_spin_is_contended | |
114 | #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) | |
115 | #else | |
116 | #define raw_spin_is_contended(lock) (((void)(lock), 0)) | |
117 | #endif /*arch_spin_is_contended*/ | |
118 | #endif | |
119 | ||
120 | /* | |
121 | * This barrier must provide two things: | |
122 | * | |
123 | * - it must guarantee a STORE before the spin_lock() is ordered against a | |
124 | * LOAD after it, see the comments at its two usage sites. | |
125 | * | |
126 | * - it must ensure the critical section is RCsc. | |
127 | * | |
128 | * The latter is important for cases where we observe values written by other | |
129 | * CPUs in spin-loops, without barriers, while being subject to scheduling. | |
130 | * | |
131 | * CPU0 CPU1 CPU2 | |
132 | * | |
133 | * for (;;) { | |
134 | * if (READ_ONCE(X)) | |
135 | * break; | |
136 | * } | |
137 | * X=1 | |
138 | * <sched-out> | |
139 | * <sched-in> | |
140 | * r = X; | |
141 | * | |
142 | * without transitivity it could be that CPU1 observes X!=0 breaks the loop, | |
143 | * we get migrated and CPU2 sees X==0. | |
144 | * | |
145 | * Since most load-store architectures implement ACQUIRE with an smp_mb() after | |
146 | * the LL/SC loop, they need no further barriers. Similarly all our TSO | |
147 | * architectures imply an smp_mb() for each atomic instruction and equally don't | |
148 | * need more. | |
149 | * | |
150 | * Architectures that can implement ACQUIRE better need to take care. | |
151 | */ | |
152 | #ifndef smp_mb__after_spinlock | |
153 | #define smp_mb__after_spinlock() do { } while (0) | |
154 | #endif | |
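
/*
 * Example (an illustrative sketch; names are hypothetical): a classic
 * store-then-check sequence where a STORE issued before taking the lock
 * must be ordered against a LOAD issued after it:
 *
 *      WRITE_ONCE(flag, 1);
 *      spin_lock(&lock);
 *      smp_mb__after_spinlock();       // upgrade ACQUIRE to a full barrier
 *      if (READ_ONCE(other_flag))
 *              ...
 *
 * Without the barrier, the ACQUIRE ordering of spin_lock() alone does not
 * forbid the STORE to flag from being reordered after the LOAD of
 * other_flag.
 */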
155 | ||
156 | #ifdef CONFIG_DEBUG_SPINLOCK | |
157 | extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); | |
158 | #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) | |
159 | extern int do_raw_spin_trylock(raw_spinlock_t *lock); | |
160 | extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); | |
161 | #else | |
162 | static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) | |
163 | { | |
164 | __acquire(lock); | |
165 | arch_spin_lock(&lock->raw_lock); | |
166 | } | |
167 | ||
168 | static inline void | |
169 | do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) | |
170 | { | |
171 | __acquire(lock); | |
172 | arch_spin_lock_flags(&lock->raw_lock, *flags); | |
173 | } | |
174 | ||
175 | static inline int do_raw_spin_trylock(raw_spinlock_t *lock) | |
176 | { | |
177 | return arch_spin_trylock(&(lock)->raw_lock); | |
178 | } | |
179 | ||
180 | static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) | |
181 | { | |
182 | arch_spin_unlock(&lock->raw_lock); | |
183 | __release(lock); | |
184 | } | |
185 | #endif | |
186 | ||
187 | /* | |
188 | * Define the various spin_lock methods. Note we define these | |
189 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The | |
190 | * various methods are defined as nops in the case they are not | |
191 | * required. | |
192 | */ | |
193 | #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) | |
194 | ||
195 | #define raw_spin_lock(lock) _raw_spin_lock(lock) | |
196 | ||
197 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
198 | # define raw_spin_lock_nested(lock, subclass) \ | |
199 | _raw_spin_lock_nested(lock, subclass) | |
200 | ||
201 | # define raw_spin_lock_nest_lock(lock, nest_lock) \ | |
202 | do { \ | |
203 | typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ | |
204 | _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ | |
205 | } while (0) | |
206 | #else | |
207 | /* | |
208 | * Always evaluate the 'subclass' argument to avoid that the compiler | |
209 | * warns about set-but-not-used variables when building with | |
210 | * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. | |
211 | */ | |
212 | # define raw_spin_lock_nested(lock, subclass) \ | |
213 | _raw_spin_lock(((void)(subclass), (lock))) | |
214 | # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) | |
215 | #endif | |
216 | ||
217 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | |
218 | ||
219 | #define raw_spin_lock_irqsave(lock, flags) \ | |
220 | do { \ | |
221 | typecheck(unsigned long, flags); \ | |
222 | flags = _raw_spin_lock_irqsave(lock); \ | |
223 | } while (0) | |
224 | ||
225 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
226 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ | |
227 | do { \ | |
228 | typecheck(unsigned long, flags); \ | |
229 | flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ | |
230 | } while (0) | |
231 | #else | |
232 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ | |
233 | do { \ | |
234 | typecheck(unsigned long, flags); \ | |
235 | flags = _raw_spin_lock_irqsave(lock); \ | |
236 | } while (0) | |
237 | #endif | |
238 | ||
239 | #else | |
240 | ||
241 | #define raw_spin_lock_irqsave(lock, flags) \ | |
242 | do { \ | |
243 | typecheck(unsigned long, flags); \ | |
244 | _raw_spin_lock_irqsave(lock, flags); \ | |
245 | } while (0) | |
246 | ||
247 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ | |
248 | raw_spin_lock_irqsave(lock, flags) | |
249 | ||
250 | #endif | |
251 | ||
252 | #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) | |
253 | #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) | |
254 | #define raw_spin_unlock(lock) _raw_spin_unlock(lock) | |
255 | #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) | |
256 | ||
257 | #define raw_spin_unlock_irqrestore(lock, flags) \ | |
258 | do { \ | |
259 | typecheck(unsigned long, flags); \ | |
260 | _raw_spin_unlock_irqrestore(lock, flags); \ | |
261 | } while (0) | |
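
/*
 * Example (an illustrative sketch): raw_spin_lock_irqsave() must be paired
 * with raw_spin_unlock_irqrestore() using the same flags variable, which
 * must have type unsigned long (enforced by typecheck() above):
 *
 *      unsigned long flags;
 *
 *      raw_spin_lock_irqsave(&lock, flags);
 *      ...     // critical section, local interrupts disabled
 *      raw_spin_unlock_irqrestore(&lock, flags);
 */
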
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
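
/*
 * Example (an illustrative sketch): the trylock variants return 1 on
 * success and 0 on failure, and only leave interrupts disabled on success:
 *
 *      unsigned long flags;
 *
 *      if (raw_spin_trylock_irqsave(&lock, flags)) {
 *              ...     // got the lock, local interrupts disabled
 *              raw_spin_unlock_irqrestore(&lock, flags);
 *      } else {
 *              ...     // lock was busy; interrupt state is unchanged
 *      }
 */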
280 | ||
281 | /** | |
282 | * raw_spin_can_lock - would raw_spin_trylock() succeed? | |
283 | * @lock: the spinlock in question. | |
284 | */ | |
285 | #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) | |
286 | ||
287 | /* Include rwlock functions */ | |
288 | #include <linux/rwlock.h> | |
289 | ||
290 | /* | |
291 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: | |
292 | */ | |
293 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | |
294 | # include <linux/spinlock_api_smp.h> | |
295 | #else | |
296 | # include <linux/spinlock_api_up.h> | |
297 | #endif | |
298 | ||
299 | /* | |
300 | * Map the spin_lock functions to the raw variants for PREEMPT_RT=n | |
301 | */ | |
302 | ||
303 | static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) | |
304 | { | |
305 | return &lock->rlock; | |
306 | } | |
307 | ||
308 | #define spin_lock_init(_lock) \ | |
309 | do { \ | |
310 | spinlock_check(_lock); \ | |
311 | raw_spin_lock_init(&(_lock)->rlock); \ | |
312 | } while (0) | |
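
/*
 * Example (an illustrative sketch): dynamic initialization of an embedded
 * spinlock_t; for locks with static storage, DEFINE_SPINLOCK() from
 * linux/spinlock_types.h is the usual alternative. The structure below is
 * hypothetical.
 *
 *      struct device_state {
 *              spinlock_t lock;
 *      };
 *
 *      static void device_state_init(struct device_state *s)
 *      {
 *              spin_lock_init(&s->lock);
 *      }
 */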
313 | ||
314 | static __always_inline void spin_lock(spinlock_t *lock) | |
315 | { | |
316 | raw_spin_lock(&lock->rlock); | |
317 | } | |
318 | ||
319 | static __always_inline void spin_lock_bh(spinlock_t *lock) | |
320 | { | |
321 | raw_spin_lock_bh(&lock->rlock); | |
322 | } | |
323 | ||
324 | static __always_inline int spin_trylock(spinlock_t *lock) | |
325 | { | |
326 | return raw_spin_trylock(&lock->rlock); | |
327 | } | |
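
/*
 * Example (an illustrative sketch): spin_trylock() returns nonzero on
 * success, so a caller that must not spin can fall back to other work:
 *
 *      if (spin_trylock(&s->lock)) {
 *              ...     // got the lock
 *              spin_unlock(&s->lock);
 *      } else {
 *              ...     // lock busy, try again later
 *      }
 */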
328 | ||
329 | #define spin_lock_nested(lock, subclass) \ | |
330 | do { \ | |
331 | raw_spin_lock_nested(spinlock_check(lock), subclass); \ | |
332 | } while (0) | |
333 | ||
334 | #define spin_lock_nest_lock(lock, nest_lock) \ | |
335 | do { \ | |
336 | raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ | |
337 | } while (0) | |
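
/*
 * Example (an illustrative sketch): lockdep normally treats all instances
 * of one lock class as the same lock, so nesting two locks of the same
 * class triggers a false deadlock report. spin_lock_nested() annotates a
 * legitimate nesting with a subclass; SINGLE_DEPTH_NESTING comes from
 * linux/lockdep.h:
 *
 *      spin_lock(&parent->lock);
 *      spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *      ...
 *      spin_unlock(&child->lock);
 *      spin_unlock(&parent->lock);
 */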
338 | ||
339 | static __always_inline void spin_lock_irq(spinlock_t *lock) | |
340 | { | |
341 | raw_spin_lock_irq(&lock->rlock); | |
342 | } | |
343 | ||
344 | #define spin_lock_irqsave(lock, flags) \ | |
345 | do { \ | |
346 | raw_spin_lock_irqsave(spinlock_check(lock), flags); \ | |
347 | } while (0) | |
348 | ||
349 | #define spin_lock_irqsave_nested(lock, flags, subclass) \ | |
350 | do { \ | |
351 | raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ | |
352 | } while (0) | |
353 | ||
354 | static __always_inline void spin_unlock(spinlock_t *lock) | |
355 | { | |
356 | raw_spin_unlock(&lock->rlock); | |
357 | } | |
358 | ||
359 | static __always_inline void spin_unlock_bh(spinlock_t *lock) | |
360 | { | |
361 | raw_spin_unlock_bh(&lock->rlock); | |
362 | } | |
363 | ||
364 | static __always_inline void spin_unlock_irq(spinlock_t *lock) | |
365 | { | |
366 | raw_spin_unlock_irq(&lock->rlock); | |
367 | } | |
368 | ||
369 | static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | |
370 | { | |
371 | raw_spin_unlock_irqrestore(&lock->rlock, flags); | |
372 | } | |
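
/*
 * Example (an illustrative sketch): the canonical pattern for a critical
 * section that may race with an interrupt handler on the same CPU:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&s->lock, flags);
 *      ...     // critical section, safe against local interrupts
 *      spin_unlock_irqrestore(&s->lock, flags);
 *
 * Note that flags is passed by name, not by address: spin_lock_irqsave()
 * is a macro precisely so it can assign to the caller's variable.
 */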
373 | ||
374 | static __always_inline int spin_trylock_bh(spinlock_t *lock) | |
375 | { | |
376 | return raw_spin_trylock_bh(&lock->rlock); | |
377 | } | |
378 | ||
379 | static __always_inline int spin_trylock_irq(spinlock_t *lock) | |
380 | { | |
381 | return raw_spin_trylock_irq(&lock->rlock); | |
382 | } | |
383 | ||
384 | #define spin_trylock_irqsave(lock, flags) \ | |
385 | ({ \ | |
386 | raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ | |
387 | }) | |
388 | ||
389 | static __always_inline int spin_is_locked(spinlock_t *lock) | |
390 | { | |
391 | return raw_spin_is_locked(&lock->rlock); | |
392 | } | |
393 | ||
394 | static __always_inline int spin_is_contended(spinlock_t *lock) | |
395 | { | |
396 | return raw_spin_is_contended(&lock->rlock); | |
397 | } | |
398 | ||
399 | static __always_inline int spin_can_lock(spinlock_t *lock) | |
400 | { | |
401 | return raw_spin_can_lock(&lock->rlock); | |
402 | } | |
403 | ||
404 | #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) | |
405 | ||
406 | /* | |
407 | * Pull the atomic_t declaration: | |
408 | * (asm-mips/atomic.h needs above definitions) | |
409 | */ | |
410 | #include <linux/atomic.h> | |
411 | /** | |
412 | * atomic_dec_and_lock - lock on reaching reference count zero | |
413 | * @atomic: the atomic counter | |
414 | * @lock: the spinlock in question | |
415 | * | |
416 | * Decrements @atomic by 1. If the result is 0, returns true and locks | |
417 | * @lock. Returns false for all other cases. | |
418 | */ | |
419 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | |
420 | #define atomic_dec_and_lock(atomic, lock) \ | |
421 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) | |
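
/*
 * Example (an illustrative sketch of the classic refcount-release pattern;
 * the structure and its fields are hypothetical):
 *
 *      if (atomic_dec_and_lock(&obj->refcount, &obj->lock)) {
 *              // The count hit zero and obj->lock is now held, so the
 *              // object can be unlinked and freed without racing a
 *              // concurrent lookup that takes a new reference under
 *              // the same lock.
 *              list_del(&obj->list);
 *              spin_unlock(&obj->lock);
 *              kfree(obj);
 *      }
 */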
422 | ||
423 | #endif /* __LINUX_SPINLOCK_H */ |