#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /* arch_spin_is_contended */
#endif

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * can not be reordered with a LOAD inside this section.
 * spin_lock() is the one-way barrier, this LOAD can not escape out
 * of the region. So the default implementation simply ensures that
 * a STORE can not move into the critical section, smp_wmb() should
 * serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()       smp_wmb()
#endif

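/*
 * Illustrative sketch (editor's note, not part of the original header):
 * the guarantee above is that a STORE issued before spin_lock() cannot be
 * reordered with a LOAD performed inside the critical section.  A
 * hypothetical caller relying on it might look like:
 *
 *	WRITE_ONCE(x, 1);		// STORE before the critical section
 *	smp_mb__before_spinlock();	// keep the LOAD below behind the STORE
 *	spin_lock(&lock);
 *	r = READ_ONCE(y);		// LOAD inside the section, must not be
 *	spin_unlock(&lock);		// reordered before the STORE to x
 *
 * x, y, r and lock are made-up names used only for illustration.
 */
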
/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock)     __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock)   __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
# define raw_spin_lock_nested(lock, subclass)           _raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

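/*
 * Illustrative sketch (editor's note, not part of the original header):
 * because raw_spin_lock_irqsave() is a macro that assigns to @flags and
 * typechecks it, callers must pass a plain "unsigned long" variable by
 * name, not a pointer or a narrower type, e.g.:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	// ... critical section, local interrupts disabled ...
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 *
 * my_lock is a made-up raw_spinlock_t used only for illustration.
 */
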
#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)         \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_unlock_irqrestore(lock, flags); \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)

static inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
        raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
        return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

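/*
 * Illustrative sketch (editor's note, not part of the original header):
 * typical use of the spin_lock()/spin_unlock() wrappers defined above,
 * protecting data shared between CPUs in process context:
 *
 *	static DEFINE_SPINLOCK(counter_lock);
 *	static unsigned long counter;
 *
 *	spin_lock(&counter_lock);
 *	counter++;			// the protected shared data
 *	spin_unlock(&counter_lock);
 *
 * counter_lock and counter are made-up names used only for illustration;
 * spin_lock_irqsave() would be used instead if the data were also touched
 * from interrupt context.
 */
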
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
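
/*
 * Illustrative sketch (editor's note, not part of the original header):
 * atomic_dec_and_lock() is the classic "drop the last reference under a
 * lock" helper.  A hypothetical release path might look like:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);	// unlink while holding the lock
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);		// last reference is gone
 *	}
 *
 * obj, obj->refcount, obj->node and obj_list_lock are made-up names used
 * only for illustration.
 */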

#endif /* __LINUX_SPINLOCK_H */