/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

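/*
 * Example (a sketch; 'do_untracked_locking' is a hypothetical helper):
 * temporarily hide a code region from the validator. Calls nest, since
 * each lockdep_off() adds LOCKDEP_OFF to the per-task recursion counter
 * and lockdep_on() subtracts it again:
 *
 *	lockdep_off();
 *	do_untracked_locking();
 *	lockdep_on();
 */
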
extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

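/*
 * Example (a sketch; 'struct foo' and its members are hypothetical):
 * embed a lockdep_map next to a hand-rolled lock and initialize it with
 * a static key, so every instance shares one lock class:
 *
 *	struct foo {
 *		arch_spinlock_t		raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	static struct lock_class_key foo_key;
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		lockdep_init_map(&f->dep_map, "foo", &foo_key, 0);
 *	}
 */
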
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

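/*
 * Example (a sketch; 'my_dev' and 'my_dev_key' are hypothetical): give
 * one particular instance of an otherwise shared lock type its own
 * class, so the validator can tell the instances apart:
 *
 *	static struct lock_class_key my_dev_key;
 *
 *	spin_lock_init(&my_dev->lock);
 *	lockdep_set_class(&my_dev->lock, &my_dev_key);
 */
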
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

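/*
 * Example (a sketch; 'my_lock'/'my_unlock' and 'struct foo' are
 * hypothetical): a custom locking primitive reports an exclusive
 * (read=0), fully checked (check=1) acquisition before taking the real
 * lock, and a release before dropping it:
 *
 *	static void my_lock(struct foo *f)
 *	{
 *		lock_acquire(&f->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_spin_lock(&f->raw);
 *	}
 *
 *	static void my_unlock(struct foo *f)
 *	{
 *		lock_release(&f->dep_map, _RET_IP_);
 *		arch_spin_unlock(&f->raw);
 *	}
 */
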
/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)		\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()		\
	lockdep_assert_once(!current->lockdep_depth)

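/*
 * Example (a sketch; 'update_foo' and 'struct foo' are hypothetical):
 * document and check a locking precondition instead of silently relying
 * on the caller:
 *
 *	static void update_foo(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->counter++;		// protected by f->lock
 *	}
 */
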
#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

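/*
 * Example (a sketch; 'bar_key' and 'bar_dep_map' are hypothetical): a
 * file-scope map initialized at build time rather than via
 * lockdep_init_map():
 *
 *	static struct lock_class_key bar_key;
 *	static struct lockdep_map bar_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("bar", &bar_key);
 */
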
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

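/*
 * Example (a sketch; 'struct foo_mutex', 'do_foo_trylock' and
 * 'do_foo_lock' are hypothetical): a mutex-style primitive tries the
 * fast path first and only charges contention/wait time to lock_stat
 * when that fails:
 *
 *	static void foo_lock(struct foo_mutex *m)
 *	{
 *		mutex_acquire(&m->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(m, do_foo_trylock, do_foo_lock);
 *	}
 */
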
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

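/*
 * Example (a sketch): taking two locks of the same class in a fixed
 * order, e.g. moving an entry between two instances of the same
 * structure; annotating the inner acquisition with the subclass keeps
 * lockdep from reporting a false self-deadlock:
 *
 *	mutex_lock(&src->lock);
 *	mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&dst->lock);
 *	mutex_unlock(&src->lock);
 */
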
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)

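/*
 * Example (a sketch; 'wq_dep_map' is a hypothetical map): the lock_map_*
 * helpers can model a non-lock dependency, e.g. "this work must not be
 * flushed while holding a lock that the work itself takes":
 *
 *	lock_map_acquire(&wq_dep_map);
 *	run_work();
 *	lock_map_release(&wq_dep_map);
 */
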
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)

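/*
 * Example (a sketch; 'foo_alloc' and 'struct foo' are hypothetical): a
 * function that only takes a lock on its slow path can still teach
 * lockdep about the dependency on every call:
 *
 *	static void *foo_alloc(struct foo *f)
 *	{
 *		might_lock(&f->mutex);	// slow path would mutex_lock()
 *		...
 *	}
 */
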
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled &&				\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config), \
			  "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */