/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
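
/*
 * Example (illustrative sketch): the masks above are tested against a
 * class's ->usage_mask; e.g. a class that was ever used in hardirq
 * context *and* was ever taken with hardirqs enabled signals a possible
 * irq-lock inversion, which the validator reports:
 *
 *	if ((class->usage_mask & LOCKF_USED_IN_HARDIRQ) &&
 *	    (class->usage_mask & LOCKF_ENABLED_HARDIRQS))
 *		...	(report the unsafe usage-state combination)
 */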

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
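
/*
 * Example (usage sketch, with hypothetical names): a subsystem that wants
 * its locks classified separately declares a static key - the key's
 * unique .data address identifies the class - and attaches it to a lock
 * instance after init:
 *
 *	static struct lock_class_key foo_lock_key;
 *
 *	spin_lock_init(&foo->lock);
 *	lockdep_set_class(&foo->lock, &foo_lock_key);
 */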

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, used during certain classes of graph
	 * walking to ensure that we check each node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[4];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
#endif
};
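
/*
 * Example (illustrative sketch; struct and field names are hypothetical):
 * a custom locking primitive embeds a lockdep_map next to its own state,
 * so every acquire/release can be reported against it. The in-tree lock
 * types (spinlock_t, mutexes, rwsems) already carry such a dep_map field
 * when lockdep is enabled:
 *
 *	struct foo_lock {
 *		atomic_t		state;
 *		struct lockdep_map	dep_map;
 *	};
 */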

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * for 100% coverage of the validator it is critical that every
	 * unique dependency path that can occur in the system gets a
	 * unique hash value with as high a probability as possible -
	 * hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};
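
/*
 * Conceptual sketch (hash_mix64 is a hypothetical name; the real mixing
 * lives in the lockdep core, not in this header): on every acquire the
 * current chain hash is folded together with the id of the newly taken
 * lock-class, and the pre-fold value is saved in ->prev_chain_key so the
 * hash can be rewound on release:
 *
 *	hlock->prev_chain_key = curr->curr_chain_key;
 *	curr->curr_chain_key = hash_mix64(curr->curr_chain_key, class_id);
 */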

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
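
/*
 * Example (usage sketch, hypothetical names): a statically allocated lock
 * wrapper can initialize its embedded map at build time instead of
 * calling lockdep_init_map() at runtime:
 *
 *	static struct lock_class_key foo_key;
 *	static struct foo_lock foo = {
 *		.dep_map = STATIC_LOCKDEP_MAP_INIT("foo", &foo_key),
 *	};
 */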

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
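
/*
 * Example (usage sketch; myfs_inode_lock_key is hypothetical): a
 * filesystem whose inode locks nest differently from the generic case
 * splits them into their own class after the generic init gave them the
 * default one:
 *
 *	static struct lock_class_key myfs_inode_lock_key;
 *
 *	inode_init_once(inode);
 *	lockdep_set_class(&inode->i_lock, &myfs_inode_lock_key);
 */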

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
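
/*
 * Example (illustrative sketch; foo_lock and arch_foo_lock are
 * hypothetical): a write-mode, non-trylock, fully validated acquire and
 * its matching release, as a locking variant would emit them around its
 * low-level lock operations:
 *
 *	void foo_lock(struct foo_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
 *		arch_foo_lock(l);
 *	}
 *
 *	void foo_unlock(struct foo_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		arch_foo_unlock(l);
 *	}
 */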

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map);			\
} while (0)
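
/*
 * Example (usage sketch; foo_lock, foo_trylock_fast and foo_lock_slowpath
 * are hypothetical): a sleeping lock wires its fast and slow paths
 * through LOCK_CONTENDED so that time spent waiting on a contended lock
 * is attributed to the lock's class:
 *
 *	void foo_lock(struct foo_lock *l)
 *	{
 *		mutex_acquire(&l->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(l, foo_trylock_fast, foo_lock_slowpath);
 *	}
 */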

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)		do {} while (0)
#define lock_acquired(lockdep_map)		do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
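
/*
 * Example (usage sketch): taking two locks of the same class in a known
 * parent->child order would normally be flagged as a potential recursive
 * deadlock; annotating the inner acquire with a subclass tells the
 * validator the nesting is intentional:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */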

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */