/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

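/*
 * Illustrative sketch (not part of this header; names are hypothetical):
 * since only the key's *address* identifies the class, a subsystem that
 * needs its own lock class declares one static key per locking rule and
 * assigns it with lockdep_set_class() (defined below):
 *
 *	static struct lock_class_key mydriver_lock_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &mydriver_lock_key);
 */
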
#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

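/*
 * Illustrative sketch (hypothetical type, not from this header): a lock
 * primitive embeds the map next to its architecture state, so every lock
 * instance can be mapped back to its class. This mirrors how spinlock_t,
 * struct mutex and struct rw_semaphore carry their dep_map members:
 *
 *	struct my_spinlock {
 *		arch_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	};
 */
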
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;				/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;			/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

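/*
 * Illustrative sketch (hypothetical names): a pseudo-lock that exists only
 * for dependency tracking - e.g. to order a subsystem's callbacks against
 * real locks - can be set up entirely at compile time:
 *
 *	static struct lock_class_key my_dep_key;
 *	static struct lockdep_map my_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_dep_key);
 *
 * lock_map_acquire()/lock_map_release() below can then be used on it.
 */
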
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
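
/*
 * Illustrative sketch (hypothetical names): a filesystem whose directory
 * and file inodes follow different locking rules would split the class
 * right after initializing each lock, so the validator tracks the two
 * rules separately:
 *
 *	static struct lock_class_key myfs_dir_key, myfs_file_key;
 *
 *	if (S_ISDIR(inode->i_mode))
 *		lockdep_set_class(&inode->i_mutex, &myfs_dir_key);
 *	else
 *		lockdep_set_class(&inode->i_mutex, &myfs_file_key);
 */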
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
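
/*
 * Illustrative sketch (hypothetical key, continuing the example above):
 * useful for asserting that a lock still carries the class a subsystem
 * assigned to it:
 *
 *	WARN_ON(!lockdep_match_class(&inode->i_mutex, &myfs_dir_key));
 */
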
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

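/*
 * Illustrative sketch (hypothetical primitive and helpers): a write-locking
 * function reports the acquire *before* blocking, so lockdep can flag a
 * deadlock before it happens, and reports the release before unlocking:
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		do_raw_my_lock(l);		// hypothetical slow path
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		do_raw_my_unlock(l);		// hypothetical
 *	}
 */
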
#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

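/*
 * Illustrative sketch: the scheduler's double_unlock_balance() uses this
 * to restore a runqueue lock's subclass to 0 after a nested-acquire dance;
 * roughly:
 *
 *	spin_unlock(&busiest->lock);
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */
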
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))

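/*
 * Illustrative sketch (hypothetical function): documents and enforces a
 * "caller must hold the lock" contract at runtime when lockdep is enabled:
 *
 *	static void my_update_state(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->state++;		// safe: dev->lock is held
 *	}
 */
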
#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call himself.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)		do { } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)		do {} while (0)
#define lock_acquired(lockdep_map, ip)		do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

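/*
 * Illustrative sketch, mirroring the rough shape of down_write() in
 * kernel/rwsem.c: contention is only recorded when the trylock fast path
 * fails and the caller has to take the blocking path:
 *
 *	void __sched down_write(struct rw_semaphore *sem)
 *	{
 *		might_sleep();
 *		rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 *	}
 */
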
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

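/*
 * Illustrative sketch: the classic parent/child pattern - taking two locks
 * of the same class in a fixed order - annotates the inner acquisition so
 * lockdep does not report a false self-deadlock:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */
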
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_acquire_nest(l, s, t, n, i)	do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

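/*
 * Illustrative sketch, roughly the shape of down_read() in kernel/rwsem.c:
 * note how the read side passes read=1 via rwsem_acquire_read() (read
 * acquisition, no same-instance recursion), while down_write() above used
 * the exclusive rwsem_acquire():
 *
 *	void __sched down_read(struct rw_semaphore *sem)
 *	{
 *		might_sleep();
 *		rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 *	}
 */
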
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)		lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)		do { } while (0)
# define lock_map_release(l)		do { } while (0)
#endif

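/*
 * Illustrative sketch: the workqueue code uses these on a pseudo
 * lockdep_map to catch "flush a work item from a context the work item
 * itself might lock against" deadlocks; flush_workqueue() roughly does:
 *
 *	lock_map_acquire(&wq->lockdep_map);	// flushing "acquires" the wq
 *	lock_map_release(&wq->lockdep_map);
 */
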
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

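/*
 * Illustrative sketch (hypothetical function): annotate a path that only
 * *sometimes* takes a lock, so lockdep validates the dependency on every
 * call rather than only when the rare branch is hit:
 *
 *	void my_put_ref(struct my_obj *obj)
 *	{
 *		might_lock(&obj->lock);
 *		if (atomic_dec_and_test(&obj->refs)) {
 *			mutex_lock(&obj->lock);
 *			...
 *		}
 *	}
 */
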
#endif /* __LINUX_LOCKDEP_H */