/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_USED_IN_RECLAIM_FS,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_HELD_OVER_RECLAIM_FS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_USED_IN_RECLAIM_FS_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_HELD_OVER_RECLAIM_FS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
#define LOCKF_HELD_OVER_RECLAIM_FS	(1 << LOCK_HELD_OVER_RECLAIM_FS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
#define LOCKF_HELD_OVER_RECLAIM_FS_READ	(1 << LOCK_HELD_OVER_RECLAIM_FS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
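
/*
 * For example, a driver can give all instances of its lock one shared,
 * distinct class by declaring a key (a usage sketch; "mydev" and its
 * members are made-up names):
 *
 *	static struct lock_class_key mydev_lock_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &mydev_lock_key);
 */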

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;
	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

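/*
 * A locking primitive's init path typically forwards its name and key
 * here, e.g. (a sketch; the "mydev" names are hypothetical):
 *
 *	lockdep_init_map(&dev->lock.dep_map, "mydev_lock",
 *			 &mydev_lock_key, 0);
 */
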
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

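/*
 * E.g. a pseudo-lock that is not wrapped in a lock type can be declared
 * statically, using its own address as the class key (a sketch; "mywq"
 * is a made-up map):
 *
 *	static struct lockdep_map mywq_map =
 *		STATIC_LOCKDEP_MAP_INIT("mywq", &mywq_map);
 */
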
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
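
/*
 * A typical class-split (a sketch; the names are illustrative): one
 * special instance of an otherwise generic lock gets its own key, so
 * the validator no longer lumps it with its siblings:
 *
 *	static struct lock_class_key hash_bucket_key;
 *
 *	spin_lock_init(&h->lock);
 *	lockdep_set_class_and_name(&h->lock, &hash_bucket_key,
 *				   "hash_bucket_lock");
 */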

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
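
/*
 * E.g. the rwlock wrappers below issue a recursive read-acquire with
 * full validation as (sketch):
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, _RET_IP_);
 */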

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
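
/*
 * Re-annotation is for a lock that is already held, e.g. moving a
 * runqueue lock back to subclass 0 after a nested double-lock
 * (a sketch modelled on the scheduler's usage):
 *
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */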

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

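/*
 * A reclaim context such as kswapd brackets its main loop with these,
 * so locks taken while it runs are checked against reclaim deadlocks
 * (a sketch modelled on the kswapd usage):
 *
 *	lockdep_set_current_reclaim_state(GFP_KERNEL);
 *	...
 *	lockdep_clear_current_reclaim_state();
 */
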
# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
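
/*
 * E.g. a rwsem down_read() slowpath can wrap its trylock fastpath and
 * its blocking slowpath as (sketch):
 *
 *	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 */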

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING	1

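/*
 * E.g. when taking a second instance of one lock class in an order the
 * caller guarantees (a sketch; "parent"/"child" are illustrative):
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */
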
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_acquire_nest(l, s, t, n, i)	do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)		lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)		do { } while (0)
# define lock_map_release(l)		do { } while (0)
#endif
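
/*
 * These annotate pseudo-locked regions, e.g. a workqueue flagging work
 * execution against its lockdep_map (a sketch modelled on the
 * workqueue usage):
 *
 *	lock_map_acquire(&wq->lockdep_map);
 *	f(work);
 *	lock_map_release(&wq->lockdep_map);
 */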

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
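
/*
 * E.g. a helper that only takes dev->mutex on some paths can still
 * assert the dependency on every call (a sketch; "dev" is made up):
 *
 *	might_lock(&dev->mutex);
 */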

#endif /* __LINUX_LOCKDEP_H */