/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};
/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS	(LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ	(LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
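
/*
 * Illustrative scenario these bits are used to catch: if one lock-class
 * is ever observed both as "used in hardirq context" (LOCKF_USED_IN_HARDIRQ)
 * and as "acquired with hardirqs enabled" (LOCKF_ENABLED_HARDIRQS), a
 * hardirq that hits while the lock is held can deadlock on it:
 *
 *	spin_lock(&l);		taken with hardirqs enabled
 *	  <hardirq>
 *	    spin_lock(&l);	same class taken from hardirq context
 */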
#define MAX_LOCKDEP_SUBCLASSES		8UL
/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lock-class key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
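
/*
 * Illustrative sketch (not the actual definition from any locking header):
 * lock initializers typically place a static key at each initialization
 * site, so every site gets its own lock-class. Roughly:
 *
 *	#define my_lock_init(lock)				\
 *	do {							\
 *		static struct lock_class_key __key;		\
 *								\
 *		__my_lock_init((lock), #lock, &__key);		\
 *	} while (0)
 *
 * __my_lock_init() is a hypothetical helper that would pass #lock and
 * &__key on to lockdep_init_map().
 */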
/*
 * The lock-class itself:
 */
struct lock_class {
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[4];
#endif
};
#ifdef CONFIG_LOCK_STAT

struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);

#endif
/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
#endif
};
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
};
/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		11
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;
	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
};
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);
/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * into the validator.
 */
extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
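
/*
 * Illustrative use (the type and names below are made up for this sketch,
 * not taken from any real subsystem): a statically allocated object can
 * have its dep_map initialized at build time:
 *
 *	static struct lock_class_key my_lock_key;
 *
 *	static struct my_lock_type my_lock = {
 *		.dep_map = STATIC_LOCKDEP_MAP_INIT("my_lock", &my_lock_key),
 *	};
 */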
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
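
/*
 * Illustrative use (struct my_object is hypothetical): give one group of
 * otherwise identical locks its own class after init, e.g. when the
 * default per-initialization-site class is too broad:
 *
 *	static struct lock_class_key my_nested_lock_key;
 *
 *	void my_object_init(struct my_object *obj)
 *	{
 *		spin_lock_init(&obj->lock);
 *		lockdep_set_class(&obj->lock, &my_nested_lock_key);
 *	}
 */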
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
			      unsigned long ip);
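
/*
 * Illustrative sketch of how a locking primitive feeds these events into
 * the validator. my_lock_t and the __do_my_lock()/__do_my_unlock() helpers
 * are hypothetical; the acquire below uses subclass 0, no trylock, a
 * write acquire, and full validation (check=2):
 *
 *	void my_lock(my_lock_t *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
 *		__do_my_lock(l);
 *	}
 *
 *	void my_unlock(my_lock_t *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		__do_my_unlock(l);
 *	}
 */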
# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map);			\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
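
/*
 * Illustrative use of LOCK_CONTENDED (struct my_mutex and the
 * __my_mutex_trylock()/__my_mutex_lock_slow() helpers are hypothetical):
 * the fast path is attempted first; only if it fails is the contention
 * event recorded before falling back to the blocking path, and the
 * acquisition is recorded either way:
 *
 *	void my_mutex_lock(struct my_mutex *l)
 *	{
 *		mutex_acquire(&l->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(l, __my_mutex_trylock, __my_mutex_lock_slow);
 *	}
 */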
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
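
/*
 * Illustrative use: taking two locks of the same class in a fixed
 * parent -> child order without triggering a "possible recursive
 * locking" report ("parent" and "child" are hypothetical objects
 * whose ->lock spinlocks share one class):
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */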
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif
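
/*
 * Note the "read" argument in the mappings above: rwlock_acquire_read()
 * passes 2 (recursive read-acquires on the same instance are allowed),
 * while rwsem_acquire_read() passes 1 (no recursion). Illustrative
 * consequence:
 *
 *	read_lock(&rw);
 *	read_lock(&rw);		recursive read-lock, OK for rwlocks
 *
 *	down_read(&sem);
 *	down_read(&sem);	flagged by the validator for rwsems
 */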
#endif /* __LINUX_LOCKDEP_H */