/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this widely, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and a single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with single-depth
 * nesting.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
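
/*
 * Illustrative sketch (not part of this header): a driver that wants every
 * instance of its lock in one class of its own embeds a static key in .data
 * and re-keys the lock via lockdep_set_class() (defined below). The struct
 * and key names here are hypothetical.
 *
 *	static struct lock_class_key bdev_lock_key;	// address acts as the key
 *
 *	static void bdev_init(struct bdev *b)
 *	{
 *		spin_lock_init(&b->lock);
 *		lockdep_set_class(&b->lock, &bdev_lock_key);
 *	}
 */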

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;				/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
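
/*
 * Illustrative sketch (not part of this header): a class-split. Suppose a
 * filesystem takes a directory inode's lock and then a file inode's lock;
 * with a single class, lockdep would report same-class recursion. Giving
 * the two kinds of inode different classes resolves that. The "myfs" names
 * are hypothetical; i_rwsem is the in-tree inode lock.
 *
 *	static struct lock_class_key myfs_dir_key, myfs_file_key;
 *
 *	static void myfs_set_inode_lock_class(struct inode *inode)
 *	{
 *		if (S_ISDIR(inode->i_mode))
 *			lockdep_set_class_and_name(&inode->i_rwsem,
 *						   &myfs_dir_key, "myfs_dir");
 *		else
 *			lockdep_set_class_and_name(&inode->i_rwsem,
 *						   &myfs_file_key, "myfs_file");
 *	}
 */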

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

struct lock_class *lockdep_hlock_class(struct held_lock *hlock);

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
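
/*
 * Illustrative sketch (not part of this header): how a custom primitive
 * would annotate itself with the calls above. "struct foo" and its helpers
 * are hypothetical; the argument values follow the table above (read=0
 * exclusive, check=1 full validation; nested=0 for a plain release).
 *
 *	struct foo {
 *		atomic_t		owned;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	static void foo_init(struct foo *f, const char *name,
 *			     struct lock_class_key *key)
 *	{
 *		atomic_set(&f->owned, 0);
 *		lockdep_init_map(&f->dep_map, name, key, 0);
 *	}
 *
 *	static void foo_lock(struct foo *f)
 *	{
 *		lock_acquire(&f->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		while (atomic_cmpxchg(&f->owned, 0, 1) != 0)
 *			cpu_relax();
 *	}
 *
 *	static void foo_unlock(struct foo *f)
 *	{
 *		lock_release(&f->dep_map, 0, _RET_IP_);
 *		atomic_set(&f->owned, 0);
 *	}
 */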

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(struct lockdep_map *lock, int read);

static inline int lock_is_held(struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
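
/*
 * Illustrative sketch (not part of this header): lock pinning. The cookie
 * returned by lockdep_pin_lock() (see the wrappers below) proves that
 * nobody released and re-acquired the lock behind our back; releasing a
 * pinned lock triggers a lockdep warning. This mirrors how the scheduler
 * pins rq->lock; do_work() here is hypothetical.
 *
 *	raw_spin_lock(&rq->lock);
 *	cookie = lockdep_pin_lock(&rq->lock);	// lock may not be dropped now
 *	do_work(rq);				// splat if this unlocks rq->lock
 *	lockdep_unpin_lock(&rq->lock, cookie);	// pairs with the pin above
 *	raw_spin_unlock(&rq->lock);
 */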

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
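
/*
 * Illustrative sketch (not part of this header): the assertions above turn
 * a documented locking rule into a checked one. "struct my_dev" is
 * hypothetical.
 *
 *	static void my_dev_update_state(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);	// caller must hold dev->lock
 *		dev->state++;
 *	}
 */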

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and callers should
 * rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held(lock)			(1)
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
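
/*
 * Illustrative sketch (not part of this header): a static map makes a
 * lockdep-only "pseudo-lock" for a construct that is not a lock at all,
 * the pattern used for e.g. reclaim and workqueue flush dependencies.
 * The names below are hypothetical; lock_map_acquire()/lock_map_release()
 * are defined further down.
 *
 *	static struct lock_class_key reclaim_key;
 *	static struct lockdep_map reclaim_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_reclaim", &reclaim_key);
 *
 *	static void enter_reclaim(void)
 *	{
 *		lock_map_acquire(&reclaim_map);	// record "inside reclaim"
 *	}
 *
 *	static void exit_reclaim(void)
 *	{
 *		lock_map_release(&reclaim_map);
 *	}
 */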

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
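
/*
 * Simplified sketch of how a primitive uses LOCK_CONTENDED (modeled on
 * down_write(); owner tracking omitted). The trylock marks the uncontended
 * fast path; only when it fails is the contention point recorded before
 * blocking in the slow path, which lets lockstat separate wait time from
 * hold time.
 *
 *	void down_write(struct rw_semaphore *sem)
 *	{
 *		might_sleep();
 *		rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 *	}
 */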

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
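
/*
 * Illustrative sketch (not part of this header): taking two locks of the
 * same class in a well-defined order. The second acquisition uses subclass
 * 1 via spin_lock_nested() so lockdep does not flag same-class recursion.
 * "struct my_node" is hypothetical; parent-before-child is the assumed
 * ordering rule.
 *
 *	static void lock_pair(struct my_node *parent, struct my_node *child)
 *	{
 *		spin_lock(&parent->lock);
 *		spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	}
 */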

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
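
/*
 * Illustrative sketch (not part of this header): might_lock() teaches the
 * validator about a lock that only a rarely-taken slow path acquires, so
 * the dependency is recorded even when test runs always hit the fast path.
 * "struct my_cache" and slow_fill() are hypothetical.
 *
 *	static int cache_get(struct my_cache *c, int key)
 *	{
 *		int val;
 *
 *		might_lock(&c->lock);		// fake acquire+release
 *
 *		val = atomic_read(&c->fast[key]);
 *		if (val)
 *			return val;
 *
 *		mutex_lock(&c->lock);		// slow path really takes it
 *		val = slow_fill(c, key);
 *		mutex_unlock(&c->lock);
 *		return val;
 *	}
 */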

#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)
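
/*
 * Illustrative sketch (not part of this header): asserting the IRQ state a
 * helper relies on instead of silently assuming it. "my_fifo" and
 * FIFO_SIZE are hypothetical.
 *
 *	static void my_fifo_push_irqsoff(struct my_fifo *f, u32 val)
 *	{
 *		lockdep_assert_irqs_disabled();	// caller disabled IRQs
 *		f->buf[f->head++ % FIFO_SIZE] = val;
 *	}
 */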

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */