// SPDX-License-Identifier: GPL-2.0-only
/*
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 *  - lock inversion scenarios
 *  - circular lock dependencies
 *  - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>
#include <linux/lockdep.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

DEFINE_PER_CPU(unsigned int, lockdep_recursion);
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
static __always_inline bool lockdep_enabled(void)
{
	if (!debug_locks)
		return false;

	if (this_cpu_read(lockdep_recursion))
		return false;

	if (current->lockdep_recursion)
		return false;

	return true;
}
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *__owner;
static inline void lockdep_lock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	__this_cpu_inc(lockdep_recursion);
	arch_spin_lock(&__lock);
	__owner = current;
}

static inline void lockdep_unlock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
		return;

	__owner = NULL;
	arch_spin_unlock(&__lock);
	__this_cpu_dec(lockdep_recursion);
}

static inline bool lockdep_assert_locked(void)
{
	return DEBUG_LOCKS_WARN_ON(__owner != current);
}

static struct task_struct *lockdep_selftest_task_struct;
static int graph_lock(void)
{
	lockdep_lock();
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already):
	 */
	if (!debug_locks) {
		lockdep_unlock();
		return 0;
	}
	return 1;
}

static inline void graph_unlock(void)
{
	lockdep_unlock();
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	lockdep_unlock();

	return ret;
}
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
/*
 * All data structures here are protected by the global debug_lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that is
 * in use.
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
unsigned long nr_zapped_classes;
#ifndef CONFIG_DEBUG_LOCKDEP
static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
inline struct lock_class *lockdep_hlock_class(struct held_lock *hlock)
{
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
	barrier();

	if (!test_bit(class_idx, lock_classes_in_use)) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}

	/*
	 * At this point, if the passed hlock->class_idx is still garbage,
	 * we just have to live with it
	 */
	return lock_classes + class_idx;
}
EXPORT_SYMBOL_GPL(lockdep_hlock_class);
#define hlock_class(hlock) lockdep_hlock_class(hlock)
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}
static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}
struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}
void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}
static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
/*
 * We keep a global list of all lock classes. The list is only accessed with
 * the lockdep spinlock lock held. free_lock_classes is a list with free
 * elements. These elements are linked together by the lock_entry member in
 * struct lock_class.
 */
LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);
/**
 * struct pending_free - information about data structures about to be freed
 * @zapped: Head of a list with struct lock_class elements.
 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
 *	are about to be freed.
 */
struct pending_free {
	struct list_head zapped;
	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};
/**
 * struct delayed_free - data structures used for delayed freeing
 *
 * A data structure for delayed freeing of data structures that may be
 * accessed by RCU readers at the time these were freed.
 *
 * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
 * @index:     Index of @pf to which freed data structures are added.
 * @scheduled: Whether or not an RCU callback has been scheduled.
 * @pf:        Array with information about data structures about to be freed.
 */
static struct delayed_free {
	struct rcu_head		rcu_head;
	int			index;
	int			scheduled;
	struct pending_free	pf[2];
} delayed_free;
/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];
/*
 * the id of held_lock
 */
static inline u16 hlock_id(struct held_lock *hlock)
{
	BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);

	return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
}

static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
{
	return hlock_id & (MAX_LOCKDEP_KEYS - 1);
}
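/*
 * A worked example of the encoding (illustrative values only, assuming the
 * default MAX_LOCKDEP_KEYS_BITS of 13): a held_lock with class_idx == 42 and
 * read == 2 (a recursive reader) is packed as
 *
 *	hlock_id = 42 | (2 << 13) = 0x402a
 *
 * and chain_hlock_class_idx(0x402a) masks with MAX_LOCKDEP_KEYS - 1 (0x1fff)
 * to recover 42 again, while the top bits keep the read state.
 */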
/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}
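/*
 * A minimal sketch of how the chain key is built up as locks are acquired
 * (hypothetical hlocks, only to illustrate the folding):
 *
 *	u64 chain_key = INITIAL_CHAIN_KEY;
 *
 *	chain_key = iterate_chain_key(chain_key, hlock_id(hlock_A));
 *	chain_key = iterate_chain_key(chain_key, hlock_id(hlock_B));
 *
 * Each step mixes a 16-bit hlock_id into the running 64-bit key with
 * __jhash_mix(), so the same locks taken in a different order yield a
 * different chain key, which is what makes the chain hash cache work.
 */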
void lockdep_init_task(struct task_struct *task)
{
	task->lockdep_depth = 0; /* no locks held yet */
	task->curr_chain_key = INITIAL_CHAIN_KEY;
	task->lockdep_recursion = 0;
}

static __always_inline void lockdep_recursion_inc(void)
{
	__this_cpu_inc(lockdep_recursion);
}

static __always_inline void lockdep_recursion_finish(void)
{
	if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
		__this_cpu_write(lockdep_recursion, 0);
}

void lockdep_set_selftest_task(struct task_struct *task)
{
	lockdep_selftest_task_struct = task;
}
/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;

	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}
static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

unsigned long nr_stack_trace_entries;
#ifdef CONFIG_PROVE_LOCKING
/**
 * struct lock_trace - single stack backtrace
 * @hash_entry:	Entry in a stack_trace_hash[] list.
 * @hash:	jhash() of @entries.
 * @nr_entries:	Number of entries in @entries.
 * @entries:	Actual stack backtrace.
 */
struct lock_trace {
	struct hlist_node	hash_entry;
	u32			hash;
	u32			nr_entries;
	unsigned long		entries[] __aligned(sizeof(unsigned long));
};
#define LOCK_TRACE_SIZE_IN_LONGS				\
	(sizeof(struct lock_trace) / sizeof(unsigned long))
/*
 * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
 */
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
{
	return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
		memcmp(t1->entries, t2->entries,
		       t1->nr_entries * sizeof(t1->entries[0])) == 0;
}
static struct lock_trace *save_trace(void)
{
	struct lock_trace *trace, *t2;
	struct hlist_head *hash_head;
	u32 hash;
	int max_entries;

	BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
	BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);

	trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
		LOCK_TRACE_SIZE_IN_LONGS;

	if (max_entries <= 0) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return NULL;
	}
	trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);

	hash = jhash(trace->entries, trace->nr_entries *
		     sizeof(trace->entries[0]), 0);
	trace->hash = hash;
	hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
	hlist_for_each_entry(t2, hash_head, hash_entry) {
		if (traces_identical(trace, t2))
			return t2;
	}
	nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
	hlist_add_head(&trace->hash_entry, hash_head);

	return trace;
}
/* Return the number of stack traces in the stack_trace[] array. */
u64 lockdep_stack_trace_count(void)
{
	struct lock_trace *trace;
	u64 c = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
		hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
			c++;
		}
	}

	return c;
}

/* Return the number of stack hash chains that have at least one stack trace. */
u64 lockdep_stack_hash_count(void)
{
	u64 c = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
		if (!hlist_empty(&stack_trace_hash[i]))
			c++;

	return c;
}
#endif
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif
#ifdef CONFIG_PROVE_LOCKING

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	[LOCK_USED] = "INITIAL USE",
	[LOCK_USED_READ] = "INITIAL READ USE",
	/* abused as string storage for verify_lock_unused() */
	[LOCK_USAGE_STATES] = "IN-NMI",
};
#endif
const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	/*
	 * The usage character defaults to '.' (i.e., irqs disabled and not in
	 * irq context), which is the safest usage category.
	 */
	char c = '.';

	/*
	 * The order of the following usage checks matters, which will
	 * result in the outcome character as follows:
	 *
	 * - '+': irq is enabled and not in irq context
	 * - '-': in irq context and irq is disabled
	 * - '?': in irq context and irq is enabled
	 */
	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
		c = '+';
		if (class->usage_mask & lock_flag(bit))
			c = '?';
	} else if (class->usage_mask & lock_flag(bit))
		c = '-';

	return c;
}
void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}
static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
	}
}
static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(class);
	printk(KERN_CONT "){%s}-{%d:%d}", usage,
			class->wait_type_outer ?: class->wait_type_inner,
			class->wait_type_inner);
}
static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}
static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 *
	 * If this indeed happens, lets pretend it does not hurt to continue
	 * to print the lock unless the hlock class_idx does not point to a
	 * registered class. The rationale here is: since we don't attempt
	 * to distinguish whether we are in this situation, if it just
	 * happened we can't count on class_idx to tell either.
	 */
	struct lock_class *lock = hlock_class(hlock);

	if (!lock) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	printk(KERN_CONT "%px", hlock->instance);
	print_lock_name(lock);
	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *p)
{
	int i, depth = READ_ONCE(p->lockdep_depth);

	if (!depth)
		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
	else
		printk("%d lock%s held by %s/%d:\n", depth,
		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
	/*
	 * It's not reliable to print a task's held locks if it's not sleeping
	 * and it's not the current task.
	 */
	if (p != current && task_is_running(p))
		return;
	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(p->held_locks + i);
	}
}
static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}
/*
 * Is this the address of a static object:
 */
static int static_obj(const void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	if (arch_is_kernel_initmem_freed(addr))
		return 0;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter. The caller must hold the graph
 * lock.
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}
/* used from NMI context -- must be lockless */
static noinstr struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		instrumentation_begin();
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		instrumentation_end();
		return NULL;
	}

	/*
	 * If it is not initialised then it has never been locked,
	 * so it won't be present in the hash table.
	 */
	if (unlikely(!lock->key))
		return NULL;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name &&
				     lock->key != &__lockdep_no_validate__);
			return class;
		}
	}

	return NULL;
}
/*
 * Static locks do not have their class-keys yet - for them the key is
 * the lock object itself. If the lock is in the per cpu area, the
 * canonical address of the lock (per cpu offset removed) is used.
 */
static bool assign_lock_key(struct lockdep_map *lock)
{
	unsigned long can_addr, addr = (unsigned long)lock;

	/*
	 * lockdep_free_key_range() assumes that struct lock_class_key
	 * objects do not overlap. Since we use the address of lock
	 * objects as class key for static objects, check whether the
	 * size of lock_class_key objects does not exceed the size of
	 * the smallest lock object.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));

	if (__is_kernel_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (__is_module_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (static_obj(lock))
		lock->key = (void *)lock;
	else {
		/* Debug-check: all keys must be persistent! */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
		pr_err("The code is fine but needs lockdep annotation, or maybe\n");
		pr_err("you didn't initialize this object before use?\n");
		pr_err("turning off the locking correctness validator.\n");
		dump_stack();
		return false;
	}

	return true;
}
#ifdef CONFIG_DEBUG_LOCKDEP

/* Check whether element @e occurs in list @h */
static bool in_list(struct list_head *e, struct list_head *h)
{
	struct list_head *f;

	list_for_each(f, h) {
		if (e == f)
			return true;
	}

	return false;
}

/*
 * Check whether entry @e occurs in any of the locks_after or locks_before
 * lists.
 */
static bool in_any_class_list(struct list_head *e)
{
	struct lock_class *class;
	int i;

	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (in_list(e, &class->locks_after) ||
		    in_list(e, &class->locks_before))
			return true;
	}
	return false;
}
static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
{
	struct lock_list *e;

	list_for_each_entry(e, h, entry) {
		if (e->links_to != c) {
			printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
			       c->name ? : "(?)",
			       (unsigned long)(e - list_entries),
			       e->links_to && e->links_to->name ?
			       e->links_to->name : "(?)",
			       e->class && e->class->name ? e->class->name :
			       "(?)");
			return false;
		}
	}
	return true;
}

#ifdef CONFIG_PROVE_LOCKING
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
#endif
static bool check_lock_chain_key(struct lock_chain *chain)
{
#ifdef CONFIG_PROVE_LOCKING
	u64 chain_key = INITIAL_CHAIN_KEY;
	int i;

	for (i = chain->base; i < chain->base + chain->depth; i++)
		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
	/*
	 * The 'unsigned long long' casts avoid that a compiler warning
	 * is reported when building tools/lib/lockdep.
	 */
	if (chain->chain_key != chain_key) {
		printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
		       (unsigned long long)(chain - lock_chains),
		       (unsigned long long)chain->chain_key,
		       (unsigned long long)chain_key);
		return false;
	}
#endif
	return true;
}
static bool in_any_zapped_class_list(struct lock_class *class)
{
	struct pending_free *pf;
	int i;

	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
		if (in_list(&class->lock_entry, &pf->zapped))
			return true;
	}

	return false;
}
static bool __check_data_structures(void)
{
	struct lock_class *class;
	struct lock_chain *chain;
	struct hlist_head *head;
	struct lock_list *e;
	int i;

	/* Check whether all classes occur in a lock list. */
	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (!in_list(&class->lock_entry, &all_lock_classes) &&
		    !in_list(&class->lock_entry, &free_lock_classes) &&
		    !in_any_zapped_class_list(class)) {
			printk(KERN_INFO "class %px/%s is not in any class list\n",
			       class, class->name ? : "(?)");
			return false;
		}
	}

	/* Check whether all classes have valid lock lists. */
	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (!class_lock_list_valid(class, &class->locks_before))
			return false;
		if (!class_lock_list_valid(class, &class->locks_after))
			return false;
	}

	/* Check the chain_key of all lock chains. */
	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
		head = chainhash_table + i;
		hlist_for_each_entry_rcu(chain, head, entry) {
			if (!check_lock_chain_key(chain))
				return false;
		}
	}

	/*
	 * Check whether all list entries that are in use occur in a class
	 * lock list.
	 */
	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		e = list_entries + i;
		if (!in_any_class_list(&e->entry)) {
			printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
			       (unsigned int)(e - list_entries),
			       e->class->name ? : "(?)",
			       e->links_to->name ? : "(?)");
			return false;
		}
	}

	/*
	 * Check whether all list entries that are not in use do not occur in
	 * a class lock list.
	 */
	for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		e = list_entries + i;
		if (in_any_class_list(&e->entry)) {
			printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
			       (unsigned int)(e - list_entries),
			       e->class && e->class->name ? e->class->name :
			       "(?)",
			       e->links_to && e->links_to->name ?
			       e->links_to->name : "(?)");
			return false;
		}
	}

	return true;
}
int check_consistency = 0;
module_param(check_consistency, int, 0644);

static void check_data_structures(void)
{
	static bool once = false;

	if (check_consistency && !once) {
		if (!__check_data_structures()) {
			once = true;
			WARN_ON(once);
		}
	}
}

#else /* CONFIG_DEBUG_LOCKDEP */

static inline void check_data_structures(void) { }

#endif /* CONFIG_DEBUG_LOCKDEP */
static void init_chain_block_buckets(void);

/*
 * Initialize the lock_classes[] array elements, the free_lock_classes list
 * and also the delayed_free structure.
 */
static void init_data_structures_once(void)
{
	static bool __read_mostly ds_initialized, rcu_head_initialized;
	int i;

	if (likely(rcu_head_initialized))
		return;

	if (system_state >= SYSTEM_SCHEDULING) {
		init_rcu_head(&delayed_free.rcu_head);
		rcu_head_initialized = true;
	}

	if (ds_initialized)
		return;

	ds_initialized = true;

	INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
	INIT_LIST_HEAD(&delayed_free.pf[1].zapped);

	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
		INIT_LIST_HEAD(&lock_classes[i].locks_after);
		INIT_LIST_HEAD(&lock_classes[i].locks_before);
	}
	init_chain_block_buckets();
}
static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
{
	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);

	return lock_keys_hash + hash;
}
/* Register a dynamically allocated key. */
void lockdep_register_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head;
	struct lock_class_key *k;
	unsigned long flags;

	if (WARN_ON_ONCE(static_obj(key)))
		return;
	hash_head = keyhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto restore_irqs;
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (WARN_ON_ONCE(k == key))
			goto out_unlock;
	}
	hlist_add_head_rcu(&key->hash_entry, hash_head);
out_unlock:
	graph_unlock();
restore_irqs:
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_register_key);
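/*
 * Typical usage, as a sketch (the surrounding object is hypothetical): a
 * driver that creates lock instances dynamically embeds a
 * struct lock_class_key in its object, calls lockdep_register_key() on it
 * before handing the key to lockdep_set_class() (or a *_lock_init() helper),
 * and calls lockdep_unregister_key() before freeing the object, so the key
 * never points into freed memory.
 */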
/* Check whether a key has been registered as a dynamic key. */
static bool is_dynamic_key(const struct lock_class_key *key)
{
	struct hlist_head *hash_head;
	struct lock_class_key *k;
	bool found = false;

	if (WARN_ON_ONCE(static_obj(key)))
		return false;

	/*
	 * If lock debugging is disabled lock_keys_hash[] may contain
	 * pointers to memory that has already been freed. Avoid triggering
	 * a use-after-free in that case by returning early.
	 */
	if (!debug_locks)
		return true;

	hash_head = keyhashentry(key);

	rcu_read_lock();
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		goto out_set_class_cache;

	if (!lock->key) {
		if (!assign_lock_key(lock))
			return NULL;
	} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	if (!graph_lock()) {
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
	}

	init_data_structures_once();

	/* Allocate a new lock class and add it to the hash. */
	class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
					 lock_entry);
	if (!class) {
		if (!debug_locks_off_graph_unlock()) {
			return NULL;
		}

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
		return NULL;
	}
	nr_lock_classes++;
	__set_bit(class - lock_classes, lock_classes_in_use);
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	WARN_ON_ONCE(!list_empty(&class->locks_before));
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	class->name_version = count_matching_names(class);
	class->wait_type_inner = lock->wait_type_inner;
	class->wait_type_outer = lock->wait_type_outer;
	class->lock_type = lock->lock_type;
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	hlist_add_head_rcu(&class->hash_entry, hash_head);
	/*
	 * Remove the class from the free list and add it to the global list
	 * of classes.
	 */
	list_move_tail(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();

		printk("\nnew class %px: %s", class->key, class->name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		printk(KERN_CONT "\n");
		dump_stack();

		if (!graph_lock()) {
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}
#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	int idx = find_first_zero_bit(list_entries_in_use,
				      ARRAY_SIZE(list_entries));

	if (idx >= ARRAY_SIZE(list_entries)) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	nr_list_entries++;
	__set_bit(idx, list_entries_in_use);
	return list_entries + idx;
}
/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *this,
			    struct lock_class *links_to, struct list_head *head,
			    unsigned long ip, u16 distance, u8 dep,
			    const struct lock_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->links_to = links_to;
	entry->dep = dep;
	entry->distance = distance;
	entry->trace = trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}
/*
 * For good efficiency of the modulo operation, we use a power of 2.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		(1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement graph
 * breadth-first search (BFS) algorithm, by which we can determine
 * whether there is a path from a lock to another. In deadlock checks,
 * a path from the next lock to be acquired to a previous held lock
 * indicates that adding the <prev> -> <next> lock dependency will
 * produce a circle in the graph. Breadth-first search instead of
 * depth-first search is used in order to find the shortest (circular)
 * path.
 */
struct circular_queue {
	struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;
static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}
static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

/*
 * Dequeue an element from the circular_queue, return a lock_list if
 * the queue is not empty, or NULL if otherwise.
 */
static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
{
	struct lock_list *lock;

	if (__cq_empty(cq))
		return NULL;

	lock = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;

	return lock;
}
static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
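/*
 * A small worked example of the index arithmetic (hypothetical queue size of
 * 4, i.e. CQ_MASK == 3): starting from front == rear == 0, three enqueues
 * move rear to 3 and __cq_get_elem_count() returns (3 - 0) & 3 == 3; a
 * fourth enqueue would make ((rear + 1) & CQ_MASK) equal to front, which is
 * exactly the __cq_full() condition, so one slot is always left unused to
 * distinguish "full" from "empty".
 */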
static inline void mark_lock_accessed(struct lock_list *lock)
{
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline void visit_lock_entry(struct lock_list *lock,
				    struct lock_list *parent)
{
	lock->parent = parent;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}
static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

/*
 * Return the forward or backward dependency list.
 *
 * @lock:   the lock_list to get its class's dependency list
 * @offset: the offset to struct lock_class to determine whether it is
 *          locks_after or locks_before
 */
static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
{
	void *lock_class = lock->class;

	return lock_class + offset;
}
/*
 * Return values of a bfs search:
 *
 * BFS_E* indicates an error
 * BFS_R* indicates a result (match or not)
 *
 * BFS_EINVALIDNODE: Find an invalid node in the graph.
 *
 * BFS_EQUEUEFULL: The queue is full while doing the bfs.
 *
 * BFS_RMATCH: Find the matched node in the graph, and put that node into
 *             *@target_entry.
 *
 * BFS_RNOMATCH: Haven't found the matched node and keep *@target_entry
 *               unchanged.
 */
enum bfs_result {
	BFS_EINVALIDNODE = -2,
	BFS_EQUEUEFULL = -1,
	BFS_RMATCH = 0,
	BFS_RNOMATCH = 1,
};

/*
 * bfs_result < 0 means error
 */
static inline bool bfs_error(enum bfs_result res)
{
	return res < 0;
}
/*
 * DEP_*_BIT in lock_list::dep
 *
 * For dependency @prev -> @next:
 *
 *   SR: @prev is shared reader (->read != 0) and @next is recursive reader
 *       (->read == 2)
 *   ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
 *   SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
 *   EN: @prev is exclusive locker and @next is non-recursive locker
 *
 * Note that we define the value of DEP_*_BITs so that:
 *   bit0 is prev->read == 0
 *   bit1 is next->read != 2
 */
#define DEP_SR_BIT (0 + (0 << 1)) /* 0 */
#define DEP_ER_BIT (1 + (0 << 1)) /* 1 */
#define DEP_SN_BIT (0 + (1 << 1)) /* 2 */
#define DEP_EN_BIT (1 + (1 << 1)) /* 3 */

#define DEP_SR_MASK (1U << (DEP_SR_BIT))
#define DEP_ER_MASK (1U << (DEP_ER_BIT))
#define DEP_SN_MASK (1U << (DEP_SN_BIT))
#define DEP_EN_MASK (1U << (DEP_EN_BIT))

static inline unsigned int
__calc_dep_bit(struct held_lock *prev, struct held_lock *next)
{
	return (prev->read == 0) + ((next->read != 2) << 1);
}

static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
{
	return 1U << __calc_dep_bit(prev, next);
}
/*
 * calculate the dep_bit for backwards edges. We care about whether @prev is
 * shared and whether @next is recursive.
 */
static inline unsigned int
__calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
{
	return (next->read != 2) + ((prev->read == 0) << 1);
}

static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
{
	return 1U << __calc_dep_bitb(prev, next);
}
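/*
 * Worked example (the values follow directly from the definitions above):
 * for prev->read == 0 (exclusive) and next->read == 0 (non-recursive),
 * __calc_dep_bit() returns 1 + (1 << 1) == 3 == DEP_EN_BIT, so calc_dep()
 * contributes DEP_EN_MASK (0x8) to lock_list::dep. For prev->read == 1
 * (shared reader) and next->read == 2 (recursive reader) it returns
 * 0 == DEP_SR_BIT, i.e. DEP_SR_MASK (0x1). The backwards variant swaps which
 * lock supplies which bit, matching the reversed direction of the edge.
 */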
/*
 * Initialize a lock_list entry @lock belonging to @class as the root for a BFS
 * search.
 */
static inline void __bfs_init_root(struct lock_list *lock,
				   struct lock_class *class)
{
	lock->class = class;
	lock->parent = NULL;
	lock->only_xr = 0;
}

/*
 * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the
 * root for a BFS search.
 *
 * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
 * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
 * and -(S*)->.
 */
static inline void bfs_init_root(struct lock_list *lock,
				 struct held_lock *hlock)
{
	__bfs_init_root(lock, hlock_class(hlock));
	lock->only_xr = (hlock->read == 2);
}

/*
 * Similar to bfs_init_root() but initialize the root for backwards BFS.
 *
 * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
 * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
 * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->).
 */
static inline void bfs_init_rootb(struct lock_list *lock,
				  struct held_lock *hlock)
{
	__bfs_init_root(lock, hlock_class(hlock));
	lock->only_xr = (hlock->read != 0);
}
static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
{
	if (!lock || !lock->parent)
		return NULL;

	return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
				     &lock->entry, struct lock_list, entry);
}
/*
 * Breadth-First Search to find a strong path in the dependency graph.
 *
 * @source_entry: the source of the path we are searching for.
 * @data: data used for the second parameter of @match function
 * @match: match function for the search
 * @target_entry: pointer to the target of a matched path
 * @offset: the offset to struct lock_class to determine whether it is
 *          locks_after or locks_before
 *
 * We may have multiple edges (considering different kinds of dependencies,
 * e.g. ER and SN) between two nodes in the dependency graph. But
 * only the strong dependency path in the graph is relevant to deadlocks. A
 * strong dependency path is a dependency path that doesn't have two adjacent
 * dependencies as -(*R)-> -(S*)->, please see:
 *
 *         Documentation/locking/lockdep-design.rst
 *
 * for more explanation of the definition of strong dependency paths
 *
 * In __bfs(), we only traverse in the strong dependency path:
 *
 *     In lock_list::only_xr, we record whether the previous dependency only
 *     has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
 *     filter out any -(S*)-> in the current dependency and after that, the
 *     ->only_xr is set according to whether we only have -(*R)-> left.
 */
static enum bfs_result __bfs(struct lock_list *source_entry,
			     void *data,
			     bool (*match)(struct lock_list *entry, void *data),
			     bool (*skip)(struct lock_list *entry, void *data),
			     struct lock_list **target_entry,
			     int offset)
{
	struct circular_queue *cq = &lock_cq;
	struct lock_list *lock = NULL;
	struct lock_list *entry;
	struct list_head *head;
	unsigned int cq_depth;
	bool first;

	lockdep_assert_locked();

	__cq_init(cq);
	__cq_enqueue(cq, source_entry);

	while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
		if (!lock->class)
			return BFS_EINVALIDNODE;

		/*
		 * Step 1: check whether we already finish on this one.
		 *
		 * If we have visited all the dependencies from this @lock to
		 * others (iow, if we have visited all lock_list entries in
		 * @lock->class->locks_{after,before}) we skip, otherwise go
		 * and visit all the dependencies in the list and mark this
		 * list accessed.
		 */
		if (lock_accessed(lock))
			continue;
		else
			mark_lock_accessed(lock);

		/*
		 * Step 2: check whether prev dependency and this form a strong
		 *         dependency path.
		 */
		if (lock->parent) { /* Parent exists, check prev dependency */
			u8 dep = lock->dep;
			bool prev_only_xr = lock->parent->only_xr;

			/*
			 * Mask out all -(S*)-> if we only have *R in previous
			 * step, because -(*R)-> -(S*)-> don't make up a strong
			 * dependency.
			 */
			if (prev_only_xr)
				dep &= ~(DEP_SR_MASK | DEP_SN_MASK);

			/* If nothing left, we skip */
			if (!dep)
				continue;

			/* If there are only -(*R)-> left, set that for the next step */
			lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
		}

		/*
		 * Step 3: we haven't visited this and there is a strong
		 *         dependency path to this, so check with @match.
		 *         If @skip is provided and returns true, we skip this
		 *         lock (and any path this lock is in).
		 */
		if (skip && skip(lock, data))
			continue;

		if (match(lock, data)) {
			*target_entry = lock;
			return BFS_RMATCH;
		}

		/*
		 * Step 4: if not match, expand the path by adding the
		 *         forward or backwards dependencies in the search
		 *
		 */
		first = true;
		head = get_dep_list(lock, offset);
		list_for_each_entry_rcu(entry, head, entry) {
			visit_lock_entry(entry, lock);

			/*
			 * Note we only enqueue the first of the list into the
			 * queue, because we can always find a sibling
			 * dependency from one (see __bfs_next()), as a result
			 * the space of queue is saved.
			 */
			if (!first)
				continue;

			first = false;

			if (__cq_enqueue(cq, entry))
				return BFS_EQUEUEFULL;

			cq_depth = __cq_get_elem_count(cq);
			if (max_bfs_queue_depth < cq_depth)
				max_bfs_queue_depth = cq_depth;
		}
	}

	return BFS_RNOMATCH;
}
static inline enum bfs_result
__bfs_forwards(struct lock_list *src_entry,
	       void *data,
	       bool (*match)(struct lock_list *entry, void *data),
	       bool (*skip)(struct lock_list *entry, void *data),
	       struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, skip, target_entry,
		     offsetof(struct lock_class, locks_after));
}

static inline enum bfs_result
__bfs_backwards(struct lock_list *src_entry,
		void *data,
		bool (*match)(struct lock_list *entry, void *data),
		bool (*skip)(struct lock_list *entry, void *data),
		struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, skip, target_entry,
		     offsetof(struct lock_class, locks_before));
}
static void print_lock_trace(const struct lock_trace *trace,
			     unsigned int spaces)
{
	stack_trace_print(trace->entries, trace->nr_entries, spaces);
}

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline void
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(KERN_CONT ":\n");
	print_lock_trace(target->trace, 6);
}
static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(KERN_CONT " --> ");
		__print_lock_name(parent);
		printk(KERN_CONT " --> ");
		__print_lock_name(target);
		printk(KERN_CONT "\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(target);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(parent);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(target);
	printk(KERN_CONT ");\n");
	printk("  lock(");
	__print_lock_name(source);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}
/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline void
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			  struct held_lock *check_src,
			  struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("======================================================\n");
	pr_warn("WARNING: possible circular locking dependency detected\n");
	print_kernel_ident();
	pr_warn("------------------------------------------------------\n");
	pr_warn("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);

	pr_warn("\nbut task is already holding lock:\n");

	print_lock(check_tgt);
	pr_warn("\nwhich lock already depends on the new lock.\n\n");
	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);
}
/*
 * We are about to add A -> B into the dependency graph, and in __bfs() a
 * strong dependency path A -> .. -> B is found: hlock_class equals
 * entry->class.
 *
 * If A -> .. -> B can replace A -> B in any __bfs() search (means the former
 * is _stronger_ than or equal to the latter), we consider A -> B as redundant.
 * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
 * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
 * dependency graph, as any strong path ..-> A -> B ->.. we can get with
 * having dependency A -> B, we could already get an equivalent path ..-> A ->
 * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
 *
 * We need to make sure both the start and the end of A -> .. -> B is not
 * weaker than A -> B. For the start part, please see the comment in
 * check_redundant(). For the end part, we need:
 *
 * Either
 *
 *     a) A -> B is -(*R)-> (everything is not weaker than that)
 *
 * or
 *
 *     b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
 *
 */
static inline bool hlock_equal(struct lock_list *entry, void *data)
{
	struct held_lock *hlock = (struct held_lock *)data;

	return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
	       (hlock->read == 2 ||  /* A -> B is -(*R)-> */
		!entry->only_xr); /* A -> .. -> B is -(*N)-> */
}
/*
 * We are about to add B -> A into the dependency graph, and in __bfs() a
 * strong dependency path A -> .. -> B is found: hlock_class equals
 * entry->class.
 *
 * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong
 * dependency cycle, that means:
 *
 * Either
 *
 *     a) B -> A is -(E*)->
 *
 * or
 *
 *     b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B)
 *
 * as then we don't have -(*R)-> -(S*)-> in the cycle.
 */
static inline bool hlock_conflict(struct lock_list *entry, void *data)
{
	struct held_lock *hlock = (struct held_lock *)data;

	return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
	       (hlock->read == 0 || /* B -> A is -(E*)-> */
		!entry->only_xr); /* A -> .. -> B is -(*N)-> */
}
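/*
 * For instance (an illustrative reading of the check above): when we are
 * about to add B -> A, __bfs() walks forward from A and calls
 * hlock_conflict() on every node it reaches; once it reaches B's class, the
 * cycle is only reported when B -> A would be -(E*)-> (hlock->read == 0) or
 * A -> .. -> B is -(*N)-> (!entry->only_xr), because only then can the
 * resulting cycle avoid the harmless -(*R)-> -(S*)-> combination.
 */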
static noinline void print_circular_bug(struct lock_list *this,
					struct lock_list *target,
					struct held_lock *check_src,
					struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return;

	this->trace = save_trace();
	if (!this->trace)
		return;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();
}

static noinline void print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return;

	/*
	 * Breadth-first-search failed, graph got corrupted?
	 */
	WARN(1, "lockdep bfs error:%d\n", ret);
}
static bool noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return false;
}

static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *target_entry;

	__bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);

	return count;
}

unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	__bfs_init_root(&this, class);

	raw_local_irq_save(flags);
	lockdep_lock();
	ret = __lockdep_count_forward_deps(&this);
	lockdep_unlock();
	raw_local_irq_restore(flags);

	return ret;
}
static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *target_entry;

	__bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	__bfs_init_root(&this, class);

	raw_local_irq_save(flags);
	lockdep_lock();
	ret = __lockdep_count_backward_deps(&this);
	lockdep_unlock();
	raw_local_irq_restore(flags);

	return ret;
}
/*
 * Check that the dependency graph starting at <src> can lead to
 * <target> or not.
 */
static noinline enum bfs_result
check_path(struct held_lock *target, struct lock_list *src_entry,
	   bool (*match)(struct lock_list *entry, void *data),
	   bool (*skip)(struct lock_list *entry, void *data),
	   struct lock_list **target_entry)
{
	enum bfs_result ret;

	ret = __bfs_forwards(src_entry, target, match, skip, target_entry);

	if (unlikely(bfs_error(ret)))
		print_bfs_bug(ret);

	return ret;
}
/*
 * Prove that the dependency graph starting at <src> can not
 * lead to <target>. If it can, there is a circle when adding
 * <target> -> <src> dependency.
 *
 * Print an error and return BFS_RMATCH if it does.
 */
static noinline enum bfs_result
check_noncircular(struct held_lock *src, struct held_lock *target,
		  struct lock_trace **const trace)
{
	enum bfs_result ret;
	struct lock_list *target_entry;
	struct lock_list src_entry;

	bfs_init_root(&src_entry, src);

	debug_atomic_inc(nr_cyclic_checks);

	ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);

	if (unlikely(ret == BFS_RMATCH)) {
		if (!*trace) {
			/*
			 * If save_trace fails here, the printing might
			 * trigger a WARN but because of the !nr_entries it
			 * should not do bad things.
			 */
			*trace = save_trace();
		}

		print_circular_bug(&src_entry, target_entry, src, target);
	}

	return ret;
}
#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 *
 * An irq safe->unsafe deadlock happens with the following conditions:
 *
 * 1) We have a strong dependency path A -> ... -> B
 *
 * 2) and we have ENABLED_IRQ usage of B and USED_IN_IRQ usage of A, therefore
 *    irq can create a new dependency B -> A (consider the case that a holder
 *    of B gets interrupted by an irq whose handler will try to acquire A).
 *
 * 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a
 *    strong circle:
 *
 *      For the usage bits of B:
 *        a) if A -> B is -(*N)->, then B -> A could be any type, so any
 *           ENABLED_IRQ usage suffices.
 *        b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only
 *           ENABLED_IRQ_*_READ usage suffices.
 *
 *      For the usage bits of A:
 *        c) if A -> B is -(E*)->, then B -> A could be any type, so any
 *           USED_IN_IRQ usage suffices.
 *        d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only
 *           USED_IN_IRQ_*_READ usage suffices.
 */

/*
 * There is a strong dependency path in the dependency graph: A -> B, and now
 * we need to decide which usage bit of A should be accumulated to detect
 * safe->unsafe bugs.
 *
 * Note that usage_accumulate() is used in backwards search, so ->only_xr
 * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true).
 *
 * As above, if only_xr is false, which means A -> B has -(E*)-> dependency
 * path, any usage of A should be considered. Otherwise, we should only
 * consider _READ usage.
 */
static inline bool usage_accumulate(struct lock_list *entry, void *mask)
{
	if (!entry->only_xr)
		*(unsigned long *)mask |= entry->class->usage_mask;
	else /* Mask out _READ usage bits */
		*(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);

	return false;
}
/*
 * There is a strong dependency path in the dependency graph: A -> B, and now
 * we need to decide which usage bit of B conflicts with the usage bits of A,
 * i.e. which usage bit of B may introduce safe->unsafe deadlocks.
 *
 * As above, if only_xr is false, which means A -> B has -(*N)-> dependency
 * path, any usage of B should be considered. Otherwise, we should only
 * consider _READ usage.
 */
static inline bool usage_match(struct lock_list *entry, void *mask)
{
	if (!entry->only_xr)
		return !!(entry->class->usage_mask & *(unsigned long *)mask);
	else /* Mask out _READ usage bits */
		return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
}
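/*
 * Illustrative example (the mask value is hypothetical): when the forward
 * search looks for irq-unsafe usage, @mask may contain bits such as
 * LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_HARDIRQ_READ. If the path so far has
 * an -(*N)-> component (!only_xr), any overlap with the class's usage_mask
 * is a match; if the path is -(*R)-> only, the class's _READ usage bits are
 * dropped first (usage_mask & LOCKF_IRQ), so only its non-read usage can
 * still match.
 */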
static inline bool usage_skip(struct lock_list *entry, void *mask)
{
	/*
	 * Skip local_lock() for irq inversion detection.
	 *
	 * For !RT, local_lock() is not a real lock, so it won't carry any
	 * dependency.
	 *
	 * For RT, an irq inversion happens when we have lock A and B, and on
	 * some CPU we can have:
	 *
	 *	lock(A);
	 *	<interrupted>
	 *	  lock(B);
	 *
	 * where lock(B) cannot sleep, and we have a dependency B -> ... -> A.
	 *
	 * Now we prove local_lock() cannot exist in that dependency. First we
	 * have the observation for any lock chain L1 -> ... -> Ln, for any
	 * 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise
	 * wait context check will complain. And since B is not a sleep lock,
	 * therefore B.inner_wait_type >= 2, and since the inner_wait_type of
	 * local_lock() is 3, which is greater than 2, therefore there is no
	 * way the local_lock() exists in the dependency B -> ... -> A.
	 *
	 * As a result, we will skip local_lock(), when we search for irq
	 * inversion bugs.
	 */
	if (entry->class->lock_type == LD_LOCK_PERCPU) {
		if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
			return false;

		return true;
	}

	return false;
}
/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return BFS_MATCH if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 */
static enum bfs_result
find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
			struct lock_list **target_entry)
{
	enum bfs_result result;

	debug_atomic_inc(nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);

	return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 */
static enum bfs_result
find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
			struct lock_list **target_entry)
{
	enum bfs_result result;

	debug_atomic_inc(nr_find_usage_backwards_checks);

	result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);

	return result;
}
static void print_lock_class_header(struct lock_class *class, int depth)
{
        int bit;

        printk("%*s->", depth, "");
        print_lock_name(class);
#ifdef CONFIG_DEBUG_LOCKDEP
        printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
#endif
        printk(KERN_CONT " {\n");

        for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
                if (class->usage_mask & (1 << bit)) {
                        int len = depth;

                        len += printk("%*s   %s", depth, "", usage_str[bit]);
                        len += printk(KERN_CONT " at:\n");
                        print_lock_trace(class->usage_traces[bit], len);
                }
        }
        printk("%*s }\n", depth, "");

        printk("%*s ... key      at: [<%px>] %pS\n",
               depth, "", class->key, class->key);
}
/*
 * Dependency path printing:
 *
 * After BFS we get a lock dependency path (linked via ->parent of lock_list),
 * printing out each lock in the dependency path will help on understanding how
 * the deadlock could happen. Here are some details about dependency path
 * printing:
 *
 * 1)   A lock_list can be either forwards or backwards for a lock dependency,
 *      for a lock dependency A -> B, there are two lock_lists:
 *
 *      a)      lock_list in the ->locks_after list of A, whose ->class is B and
 *              ->links_to is A. In this case, we can say the lock_list is
 *              "A -> B" (forwards case).
 *
 *      b)      lock_list in the ->locks_before list of B, whose ->class is A
 *              and ->links_to is B. In this case, we can say the lock_list is
 *              "B <- A" (backwards case).
 *
 *      The ->trace of both a) and b) point to the call trace where B was
 *      acquired with A held.
 *
 * 2)   A "helper" lock_list is introduced during BFS, this lock_list doesn't
 *      represent a certain lock dependency, it only provides an initial entry
 *      for BFS. For example, BFS may introduce a "helper" lock_list whose
 *      ->class is A, as a result BFS will search all dependencies starting with
 *      A, e.g. A -> B or A -> C.
 *
 *      The notation of a forwards helper lock_list is like "-> A", which means
 *      we should search the forwards dependencies starting with "A", e.g A -> B
 *      or A -> C.
 *
 *      The notation of a backwards helper lock_list is like "<- B", which means
 *      we should search the backwards dependencies ending with "B", e.g.
 *      B <- A or B <- C.
 */

/*
 * printk the shortest lock dependencies from @root to @leaf in reverse order.
 *
 * We have a lock dependency path as follow:
 *
 * @root                                                         @leaf
 *   |                                                             |
 *   V              ->parent                      ->parent         V
 * | lock_list | <--------- | lock_list | ... | lock_list  | <--------- | lock_list |
 * |    -> L1  |            | L1 -> L2  | ... |Ln-2 -> Ln-1|            | Ln-1 -> Ln|
 *
 * , so it's natural that we start from @leaf and print every ->class and
 * ->trace until we reach the @root.
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
                                 struct lock_list *root)
{
        struct lock_list *entry = leaf;
        int depth;

        /*compute depth from generated tree by BFS*/
        depth = get_lock_depth(leaf);

        do {
                print_lock_class_header(entry->class, depth);
                printk("%*s ... acquired at:\n", depth, "");
                print_lock_trace(entry->trace, 2);
                printk("\n");

                if (depth == 0 && (entry != root)) {
                        printk("lockdep:%s bad path found in chain graph\n", __func__);
                        break;
                }

                entry = get_lock_parent(entry);
                depth--;
        } while (entry && (depth >= 0));
}
/*
 * printk the shortest lock dependencies from @leaf to @root.
 *
 * We have a lock dependency path (from a backwards search) as follow:
 *
 * @leaf                                                         @root
 *   |                                                             |
 *   V              ->parent                      ->parent         V
 * | lock_list | ---------> | lock_list | ... | lock_list  | ---------> | lock_list |
 * |  L2 <- L1 |            | L3 <- L2  | ... | Ln <- Ln-1 |            |    <- Ln  |
 *
 * , so when we iterate from @leaf to @root, we actually print the lock
 * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
 *
 * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
 * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call
 * trace of L1 in the dependency path, which is alright, because most of the
 * time we can figure out where L1 is held from the call trace of L2.
 */
static void __used
print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
                                           struct lock_list *root)
{
        struct lock_list *entry = leaf;
        const struct lock_trace *trace = NULL;
        int depth;

        /*compute depth from generated tree by BFS*/
        depth = get_lock_depth(leaf);

        do {
                print_lock_class_header(entry->class, depth);
                if (trace) {
                        printk("%*s ... acquired at:\n", depth, "");
                        print_lock_trace(trace, 2);
                        printk("\n");
                }

                /*
                 * Record the pointer to the trace for the next lock_list
                 * entry, see the comments for the function.
                 */
                trace = entry->trace;

                if (depth == 0 && (entry != root)) {
                        printk("lockdep:%s bad path found in chain graph\n", __func__);
                        break;
                }

                entry = get_lock_parent(entry);
                depth--;
        } while (entry && (depth >= 0));
}
static void
print_irq_lock_scenario(struct lock_list *safe_entry,
                        struct lock_list *unsafe_entry,
                        struct lock_class *prev_class,
                        struct lock_class *next_class)
{
        struct lock_class *safe_class = safe_entry->class;
        struct lock_class *unsafe_class = unsafe_entry->class;
        struct lock_class *middle_class = prev_class;

        if (middle_class == safe_class)
                middle_class = next_class;

        /*
         * A direct locking problem where unsafe_class lock is taken
         * directly by safe_class lock, then all we need to show
         * is the deadlock scenario, as it is obvious that the
         * unsafe lock is taken under the safe lock.
         *
         * But if there is a chain instead, where the safe lock takes
         * an intermediate lock (middle_class) where this lock is
         * not the same as the safe lock, then the lock chain is
         * used to describe the problem. Otherwise we would need
         * to show a different CPU case for each link in the chain
         * from the safe_class lock to the unsafe_class lock.
         */
        if (middle_class != unsafe_class) {
                printk("Chain exists of:\n  ");
                __print_lock_name(safe_class);
                printk(KERN_CONT " --> ");
                __print_lock_name(middle_class);
                printk(KERN_CONT " --> ");
                __print_lock_name(unsafe_class);
                printk(KERN_CONT "\n\n");
        }

        printk(" Possible interrupt unsafe locking scenario:\n\n");
        printk("       CPU0                    CPU1\n");
        printk("       ----                    ----\n");
        printk("  lock(");
        __print_lock_name(unsafe_class);
        printk(KERN_CONT ");\n");
        printk("                               local_irq_disable();\n");
        printk("                               lock(");
        __print_lock_name(safe_class);
        printk(KERN_CONT ");\n");
        printk("                               lock(");
        __print_lock_name(middle_class);
        printk(KERN_CONT ");\n");
        printk("  <Interrupt>\n");
        printk("    lock(");
        __print_lock_name(safe_class);
        printk(KERN_CONT ");\n");
        printk("\n *** DEADLOCK ***\n\n");
}
static void
print_bad_irq_dependency(struct task_struct *curr,
                         struct lock_list *prev_root,
                         struct lock_list *next_root,
                         struct lock_list *backwards_entry,
                         struct lock_list *forwards_entry,
                         struct held_lock *prev,
                         struct held_lock *next,
                         enum lock_usage_bit bit1,
                         enum lock_usage_bit bit2,
                         const char *irqclass)
{
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return;

        pr_warn("\n");
        pr_warn("=====================================================\n");
        pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
                irqclass, irqclass);
        print_kernel_ident();
        pr_warn("-----------------------------------------------------\n");
        pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
                curr->comm, task_pid_nr(curr),
                lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
                curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
                lockdep_hardirqs_enabled(),
                curr->softirqs_enabled);
        print_lock(next);

        pr_warn("\nand this task is already holding:\n");
        print_lock(prev);
        pr_warn("which would create a new lock dependency:\n");
        print_lock_name(hlock_class(prev));
        pr_warn(" ->");
        print_lock_name(hlock_class(next));
        pr_warn("\n");

        pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
                irqclass);
        print_lock_name(backwards_entry->class);
        pr_warn("\n... which became %s-irq-safe at:\n", irqclass);

        print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);

        pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
        print_lock_name(forwards_entry->class);
        pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);

        print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);

        pr_warn("\nother info that might help us debug this:\n\n");
        print_irq_lock_scenario(backwards_entry, forwards_entry,
                                hlock_class(prev), hlock_class(next));

        lockdep_print_held_locks(curr);

        pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
        print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);

        pr_warn("\nthe dependencies between the lock to be acquired");
        pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
        next_root->trace = save_trace();
        if (!next_root->trace)
                return;
        print_shortest_lock_dependencies(forwards_entry, next_root);

        pr_warn("\nstack backtrace:\n");
        dump_stack();
}
static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
        __stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
        __stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
        if (bit & LOCK_USAGE_READ_MASK)
                return state_rnames[bit >> LOCK_USAGE_DIR_MASK];

        return state_names[bit >> LOCK_USAGE_DIR_MASK];
}
/*
 * The bit number is encoded like:
 *
 *  bit0: 0 exclusive, 1 read lock
 *  bit1: 0 used in irq, 1 irq enabled
 *  bit2-n: state
 */
static int exclusive_bit(int new_bit)
{
        int state = new_bit & LOCK_USAGE_STATE_MASK;
        int dir = new_bit & LOCK_USAGE_DIR_MASK;

        /*
         * keep state, bit flip the direction and strip read.
         */
        return state | (dir ^ LOCK_USAGE_DIR_MASK);
}
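/*
 * For example, with the encoding above:
 *
 *      exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *      exclusive_bit(LOCK_ENABLED_SOFTIRQ_READ) == LOCK_USED_IN_SOFTIRQ
 *
 * i.e. the state is kept, the USED_IN/ENABLED direction is flipped and any
 * _READ component is dropped.
 */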
/*
 * Observe that when given a bitmask where each bitnr is encoded as above, a
 * right shift of the mask transforms the individual bitnrs as -1 and
 * conversely, a left shift transforms into +1 for the individual bitnrs.
 *
 * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
 * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
 * instead by subtracting the bit number by 2, or shifting the mask right by 2.
 *
 * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
 *
 * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
 * all bits set) and recompose with bitnr1 flipped.
 */
static unsigned long invert_dir_mask(unsigned long mask)
{
        unsigned long excl = 0;

        /* Invert dir */
        excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
        excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;

        return excl;
}
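/*
 * E.g. invert_dir_mask(LOCKF_USED_IN_HARDIRQ) == LOCKF_ENABLED_HARDIRQ and
 * invert_dir_mask(LOCKF_ENABLED_SOFTIRQ_READ) == LOCKF_USED_IN_SOFTIRQ_READ:
 * only the USED_IN/ENABLED direction of each bit changes; the state and the
 * _READ component are preserved.
 */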
/*
 * Note that a LOCK_ENABLED_IRQ_*_READ usage and a LOCK_USED_IN_IRQ_*_READ
 * usage may cause deadlock too, for example:
 *
 * P1                          P2
 * <irq disabled>
 * write_lock(l1);             <irq enabled>
 *                             read_lock(l2);
 * write_lock(l2);
 *                             <in irq>
 *                             read_lock(l1);
 *
 * , in above case, l1 will be marked as LOCK_USED_IN_IRQ_HARDIRQ_READ and l2
 * will be marked as LOCK_ENABLED_IRQ_HARDIRQ_READ, and this is a possible
 * deadlock.
 *
 * In fact, all of the following cases may cause deadlocks:
 *
 *       LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*
 *       LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*
 *       LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ
 *       LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ
 *
 * As a result, to calculate the "exclusive mask", first we invert the
 * direction (USED_IN/ENABLED) of the original mask, and 1) for all bits with
 * bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*), and 2) for
 * all bits with bitnr0 cleared (LOCK_*), add those with bitnr0 set (LOCK_*_READ).
 */
static unsigned long exclusive_mask(unsigned long mask)
{
        unsigned long excl = invert_dir_mask(mask);

        /* Include read in existing usages */
        excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
        excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;

        return excl;
}
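/*
 * E.g. exclusive_mask(LOCKF_USED_IN_HARDIRQ) ==
 *      LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_HARDIRQ_READ:
 * a lock used in hardirq context conflicts with any lock ever acquired with
 * hardirqs enabled, whether as a writer or as a reader.
 */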
/*
 * Retrieve the _possible_ original mask to which @mask is
 * exclusive. Ie: this is the opposite of exclusive_mask().
 * Note that 2 possible original bits can match an exclusive
 * bit: one has LOCK_USAGE_READ_MASK set, the other has it
 * cleared. So both are returned for each exclusive bit.
 */
static unsigned long original_mask(unsigned long mask)
{
        unsigned long excl = invert_dir_mask(mask);

        /* Include read in existing usages */
        excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
        excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;

        return excl;
}
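/*
 * E.g. original_mask(LOCKF_ENABLED_HARDIRQ) ==
 *      LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_HARDIRQ_READ:
 * both a write usage and a read usage in hardirq context are exclusive to
 * LOCKF_ENABLED_HARDIRQ, so both candidates are returned.
 */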
/*
 * Find the first pair of bit match between an original
 * usage mask and an exclusive usage mask.
 */
static int find_exclusive_match(unsigned long mask,
                                unsigned long excl_mask,
                                enum lock_usage_bit *bitp,
                                enum lock_usage_bit *excl_bitp)
{
        int bit, excl, excl_read;

        for_each_set_bit(bit, &mask, LOCK_USED) {
                /*
                 * exclusive_bit() strips the read bit, however,
                 * LOCK_ENABLED_IRQ_*_READ may cause deadlocks too, so we need
                 * to search excl | LOCK_USAGE_READ_MASK as well.
                 */
                excl = exclusive_bit(bit);
                excl_read = excl | LOCK_USAGE_READ_MASK;
                if (excl_mask & lock_flag(excl)) {
                        *bitp = bit;
                        *excl_bitp = excl;
                        return 0;
                } else if (excl_mask & lock_flag(excl_read)) {
                        *bitp = bit;
                        *excl_bitp = excl_read;
                        return 0;
                }
        }
        return -1;
}
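/*
 * For example, if @mask contains LOCK_USED_IN_HARDIRQ and @excl_mask only
 * contains LOCK_ENABLED_HARDIRQ_READ, the pair found is
 * (LOCK_USED_IN_HARDIRQ, LOCK_ENABLED_HARDIRQ_READ), via the excl_read
 * branch above: a hardirq-safe usage vs. a reader taken with hardirqs
 * enabled.
 */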
/*
 * Prove that the new dependency does not connect a hardirq-safe(-read)
 * lock with a hardirq-unsafe lock - to achieve this we search
 * the backwards-subgraph starting at <prev>, and the
 * forwards-subgraph starting at <next>:
 */
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
                           struct held_lock *next)
{
        unsigned long usage_mask = 0, forward_mask, backward_mask;
        enum lock_usage_bit forward_bit = 0, backward_bit = 0;
        struct lock_list *target_entry1;
        struct lock_list *target_entry;
        struct lock_list this, that;
        enum bfs_result ret;

        /*
         * Step 1: gather all hard/soft IRQs usages backward in an
         * accumulated usage mask.
         */
        bfs_init_rootb(&this, prev);

        ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
        if (bfs_error(ret)) {
                print_bfs_bug(ret);
                return 0;
        }

        usage_mask &= LOCKF_USED_IN_IRQ_ALL;
        if (!usage_mask)
                return 1;

        /*
         * Step 2: find exclusive uses forward that match the previous
         * backward accumulated mask.
         */
        forward_mask = exclusive_mask(usage_mask);

        bfs_init_root(&that, next);

        ret = find_usage_forwards(&that, forward_mask, &target_entry1);
        if (bfs_error(ret)) {
                print_bfs_bug(ret);
                return 0;
        }
        if (ret == BFS_RNOMATCH)
                return 1;

        /*
         * Step 3: we found a bad match! Now retrieve a lock from the backward
         * list whose usage mask matches the exclusive usage mask from the
         * lock found on the forward list.
         *
         * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
         * the following case:
         *
         * When trying to add A -> B to the graph, we find that there is a
         * hardirq-safe L, that L -> ... -> A, and another hardirq-unsafe M,
         * that B -> ... -> M. However M is **softirq-safe**, if we use exact
         * invert bits of M's usage_mask, we will find another lock N that is
         * **softirq-unsafe** and N -> ... -> A, however N -> .. -> M will not
         * cause an inversion deadlock.
         */
        backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);

        ret = find_usage_backwards(&this, backward_mask, &target_entry);
        if (bfs_error(ret)) {
                print_bfs_bug(ret);
                return 0;
        }
        if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
                return 1;

        /*
         * Step 4: narrow down to a pair of incompatible usage bits
         * and report it.
         */
        ret = find_exclusive_match(target_entry->class->usage_mask,
                                   target_entry1->class->usage_mask,
                                   &backward_bit, &forward_bit);
        if (DEBUG_LOCKS_WARN_ON(ret == -1))
                return 1;

        print_bad_irq_dependency(curr, &this, &that,
                                 target_entry, target_entry1,
                                 prev, next,
                                 backward_bit, forward_bit,
                                 state_name(backward_bit));

        return 0;
}

#else

static inline int check_irq_usage(struct task_struct *curr,
                                  struct held_lock *prev, struct held_lock *next)
{
        return 1;
}

static inline bool usage_skip(struct lock_list *entry, void *mask)
{
        return false;
}

#endif /* CONFIG_TRACE_IRQFLAGS */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * Check that the dependency graph starting at <src> can lead to
 * <target> or not. If it can, <src> -> <target> dependency is already
 * in the graph.
 *
 * Return BFS_RMATCH if it does, or BFS_RNOMATCH if it does not, return BFS_E* if
 * any error appears in the bfs search.
 */
static noinline enum bfs_result
check_redundant(struct held_lock *src, struct held_lock *target)
{
        enum bfs_result ret;
        struct lock_list *target_entry;
        struct lock_list src_entry;

        bfs_init_root(&src_entry, src);
        /*
         * Special setup for check_redundant().
         *
         * To report redundant, we need to find a strong dependency path that
         * is equal to or stronger than <src> -> <target>. So if <src> is E,
         * we need to let __bfs() only search for a path starting at a -(E*)->,
         * we achieve this by setting the initial node's ->only_xr to true in
         * that case. And if <prev> is S, we set initial ->only_xr to false
         * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
         */
        src_entry.only_xr = src->read == 0;

        debug_atomic_inc(nr_redundant_checks);

        /*
         * Note: we skip local_lock() for redundant check, because as the
         * comment in usage_skip(), A -> local_lock() -> B and A -> B are not
         * the same.
         */
        ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);

        if (ret == BFS_RMATCH)
                debug_atomic_inc(nr_redundant);

        return ret;
}

#else

static inline enum bfs_result
check_redundant(struct held_lock *src, struct held_lock *target)
{
        return BFS_RNOMATCH;
}

#endif
static void inc_chains(int irq_context)
{
        if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
                nr_hardirq_chains++;
        else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
                nr_softirq_chains++;
        else
                nr_process_chains++;
}

static void dec_chains(int irq_context)
{
        if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
                nr_hardirq_chains--;
        else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
                nr_softirq_chains--;
        else
                nr_process_chains--;
}
static void
print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
{
        struct lock_class *next = hlock_class(nxt);
        struct lock_class *prev = hlock_class(prv);

        printk(" Possible unsafe locking scenario:\n\n");
        printk("       CPU0\n");
        printk("       ----\n");
        printk("  lock(");
        __print_lock_name(prev);
        printk(KERN_CONT ");\n");
        printk("  lock(");
        __print_lock_name(next);
        printk(KERN_CONT ");\n");
        printk("\n *** DEADLOCK ***\n\n");
        printk(" May be due to missing lock nesting notation\n\n");
}
static void
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
                   struct held_lock *next)
{
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return;

        pr_warn("\n");
        pr_warn("============================================\n");
        pr_warn("WARNING: possible recursive locking detected\n");
        print_kernel_ident();
        pr_warn("--------------------------------------------\n");
        pr_warn("%s/%d is trying to acquire lock:\n",
                curr->comm, task_pid_nr(curr));
        print_lock(next);
        pr_warn("\nbut task is already holding lock:\n");
        print_lock(prev);

        pr_warn("\nother info that might help us debug this:\n");
        print_deadlock_scenario(next, prev);
        lockdep_print_held_locks(curr);

        pr_warn("\nstack backtrace:\n");
        dump_stack();
}
/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same
 * lock class is held but nest_lock is also held, i.e. we rely on the
 * nest_lock to avoid the deadlock.
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next)
{
        struct held_lock *prev;
        struct held_lock *nest = NULL;
        int i;

        for (i = 0; i < curr->lockdep_depth; i++) {
                prev = curr->held_locks + i;

                if (prev->instance == next->nest_lock)
                        nest = prev;

                if (hlock_class(prev) != hlock_class(next))
                        continue;

                /*
                 * Allow read-after-read recursion of the same
                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
                 */
                if ((next->read == 2) && prev->read)
                        continue;

                /*
                 * We're holding the nest_lock, which serializes this lock's
                 * nesting behaviour.
                 */
                if (nest)
                        return 2;

                print_deadlock_bug(curr, prev, next);
                return 0;
        }
        return 1;
}
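/*
 * Illustrative example of the "2" return above (the names here are made up):
 * code that takes many locks of one class under a single outer lock can tell
 * lockdep about it via a nest_lock, e.g.
 *
 *      mutex_lock(&parent->mux);
 *      mutex_lock_nest_lock(&child1->mux, &parent->mux);
 *      mutex_lock_nest_lock(&child2->mux, &parent->mux);
 *
 * child1->mux and child2->mux share a lock class, but because ->nest_lock of
 * the new held_lock matches the held parent->mux, check_deadlock() returns 2
 * instead of reporting a recursive locking deadlock. Read-after-read
 * recursion of the same class (next->read == 2) is likewise allowed above.
 */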
2996 * There was a chain-cache miss, and we are about to add a new dependency
2997 * to a previous lock. We validate the following rules:
2999 * - would the adding of the <prev> -> <next> dependency create a
3000 * circular dependency in the graph? [== circular deadlock]
3002 * - does the new prev->next dependency connect any hardirq-safe lock
3003 * (in the full backwards-subgraph starting at <prev>) with any
3004 * hardirq-unsafe lock (in the full forwards-subgraph starting at
3005 * <next>)? [== illegal lock inversion with hardirq contexts]
3007 * - does the new prev->next dependency connect any softirq-safe lock
3008 * (in the full backwards-subgraph starting at <prev>) with any
3009 * softirq-unsafe lock (in the full forwards-subgraph starting at
3010 * <next>)? [== illegal lock inversion with softirq contexts]
3012 * any of these scenarios could lead to a deadlock.
3014 * Then if all the validations pass, we add the forwards and backwards
3018 check_prev_add(struct task_struct
*curr
, struct held_lock
*prev
,
3019 struct held_lock
*next
, u16 distance
,
3020 struct lock_trace
**const trace
)
3022 struct lock_list
*entry
;
3023 enum bfs_result ret
;
3025 if (!hlock_class(prev
)->key
|| !hlock_class(next
)->key
) {
3027 * The warning statements below may trigger a use-after-free
3028 * of the class name. It is better to trigger a use-after free
3029 * and to have the class name most of the time instead of not
3030 * having the class name available.
3032 WARN_ONCE(!debug_locks_silent
&& !hlock_class(prev
)->key
,
3033 "Detected use-after-free of lock class %px/%s\n",
3035 hlock_class(prev
)->name
);
3036 WARN_ONCE(!debug_locks_silent
&& !hlock_class(next
)->key
,
3037 "Detected use-after-free of lock class %px/%s\n",
3039 hlock_class(next
)->name
);
3044 * Prove that the new <prev> -> <next> dependency would not
3045 * create a circular dependency in the graph. (We do this by
3046 * a breadth-first search into the graph starting at <next>,
3047 * and check whether we can reach <prev>.)
3049 * The search is limited by the size of the circular queue (i.e.,
3050 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
3051 * in the graph whose neighbours are to be checked.
3053 ret
= check_noncircular(next
, prev
, trace
);
3054 if (unlikely(bfs_error(ret
) || ret
== BFS_RMATCH
))
3057 if (!check_irq_usage(curr
, prev
, next
))
3061 * Is the <prev> -> <next> dependency already present?
3063 * (this may occur even though this is a new chain: consider
3064 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
3065 * chains - the second one will be new, but L1 already has
3066 * L2 added to its dependency list, due to the first chain.)
3068 list_for_each_entry(entry
, &hlock_class(prev
)->locks_after
, entry
) {
3069 if (entry
->class == hlock_class(next
)) {
3071 entry
->distance
= 1;
3072 entry
->dep
|= calc_dep(prev
, next
);
3075 * Also, update the reverse dependency in @next's
3076 * ->locks_before list.
3078 * Here we reuse @entry as the cursor, which is fine
3079 * because we won't go to the next iteration of the
3082 * For normal cases, we return in the inner loop.
3084 * If we fail to return, we have inconsistency, i.e.
3085 * <prev>::locks_after contains <next> while
3086 * <next>::locks_before doesn't contain <prev>. In
3087 * that case, we return after the inner and indicate
3088 * something is wrong.
3090 list_for_each_entry(entry
, &hlock_class(next
)->locks_before
, entry
) {
3091 if (entry
->class == hlock_class(prev
)) {
3093 entry
->distance
= 1;
3094 entry
->dep
|= calc_depb(prev
, next
);
3099 /* <prev> is not found in <next>::locks_before */
3105 * Is the <prev> -> <next> link redundant?
3107 ret
= check_redundant(prev
, next
);
3110 else if (ret
== BFS_RMATCH
)
3114 *trace
= save_trace();
3120 * Ok, all validations passed, add the new lock
3121 * to the previous lock's dependency list:
3123 ret
= add_lock_to_list(hlock_class(next
), hlock_class(prev
),
3124 &hlock_class(prev
)->locks_after
,
3125 next
->acquire_ip
, distance
,
3126 calc_dep(prev
, next
),
3132 ret
= add_lock_to_list(hlock_class(prev
), hlock_class(next
),
3133 &hlock_class(next
)->locks_before
,
3134 next
->acquire_ip
, distance
,
3135 calc_depb(prev
, next
),
/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
        struct lock_trace *trace = NULL;
        int depth = curr->lockdep_depth;
        struct held_lock *hlock;
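        /*
         * Illustrative walk of the loop below: with held locks
         * A, B, C(trylock) (oldest to newest) and a new lock D being added,
         * we start at C and move towards A, adding C -> D and then B -> D;
         * we stop after B because it is the first non-trylock entry, so A is
         * already connected to D indirectly through B's own dependencies.
         */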
3159 * Depth must not be zero for a non-head lock:
3164 * At least two relevant locks must exist for this
3167 if (curr
->held_locks
[depth
].irq_context
!=
3168 curr
->held_locks
[depth
-1].irq_context
)
3172 u16 distance
= curr
->lockdep_depth
- depth
+ 1;
3173 hlock
= curr
->held_locks
+ depth
- 1;
3176 int ret
= check_prev_add(curr
, hlock
, next
, distance
, &trace
);
3181 * Stop after the first non-trylock entry,
3182 * as non-trylock entries have added their
3183 * own direct dependencies already, so this
3184 * lock is connected to them indirectly:
3186 if (!hlock
->trylock
)
3192 * End of lock-stack?
3197 * Stop the search if we cross into another context:
3199 if (curr
->held_locks
[depth
].irq_context
!=
3200 curr
->held_locks
[depth
-1].irq_context
)
3205 if (!debug_locks_off_graph_unlock())
3209 * Clearly we all shouldn't be here, but since we made it we
 * can reliably say we messed up our state. See the above two
3211 * gotos for reasons why we could possibly end up here.
3218 struct lock_chain lock_chains
[MAX_LOCKDEP_CHAINS
];
3219 static DECLARE_BITMAP(lock_chains_in_use
, MAX_LOCKDEP_CHAINS
);
3220 static u16 chain_hlocks
[MAX_LOCKDEP_CHAIN_HLOCKS
];
3221 unsigned long nr_zapped_lock_chains
;
3222 unsigned int nr_free_chain_hlocks
; /* Free chain_hlocks in buckets */
3223 unsigned int nr_lost_chain_hlocks
; /* Lost chain_hlocks */
3224 unsigned int nr_large_chain_blocks
; /* size > MAX_CHAIN_BUCKETS */
3227 * The first 2 chain_hlocks entries in the chain block in the bucket
3228 * list contains the following meta data:
3231 * Bit 15 - always set to 1 (it is not a class index)
3232 * Bits 0-14 - upper 15 bits of the next block index
3233 * entry[1] - lower 16 bits of next block index
3235 * A next block index of all 1 bits means it is the end of the list.
3237 * On the unsized bucket (bucket-0), the 3rd and 4th entries contain
3238 * the chain block size:
3240 * entry[2] - upper 16 bits of the chain block size
3241 * entry[3] - lower 16 bits of the chain block size
3243 #define MAX_CHAIN_BUCKETS 16
3244 #define CHAIN_BLK_FLAG (1U << 15)
3245 #define CHAIN_BLK_LIST_END 0xFFFFU
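/*
 * Worked example of the layout above: a free block in bucket 0 (variable
 * size), 5 entries long, whose next free block starts at offset 0x12345:
 *
 *      entry[0] = (0x12345 >> 16) | CHAIN_BLK_FLAG   == 0x8001
 *      entry[1] = 0x12345 & 0xffff                   == 0x2345
 *      entry[2] = 5 >> 16                            == 0
 *      entry[3] = 5 & 0xffff                         == 5
 *
 * The last block of a list stores -1 as its next index, which truncates to
 * CHAIN_BLK_LIST_END in entry[0] (and entry[1]).
 */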
3247 static int chain_block_buckets
[MAX_CHAIN_BUCKETS
];
static inline int size_to_bucket(int size)
{
        if (size > MAX_CHAIN_BUCKETS)
                return 0;

        return size - 1;
}
/*
 * Iterate all the chain blocks in a bucket.
 */
#define for_each_chain_block(bucket, prev, curr)                \
        for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
             (curr) >= 0;                                       \
             (prev) = (curr), (curr) = chain_block_next(curr))
/*
 * next block or -1
 */
static inline int chain_block_next(int offset)
{
        int next = chain_hlocks[offset];

        WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));

        if (next == CHAIN_BLK_LIST_END)
                return -1;

        next &= ~CHAIN_BLK_FLAG;
        next <<= 16;
        next |= chain_hlocks[offset + 1];

        return next;
}

/*
 * bucket-0 only
 */
static inline int chain_block_size(int offset)
{
        return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
}
3292 static inline void init_chain_block(int offset
, int next
, int bucket
, int size
)
3294 chain_hlocks
[offset
] = (next
>> 16) | CHAIN_BLK_FLAG
;
3295 chain_hlocks
[offset
+ 1] = (u16
)next
;
3297 if (size
&& !bucket
) {
3298 chain_hlocks
[offset
+ 2] = size
>> 16;
3299 chain_hlocks
[offset
+ 3] = (u16
)size
;
3303 static inline void add_chain_block(int offset
, int size
)
3305 int bucket
= size_to_bucket(size
);
3306 int next
= chain_block_buckets
[bucket
];
3309 if (unlikely(size
< 2)) {
3311 * We can't store single entries on the freelist. Leak them.
3313 * One possible way out would be to uniquely mark them, other
3314 * than with CHAIN_BLK_FLAG, such that we can recover them when
3315 * the block before it is re-added.
3318 nr_lost_chain_hlocks
++;
3322 nr_free_chain_hlocks
+= size
;
3324 nr_large_chain_blocks
++;
3327 * Variable sized, sort large to small.
3329 for_each_chain_block(0, prev
, curr
) {
3330 if (size
>= chain_block_size(curr
))
3333 init_chain_block(offset
, curr
, 0, size
);
3335 chain_block_buckets
[0] = offset
;
3337 init_chain_block(prev
, offset
, 0, 0);
3341 * Fixed size, add to head.
3343 init_chain_block(offset
, next
, bucket
, size
);
3344 chain_block_buckets
[bucket
] = offset
;
3348 * Only the first block in the list can be deleted.
3350 * For the variable size bucket[0], the first block (the largest one) is
3351 * returned, broken up and put back into the pool. So if a chain block of
3352 * length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be
3353 * queued up after the primordial chain block and never be used until the
 * hlock entries in the primordial chain block are almost used up. That
 * causes fragmentation and reduces allocation efficiency. That can be
3356 * monitored by looking at the "large chain blocks" number in lockdep_stats.
3358 static inline void del_chain_block(int bucket
, int size
, int next
)
3360 nr_free_chain_hlocks
-= size
;
3361 chain_block_buckets
[bucket
] = next
;
3364 nr_large_chain_blocks
--;
3367 static void init_chain_block_buckets(void)
3371 for (i
= 0; i
< MAX_CHAIN_BUCKETS
; i
++)
3372 chain_block_buckets
[i
] = -1;
3374 add_chain_block(0, ARRAY_SIZE(chain_hlocks
));
3378 * Return offset of a chain block of the right size or -1 if not found.
3380 * Fairly simple worst-fit allocator with the addition of a number of size
3381 * specific free lists.
3383 static int alloc_chain_hlocks(int req
)
3385 int bucket
, curr
, size
;
3388 * We rely on the MSB to act as an escape bit to denote freelist
3389 * pointers. Make sure this bit isn't set in 'normal' class_idx usage.
3391 BUILD_BUG_ON((MAX_LOCKDEP_KEYS
-1) & CHAIN_BLK_FLAG
);
3393 init_data_structures_once();
3395 if (nr_free_chain_hlocks
< req
)
3399 * We require a minimum of 2 (u16) entries to encode a freelist
3403 bucket
= size_to_bucket(req
);
3404 curr
= chain_block_buckets
[bucket
];
3408 del_chain_block(bucket
, req
, chain_block_next(curr
));
3412 curr
= chain_block_buckets
[0];
3416 * The variable sized freelist is sorted by size; the first entry is
3417 * the largest. Use it if it fits.
3420 size
= chain_block_size(curr
);
3421 if (likely(size
>= req
)) {
3422 del_chain_block(0, size
, chain_block_next(curr
));
3423 add_chain_block(curr
+ req
, size
- req
);
3429 * Last resort, split a block in a larger sized bucket.
3431 for (size
= MAX_CHAIN_BUCKETS
; size
> req
; size
--) {
3432 bucket
= size_to_bucket(size
);
3433 curr
= chain_block_buckets
[bucket
];
3437 del_chain_block(bucket
, size
, chain_block_next(curr
));
3438 add_chain_block(curr
+ req
, size
- req
);
3445 static inline void free_chain_hlocks(int base
, int size
)
3447 add_chain_block(base
, max(size
, 2));
3450 struct lock_class
*lock_chain_get_class(struct lock_chain
*chain
, int i
)
3452 u16 chain_hlock
= chain_hlocks
[chain
->base
+ i
];
3453 unsigned int class_idx
= chain_hlock_class_idx(chain_hlock
);
3455 return lock_classes
+ class_idx
;
3459 * Returns the index of the first held_lock of the current chain
3461 static inline int get_first_held_lock(struct task_struct
*curr
,
3462 struct held_lock
*hlock
)
3465 struct held_lock
*hlock_curr
;
3467 for (i
= curr
->lockdep_depth
- 1; i
>= 0; i
--) {
3468 hlock_curr
= curr
->held_locks
+ i
;
3469 if (hlock_curr
->irq_context
!= hlock
->irq_context
)
3477 #ifdef CONFIG_DEBUG_LOCKDEP
3479 * Returns the next chain_key iteration
3481 static u64
print_chain_key_iteration(u16 hlock_id
, u64 chain_key
)
3483 u64 new_chain_key
= iterate_chain_key(chain_key
, hlock_id
);
3485 printk(" hlock_id:%d -> chain_key:%016Lx",
3486 (unsigned int)hlock_id
,
3487 (unsigned long long)new_chain_key
);
3488 return new_chain_key
;
3492 print_chain_keys_held_locks(struct task_struct
*curr
, struct held_lock
*hlock_next
)
3494 struct held_lock
*hlock
;
3495 u64 chain_key
= INITIAL_CHAIN_KEY
;
3496 int depth
= curr
->lockdep_depth
;
3497 int i
= get_first_held_lock(curr
, hlock_next
);
3499 printk("depth: %u (irq_context %u)\n", depth
- i
+ 1,
3500 hlock_next
->irq_context
);
3501 for (; i
< depth
; i
++) {
3502 hlock
= curr
->held_locks
+ i
;
3503 chain_key
= print_chain_key_iteration(hlock_id(hlock
), chain_key
);
3508 print_chain_key_iteration(hlock_id(hlock_next
), chain_key
);
3509 print_lock(hlock_next
);
3512 static void print_chain_keys_chain(struct lock_chain
*chain
)
3515 u64 chain_key
= INITIAL_CHAIN_KEY
;
3518 printk("depth: %u\n", chain
->depth
);
3519 for (i
= 0; i
< chain
->depth
; i
++) {
3520 hlock_id
= chain_hlocks
[chain
->base
+ i
];
3521 chain_key
= print_chain_key_iteration(hlock_id
, chain_key
);
3523 print_lock_name(lock_classes
+ chain_hlock_class_idx(hlock_id
));
3528 static void print_collision(struct task_struct
*curr
,
3529 struct held_lock
*hlock_next
,
3530 struct lock_chain
*chain
)
3533 pr_warn("============================\n");
3534 pr_warn("WARNING: chain_key collision\n");
3535 print_kernel_ident();
3536 pr_warn("----------------------------\n");
3537 pr_warn("%s/%d: ", current
->comm
, task_pid_nr(current
));
3538 pr_warn("Hash chain already cached but the contents don't match!\n");
3540 pr_warn("Held locks:");
3541 print_chain_keys_held_locks(curr
, hlock_next
);
3543 pr_warn("Locks in cached chain:");
3544 print_chain_keys_chain(chain
);
3546 pr_warn("\nstack backtrace:\n");
3552 * Checks whether the chain and the current held locks are consistent
 * in depth and also in content. If they are not, it most likely means
3554 * that there was a collision during the calculation of the chain_key.
3555 * Returns: 0 not passed, 1 passed
3557 static int check_no_collision(struct task_struct
*curr
,
3558 struct held_lock
*hlock
,
3559 struct lock_chain
*chain
)
3561 #ifdef CONFIG_DEBUG_LOCKDEP
3564 i
= get_first_held_lock(curr
, hlock
);
3566 if (DEBUG_LOCKS_WARN_ON(chain
->depth
!= curr
->lockdep_depth
- (i
- 1))) {
3567 print_collision(curr
, hlock
, chain
);
3571 for (j
= 0; j
< chain
->depth
- 1; j
++, i
++) {
3572 id
= hlock_id(&curr
->held_locks
[i
]);
3574 if (DEBUG_LOCKS_WARN_ON(chain_hlocks
[chain
->base
+ j
] != id
)) {
3575 print_collision(curr
, hlock
, chain
);
3584 * Given an index that is >= -1, return the index of the next lock chain.
3585 * Return -2 if there is no next lock chain.
3587 long lockdep_next_lockchain(long i
)
3589 i
= find_next_bit(lock_chains_in_use
, ARRAY_SIZE(lock_chains
), i
+ 1);
3590 return i
< ARRAY_SIZE(lock_chains
) ? i
: -2;
3593 unsigned long lock_chain_count(void)
3595 return bitmap_weight(lock_chains_in_use
, ARRAY_SIZE(lock_chains
));
3598 /* Must be called with the graph lock held. */
3599 static struct lock_chain
*alloc_lock_chain(void)
3601 int idx
= find_first_zero_bit(lock_chains_in_use
,
3602 ARRAY_SIZE(lock_chains
));
3604 if (unlikely(idx
>= ARRAY_SIZE(lock_chains
)))
3606 __set_bit(idx
, lock_chains_in_use
);
3607 return lock_chains
+ idx
;
3611 * Adds a dependency chain into chain hashtable. And must be called with
3614 * Return 0 if fail, and graph_lock is released.
3615 * Return 1 if succeed, with graph_lock held.
3617 static inline int add_chain_cache(struct task_struct
*curr
,
3618 struct held_lock
*hlock
,
3621 struct hlist_head
*hash_head
= chainhashentry(chain_key
);
3622 struct lock_chain
*chain
;
3626 * The caller must hold the graph lock, ensure we've got IRQs
3627 * disabled to make this an IRQ-safe lock.. for recursion reasons
3628 * lockdep won't complain about its own locking errors.
3630 if (lockdep_assert_locked())
3633 chain
= alloc_lock_chain();
3635 if (!debug_locks_off_graph_unlock())
3638 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
3642 chain
->chain_key
= chain_key
;
3643 chain
->irq_context
= hlock
->irq_context
;
3644 i
= get_first_held_lock(curr
, hlock
);
3645 chain
->depth
= curr
->lockdep_depth
+ 1 - i
;
3647 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks
));
3648 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr
->held_locks
));
3649 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks
[0])) <= ARRAY_SIZE(lock_classes
));
3651 j
= alloc_chain_hlocks(chain
->depth
);
3653 if (!debug_locks_off_graph_unlock())
3656 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
3662 for (j
= 0; j
< chain
->depth
- 1; j
++, i
++) {
3663 int lock_id
= hlock_id(curr
->held_locks
+ i
);
3665 chain_hlocks
[chain
->base
+ j
] = lock_id
;
3667 chain_hlocks
[chain
->base
+ j
] = hlock_id(hlock
);
3668 hlist_add_head_rcu(&chain
->entry
, hash_head
);
3669 debug_atomic_inc(chain_lookup_misses
);
3670 inc_chains(chain
->irq_context
);
3676 * Look up a dependency chain. Must be called with either the graph lock or
3677 * the RCU read lock held.
3679 static inline struct lock_chain
*lookup_chain_cache(u64 chain_key
)
3681 struct hlist_head
*hash_head
= chainhashentry(chain_key
);
3682 struct lock_chain
*chain
;
3684 hlist_for_each_entry_rcu(chain
, hash_head
, entry
) {
3685 if (READ_ONCE(chain
->chain_key
) == chain_key
) {
3686 debug_atomic_inc(chain_lookup_hits
);
3694 * If the key is not present yet in dependency chain cache then
3695 * add it and return 1 - in this case the new dependency chain is
3696 * validated. If the key is already hashed, return 0.
3697 * (On return with 1 graph_lock is held.)
3699 static inline int lookup_chain_cache_add(struct task_struct
*curr
,
3700 struct held_lock
*hlock
,
3703 struct lock_class
*class = hlock_class(hlock
);
3704 struct lock_chain
*chain
= lookup_chain_cache(chain_key
);
3708 if (!check_no_collision(curr
, hlock
, chain
))
3711 if (very_verbose(class)) {
3712 printk("\nhash chain already cached, key: "
3713 "%016Lx tail class: [%px] %s\n",
3714 (unsigned long long)chain_key
,
3715 class->key
, class->name
);
3721 if (very_verbose(class)) {
3722 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3723 (unsigned long long)chain_key
, class->key
, class->name
);
3730 * We have to walk the chain again locked - to avoid duplicates:
3732 chain
= lookup_chain_cache(chain_key
);
3738 if (!add_chain_cache(curr
, hlock
, chain_key
))
3744 static int validate_chain(struct task_struct
*curr
,
3745 struct held_lock
*hlock
,
3746 int chain_head
, u64 chain_key
)
3749 * Trylock needs to maintain the stack of held locks, but it
3750 * does not add new dependencies, because trylock can be done
3753 * We look up the chain_key and do the O(N^2) check and update of
3754 * the dependencies only if this is a new dependency chain.
3755 * (If lookup_chain_cache_add() return with 1 it acquires
3756 * graph_lock for us)
3758 if (!hlock
->trylock
&& hlock
->check
&&
3759 lookup_chain_cache_add(curr
, hlock
, chain_key
)) {
3761 * Check whether last held lock:
3763 * - is irq-safe, if this lock is irq-unsafe
3764 * - is softirq-safe, if this lock is hardirq-unsafe
3766 * And check whether the new lock's dependency graph
3767 * could lead back to the previous lock:
3769 * - within the current held-lock stack
3770 * - across our accumulated lock dependency records
3772 * any of these scenarios could lead to a deadlock.
3775 * The simple case: does the current hold the same lock
3778 int ret
= check_deadlock(curr
, hlock
);
3783 * Add dependency only if this lock is not the head
3784 * of the chain, and if the new lock introduces no more
3785 * lock dependency (because we already hold a lock with the
3786 * same lock class) nor deadlock (because the nest_lock
3787 * serializes nesting locks), see the comments for
3790 if (!chain_head
&& ret
!= 2) {
3791 if (!check_prevs_add(curr
, hlock
))
3797 /* after lookup_chain_cache_add(): */
3798 if (unlikely(!debug_locks
))
3805 static inline int validate_chain(struct task_struct
*curr
,
3806 struct held_lock
*hlock
,
3807 int chain_head
, u64 chain_key
)
3812 static void init_chain_block_buckets(void) { }
3813 #endif /* CONFIG_PROVE_LOCKING */
3816 * We are building curr_chain_key incrementally, so double-check
3817 * it from scratch, to make sure that it's done correctly:
3819 static void check_chain_key(struct task_struct
*curr
)
3821 #ifdef CONFIG_DEBUG_LOCKDEP
3822 struct held_lock
*hlock
, *prev_hlock
= NULL
;
3824 u64 chain_key
= INITIAL_CHAIN_KEY
;
3826 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
3827 hlock
= curr
->held_locks
+ i
;
3828 if (chain_key
!= hlock
->prev_chain_key
) {
3831 * We got mighty confused, our chain keys don't match
3832 * with what we expect, someone trample on our task state?
3834 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
3835 curr
->lockdep_depth
, i
,
3836 (unsigned long long)chain_key
,
3837 (unsigned long long)hlock
->prev_chain_key
);
3842 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
3843 * it registered lock class index?
3845 if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock
->class_idx
, lock_classes_in_use
)))
3848 if (prev_hlock
&& (prev_hlock
->irq_context
!=
3849 hlock
->irq_context
))
3850 chain_key
= INITIAL_CHAIN_KEY
;
3851 chain_key
= iterate_chain_key(chain_key
, hlock_id(hlock
));
3854 if (chain_key
!= curr
->curr_chain_key
) {
3857 * More smoking hash instead of calculating it, damn see these
3858 * numbers float.. I bet that a pink elephant stepped on my memory.
3860 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
3861 curr
->lockdep_depth
, i
,
3862 (unsigned long long)chain_key
,
3863 (unsigned long long)curr
->curr_chain_key
);
3868 #ifdef CONFIG_PROVE_LOCKING
3869 static int mark_lock(struct task_struct
*curr
, struct held_lock
*this,
3870 enum lock_usage_bit new_bit
);
3872 static void print_usage_bug_scenario(struct held_lock
*lock
)
3874 struct lock_class
*class = hlock_class(lock
);
3876 printk(" Possible unsafe locking scenario:\n\n");
3880 __print_lock_name(class);
3881 printk(KERN_CONT
");\n");
3882 printk(" <Interrupt>\n");
3884 __print_lock_name(class);
3885 printk(KERN_CONT
");\n");
3886 printk("\n *** DEADLOCK ***\n\n");
3890 print_usage_bug(struct task_struct
*curr
, struct held_lock
*this,
3891 enum lock_usage_bit prev_bit
, enum lock_usage_bit new_bit
)
3893 if (!debug_locks_off() || debug_locks_silent
)
3897 pr_warn("================================\n");
3898 pr_warn("WARNING: inconsistent lock state\n");
3899 print_kernel_ident();
3900 pr_warn("--------------------------------\n");
3902 pr_warn("inconsistent {%s} -> {%s} usage.\n",
3903 usage_str
[prev_bit
], usage_str
[new_bit
]);
3905 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3906 curr
->comm
, task_pid_nr(curr
),
3907 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT
,
3908 lockdep_softirq_context(curr
), softirq_count() >> SOFTIRQ_SHIFT
,
3909 lockdep_hardirqs_enabled(),
3910 lockdep_softirqs_enabled(curr
));
3913 pr_warn("{%s} state was registered at:\n", usage_str
[prev_bit
]);
3914 print_lock_trace(hlock_class(this)->usage_traces
[prev_bit
], 1);
3916 print_irqtrace_events(curr
);
3917 pr_warn("\nother info that might help us debug this:\n");
3918 print_usage_bug_scenario(this);
3920 lockdep_print_held_locks(curr
);
3922 pr_warn("\nstack backtrace:\n");
3927 * Print out an error if an invalid bit is set:
3930 valid_state(struct task_struct
*curr
, struct held_lock
*this,
3931 enum lock_usage_bit new_bit
, enum lock_usage_bit bad_bit
)
3933 if (unlikely(hlock_class(this)->usage_mask
& (1 << bad_bit
))) {
3935 print_usage_bug(curr
, this, bad_bit
, new_bit
);
3943 * print irq inversion bug:
3946 print_irq_inversion_bug(struct task_struct
*curr
,
3947 struct lock_list
*root
, struct lock_list
*other
,
3948 struct held_lock
*this, int forwards
,
3949 const char *irqclass
)
3951 struct lock_list
*entry
= other
;
3952 struct lock_list
*middle
= NULL
;
3955 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
3959 pr_warn("========================================================\n");
3960 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3961 print_kernel_ident();
3962 pr_warn("--------------------------------------------------------\n");
3963 pr_warn("%s/%d just changed the state of lock:\n",
3964 curr
->comm
, task_pid_nr(curr
));
3967 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass
);
3969 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass
);
3970 print_lock_name(other
->class);
3971 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
3973 pr_warn("\nother info that might help us debug this:\n");
3975 /* Find a middle lock (if one exists) */
3976 depth
= get_lock_depth(other
);
3978 if (depth
== 0 && (entry
!= root
)) {
3979 pr_warn("lockdep:%s bad path found in chain graph\n", __func__
);
3983 entry
= get_lock_parent(entry
);
3985 } while (entry
&& entry
!= root
&& (depth
>= 0));
3987 print_irq_lock_scenario(root
, other
,
3988 middle
? middle
->class : root
->class, other
->class);
3990 print_irq_lock_scenario(other
, root
,
3991 middle
? middle
->class : other
->class, root
->class);
3993 lockdep_print_held_locks(curr
);
3995 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
3996 root
->trace
= save_trace();
3999 print_shortest_lock_dependencies(other
, root
);
4001 pr_warn("\nstack backtrace:\n");
4006 * Prove that in the forwards-direction subgraph starting at <this>
4007 * there is no lock matching <mask>:
4010 check_usage_forwards(struct task_struct
*curr
, struct held_lock
*this,
4011 enum lock_usage_bit bit
)
4013 enum bfs_result ret
;
4014 struct lock_list root
;
4015 struct lock_list
*target_entry
;
4016 enum lock_usage_bit read_bit
= bit
+ LOCK_USAGE_READ_MASK
;
4017 unsigned usage_mask
= lock_flag(bit
) | lock_flag(read_bit
);
4019 bfs_init_root(&root
, this);
4020 ret
= find_usage_forwards(&root
, usage_mask
, &target_entry
);
4021 if (bfs_error(ret
)) {
4025 if (ret
== BFS_RNOMATCH
)
4028 /* Check whether write or read usage is the match */
4029 if (target_entry
->class->usage_mask
& lock_flag(bit
)) {
4030 print_irq_inversion_bug(curr
, &root
, target_entry
,
4031 this, 1, state_name(bit
));
4033 print_irq_inversion_bug(curr
, &root
, target_entry
,
4034 this, 1, state_name(read_bit
));
4041 * Prove that in the backwards-direction subgraph starting at <this>
4042 * there is no lock matching <mask>:
4045 check_usage_backwards(struct task_struct
*curr
, struct held_lock
*this,
4046 enum lock_usage_bit bit
)
4048 enum bfs_result ret
;
4049 struct lock_list root
;
4050 struct lock_list
*target_entry
;
4051 enum lock_usage_bit read_bit
= bit
+ LOCK_USAGE_READ_MASK
;
4052 unsigned usage_mask
= lock_flag(bit
) | lock_flag(read_bit
);
4054 bfs_init_rootb(&root
, this);
4055 ret
= find_usage_backwards(&root
, usage_mask
, &target_entry
);
4056 if (bfs_error(ret
)) {
4060 if (ret
== BFS_RNOMATCH
)
4063 /* Check whether write or read usage is the match */
4064 if (target_entry
->class->usage_mask
& lock_flag(bit
)) {
4065 print_irq_inversion_bug(curr
, &root
, target_entry
,
4066 this, 0, state_name(bit
));
4068 print_irq_inversion_bug(curr
, &root
, target_entry
,
4069 this, 0, state_name(read_bit
));
4075 void print_irqtrace_events(struct task_struct
*curr
)
4077 const struct irqtrace_events
*trace
= &curr
->irqtrace
;
4079 printk("irq event stamp: %u\n", trace
->irq_events
);
4080 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
4081 trace
->hardirq_enable_event
, (void *)trace
->hardirq_enable_ip
,
4082 (void *)trace
->hardirq_enable_ip
);
4083 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
4084 trace
->hardirq_disable_event
, (void *)trace
->hardirq_disable_ip
,
4085 (void *)trace
->hardirq_disable_ip
);
4086 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
4087 trace
->softirq_enable_event
, (void *)trace
->softirq_enable_ip
,
4088 (void *)trace
->softirq_enable_ip
);
4089 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
4090 trace
->softirq_disable_event
, (void *)trace
->softirq_disable_ip
,
4091 (void *)trace
->softirq_disable_ip
);
4094 static int HARDIRQ_verbose(struct lock_class
*class)
4097 return class_filter(class);
4102 static int SOFTIRQ_verbose(struct lock_class
*class)
4105 return class_filter(class);
4110 static int (*state_verbose_f
[])(struct lock_class
*class) = {
4111 #define LOCKDEP_STATE(__STATE) \
4113 #include "lockdep_states.h"
4114 #undef LOCKDEP_STATE
4117 static inline int state_verbose(enum lock_usage_bit bit
,
4118 struct lock_class
*class)
4120 return state_verbose_f
[bit
>> LOCK_USAGE_DIR_MASK
](class);
4123 typedef int (*check_usage_f
)(struct task_struct
*, struct held_lock
*,
4124 enum lock_usage_bit bit
, const char *name
);
4127 mark_lock_irq(struct task_struct
*curr
, struct held_lock
*this,
4128 enum lock_usage_bit new_bit
)
4130 int excl_bit
= exclusive_bit(new_bit
);
4131 int read
= new_bit
& LOCK_USAGE_READ_MASK
;
4132 int dir
= new_bit
& LOCK_USAGE_DIR_MASK
;
4135 * Validate that this particular lock does not have conflicting
4138 if (!valid_state(curr
, this, new_bit
, excl_bit
))
4142 * Check for read in write conflicts
4144 if (!read
&& !valid_state(curr
, this, new_bit
,
4145 excl_bit
+ LOCK_USAGE_READ_MASK
))
4150 * Validate that the lock dependencies don't have conflicting usage
4155 * mark ENABLED has to look backwards -- to ensure no dependee
4156 * has USED_IN state, which, again, would allow recursion deadlocks.
4158 if (!check_usage_backwards(curr
, this, excl_bit
))
4162 * mark USED_IN has to look forwards -- to ensure no dependency
4163 * has ENABLED state, which would allow recursion deadlocks.
4165 if (!check_usage_forwards(curr
, this, excl_bit
))
4169 if (state_verbose(new_bit
, hlock_class(this)))
4176 * Mark all held locks with a usage bit:
4179 mark_held_locks(struct task_struct
*curr
, enum lock_usage_bit base_bit
)
4181 struct held_lock
*hlock
;
4184 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
4185 enum lock_usage_bit hlock_bit
= base_bit
;
4186 hlock
= curr
->held_locks
+ i
;
4189 hlock_bit
+= LOCK_USAGE_READ_MASK
;
4191 BUG_ON(hlock_bit
>= LOCK_USAGE_STATES
);
4196 if (!mark_lock(curr
, hlock
, hlock_bit
))
4204 * Hardirqs will be enabled:
4206 static void __trace_hardirqs_on_caller(void)
4208 struct task_struct
*curr
= current
;
4211 * We are going to turn hardirqs on, so set the
4212 * usage bit for all held locks:
4214 if (!mark_held_locks(curr
, LOCK_ENABLED_HARDIRQ
))
4217 * If we have softirqs enabled, then set the usage
4218 * bit for all held locks. (disabled hardirqs prevented
4219 * this bit from being set before)
4221 if (curr
->softirqs_enabled
)
4222 mark_held_locks(curr
, LOCK_ENABLED_SOFTIRQ
);
4226 * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
4227 * @ip: Caller address
4229 * Invoked before a possible transition to RCU idle from exit to user or
4230 * guest mode. This ensures that all RCU operations are done before RCU
4231 * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
4232 * invoked to set the final state.
4234 void lockdep_hardirqs_on_prepare(unsigned long ip
)
4236 if (unlikely(!debug_locks
))
4240 * NMIs do not (and cannot) track lock dependencies, nothing to do.
4242 if (unlikely(in_nmi()))
4245 if (unlikely(this_cpu_read(lockdep_recursion
)))
4248 if (unlikely(lockdep_hardirqs_enabled())) {
4250 * Neither irq nor preemption are disabled here
4251 * so this is racy by nature but losing one hit
4252 * in a stat is not a big deal.
4254 __debug_atomic_inc(redundant_hardirqs_on
);
4259 * We're enabling irqs and according to our state above irqs weren't
4260 * already enabled, yet we find the hardware thinks they are in fact
4261 * enabled.. someone messed up their IRQ state tracing.
4263 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4267 * See the fine text that goes along with this variable definition.
4269 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled
))
4273 * Can't allow enabling interrupts while in an interrupt handler,
4274 * that's general bad form and such. Recursion, limited stack etc..
4276 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
4279 current
->hardirq_chain_key
= current
->curr_chain_key
;
4281 lockdep_recursion_inc();
4282 __trace_hardirqs_on_caller();
4283 lockdep_recursion_finish();
4285 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare
);
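/*
 * Sketch of the expected calling convention (the actual call sites live in
 * the irq-flags tracing / entry code, not in this file):
 *
 *      lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 *      ...     // possible transition to RCU idle happens here
 *      lockdep_hardirqs_on(CALLER_ADDR0);
 *
 * i.e. the held-lock marking above runs while RCU is still watching, and
 * lockdep_hardirqs_on() below then records the final OFF -> ON transition.
 */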
void noinstr lockdep_hardirqs_on(unsigned long ip)
{
        struct irqtrace_events *trace = &current->irqtrace;
4291 if (unlikely(!debug_locks
))
4295 * NMIs can happen in the middle of local_irq_{en,dis}able() where the
4296 * tracking state and hardware state are out of sync.
4298 * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
4299 * and not rely on hardware state like normal interrupts.
4301 if (unlikely(in_nmi())) {
4302 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI
))
4307 * - recursion check, because NMI can hit lockdep;
4308 * - hardware state check, because above;
4309 * - chain_key check, see lockdep_hardirqs_on_prepare().
4314 if (unlikely(this_cpu_read(lockdep_recursion
)))
4317 if (lockdep_hardirqs_enabled()) {
4319 * Neither irq nor preemption are disabled here
4320 * so this is racy by nature but losing one hit
4321 * in a stat is not a big deal.
4323 __debug_atomic_inc(redundant_hardirqs_on
);
4328 * We're enabling irqs and according to our state above irqs weren't
4329 * already enabled, yet we find the hardware thinks they are in fact
4330 * enabled.. someone messed up their IRQ state tracing.
4332 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4336 * Ensure the lock stack remained unchanged between
4337 * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
4339 DEBUG_LOCKS_WARN_ON(current
->hardirq_chain_key
!=
4340 current
->curr_chain_key
);
4343 /* we'll do an OFF -> ON transition: */
4344 __this_cpu_write(hardirqs_enabled
, 1);
4345 trace
->hardirq_enable_ip
= ip
;
4346 trace
->hardirq_enable_event
= ++trace
->irq_events
;
4347 debug_atomic_inc(hardirqs_on_events
);
4349 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on
);
4352 * Hardirqs were disabled:
4354 void noinstr
lockdep_hardirqs_off(unsigned long ip
)
4356 if (unlikely(!debug_locks
))
4360 * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
4361 * they will restore the software state. This ensures the software
4362 * state is consistent inside NMIs as well.
4365 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI
))
4367 } else if (__this_cpu_read(lockdep_recursion
))
4371 * So we're supposed to get called after you mask local IRQs, but for
4372 * some reason the hardware doesn't quite think you did a proper job.
4374 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4377 if (lockdep_hardirqs_enabled()) {
                struct irqtrace_events *trace = &current->irqtrace;
4381 * We have done an ON -> OFF transition:
4383 __this_cpu_write(hardirqs_enabled
, 0);
4384 trace
->hardirq_disable_ip
= ip
;
4385 trace
->hardirq_disable_event
= ++trace
->irq_events
;
4386 debug_atomic_inc(hardirqs_off_events
);
4388 debug_atomic_inc(redundant_hardirqs_off
);
4391 EXPORT_SYMBOL_GPL(lockdep_hardirqs_off
);
4394 * Softirqs will be enabled:
4396 void lockdep_softirqs_on(unsigned long ip
)
        struct irqtrace_events *trace = &current->irqtrace;
4400 if (unlikely(!lockdep_enabled()))
4404 * We fancy IRQs being disabled here, see softirq.c, avoids
4405 * funny state and nesting things.
4407 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4410 if (current
->softirqs_enabled
) {
4411 debug_atomic_inc(redundant_softirqs_on
);
4415 lockdep_recursion_inc();
4417 * We'll do an OFF -> ON transition:
4419 current
->softirqs_enabled
= 1;
4420 trace
->softirq_enable_ip
= ip
;
4421 trace
->softirq_enable_event
= ++trace
->irq_events
;
4422 debug_atomic_inc(softirqs_on_events
);
4424 * We are going to turn softirqs on, so set the
4425 * usage bit for all held locks, if hardirqs are
4428 if (lockdep_hardirqs_enabled())
4429 mark_held_locks(current
, LOCK_ENABLED_SOFTIRQ
);
4430 lockdep_recursion_finish();

/*
 * Softirqs were disabled:
 */
void lockdep_softirqs_off(unsigned long ip)
{
	if (unlikely(!lockdep_enabled()))
		return;

	/*
	 * We fancy IRQs being disabled here, see softirq.c
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (current->softirqs_enabled) {
		struct irqtrace_events *trace = &current->irqtrace;

		/*
		 * We have done an ON -> OFF transition:
		 */
		current->softirqs_enabled = 0;
		trace->softirq_disable_ip = ip;
		trace->softirq_disable_event = ++trace->irq_events;
		debug_atomic_inc(softirqs_off_events);
		/*
		 * Whoops, we wanted softirqs off, so why aren't they?
		 */
		DEBUG_LOCKS_WARN_ON(!softirq_count());
	} else
		debug_atomic_inc(redundant_softirqs_off);
}

static int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
	if (!check)
		goto lock_used;

	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
	if (!hlock->trylock) {
		if (hlock->read) {
			if (lockdep_hardirq_context())
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
			if (lockdep_hardirq_context())
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
					return 0;
		}
	}
	if (!hlock->hardirqs_off) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ))
					return 0;
		}
	}

lock_used:
	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;

	return 1;
}
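
/*
 * Example of what mark_usage() records (illustrative only): a non-trylock
 * spin_lock() taken from a hardirq handler sets LOCK_USED_IN_HARDIRQ for
 * that class, while the same class taken elsewhere with hardirqs still
 * enabled sets LOCK_ENABLED_HARDIRQ.  Seeing both bits on one class is the
 * classic irq-safe vs irq-unsafe inversion lockdep warns about:
 *
 *	spin_lock(&a);		// task context, hardirqs on -> ENABLED_HARDIRQ
 *	...
 *	// later, in an interrupt handler:
 *	spin_lock(&a);		// -> USED_IN_HARDIRQ: possible deadlock
 *
 * The lock 'a' and the scenario are made up purely for illustration.
 */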

static inline unsigned int task_irq_context(struct task_struct *task)
{
	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
	       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
}

static int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	unsigned int depth = curr->lockdep_depth;

	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context)
			return 1;
	}
	return 0;
}

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask, ret = 1;

	if (new_bit >= LOCK_USAGE_STATES) {
		DEBUG_LOCKS_WARN_ON(1);
		return 0;
	}

	if (new_bit == LOCK_USED && this->read)
		new_bit = LOCK_USED_READ;

	new_mask = 1 << new_bit;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didn't race:
	 */
	if (unlikely(hlock_class(this)->usage_mask & new_mask))
		goto unlock;

	if (!hlock_class(this)->usage_mask)
		debug_atomic_dec(nr_unused_locks);

	hlock_class(this)->usage_mask |= new_mask;

	if (new_bit < LOCK_TRACE_STATES) {
		if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
			return 0;
	}

	if (new_bit < LOCK_USED) {
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
	}

unlock:
	graph_unlock();

	/*
	 * We must printk outside of the graph_lock:
	 */
	if (ret == 2) {
		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
		print_lock(this);
		print_irqtrace_events(curr);
		dump_stack();
	}

	return ret;
}

static inline short task_wait_context(struct task_struct *curr)
{
	/*
	 * Set appropriate wait type for the context; for IRQs we have to take
	 * into account force_irqthread as that is implied by PREEMPT_RT.
	 */
	if (lockdep_hardirq_context()) {
		/*
		 * Check if force_irqthreads will run us threaded.
		 */
		if (curr->hardirq_threaded || curr->irq_config)
			return LD_WAIT_CONFIG;

		return LD_WAIT_SPIN;
	} else if (curr->softirq_context) {
		/*
		 * Softirqs are always threaded.
		 */
		return LD_WAIT_CONFIG;
	}

	return LD_WAIT_MAX;
}

static int
print_lock_invalid_wait_context(struct task_struct *curr,
				struct held_lock *hlock)
{
	short curr_inner;

	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("=============================\n");
	pr_warn("[ BUG: Invalid wait context ]\n");
	print_kernel_ident();
	pr_warn("-----------------------------\n");

	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
	print_lock(hlock);

	pr_warn("other info that might help us debug this:\n");

	curr_inner = task_wait_context(curr);
	pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);

	lockdep_print_held_locks(curr);

	pr_warn("stack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Verify the wait_type context.
 *
 * This check validates we take locks in the right wait-type order; that is it
 * ensures that we do not take mutexes inside spinlocks and do not attempt to
 * acquire spinlocks inside raw_spinlocks and the sort.
 *
 * The entire thing is slightly more complex because of RCU, RCU is a lock that
 * can be taken from (pretty much) any context but also has constraints.
 * However when taken in a stricter environment the RCU lock does not loosen
 * the constraints.
 *
 * Therefore we must look for the strictest environment in the lock stack and
 * compare that to the lock we're trying to acquire.
 */
static int check_wait_context(struct task_struct *curr, struct held_lock *next)
{
	u8 next_inner = hlock_class(next)->wait_type_inner;
	u8 next_outer = hlock_class(next)->wait_type_outer;
	u8 curr_inner;
	int depth;

	if (!next_inner || next->trylock)
		return 0;

	if (!next_outer)
		next_outer = next_inner;

	/*
	 * Find start of current irq_context..
	 */
	for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) {
		struct held_lock *prev = curr->held_locks + depth;
		if (prev->irq_context != next->irq_context)
			break;
	}
	depth++;

	curr_inner = task_wait_context(curr);

	for (; depth < curr->lockdep_depth; depth++) {
		struct held_lock *prev = curr->held_locks + depth;
		u8 prev_inner = hlock_class(prev)->wait_type_inner;

		if (prev_inner) {
			/*
			 * We can have a bigger inner than a previous one
			 * when outer is smaller than inner, as with RCU.
			 *
			 * Also due to trylocks.
			 */
			curr_inner = min(curr_inner, prev_inner);
		}
	}

	if (next_outer > curr_inner)
		return print_lock_invalid_wait_context(curr, next);

	return 0;
}

#else /* CONFIG_PROVE_LOCKING */

static inline int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
	return 1;
}

static inline unsigned int task_irq_context(struct task_struct *task)
{
	return 0;
}

static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

static inline int check_wait_context(struct task_struct *curr,
				     struct held_lock *next)
{
	return 0;
}

#endif /* CONFIG_PROVE_LOCKING */

/*
 * Initialize a lock instance's lock-class mapping info:
 */
void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
			    struct lock_class_key *key, int subclass,
			    u8 inner, u8 outer, u8 lock_type)
{
	int i;

	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		lock->class_cache[i] = NULL;

#ifdef CONFIG_LOCK_STAT
	lock->cpu = raw_smp_processor_id();
#endif

	/*
	 * Can't be having no nameless bastards around this place!
	 */
	if (DEBUG_LOCKS_WARN_ON(!name)) {
		lock->name = "NULL";
		return;
	}

	lock->name = name;

	lock->wait_type_outer = outer;
	lock->wait_type_inner = inner;
	lock->lock_type = lock_type;

	/*
	 * No key, no joy, we need to hash something.
	 */
	if (DEBUG_LOCKS_WARN_ON(!key))
		return;
	/*
	 * Sanity check, the lock-class key must either have been allocated
	 * statically or must have been registered as a dynamic key.
	 */
	if (!static_obj(key) && !is_dynamic_key(key)) {
		if (debug_locks)
			printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
		DEBUG_LOCKS_WARN_ON(1);
		return;
	}
	lock->key = key;

	if (unlikely(!debug_locks))
		return;

	if (subclass) {
		unsigned long flags;

		if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
			return;

		raw_local_irq_save(flags);
		lockdep_recursion_inc();
		register_lock_class(lock, subclass, 1);
		lockdep_recursion_finish();
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(lockdep_init_map_type);

struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
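
/*
 * Illustrative use of the init helpers above (not part of this file): the
 * various lock initializers funnel into lockdep_init_map_type() through
 * wrappers such as lockdep_init_map_waits()/lockdep_init_map(), and a
 * statically allocated key, or one registered with lockdep_register_key(),
 * must be passed in:
 *
 *	static struct lock_class_key my_key;	// static object: always fine
 *	struct lockdep_map map;
 *
 *	lockdep_init_map(&map, "my_lock", &my_key, 0);
 *
 * For keys embedded in dynamically allocated objects, register the key with
 * lockdep_register_key() first and unregister it before freeing, otherwise
 * the static_obj()/is_dynamic_key() check above fires.  'my_key', 'map' and
 * "my_lock" are made-up names used only for illustration.
 */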

static void print_lock_nested_lock_not_held(struct task_struct *curr,
					    struct held_lock *hlock,
					    unsigned long ip)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("==================================\n");
	pr_warn("WARNING: Nested lock was not taken\n");
	print_kernel_ident();
	pr_warn("----------------------------------\n");

	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
	print_lock(hlock);

	pr_warn("\nbut this task is not holding:\n");
	pr_warn("%s\n", hlock->nest_lock->name);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

static int __lock_is_held(const struct lockdep_map *lock, int read);

/*
 * This gets called for every mutex_lock*()/spin_lock*() operation.
 * We maintain the dependency maps and validate the locking attempt:
 *
 * The callers must make sure that IRQs are disabled before calling it,
 * otherwise we could get an interrupt which would want to take locks,
 * which would end up in lockdep again.
 */
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
			  struct lockdep_map *nest_lock, unsigned long ip,
			  int references, int pin_count)
{
	struct task_struct *curr = current;
	struct lock_class *class = NULL;
	struct held_lock *hlock;
	unsigned int depth;
	int chain_head = 0;
	int class_idx;
	u64 chain_key;

	if (unlikely(!debug_locks))
		return 0;

	if (!prove_locking || lock->key == &__lockdep_no_validate__)
		check = 0;

	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		class = lock->class_cache[subclass];
	/*
	 * Not cached?
	 */
	if (unlikely(!class)) {
		class = register_lock_class(lock, subclass, 0);
		if (!class)
			return 0;
	}

	debug_class_ops_inc(class);

	if (very_verbose(class)) {
		printk("\nacquire class [%px] %s", class->key, class->name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		printk(KERN_CONT "\n");
		dump_stack();
	}

	/*
	 * Add the lock to the list of currently held locks.
	 * (we dont increase the depth just yet, up until the
	 * dependency checks are done)
	 */
	depth = curr->lockdep_depth;
	/*
	 * Ran out of static storage for our per-task lock stack again have we?
	 */
	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
		return 0;

	class_idx = class - lock_classes;

	if (depth) { /* we're holding locks */
		hlock = curr->held_locks + depth - 1;
		if (hlock->class_idx == class_idx && nest_lock) {
			if (!references)
				references++;

			if (!hlock->references)
				hlock->references++;

			hlock->references += references;

			/* Overflow */
			if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
				return 0;

			return 2;
		}
	}

	hlock = curr->held_locks + depth;
	/*
	 * Plain impossible, we just registered it and checked it weren't no
	 * NULL like.. I bet this mushroom I ate was good!
	 */
	if (DEBUG_LOCKS_WARN_ON(!class))
		return 0;
	hlock->class_idx = class_idx;
	hlock->acquire_ip = ip;
	hlock->instance = lock;
	hlock->nest_lock = nest_lock;
	hlock->irq_context = task_irq_context(curr);
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
	hlock->hardirqs_off = !!hardirqs_off;
	hlock->references = references;
#ifdef CONFIG_LOCK_STAT
	hlock->waittime_stamp = 0;
	hlock->holdtime_stamp = lockstat_clock();
#endif
	hlock->pin_count = pin_count;

	if (check_wait_context(curr, hlock))
		return 0;

	/* Initialize the lock usage bit */
	if (!mark_usage(curr, hlock, check))
		return 0;

	/*
	 * Calculate the chain hash: it's the combined hash of all the
	 * lock keys along the dependency chain. We save the hash value
	 * at every step so that we can get the current hash easily
	 * after unlock. The chain hash is then used to cache dependency
	 * results.
	 *
	 * The 'key ID' is what is the most compact key value to drive
	 * the hash, not class->key.
	 */
	/*
	 * Whoops, we did it again.. class_idx is invalid.
	 */
	if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
		return 0;

	chain_key = curr->curr_chain_key;
	if (!depth) {
		/*
		 * How can we have a chain hash when we ain't got no keys?!
		 */
		if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
			return 0;
		chain_head = 1;
	}

	hlock->prev_chain_key = chain_key;
	if (separate_irq_context(curr, hlock)) {
		chain_key = INITIAL_CHAIN_KEY;
		chain_head = 1;
	}
	chain_key = iterate_chain_key(chain_key, hlock_id(hlock));

	if (nest_lock && !__lock_is_held(nest_lock, -1)) {
		print_lock_nested_lock_not_held(curr, hlock, ip);
		return 0;
	}

	if (!debug_locks_silent) {
		WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
		WARN_ON_ONCE(!hlock_class(hlock)->key);
	}

	if (!validate_chain(curr, hlock, chain_head, chain_key))
		return 0;

	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!debug_locks))
		return 0;
#endif
	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
		debug_locks_off();
		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
		printk(KERN_DEBUG "depth: %i  max: %lu!\n",
		       curr->lockdep_depth, MAX_LOCK_DEPTH);

		lockdep_print_held_locks(current);
		debug_show_all_locks();
		dump_stack();

		return 0;
	}

	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
		max_lockdep_depth = curr->lockdep_depth;

	return 1;
}

static void print_unlock_imbalance_bug(struct task_struct *curr,
				       struct lockdep_map *lock,
				       unsigned long ip)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("=====================================\n");
	pr_warn("WARNING: bad unlock balance detected!\n");
	print_kernel_ident();
	pr_warn("-------------------------------------\n");
	pr_warn("%s/%d is trying to release lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	pr_cont(") at:\n");
	print_ip_sym(KERN_WARNING, ip);
	pr_warn("but there are no more locks to release!\n");
	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

static noinstr int match_held_lock(const struct held_lock *hlock,
				   const struct lockdep_map *lock)
{
	if (hlock->instance == lock)
		return 1;

	if (hlock->references) {
		const struct lock_class *class = lock->class_cache[0];

		if (!class)
			class = look_up_lock_class(lock, 0);

		/*
		 * If look_up_lock_class() failed to find a class, we're trying
		 * to test if we hold a lock that has never yet been acquired.
		 * Clearly if the lock hasn't been acquired _ever_, we're not
		 * holding it either, so report failure.
		 */
		if (!class)
			return 0;

		/*
		 * References, but not a lock we're actually ref-counting?
		 * State got messed up, follow the sites that change ->references
		 * and try to make sense of it.
		 */
		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
			return 0;

		if (hlock->class_idx == class - lock_classes)
			return 1;
	}

	return 0;
}

/* @depth must not be zero */
static struct held_lock *find_held_lock(struct task_struct *curr,
					struct lockdep_map *lock,
					unsigned int depth, int *idx)
{
	struct held_lock *ret, *hlock, *prev_hlock;
	int i;

	i = depth - 1;
	hlock = curr->held_locks + i;
	ret = hlock;
	if (match_held_lock(hlock, lock))
		goto out;

	ret = NULL;
	for (i--, prev_hlock = hlock--;
	     i >= 0;
	     i--, prev_hlock = hlock--) {
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock->irq_context != hlock->irq_context) {
			ret = NULL;
			break;
		}
		if (match_held_lock(hlock, lock)) {
			ret = hlock;
			break;
		}
	}

out:
	*idx = i;
	return ret;
}

static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
				int idx, unsigned int *merged)
{
	struct held_lock *hlock;
	int first_idx = idx;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
		switch (__lock_acquire(hlock->instance,
				    hlock_class(hlock)->subclass,
				    hlock->trylock,
				    hlock->read, hlock->check,
				    hlock->hardirqs_off,
				    hlock->nest_lock, hlock->acquire_ip,
				    hlock->references, hlock->pin_count)) {
		case 0:
			return 1;
		case 1:
			break;
		case 2:
			*merged += (idx == first_idx);
			break;
		default:
			WARN_ON(1);
			return 0;
		}
	}
	return 0;
}

static int
__lock_set_class(struct lockdep_map *lock, const char *name,
		 struct lock_class_key *key, unsigned int subclass,
		 unsigned long ip)
{
	struct task_struct *curr = current;
	unsigned int depth, merged = 0;
	struct held_lock *hlock;
	struct lock_class *class;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	depth = curr->lockdep_depth;
	/*
	 * This function is about (re)setting the class of a held lock,
	 * yet we're not actually holding any locks. Naughty user!
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_unlock_imbalance_bug(curr, lock, ip);
		return 0;
	}

	lockdep_init_map_waits(lock, name, key, 0,
			       lock->wait_type_inner,
			       lock->wait_type_outer);
	class = register_lock_class(lock, subclass, 0);
	hlock->class_idx = class - lock_classes;

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	if (reacquire_held_locks(curr, depth, i, &merged))
		return 0;

	/*
	 * I took it apart and put it back together again, except now I have
	 * these 'spare' parts.. where shall I put them.
	 */
	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
		return 0;
	return 1;
}

static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	unsigned int depth, merged = 0;
	struct held_lock *hlock;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	depth = curr->lockdep_depth;
	/*
	 * This function is about (re)setting the class of a held lock,
	 * yet we're not actually holding any locks. Naughty user!
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_unlock_imbalance_bug(curr, lock, ip);
		return 0;
	}

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	WARN(hlock->read, "downgrading a read lock");
	hlock->read = 1;
	hlock->acquire_ip = ip;

	if (reacquire_held_locks(curr, depth, i, &merged))
		return 0;

	/* Merging can't happen with unchanged classes.. */
	if (DEBUG_LOCKS_WARN_ON(merged))
		return 0;

	/*
	 * I took it apart and put it back together again, except now I have
	 * these 'spare' parts.. where shall I put them.
	 */
	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
		return 0;

	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()).
 */
static int
__lock_release(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	unsigned int depth, merged = 1;
	struct held_lock *hlock;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	depth = curr->lockdep_depth;
	/*
	 * So we're all set to release this lock.. wait what lock? We don't
	 * own any locks, you've been drinking again?
	 */
	if (depth <= 0) {
		print_unlock_imbalance_bug(curr, lock, ip);
		return 0;
	}

	/*
	 * Check whether the lock exists in the current stack
	 * of held locks:
	 */
	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_unlock_imbalance_bug(curr, lock, ip);
		return 0;
	}

	if (hlock->instance == lock)
		lock_release_holdtime(hlock);

	WARN(hlock->pin_count, "releasing a pinned lock\n");

	if (hlock->references) {
		hlock->references--;
		if (hlock->references) {
			/*
			 * We had, and after removing one, still have
			 * references, the current lock stack is still
			 * valid. We're done!
			 */
			return 1;
		}
	}

	/*
	 * We have the right lock to unlock, 'hlock' points to it.
	 * Now we remove it from the stack, and add back the other
	 * entries (if any), recalculating the hash along the way:
	 */

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	/*
	 * The most likely case is when the unlock is on the innermost
	 * lock. In this case, we are done!
	 */
	if (i == depth-1)
		return 1;

	if (reacquire_held_locks(curr, depth, i + 1, &merged))
		return 0;

	/*
	 * We had N bottles of beer on the wall, we drank one, but now
	 * there's not N-1 bottles of beer left on the wall...
	 * Pouring two of the bottles together is acceptable.
	 */
	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);

	/*
	 * Since reacquire_held_locks() would have called check_chain_key()
	 * indirectly via __lock_acquire(), we don't need to do it again
	 * on return.
	 */
	return 0;
}

static __always_inline
int __lock_is_held(const struct lockdep_map *lock, int read)
{
	struct task_struct *curr = current;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			if (read == -1 || !!hlock->read == read)
				return LOCK_STATE_HELD;

			return LOCK_STATE_NOT_HELD;
		}
	}

	return LOCK_STATE_NOT_HELD;
}

static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
{
	struct pin_cookie cookie = NIL_COOKIE;
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return cookie;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			/*
			 * Grab 16bits of randomness; this is sufficient to not
			 * be guessable and still allows some pin nesting in
			 * our u32 pin_count.
			 */
			cookie.val = 1 + (prandom_u32() >> 16);
			hlock->pin_count += cookie.val;
			return cookie;
		}
	}

	WARN(1, "pinning an unheld lock\n");
	return cookie;
}

static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			hlock->pin_count += cookie.val;
			return;
		}
	}

	WARN(1, "pinning an unheld lock\n");
}

static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
				return;

			hlock->pin_count -= cookie.val;

			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
				hlock->pin_count = 0;

			return;
		}
	}

	WARN(1, "unpinning an unheld lock\n");
}

/*
 * Check whether we follow the irq-flags state precisely:
 */
static noinstr void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
	if (!debug_locks)
		return;

	/* Get the warning out..  */
	instrumentation_begin();

	if (irqs_disabled_flags(flags)) {
		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}

	/*
	 * We dont accurately track softirq state in e.g.
	 * hardirq contexts (such as on 4KSTACKS), so only
	 * check if not in hardirq contexts:
	 */
	if (!hardirq_count()) {
		if (softirq_count()) {
			/* like the above, but with softirqs */
			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
		} else {
			/* lick the above, does it taste good? */
			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
		}
	}

	if (!debug_locks)
		print_irqtrace_events(current);

	instrumentation_end();
#endif
}

void lock_set_class(struct lockdep_map *lock, const char *name,
		    struct lock_class_key *key, unsigned int subclass,
		    unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lockdep_enabled()))
		return;

	raw_local_irq_save(flags);
	lockdep_recursion_inc();
	check_flags(flags);
	if (__lock_set_class(lock, name, key, subclass, ip))
		check_chain_key(current);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);

void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lockdep_enabled()))
		return;

	raw_local_irq_save(flags);
	lockdep_recursion_inc();
	check_flags(flags);
	if (__lock_downgrade(lock, ip))
		check_chain_key(current);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_downgrade);

/* NMI context !!! */
static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
{
#ifdef CONFIG_PROVE_LOCKING
	struct lock_class *class = look_up_lock_class(lock, subclass);
	unsigned long mask = LOCKF_USED;

	/* if it doesn't have a class (yet), it certainly hasn't been used yet */
	if (!class)
		return;

	/*
	 * READ locks only conflict with USED, such that if we only ever use
	 * READ locks, there is no deadlock possible -- RCU.
	 */
	if (!hlock->read)
		mask |= LOCKF_USED_READ;

	if (!(class->usage_mask & mask))
		return;

	hlock->class_idx = class - lock_classes;

	print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
#endif
}

static bool lockdep_nmi(void)
{
	if (raw_cpu_read(lockdep_recursion))
		return false;

	if (!in_nmi())
		return false;

	return true;
}

/*
 * read_lock() is recursive if:
 * 1. We force lockdep think this way in selftests or
 * 2. The implementation is not queued read/write lock or
 * 3. The locker is at an in_interrupt() context.
 */
bool read_lock_is_recursive(void)
{
	return force_read_lock_recursive ||
	       !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
	       in_interrupt();
}
EXPORT_SYMBOL_GPL(read_lock_is_recursive);

/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);

	if (!debug_locks)
		return;

	if (unlikely(!lockdep_enabled())) {
		/* XXX allow trylock from NMI ?!? */
		if (lockdep_nmi() && !trylock) {
			struct held_lock hlock;

			hlock.acquire_ip = ip;
			hlock.instance = lock;
			hlock.nest_lock = nest_lock;
			hlock.irq_context = 2; // XXX
			hlock.trylock = trylock;
			hlock.read = read;
			hlock.check = check;
			hlock.hardirqs_off = true;
			hlock.references = 0;

			verify_lock_unused(lock, &hlock, subclass);
		}
		return;
	}

	raw_local_irq_save(flags);
	check_flags(flags);

	lockdep_recursion_inc();
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
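
/*
 * Illustrative mapping from the locking primitives to lock_acquire() (the
 * real wrappers live in include/linux/lockdep.h; shown only as a sketch of
 * what the arguments mean):
 *
 *	spin_acquire(map, 0, 0, ip)  -> lock_acquire(map, 0, 0, 0, 1, NULL, ip)
 *					   exclusive (read = 0), check = 1
 *	spin_acquire(map, 0, 1, ip)  -> trylock = 1, so no blocking-order check
 *	read_lock() annotations pass read = 1 or 2 (shared vs. recursive read)
 *	mutex_release(map, ip)       -> lock_release(map, ip)
 *
 * The subclass argument distinguishes nested-locking annotations such as
 * spin_lock_nested(); the first NR_LOCKDEP_CACHING_CLASSES subclasses are
 * cached in the map itself.
 */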

void lock_release(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	trace_lock_release(lock, ip);

	if (unlikely(!lockdep_enabled()))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	lockdep_recursion_inc();
	if (__lock_release(lock, ip))
		check_chain_key(current);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);

noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
{
	unsigned long flags;
	int ret = LOCK_STATE_NOT_HELD;

	/*
	 * Avoid false negative lockdep_assert_held() and
	 * lockdep_assert_not_held().
	 */
	if (unlikely(!lockdep_enabled()))
		return LOCK_STATE_UNKNOWN;

	raw_local_irq_save(flags);
	check_flags(flags);

	lockdep_recursion_inc();
	ret = __lock_is_held(lock, read);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(lock_is_held_type);
NOKPROBE_SYMBOL(lock_is_held_type);
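
/*
 * Illustrative sketch: lock_is_held_type() backs the lockdep_assert_*()
 * family (see include/linux/lockdep.h).  Typical usage in code documenting
 * its locking rules:
 *
 *	static void update_counters(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);	// splat if not held
 *		...
 *	}
 *
 *	lockdep_assert_held_read(&sem);		// read == 1
 *	lockdep_assert_held_write(&sem);	// read == 0
 *
 * LOCK_STATE_UNKNOWN is returned while lockdep is disabled so the asserts
 * do not generate false positives.  'my_dev' and 'sem' are made-up names.
 */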

struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
{
	struct pin_cookie cookie = NIL_COOKIE;
	unsigned long flags;

	if (unlikely(!lockdep_enabled()))
		return cookie;

	raw_local_irq_save(flags);
	check_flags(flags);

	lockdep_recursion_inc();
	cookie = __lock_pin_lock(lock);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);

	return cookie;
}
EXPORT_SYMBOL_GPL(lock_pin_lock);

void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	unsigned long flags;

	if (unlikely(!lockdep_enabled()))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	lockdep_recursion_inc();
	__lock_repin_lock(lock, cookie);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);

void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	unsigned long flags;

	if (unlikely(!lockdep_enabled()))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	lockdep_recursion_inc();
	__lock_unpin_lock(lock, cookie);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
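
/*
 * Illustrative use of the pin/unpin API above: pinning asserts that a lock
 * stays held across a region even when the code in between is in principle
 * allowed to drop and retake it.  The scheduler does something like this
 * with the runqueue lock, roughly:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...			// any release of rq->lock here would WARN
 *	lockdep_unpin_lock(&rq->lock, cookie);
 *
 * The cookie's random 16-bit value makes it hard to unpin by accident
 * without having gone through the matching pin.  The rq example is only a
 * sketch of the intended usage.
 */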

#ifdef CONFIG_LOCK_STAT
static void print_lock_contention_bug(struct task_struct *curr,
				      struct lockdep_map *lock,
				      unsigned long ip)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("=================================\n");
	pr_warn("WARNING: bad contention detected!\n");
	print_kernel_ident();
	pr_warn("---------------------------------\n");
	pr_warn("%s/%d is trying to contend lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	pr_cont(") at:\n");
	print_ip_sym(KERN_WARNING, ip);
	pr_warn("but there are no locks held!\n");
	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	int i, contention_point, contending_point;

	depth = curr->lockdep_depth;
	/*
	 * Whee, we contended on this lock, except it seems we're not
	 * actually trying to acquire anything much at all..
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_lock_contention_bug(curr, lock, ip);
		return;
	}

	if (hlock->instance != lock)
		return;

	hlock->waittime_stamp = lockstat_clock();

	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
	contending_point = lock_point(hlock_class(hlock)->contending_point,
				      lock->ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (contention_point < LOCKSTAT_POINTS)
		stats->contention_point[contention_point]++;
	if (contending_point < LOCKSTAT_POINTS)
		stats->contending_point[contending_point]++;
	if (lock->cpu != smp_processor_id())
		stats->bounces[bounce_contended + !!hlock->read]++;
}

static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	u64 now, waittime = 0;
	int i, cpu;

	depth = curr->lockdep_depth;
	/*
	 * Yay, we acquired ownership of this lock we didn't try to
	 * acquire, how the heck did that happen?
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_lock_contention_bug(curr, lock, _RET_IP_);
		return;
	}

	if (hlock->instance != lock)
		return;

	cpu = smp_processor_id();
	if (hlock->waittime_stamp) {
		now = lockstat_clock();
		waittime = now - hlock->waittime_stamp;
		hlock->holdtime_stamp = now;
	}

	stats = get_lock_stats(hlock_class(hlock));
	if (waittime) {
		if (hlock->read)
			lock_time_inc(&stats->read_waittime, waittime);
		else
			lock_time_inc(&stats->write_waittime, waittime);
	}
	if (lock->cpu != cpu)
		stats->bounces[bounce_acquired + !!hlock->read]++;

	lock->cpu = cpu;
	lock->ip = ip;
}

void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	trace_lock_contended(lock, ip);

	if (unlikely(!lock_stat || !lockdep_enabled()))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	lockdep_recursion_inc();
	__lock_contended(lock, ip);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);

void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	trace_lock_acquired(lock, ip);

	if (unlikely(!lock_stat || !lockdep_enabled()))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	lockdep_recursion_inc();
	__lock_acquired(lock, ip);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
#endif /* CONFIG_LOCK_STAT */
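
/*
 * Illustrative sketch of how the two hooks above are driven (the real macro
 * is LOCK_CONTENDED() in include/linux/lockdep.h): a contended slowpath is
 * bracketed so lock_contended() records the start of the wait and
 * lock_acquired() the start of the hold:
 *
 *	if (!do_raw_spin_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		do_raw_spin_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * /proc/lock_stat then reports per-class wait/hold times and bounces.
 */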

/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */

void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	lockdep_init_task(current);
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_HLIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}

/* Remove a class from a lock chain. Must be called with the graph lock held. */
static void remove_class_from_lock_chain(struct pending_free *pf,
					 struct lock_chain *chain,
					 struct lock_class *class)
{
#ifdef CONFIG_PROVE_LOCKING
	int i;

	for (i = chain->base; i < chain->base + chain->depth; i++) {
		if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes)
			continue;
		/*
		 * Each lock class occurs at most once in a lock chain so once
		 * we found a match we can break out of this loop.
		 */
		goto free_lock_chain;
	}
	/* Since the chain has not been modified, return. */
	return;

free_lock_chain:
	free_chain_hlocks(chain->base, chain->depth);
	/* Overwrite the chain key for concurrent RCU readers. */
	WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
	dec_chains(chain->irq_context);

	/*
	 * Note: calling hlist_del_rcu() from inside a
	 * hlist_for_each_entry_rcu() loop is safe.
	 */
	hlist_del_rcu(&chain->entry);
	__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
	nr_zapped_lock_chains++;
#endif
}

/* Must be called with the graph lock held. */
static void remove_class_from_lock_chains(struct pending_free *pf,
					  struct lock_class *class)
{
	struct lock_chain *chain;
	struct hlist_head *head;
	int i;

	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
		head = chainhash_table + i;
		hlist_for_each_entry_rcu(chain, head, entry) {
			remove_class_from_lock_chain(pf, chain, class);
		}
	}
}

/*
 * Remove all references to a lock class. The caller must hold the graph lock.
 */
static void zap_class(struct pending_free *pf, struct lock_class *class)
{
	struct lock_list *entry;
	int i;

	WARN_ON_ONCE(!class->key);

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		entry = list_entries + i;
		if (entry->class != class && entry->links_to != class)
			continue;
		__clear_bit(i, list_entries_in_use);
		nr_list_entries--;
		list_del_rcu(&entry->entry);
	}
	if (list_empty(&class->locks_after) &&
	    list_empty(&class->locks_before)) {
		list_move_tail(&class->lock_entry, &pf->zapped);
		hlist_del_rcu(&class->hash_entry);
		WRITE_ONCE(class->key, NULL);
		WRITE_ONCE(class->name, NULL);
		nr_lock_classes--;
		__clear_bit(class - lock_classes, lock_classes_in_use);
	} else {
		WARN_ONCE(true, "%s() failed for class %s\n", __func__,
			  class->name);
	}

	remove_class_from_lock_chains(pf, class);
	nr_zapped_classes++;
}

static void reinit_class(struct lock_class *class)
{
	void *const p = class;
	const unsigned int offset = offsetof(struct lock_class, key);

	WARN_ON_ONCE(!class->lock_entry.next);
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	WARN_ON_ONCE(!list_empty(&class->locks_before));
	memset(p + offset, 0, sizeof(*class) - offset);
	WARN_ON_ONCE(!class->lock_entry.next);
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	WARN_ON_ONCE(!list_empty(&class->locks_before));
}

static inline int within(const void *addr, void *start, unsigned long size)
{
	return	addr >= start && addr < start + size;
}

static bool inside_selftest(void)
{
	return current == lockdep_selftest_task_struct;
}

/* The caller must hold the graph lock. */
static struct pending_free *get_pending_free(void)
{
	return delayed_free.pf + delayed_free.index;
}

static void free_zapped_rcu(struct rcu_head *cb);

/*
 * Schedule an RCU callback if no RCU callback is pending. Must be called with
 * the graph lock held.
 */
static void call_rcu_zapped(struct pending_free *pf)
{
	WARN_ON_ONCE(inside_selftest());

	if (list_empty(&pf->zapped))
		return;

	if (delayed_free.scheduled)
		return;

	delayed_free.scheduled = true;

	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
	delayed_free.index ^= 1;

	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
}

/* The caller must hold the graph lock. May be called from RCU context. */
static void __free_zapped_classes(struct pending_free *pf)
{
	struct lock_class *class;

	check_data_structures();

	list_for_each_entry(class, &pf->zapped, lock_entry)
		reinit_class(class);

	list_splice_init(&pf->zapped, &free_lock_classes);

#ifdef CONFIG_PROVE_LOCKING
	bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
		      pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
	bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
#endif
}

static void free_zapped_rcu(struct rcu_head *ch)
{
	struct pending_free *pf;
	unsigned long flags;

	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
		return;

	raw_local_irq_save(flags);
	lockdep_lock();

	/* closed head */
	pf = delayed_free.pf + (delayed_free.index ^ 1);
	__free_zapped_classes(pf);
	delayed_free.scheduled = false;

	/*
	 * If there's anything on the open list, close and start a new callback.
	 */
	call_rcu_zapped(delayed_free.pf + delayed_free.index);

	lockdep_unlock();
	raw_local_irq_restore(flags);
}

/*
 * Remove all lock classes from the class hash table and from the
 * all_lock_classes list whose key or name is in the address range [start,
 * start + size). Move these lock classes to the zapped_classes list. Must
 * be called with the graph lock held.
 */
static void __lockdep_free_key_range(struct pending_free *pf, void *start,
				     unsigned long size)
{
	struct lock_class *class;
	struct hlist_head *head;
	int i;

	/* Unhash all classes that were created by a module. */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			if (!within(class->key, start, size) &&
			    !within(class->name, start, size))
				continue;
			zap_class(pf, class);
		}
	}
}

/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed; and possibly re-used by other modules.
 *
 * We will have had one synchronize_rcu() before getting here, so we're
 * guaranteed nobody will look up these exact classes -- they're properly dead
 * but still allocated.
 */
static void lockdep_free_key_range_reg(void *start, unsigned long size)
{
	struct pending_free *pf;
	unsigned long flags;

	init_data_structures_once();

	raw_local_irq_save(flags);
	lockdep_lock();
	pf = get_pending_free();
	__lockdep_free_key_range(pf, start, size);
	call_rcu_zapped(pf);
	lockdep_unlock();
	raw_local_irq_restore(flags);

	/*
	 * Wait for any possible iterators from look_up_lock_class() to pass
	 * before continuing to free the memory they refer to.
	 */
	synchronize_rcu();
}

/*
 * Free all lockdep keys in the range [start, start+size). Does not sleep.
 * Ignores debug_locks. Must only be used by the lockdep selftests.
 */
static void lockdep_free_key_range_imm(void *start, unsigned long size)
{
	struct pending_free *pf = delayed_free.pf;
	unsigned long flags;

	init_data_structures_once();

	raw_local_irq_save(flags);
	lockdep_lock();
	__lockdep_free_key_range(pf, start, size);
	__free_zapped_classes(pf);
	lockdep_unlock();
	raw_local_irq_restore(flags);
}

void lockdep_free_key_range(void *start, unsigned long size)
{
	init_data_structures_once();

	if (inside_selftest())
		lockdep_free_key_range_imm(start, size);
	else
		lockdep_free_key_range_reg(start, size);
}

/*
 * Check whether any element of the @lock->class_cache[] array refers to a
 * registered lock class. The caller must hold either the graph lock or the
 * RCU read lock.
 */
static bool lock_class_cache_is_registered(struct lockdep_map *lock)
{
	struct lock_class *class;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
				if (lock->class_cache[j] == class)
					return true;
		}
	}

	return false;
}

/* The caller must hold the graph lock. Does not sleep. */
static void __lockdep_reset_lock(struct pending_free *pf,
				 struct lockdep_map *lock)
{
	struct lock_class *class;
	int j;

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(pf, class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
		debug_locks_off();
}

/*
 * Remove all information lockdep has about a lock if debug_locks == 1. Free
 * released data structures from RCU context.
 */
static void lockdep_reset_lock_reg(struct lockdep_map *lock)
{
	struct pending_free *pf;
	unsigned long flags;
	int locked;

	raw_local_irq_save(flags);
	locked = graph_lock();
	if (!locked)
		goto out_irq;

	pf = get_pending_free();
	__lockdep_reset_lock(pf, lock);
	call_rcu_zapped(pf);

	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);
}

/*
 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
 * lockdep selftests.
 */
static void lockdep_reset_lock_imm(struct lockdep_map *lock)
{
	struct pending_free *pf = delayed_free.pf;
	unsigned long flags;

	raw_local_irq_save(flags);
	lockdep_lock();
	__lockdep_reset_lock(pf, lock);
	__free_zapped_classes(pf);
	lockdep_unlock();
	raw_local_irq_restore(flags);
}

void lockdep_reset_lock(struct lockdep_map *lock)
{
	init_data_structures_once();

	if (inside_selftest())
		lockdep_reset_lock_imm(lock);
	else
		lockdep_reset_lock_reg(lock);
}

/*
 * Unregister a dynamically allocated key.
 *
 * Unlike lockdep_register_key(), a search is always done to find a matching
 * key irrespective of debug_locks to avoid potential invalid access to freed
 * memory in lock_class entry.
 */
void lockdep_unregister_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head = keyhashentry(key);
	struct lock_class_key *k;
	struct pending_free *pf;
	unsigned long flags;
	bool found = false;

	might_sleep();

	if (WARN_ON_ONCE(static_obj(key)))
		return;

	raw_local_irq_save(flags);
	lockdep_lock();

	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			hlist_del_rcu(&k->hash_entry);
			found = true;
			break;
		}
	}
	WARN_ON_ONCE(!found && debug_locks);
	if (found) {
		pf = get_pending_free();
		__lockdep_free_key_range(pf, key, 1);
		call_rcu_zapped(pf);
	}
	lockdep_unlock();
	raw_local_irq_restore(flags);

	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(lockdep_unregister_key);
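
/*
 * Illustrative lifecycle for a dynamically allocated key (a hypothetical
 * 'my_obj' structure, shown only to contrast with static keys):
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	lockdep_register_key(&obj->key);
 *	lockdep_init_map(&obj->dep_map, "my_obj_lock", &obj->key, 0);
 *	...
 *	lockdep_unregister_key(&obj->key);	// may sleep: synchronize_rcu()
 *	kfree(obj);
 *
 * Unregistering before the containing memory is freed is what keeps
 * is_dynamic_key() and the class hash from referencing freed memory.
 */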

void __init lockdep_init(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %zu kB\n",
	       (sizeof(lock_classes) +
		sizeof(lock_classes_in_use) +
		sizeof(classhash_table) +
		sizeof(list_entries) +
		sizeof(list_entries_in_use) +
		sizeof(chainhash_table) +
		sizeof(delayed_free)
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(lock_cq)
		+ sizeof(lock_chains)
		+ sizeof(lock_chains_in_use)
		+ sizeof(chain_hlocks)
#endif
		) / 1024
		);

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
	printk(" memory used for stack traces: %zu kB\n",
	       (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
	       );
#endif

	printk(" per task-struct memory footprint: %zu bytes\n",
	       sizeof(((struct task_struct *)NULL)->held_locks));
}

static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("=========================\n");
	pr_warn("WARNING: held lock freed!\n");
	print_kernel_ident();
	pr_warn("-------------------------\n");
	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	raw_local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
					sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);

static void print_held_locks_bug(void)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("====================================\n");
	pr_warn("WARNING: %s/%d still has locks held!\n",
	       current->comm, task_pid_nr(current));
	print_kernel_ident();
	pr_warn("------------------------------------\n");
	lockdep_print_held_locks(current);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}

void debug_check_no_locks_held(void)
{
	if (unlikely(current->lockdep_depth > 0))
		print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);

#ifdef __KERNEL__
void debug_show_all_locks(void)
{
	struct task_struct *g, *p;

	if (unlikely(!debug_locks)) {
		pr_warn("INFO: lockdep is turned off.\n");
		return;
	}
	pr_warn("\nShowing all locks held in the system:\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (!p->lockdep_depth)
			continue;
		lockdep_print_held_locks(p);
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
	}
	rcu_read_unlock();

	pr_warn("\n");
	pr_warn("=============================================\n\n");
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
#endif

/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
void debug_show_held_locks(struct task_struct *task)
{
	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);

asmlinkage __visible void lockdep_sys_exit(void)
{
	struct task_struct *curr = current;

	if (unlikely(curr->lockdep_depth)) {
		if (!debug_locks_off())
			return;
		pr_warn("\n");
		pr_warn("================================================\n");
		pr_warn("WARNING: lock held when returning to user space!\n");
		print_kernel_ident();
		pr_warn("------------------------------------------------\n");
		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
				curr->comm, curr->pid);
		lockdep_print_held_locks(curr);
	}

	/*
	 * The lock history for each syscall should be independent. So wipe the
	 * slate clean on return to userspace.
	 */
	lockdep_invariant_state(false);
}

void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
	struct task_struct *curr = current;
	int dl = READ_ONCE(debug_locks);

	/* Note: the following can be executed concurrently, so be careful. */
	pr_warn("\n");
	pr_warn("=============================\n");
	pr_warn("WARNING: suspicious RCU usage\n");
	print_kernel_ident();
	pr_warn("-----------------------------\n");
	pr_warn("%s:%d %s!\n", file, line, s);
	pr_warn("\nother info that might help us debug this:\n\n");
	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s",
	       !rcu_lockdep_current_cpu_online()
			? "RCU used illegally from offline CPU!\n"
			: "",
	       rcu_scheduler_active, dl,
	       dl ? "" : "Possible false positive due to lockdep disabling via debug_locks = 0\n");

	/*
	 * If a CPU is in the RCU-free window in idle (ie: in the section
	 * between rcu_idle_enter() and rcu_idle_exit(), then RCU
	 * considers that CPU to be in an "extended quiescent state",
	 * which means that RCU will be completely ignoring that CPU.
	 * Therefore, rcu_read_lock() and friends have absolutely no
	 * effect on a CPU running in that state. In other words, even if
	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
	 * delete data structures out from under it. RCU really has no
	 * choice here: we need to keep an RCU-free window in idle where
	 * the CPU may possibly enter into low power mode. This way we can
	 * notice an extended quiescent state to other CPUs that started a grace
	 * period. Otherwise we would delay any grace period as long as we run
	 * in the idle task.
	 *
	 * So complain bitterly if someone does call rcu_read_lock(),
	 * rcu_read_lock_bh() and so on from extended quiescent states.
	 */
	if (!rcu_is_watching())
		pr_warn("RCU used illegally from extended quiescent state!\n");

	lockdep_print_held_locks(curr);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);