/*
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies runtime.
 */
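/*
 * Illustration (hypothetical locks A and B, not part of the validator
 * itself): the classic AB-BA inversion the checks above are meant to
 * catch. If one context ever does
 *
 *	mutex_lock(&A); mutex_lock(&B);
 *
 * and some other context (even another task, even much later) does
 *
 *	mutex_lock(&B); mutex_lock(&A);
 *
 * the B -> A ordering contradicts the recorded A -> B dependency and a
 * circular-dependency warning is emitted, whether or not the two paths
 * ever actually raced.
 */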
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *lockdep_selftest_task_struct;
static int graph_lock(void)
{
        arch_spin_lock(&lockdep_lock);
        /*
         * Make sure that if another CPU detected a bug while
         * walking the graph we don't change it (while the other
         * CPU is busy printing out stuff with the graph lock
         * dropped already)
         */
        if (!debug_locks) {
                arch_spin_unlock(&lockdep_lock);
                return 0;
        }
        /* prevent any recursions within lockdep from causing deadlocks */
        current->lockdep_recursion++;
        return 1;
}

static inline int graph_unlock(void)
{
        if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
                /*
                 * The lockdep graph lock isn't locked while we expect it to
                 * be, we're confused now, bye!
                 */
                return DEBUG_LOCKS_WARN_ON(1);
        }

        current->lockdep_recursion--;
        arch_spin_unlock(&lockdep_lock);
        return 0;
}
/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
        int ret = debug_locks_off();

        arch_spin_unlock(&lockdep_lock);

        return ret;
}
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
/*
 * All data structures here are protected by the global debug_lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that is
 * in use.
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
#ifndef CONFIG_DEBUG_LOCKDEP
static
#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
        if (!hlock->class_idx) {
                /*
                 * Someone passed in garbage, we give up.
                 */
                DEBUG_LOCKS_WARN_ON(1);
                return NULL;
        }
        return lock_classes + hlock->class_idx - 1;
}
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
        return local_clock();
}
static int lock_point(unsigned long points[], unsigned long ip)
{
        int i;

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                if (points[i] == 0) {
                        points[i] = ip;
                        break;
                }
                if (points[i] == ip)
                        break;
        }

        return i;
}
static void lock_time_inc(struct lock_time *lt, u64 time)
{
        if (time > lt->max)
                lt->max = time;

        if (time < lt->min || !lt->nr)
                lt->min = time;

        lt->total += time;
        lt->nr++;
}
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
        if (!src->nr)
                return;

        if (src->max > dst->max)
                dst->max = src->max;

        if (src->min < dst->min || !dst->nr)
                dst->min = src->min;

        dst->total += src->total;
        dst->nr += src->nr;
}
struct lock_class_stats lock_stats(struct lock_class *class)
{
        struct lock_class_stats stats;
        int cpu, i;

        memset(&stats, 0, sizeof(struct lock_class_stats));
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *pcs =
                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];

                for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
                        stats.contending_point[i] += pcs->contending_point[i];

                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
                lock_time_add(&pcs->write_waittime, &stats.write_waittime);

                lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
                lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

                for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
                        stats.bounces[i] += pcs->bounces[i];
        }

        return stats;
}
void clear_lock_stats(struct lock_class *class)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct lock_class_stats *cpu_stats =
                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
        memset(class->contention_point, 0, sizeof(class->contention_point));
        memset(class->contending_point, 0, sizeof(class->contending_point));
}
static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
        return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}

static void lock_release_holdtime(struct held_lock *hlock)
{
        struct lock_class_stats *stats;
        u64 holdtime;

        if (!lock_stat)
                return;

        holdtime = lockstat_clock() - hlock->holdtime_stamp;

        stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
                lock_time_inc(&stats->read_holdtime, holdtime);
        else
                lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
/*
 * We keep a global list of all lock classes. The list is only accessed with
 * the lockdep spinlock lock held. free_lock_classes is a list with free
 * elements. These elements are linked together by the lock_entry member in
 * struct lock_class.
 */
LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);
/**
 * struct pending_free - information about data structures about to be freed
 * @zapped: Head of a list with struct lock_class elements.
 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
 *	are about to be freed.
 */
struct pending_free {
        struct list_head zapped;
        DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};

/**
 * struct delayed_free - data structures used for delayed freeing
 *
 * A data structure for delayed freeing of data structures that may be
 * accessed by RCU readers at the time these were freed.
 *
 * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
 * @index:     Index of @pf to which freed data structures are added.
 * @scheduled: Whether or not an RCU callback has been scheduled.
 * @pf:        Array with information about data structures about to be freed.
 */
static struct delayed_free {
        struct rcu_head		rcu_head;
        int			index;
        int			scheduled;
        struct pending_free	pf[2];
} delayed_free;
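/*
 * Sketch of how the two pf[] elements are intended to be used (based on
 * the description above; the helpers that actually manipulate
 * delayed_free live further down in this file): pf[index] collects data
 * structures that are currently being zapped, while the other element
 * holds whatever is still waiting out an RCU grace period. Roughly:
 *
 *	pf = delayed_free.pf + delayed_free.index;	// fill this one
 *	// once an RCU callback has been scheduled, the index flips and
 *	// the callback frees everything on the element that has now
 *	// aged through a full grace period.
 */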
/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];
/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
        u32 k0 = key, k1 = key >> 32;

        __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

        return k0 | (u64)k1 << 32;
}
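/*
 * Illustrative sketch (not a real function in this file): the chain key
 * for the currently held locks is built up incrementally, one class
 * index per held lock, roughly like this:
 *
 *	u64 chain_key = 0;
 *	for (i = 0; i < curr->lockdep_depth; i++)
 *		chain_key = iterate_chain_key(chain_key,
 *					      curr->held_locks[i].class_idx);
 *
 * Two different lock orders therefore hash to (almost certainly)
 * different keys, which is what lets already-validated chains be cached
 * in chainhash_table[] and skipped on later acquisitions.
 */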
void lockdep_off(void)
{
        current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
        current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

void lockdep_set_selftest_task(struct task_struct *task)
{
        lockdep_selftest_task_struct = task;
}
/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
        /* Example */
        if (class->name_version == 1 &&
                        !strcmp(class->name, "lockname"))
                return 1;
        if (class->name_version == 1 &&
                        !strcmp(class->name, "&struct->lockfield"))
                return 1;
#endif
        /* Filter everything else. 1 would be to allow everything else */
        return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
        return class_filter(class);
#endif
        return 0;
}
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static void print_lockdep_off(const char *bug_msg)
{
        printk(KERN_DEBUG "%s\n", bug_msg);
        printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
        printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}
437 static int save_trace(struct stack_trace
*trace
)
439 trace
->nr_entries
= 0;
440 trace
->max_entries
= MAX_STACK_TRACE_ENTRIES
- nr_stack_trace_entries
;
441 trace
->entries
= stack_trace
+ nr_stack_trace_entries
;
445 save_stack_trace(trace
);
448 * Some daft arches put -1 at the end to indicate its a full trace.
450 * <rant> this is buggy anyway, since it takes a whole extra entry so a
451 * complete trace that maxes out the entries provided will be reported
452 * as incomplete, friggin useless </rant>
454 if (trace
->nr_entries
!= 0 &&
455 trace
->entries
[trace
->nr_entries
-1] == ULONG_MAX
)
458 trace
->max_entries
= trace
->nr_entries
;
460 nr_stack_trace_entries
+= trace
->nr_entries
;
462 if (nr_stack_trace_entries
>= MAX_STACK_TRACE_ENTRIES
-1) {
463 if (!debug_locks_off_graph_unlock())
466 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
475 unsigned int nr_hardirq_chains
;
476 unsigned int nr_softirq_chains
;
477 unsigned int nr_process_chains
;
478 unsigned int max_lockdep_depth
;
480 #ifdef CONFIG_DEBUG_LOCKDEP
482 * Various lockdep statistics:
484 DEFINE_PER_CPU(struct lockdep_stats
, lockdep_stats
);
491 #define __USAGE(__STATE) \
492 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
493 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
494 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
495 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        [LOCK_USED] = "INITIAL USE",
};
505 const char * __get_key_name(struct lockdep_subclass_key
*key
, char *str
)
507 return kallsyms_lookup((unsigned long)key
, NULL
, NULL
, NULL
, str
);
510 static inline unsigned long lock_flag(enum lock_usage_bit bit
)
515 static char get_usage_char(struct lock_class
*class, enum lock_usage_bit bit
)
519 if (class->usage_mask
& lock_flag(bit
+ 2))
521 if (class->usage_mask
& lock_flag(bit
)) {
523 if (class->usage_mask
& lock_flag(bit
+ 2))
530 void get_usage_chars(struct lock_class
*class, char usage
[LOCK_USAGE_CHARS
])
534 #define LOCKDEP_STATE(__STATE) \
535 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
536 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
537 #include "lockdep_states.h"
543 static void __print_lock_name(struct lock_class
*class)
545 char str
[KSYM_NAME_LEN
];
550 name
= __get_key_name(class->key
, str
);
551 printk(KERN_CONT
"%s", name
);
553 printk(KERN_CONT
"%s", name
);
554 if (class->name_version
> 1)
555 printk(KERN_CONT
"#%d", class->name_version
);
557 printk(KERN_CONT
"/%d", class->subclass
);
561 static void print_lock_name(struct lock_class
*class)
563 char usage
[LOCK_USAGE_CHARS
];
565 get_usage_chars(class, usage
);
567 printk(KERN_CONT
" (");
568 __print_lock_name(class);
569 printk(KERN_CONT
"){%s}", usage
);
572 static void print_lockdep_cache(struct lockdep_map
*lock
)
575 char str
[KSYM_NAME_LEN
];
579 name
= __get_key_name(lock
->key
->subkeys
, str
);
581 printk(KERN_CONT
"%s", name
);
584 static void print_lock(struct held_lock
*hlock
)
587 * We can be called locklessly through debug_show_all_locks() so be
588 * extra careful, the hlock might have been released and cleared.
590 unsigned int class_idx
= hlock
->class_idx
;
592 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
595 if (!class_idx
|| (class_idx
- 1) >= MAX_LOCKDEP_KEYS
) {
596 printk(KERN_CONT
"<RELEASED>\n");
600 printk(KERN_CONT
"%p", hlock
->instance
);
601 print_lock_name(lock_classes
+ class_idx
- 1);
602 printk(KERN_CONT
", at: %pS\n", (void *)hlock
->acquire_ip
);
605 static void lockdep_print_held_locks(struct task_struct
*p
)
607 int i
, depth
= READ_ONCE(p
->lockdep_depth
);
610 printk("no locks held by %s/%d.\n", p
->comm
, task_pid_nr(p
));
612 printk("%d lock%s held by %s/%d:\n", depth
,
613 depth
> 1 ? "s" : "", p
->comm
, task_pid_nr(p
));
615 * It's not reliable to print a task's held locks if it's not sleeping
616 * and it's not the current task.
618 if (p
->state
== TASK_RUNNING
&& p
!= current
)
620 for (i
= 0; i
< depth
; i
++) {
622 print_lock(p
->held_locks
+ i
);
626 static void print_kernel_ident(void)
628 printk("%s %.*s %s\n", init_utsname()->release
,
629 (int)strcspn(init_utsname()->version
, " "),
630 init_utsname()->version
,
634 static int very_verbose(struct lock_class
*class)
637 return class_filter(class);
643 * Is this the address of a static object:
646 static int static_obj(const void *obj
)
648 unsigned long start
= (unsigned long) &_stext
,
649 end
= (unsigned long) &_end
,
650 addr
= (unsigned long) obj
;
655 if ((addr
>= start
) && (addr
< end
))
658 if (arch_is_kernel_data(addr
))
662 * in-kernel percpu var?
664 if (is_kernel_percpu_address(addr
))
668 * module static or percpu var?
670 return is_module_address(addr
) || is_module_percpu_address(addr
);
675 * To make lock name printouts unique, we calculate a unique
676 * class->name_version generation counter. The caller must hold the graph
679 static int count_matching_names(struct lock_class
*new_class
)
681 struct lock_class
*class;
684 if (!new_class
->name
)
687 list_for_each_entry(class, &all_lock_classes
, lock_entry
) {
688 if (new_class
->key
- new_class
->subclass
== class->key
)
689 return class->name_version
;
690 if (class->name
&& !strcmp(class->name
, new_class
->name
))
691 count
= max(count
, class->name_version
);
697 static inline struct lock_class
*
698 look_up_lock_class(const struct lockdep_map
*lock
, unsigned int subclass
)
700 struct lockdep_subclass_key
*key
;
701 struct hlist_head
*hash_head
;
702 struct lock_class
*class;
704 if (unlikely(subclass
>= MAX_LOCKDEP_SUBCLASSES
)) {
707 "BUG: looking up invalid subclass: %u\n", subclass
);
709 "turning off the locking correctness validator.\n");
715 * If it is not initialised then it has never been locked,
716 * so it won't be present in the hash table.
718 if (unlikely(!lock
->key
))
        /*
         * NOTE: the class-key must be unique. For dynamic locks, a static
         * lock_class_key variable is passed in through the mutex_init()
         * (or spin_lock_init()) call - which acts as the key. For static
         * locks we use the lock object itself as the key.
         */
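        /*
         * Illustration (hypothetical locks): a statically allocated lock,
         * e.g.
         *
         *	static DEFINE_SPINLOCK(foo_lock);
         *
         * uses the address of the lock object itself as its class key,
         * whereas a dynamically allocated lock initialised with
         * spin_lock_init()/mutex_init() gets its class from the static
         * struct lock_class_key that the init macro declares at the call
         * site - so every object initialised from that call site shares
         * one lock class.
         */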
        BUILD_BUG_ON(sizeof(struct lock_class_key) >
                        sizeof(struct lockdep_map));
730 key
= lock
->key
->subkeys
+ subclass
;
732 hash_head
= classhashentry(key
);
735 * We do an RCU walk of the hash, see lockdep_free_key_range().
737 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
740 hlist_for_each_entry_rcu(class, hash_head
, hash_entry
) {
741 if (class->key
== key
) {
743 * Huh! same key, different name? Did someone trample
744 * on some memory? We're most confused.
746 WARN_ON_ONCE(class->name
!= lock
->name
);
755 * Static locks do not have their class-keys yet - for them the key is
756 * the lock object itself. If the lock is in the per cpu area, the
757 * canonical address of the lock (per cpu offset removed) is used.
759 static bool assign_lock_key(struct lockdep_map
*lock
)
761 unsigned long can_addr
, addr
= (unsigned long)lock
;
765 * lockdep_free_key_range() assumes that struct lock_class_key
766 * objects do not overlap. Since we use the address of lock
767 * objects as class key for static objects, check whether the
768 * size of lock_class_key objects does not exceed the size of
769 * the smallest lock object.
771 BUILD_BUG_ON(sizeof(struct lock_class_key
) > sizeof(raw_spinlock_t
));
774 if (__is_kernel_percpu_address(addr
, &can_addr
))
775 lock
->key
= (void *)can_addr
;
776 else if (__is_module_percpu_address(addr
, &can_addr
))
777 lock
->key
= (void *)can_addr
;
778 else if (static_obj(lock
))
779 lock
->key
= (void *)lock
;
781 /* Debug-check: all keys must be persistent! */
783 pr_err("INFO: trying to register non-static key.\n");
784 pr_err("the code is fine but needs lockdep annotation.\n");
785 pr_err("turning off the locking correctness validator.\n");
793 #ifdef CONFIG_DEBUG_LOCKDEP
795 /* Check whether element @e occurs in list @h */
796 static bool in_list(struct list_head
*e
, struct list_head
*h
)
800 list_for_each(f
, h
) {
809 * Check whether entry @e occurs in any of the locks_after or locks_before
812 static bool in_any_class_list(struct list_head
*e
)
814 struct lock_class
*class;
817 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
818 class = &lock_classes
[i
];
819 if (in_list(e
, &class->locks_after
) ||
820 in_list(e
, &class->locks_before
))
826 static bool class_lock_list_valid(struct lock_class
*c
, struct list_head
*h
)
830 list_for_each_entry(e
, h
, entry
) {
831 if (e
->links_to
!= c
) {
832 printk(KERN_INFO
"class %s: mismatch for lock entry %ld; class %s <> %s",
834 (unsigned long)(e
- list_entries
),
835 e
->links_to
&& e
->links_to
->name
?
836 e
->links_to
->name
: "(?)",
837 e
->class && e
->class->name
? e
->class->name
:
845 static u16 chain_hlocks
[];
847 static bool check_lock_chain_key(struct lock_chain
*chain
)
849 #ifdef CONFIG_PROVE_LOCKING
853 for (i
= chain
->base
; i
< chain
->base
+ chain
->depth
; i
++)
854 chain_key
= iterate_chain_key(chain_key
, chain_hlocks
[i
] + 1);
856 * The 'unsigned long long' casts avoid that a compiler warning
857 * is reported when building tools/lib/lockdep.
859 if (chain
->chain_key
!= chain_key
) {
860 printk(KERN_INFO
"chain %lld: key %#llx <> %#llx\n",
861 (unsigned long long)(chain
- lock_chains
),
862 (unsigned long long)chain
->chain_key
,
863 (unsigned long long)chain_key
);
870 static bool in_any_zapped_class_list(struct lock_class
*class)
872 struct pending_free
*pf
;
875 for (i
= 0, pf
= delayed_free
.pf
; i
< ARRAY_SIZE(delayed_free
.pf
); i
++, pf
++) {
876 if (in_list(&class->lock_entry
, &pf
->zapped
))
883 static bool __check_data_structures(void)
885 struct lock_class
*class;
886 struct lock_chain
*chain
;
887 struct hlist_head
*head
;
891 /* Check whether all classes occur in a lock list. */
892 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
893 class = &lock_classes
[i
];
894 if (!in_list(&class->lock_entry
, &all_lock_classes
) &&
895 !in_list(&class->lock_entry
, &free_lock_classes
) &&
896 !in_any_zapped_class_list(class)) {
897 printk(KERN_INFO
"class %px/%s is not in any class list\n",
898 class, class->name
? : "(?)");
903 /* Check whether all classes have valid lock lists. */
904 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
905 class = &lock_classes
[i
];
906 if (!class_lock_list_valid(class, &class->locks_before
))
908 if (!class_lock_list_valid(class, &class->locks_after
))
912 /* Check the chain_key of all lock chains. */
913 for (i
= 0; i
< ARRAY_SIZE(chainhash_table
); i
++) {
914 head
= chainhash_table
+ i
;
915 hlist_for_each_entry_rcu(chain
, head
, entry
) {
916 if (!check_lock_chain_key(chain
))
922 * Check whether all list entries that are in use occur in a class
925 for_each_set_bit(i
, list_entries_in_use
, ARRAY_SIZE(list_entries
)) {
926 e
= list_entries
+ i
;
927 if (!in_any_class_list(&e
->entry
)) {
928 printk(KERN_INFO
"list entry %d is not in any class list; class %s <> %s\n",
929 (unsigned int)(e
- list_entries
),
930 e
->class->name
? : "(?)",
931 e
->links_to
->name
? : "(?)");
937 * Check whether all list entries that are not in use do not occur in
940 for_each_clear_bit(i
, list_entries_in_use
, ARRAY_SIZE(list_entries
)) {
941 e
= list_entries
+ i
;
942 if (in_any_class_list(&e
->entry
)) {
943 printk(KERN_INFO
"list entry %d occurs in a class list; class %s <> %s\n",
944 (unsigned int)(e
- list_entries
),
945 e
->class && e
->class->name
? e
->class->name
:
947 e
->links_to
&& e
->links_to
->name
?
948 e
->links_to
->name
: "(?)");
956 int check_consistency
= 0;
957 module_param(check_consistency
, int, 0644);
959 static void check_data_structures(void)
961 static bool once
= false;
963 if (check_consistency
&& !once
) {
964 if (!__check_data_structures()) {
971 #else /* CONFIG_DEBUG_LOCKDEP */
973 static inline void check_data_structures(void) { }
975 #endif /* CONFIG_DEBUG_LOCKDEP */
978 * Initialize the lock_classes[] array elements, the free_lock_classes list
979 * and also the delayed_free structure.
981 static void init_data_structures_once(void)
983 static bool initialization_happened
;
986 if (likely(initialization_happened
))
989 initialization_happened
= true;
991 init_rcu_head(&delayed_free
.rcu_head
);
992 INIT_LIST_HEAD(&delayed_free
.pf
[0].zapped
);
993 INIT_LIST_HEAD(&delayed_free
.pf
[1].zapped
);
995 for (i
= 0; i
< ARRAY_SIZE(lock_classes
); i
++) {
996 list_add_tail(&lock_classes
[i
].lock_entry
, &free_lock_classes
);
997 INIT_LIST_HEAD(&lock_classes
[i
].locks_after
);
998 INIT_LIST_HEAD(&lock_classes
[i
].locks_before
);
1002 static inline struct hlist_head
*keyhashentry(const struct lock_class_key
*key
)
1004 unsigned long hash
= hash_long((uintptr_t)key
, KEYHASH_BITS
);
1006 return lock_keys_hash
+ hash
;
1009 /* Register a dynamically allocated key. */
1010 void lockdep_register_key(struct lock_class_key
*key
)
1012 struct hlist_head
*hash_head
;
1013 struct lock_class_key
*k
;
1014 unsigned long flags
;
1016 if (WARN_ON_ONCE(static_obj(key
)))
1018 hash_head
= keyhashentry(key
);
1020 raw_local_irq_save(flags
);
1023 hlist_for_each_entry_rcu(k
, hash_head
, hash_entry
) {
1024 if (WARN_ON_ONCE(k
== key
))
1027 hlist_add_head_rcu(&key
->hash_entry
, hash_head
);
1031 raw_local_irq_restore(flags
);
1033 EXPORT_SYMBOL_GPL(lockdep_register_key
);
1035 /* Check whether a key has been registered as a dynamic key. */
1036 static bool is_dynamic_key(const struct lock_class_key
*key
)
1038 struct hlist_head
*hash_head
;
1039 struct lock_class_key
*k
;
1042 if (WARN_ON_ONCE(static_obj(key
)))
1046 * If lock debugging is disabled lock_keys_hash[] may contain
1047 * pointers to memory that has already been freed. Avoid triggering
1048 * a use-after-free in that case by returning early.
1053 hash_head
= keyhashentry(key
);
1056 hlist_for_each_entry_rcu(k
, hash_head
, hash_entry
) {
1068 * Register a lock's class in the hash-table, if the class is not present
1069 * yet. Otherwise we look it up. We cache the result in the lock object
1070 * itself, so actual lookup of the hash should be once per lock object.
1072 static struct lock_class
*
1073 register_lock_class(struct lockdep_map
*lock
, unsigned int subclass
, int force
)
1075 struct lockdep_subclass_key
*key
;
1076 struct hlist_head
*hash_head
;
1077 struct lock_class
*class;
1079 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1081 class = look_up_lock_class(lock
, subclass
);
1083 goto out_set_class_cache
;
1086 if (!assign_lock_key(lock
))
1088 } else if (!static_obj(lock
->key
) && !is_dynamic_key(lock
->key
)) {
1092 key
= lock
->key
->subkeys
+ subclass
;
1093 hash_head
= classhashentry(key
);
1095 if (!graph_lock()) {
1099 * We have to do the hash-walk again, to avoid races
1102 hlist_for_each_entry_rcu(class, hash_head
, hash_entry
) {
1103 if (class->key
== key
)
1104 goto out_unlock_set
;
1107 init_data_structures_once();
1109 /* Allocate a new lock class and add it to the hash. */
1110 class = list_first_entry_or_null(&free_lock_classes
, typeof(*class),
1113 if (!debug_locks_off_graph_unlock()) {
1117 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1122 debug_atomic_inc(nr_unused_locks
);
1124 class->name
= lock
->name
;
1125 class->subclass
= subclass
;
1126 WARN_ON_ONCE(!list_empty(&class->locks_before
));
1127 WARN_ON_ONCE(!list_empty(&class->locks_after
));
1128 class->name_version
= count_matching_names(class);
1130 * We use RCU's safe list-add method to make
1131 * parallel walking of the hash-list safe:
1133 hlist_add_head_rcu(&class->hash_entry
, hash_head
);
1135 * Remove the class from the free list and add it to the global list
1138 list_move_tail(&class->lock_entry
, &all_lock_classes
);
1140 if (verbose(class)) {
1143 printk("\nnew class %px: %s", class->key
, class->name
);
1144 if (class->name_version
> 1)
1145 printk(KERN_CONT
"#%d", class->name_version
);
1146 printk(KERN_CONT
"\n");
1149 if (!graph_lock()) {
1156 out_set_class_cache
:
1157 if (!subclass
|| force
)
1158 lock
->class_cache
[0] = class;
1159 else if (subclass
< NR_LOCKDEP_CACHING_CLASSES
)
1160 lock
->class_cache
[subclass
] = class;
1163 * Hash collision, did we smoke some? We found a class with a matching
1164 * hash but the subclass -- which is hashed in -- didn't match.
1166 if (DEBUG_LOCKS_WARN_ON(class->subclass
!= subclass
))
1172 #ifdef CONFIG_PROVE_LOCKING
1174 * Allocate a lockdep entry. (assumes the graph_lock held, returns
1175 * with NULL on failure)
1177 static struct lock_list
*alloc_list_entry(void)
1179 int idx
= find_first_zero_bit(list_entries_in_use
,
1180 ARRAY_SIZE(list_entries
));
1182 if (idx
>= ARRAY_SIZE(list_entries
)) {
1183 if (!debug_locks_off_graph_unlock())
1186 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1191 __set_bit(idx
, list_entries_in_use
);
1192 return list_entries
+ idx
;
1196 * Add a new dependency to the head of the list:
1198 static int add_lock_to_list(struct lock_class
*this,
1199 struct lock_class
*links_to
, struct list_head
*head
,
1200 unsigned long ip
, int distance
,
1201 struct stack_trace
*trace
)
1203 struct lock_list
*entry
;
1205 * Lock not present yet - get a new dependency struct and
1206 * add it to the list:
1208 entry
= alloc_list_entry();
1212 entry
->class = this;
1213 entry
->links_to
= links_to
;
1214 entry
->distance
= distance
;
1215 entry
->trace
= *trace
;
1217 * Both allocation and removal are done under the graph lock; but
1218 * iteration is under RCU-sched; see look_up_lock_class() and
1219 * lockdep_free_key_range().
1221 list_add_tail_rcu(&entry
->entry
, head
);
/*
 * The queue size is kept a power of 2 so that index arithmetic reduces
 * to a cheap mask operation.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
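/*
 * Worked example of the mask arithmetic (illustrative only): with
 * MAX_CIRCULAR_QUEUE_SIZE == 4096, CQ_MASK == 0xfff, so advancing an
 * index wraps around for free:
 *
 *	(4095 + 1) & CQ_MASK == 0
 *
 * and the element count stays correct across the wrap:
 *
 *	rear == 2, front == 4094  =>  (2 - 4094) & CQ_MASK == 4
 */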
/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to a
 * previously held lock if there is a circular dependency between them.
 */
struct circular_queue {
        unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
        unsigned int  front, rear;
};
1243 static struct circular_queue lock_cq
;
1245 unsigned int max_bfs_queue_depth
;
1247 static unsigned int lockdep_dependency_gen_id
;
1249 static inline void __cq_init(struct circular_queue
*cq
)
1251 cq
->front
= cq
->rear
= 0;
1252 lockdep_dependency_gen_id
++;
1255 static inline int __cq_empty(struct circular_queue
*cq
)
1257 return (cq
->front
== cq
->rear
);
1260 static inline int __cq_full(struct circular_queue
*cq
)
1262 return ((cq
->rear
+ 1) & CQ_MASK
) == cq
->front
;
1265 static inline int __cq_enqueue(struct circular_queue
*cq
, unsigned long elem
)
1270 cq
->element
[cq
->rear
] = elem
;
1271 cq
->rear
= (cq
->rear
+ 1) & CQ_MASK
;
1275 static inline int __cq_dequeue(struct circular_queue
*cq
, unsigned long *elem
)
1280 *elem
= cq
->element
[cq
->front
];
1281 cq
->front
= (cq
->front
+ 1) & CQ_MASK
;
1285 static inline unsigned int __cq_get_elem_count(struct circular_queue
*cq
)
1287 return (cq
->rear
- cq
->front
) & CQ_MASK
;
1290 static inline void mark_lock_accessed(struct lock_list
*lock
,
1291 struct lock_list
*parent
)
1295 nr
= lock
- list_entries
;
1296 WARN_ON(nr
>= ARRAY_SIZE(list_entries
)); /* Out-of-bounds, input fail */
1297 lock
->parent
= parent
;
1298 lock
->class->dep_gen_id
= lockdep_dependency_gen_id
;
1301 static inline unsigned long lock_accessed(struct lock_list
*lock
)
1305 nr
= lock
- list_entries
;
1306 WARN_ON(nr
>= ARRAY_SIZE(list_entries
)); /* Out-of-bounds, input fail */
1307 return lock
->class->dep_gen_id
== lockdep_dependency_gen_id
;
1310 static inline struct lock_list
*get_lock_parent(struct lock_list
*child
)
1312 return child
->parent
;
1315 static inline int get_lock_depth(struct lock_list
*child
)
1318 struct lock_list
*parent
;
1320 while ((parent
= get_lock_parent(child
))) {
1327 static int __bfs(struct lock_list
*source_entry
,
1329 int (*match
)(struct lock_list
*entry
, void *data
),
1330 struct lock_list
**target_entry
,
1333 struct lock_list
*entry
;
1334 struct list_head
*head
;
1335 struct circular_queue
*cq
= &lock_cq
;
1338 if (match(source_entry
, data
)) {
1339 *target_entry
= source_entry
;
1345 head
= &source_entry
->class->locks_after
;
1347 head
= &source_entry
->class->locks_before
;
1349 if (list_empty(head
))
1353 __cq_enqueue(cq
, (unsigned long)source_entry
);
1355 while (!__cq_empty(cq
)) {
1356 struct lock_list
*lock
;
1358 __cq_dequeue(cq
, (unsigned long *)&lock
);
1366 head
= &lock
->class->locks_after
;
1368 head
= &lock
->class->locks_before
;
1370 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1372 list_for_each_entry_rcu(entry
, head
, entry
) {
1373 if (!lock_accessed(entry
)) {
1374 unsigned int cq_depth
;
1375 mark_lock_accessed(entry
, lock
);
1376 if (match(entry
, data
)) {
1377 *target_entry
= entry
;
1382 if (__cq_enqueue(cq
, (unsigned long)entry
)) {
1386 cq_depth
= __cq_get_elem_count(cq
);
1387 if (max_bfs_queue_depth
< cq_depth
)
1388 max_bfs_queue_depth
= cq_depth
;
1396 static inline int __bfs_forwards(struct lock_list
*src_entry
,
1398 int (*match
)(struct lock_list
*entry
, void *data
),
1399 struct lock_list
**target_entry
)
1401 return __bfs(src_entry
, data
, match
, target_entry
, 1);
1405 static inline int __bfs_backwards(struct lock_list
*src_entry
,
1407 int (*match
)(struct lock_list
*entry
, void *data
),
1408 struct lock_list
**target_entry
)
1410 return __bfs(src_entry
, data
, match
, target_entry
, 0);
1415 * Recursive, forwards-direction lock-dependency checking, used for
1416 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1421 * Print a dependency chain entry (this is only done when a deadlock
1422 * has been detected):
1425 print_circular_bug_entry(struct lock_list
*target
, int depth
)
1427 if (debug_locks_silent
)
1429 printk("\n-> #%u", depth
);
1430 print_lock_name(target
->class);
1431 printk(KERN_CONT
":\n");
1432 print_stack_trace(&target
->trace
, 6);
1438 print_circular_lock_scenario(struct held_lock
*src
,
1439 struct held_lock
*tgt
,
1440 struct lock_list
*prt
)
1442 struct lock_class
*source
= hlock_class(src
);
1443 struct lock_class
*target
= hlock_class(tgt
);
1444 struct lock_class
*parent
= prt
->class;
1447 * A direct locking problem where unsafe_class lock is taken
1448 * directly by safe_class lock, then all we need to show
1449 * is the deadlock scenario, as it is obvious that the
1450 * unsafe lock is taken under the safe lock.
1452 * But if there is a chain instead, where the safe lock takes
1453 * an intermediate lock (middle_class) where this lock is
1454 * not the same as the safe lock, then the lock chain is
1455 * used to describe the problem. Otherwise we would need
1456 * to show a different CPU case for each link in the chain
1457 * from the safe_class lock to the unsafe_class lock.
1459 if (parent
!= source
) {
1460 printk("Chain exists of:\n ");
1461 __print_lock_name(source
);
1462 printk(KERN_CONT
" --> ");
1463 __print_lock_name(parent
);
1464 printk(KERN_CONT
" --> ");
1465 __print_lock_name(target
);
1466 printk(KERN_CONT
"\n\n");
1469 printk(" Possible unsafe locking scenario:\n\n");
1470 printk(" CPU0 CPU1\n");
1471 printk(" ---- ----\n");
1473 __print_lock_name(target
);
1474 printk(KERN_CONT
");\n");
1476 __print_lock_name(parent
);
1477 printk(KERN_CONT
");\n");
1479 __print_lock_name(target
);
1480 printk(KERN_CONT
");\n");
1482 __print_lock_name(source
);
1483 printk(KERN_CONT
");\n");
1484 printk("\n *** DEADLOCK ***\n\n");
1488 * When a circular dependency is detected, print the
1492 print_circular_bug_header(struct lock_list
*entry
, unsigned int depth
,
1493 struct held_lock
*check_src
,
1494 struct held_lock
*check_tgt
)
1496 struct task_struct
*curr
= current
;
1498 if (debug_locks_silent
)
1502 pr_warn("======================================================\n");
1503 pr_warn("WARNING: possible circular locking dependency detected\n");
1504 print_kernel_ident();
1505 pr_warn("------------------------------------------------------\n");
1506 pr_warn("%s/%d is trying to acquire lock:\n",
1507 curr
->comm
, task_pid_nr(curr
));
1508 print_lock(check_src
);
1510 pr_warn("\nbut task is already holding lock:\n");
1512 print_lock(check_tgt
);
1513 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1514 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1516 print_circular_bug_entry(entry
, depth
);
1521 static inline int class_equal(struct lock_list
*entry
, void *data
)
1523 return entry
->class == data
;
1526 static noinline
int print_circular_bug(struct lock_list
*this,
1527 struct lock_list
*target
,
1528 struct held_lock
*check_src
,
1529 struct held_lock
*check_tgt
,
1530 struct stack_trace
*trace
)
1532 struct task_struct
*curr
= current
;
1533 struct lock_list
*parent
;
1534 struct lock_list
*first_parent
;
1537 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
1540 if (!save_trace(&this->trace
))
1543 depth
= get_lock_depth(target
);
1545 print_circular_bug_header(target
, depth
, check_src
, check_tgt
);
1547 parent
= get_lock_parent(target
);
1548 first_parent
= parent
;
1551 print_circular_bug_entry(parent
, --depth
);
1552 parent
= get_lock_parent(parent
);
1555 printk("\nother info that might help us debug this:\n\n");
1556 print_circular_lock_scenario(check_src
, check_tgt
,
1559 lockdep_print_held_locks(curr
);
1561 printk("\nstack backtrace:\n");
1567 static noinline
int print_bfs_bug(int ret
)
1569 if (!debug_locks_off_graph_unlock())
1573 * Breadth-first-search failed, graph got corrupted?
1575 WARN(1, "lockdep bfs error:%d\n", ret
);
1580 static int noop_count(struct lock_list
*entry
, void *data
)
1582 (*(unsigned long *)data
)++;
1586 static unsigned long __lockdep_count_forward_deps(struct lock_list
*this)
1588 unsigned long count
= 0;
1589 struct lock_list
*uninitialized_var(target_entry
);
1591 __bfs_forwards(this, (void *)&count
, noop_count
, &target_entry
);
1595 unsigned long lockdep_count_forward_deps(struct lock_class
*class)
1597 unsigned long ret
, flags
;
1598 struct lock_list
this;
1603 raw_local_irq_save(flags
);
1604 arch_spin_lock(&lockdep_lock
);
1605 ret
= __lockdep_count_forward_deps(&this);
1606 arch_spin_unlock(&lockdep_lock
);
1607 raw_local_irq_restore(flags
);
1612 static unsigned long __lockdep_count_backward_deps(struct lock_list
*this)
1614 unsigned long count
= 0;
1615 struct lock_list
*uninitialized_var(target_entry
);
1617 __bfs_backwards(this, (void *)&count
, noop_count
, &target_entry
);
1622 unsigned long lockdep_count_backward_deps(struct lock_class
*class)
1624 unsigned long ret
, flags
;
1625 struct lock_list
this;
1630 raw_local_irq_save(flags
);
1631 arch_spin_lock(&lockdep_lock
);
1632 ret
= __lockdep_count_backward_deps(&this);
1633 arch_spin_unlock(&lockdep_lock
);
1634 raw_local_irq_restore(flags
);
1640 * Prove that the dependency graph starting at <entry> can not
1641 * lead to <target>. Print an error and return 0 if it does.
1644 check_noncircular(struct lock_list
*root
, struct lock_class
*target
,
1645 struct lock_list
**target_entry
)
1649 debug_atomic_inc(nr_cyclic_checks
);
1651 result
= __bfs_forwards(root
, target
, class_equal
, target_entry
);
1657 check_redundant(struct lock_list
*root
, struct lock_class
*target
,
1658 struct lock_list
**target_entry
)
1662 debug_atomic_inc(nr_redundant_checks
);
1664 result
= __bfs_forwards(root
, target
, class_equal
, target_entry
);
1669 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1671 * Forwards and backwards subgraph searching, for the purposes of
1672 * proving that two subgraphs can be connected by a new dependency
1673 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1676 static inline int usage_match(struct lock_list
*entry
, void *bit
)
1678 return entry
->class->usage_mask
& (1 << (enum lock_usage_bit
)bit
);
1684 * Find a node in the forwards-direction dependency sub-graph starting
1685 * at @root->class that matches @bit.
1687 * Return 0 if such a node exists in the subgraph, and put that node
1688 * into *@target_entry.
1690 * Return 1 otherwise and keep *@target_entry unchanged.
1691 * Return <0 on error.
1694 find_usage_forwards(struct lock_list
*root
, enum lock_usage_bit bit
,
1695 struct lock_list
**target_entry
)
1699 debug_atomic_inc(nr_find_usage_forwards_checks
);
1701 result
= __bfs_forwards(root
, (void *)bit
, usage_match
, target_entry
);
1707 * Find a node in the backwards-direction dependency sub-graph starting
1708 * at @root->class that matches @bit.
1710 * Return 0 if such a node exists in the subgraph, and put that node
1711 * into *@target_entry.
1713 * Return 1 otherwise and keep *@target_entry unchanged.
1714 * Return <0 on error.
1717 find_usage_backwards(struct lock_list
*root
, enum lock_usage_bit bit
,
1718 struct lock_list
**target_entry
)
1722 debug_atomic_inc(nr_find_usage_backwards_checks
);
1724 result
= __bfs_backwards(root
, (void *)bit
, usage_match
, target_entry
);
1729 static void print_lock_class_header(struct lock_class
*class, int depth
)
1733 printk("%*s->", depth
, "");
1734 print_lock_name(class);
1735 #ifdef CONFIG_DEBUG_LOCKDEP
1736 printk(KERN_CONT
" ops: %lu", debug_class_ops_read(class));
1738 printk(KERN_CONT
" {\n");
1740 for (bit
= 0; bit
< LOCK_USAGE_STATES
; bit
++) {
1741 if (class->usage_mask
& (1 << bit
)) {
1744 len
+= printk("%*s %s", depth
, "", usage_str
[bit
]);
1745 len
+= printk(KERN_CONT
" at:\n");
1746 print_stack_trace(class->usage_traces
+ bit
, len
);
1749 printk("%*s }\n", depth
, "");
1751 printk("%*s ... key at: [<%px>] %pS\n",
1752 depth
, "", class->key
, class->key
);
1756 * printk the shortest lock dependencies from @start to @end in reverse order:
1759 print_shortest_lock_dependencies(struct lock_list
*leaf
,
1760 struct lock_list
*root
)
1762 struct lock_list
*entry
= leaf
;
1765 /*compute depth from generated tree by BFS*/
1766 depth
= get_lock_depth(leaf
);
1769 print_lock_class_header(entry
->class, depth
);
1770 printk("%*s ... acquired at:\n", depth
, "");
1771 print_stack_trace(&entry
->trace
, 2);
1774 if (depth
== 0 && (entry
!= root
)) {
1775 printk("lockdep:%s bad path found in chain graph\n", __func__
);
1779 entry
= get_lock_parent(entry
);
1781 } while (entry
&& (depth
>= 0));
1787 print_irq_lock_scenario(struct lock_list
*safe_entry
,
1788 struct lock_list
*unsafe_entry
,
1789 struct lock_class
*prev_class
,
1790 struct lock_class
*next_class
)
1792 struct lock_class
*safe_class
= safe_entry
->class;
1793 struct lock_class
*unsafe_class
= unsafe_entry
->class;
1794 struct lock_class
*middle_class
= prev_class
;
1796 if (middle_class
== safe_class
)
1797 middle_class
= next_class
;
1800 * A direct locking problem where unsafe_class lock is taken
1801 * directly by safe_class lock, then all we need to show
1802 * is the deadlock scenario, as it is obvious that the
1803 * unsafe lock is taken under the safe lock.
1805 * But if there is a chain instead, where the safe lock takes
1806 * an intermediate lock (middle_class) where this lock is
1807 * not the same as the safe lock, then the lock chain is
1808 * used to describe the problem. Otherwise we would need
1809 * to show a different CPU case for each link in the chain
1810 * from the safe_class lock to the unsafe_class lock.
1812 if (middle_class
!= unsafe_class
) {
1813 printk("Chain exists of:\n ");
1814 __print_lock_name(safe_class
);
1815 printk(KERN_CONT
" --> ");
1816 __print_lock_name(middle_class
);
1817 printk(KERN_CONT
" --> ");
1818 __print_lock_name(unsafe_class
);
1819 printk(KERN_CONT
"\n\n");
1822 printk(" Possible interrupt unsafe locking scenario:\n\n");
1823 printk(" CPU0 CPU1\n");
1824 printk(" ---- ----\n");
1826 __print_lock_name(unsafe_class
);
1827 printk(KERN_CONT
");\n");
1828 printk(" local_irq_disable();\n");
1830 __print_lock_name(safe_class
);
1831 printk(KERN_CONT
");\n");
1833 __print_lock_name(middle_class
);
1834 printk(KERN_CONT
");\n");
1835 printk(" <Interrupt>\n");
1837 __print_lock_name(safe_class
);
1838 printk(KERN_CONT
");\n");
1839 printk("\n *** DEADLOCK ***\n\n");
1843 print_bad_irq_dependency(struct task_struct
*curr
,
1844 struct lock_list
*prev_root
,
1845 struct lock_list
*next_root
,
1846 struct lock_list
*backwards_entry
,
1847 struct lock_list
*forwards_entry
,
1848 struct held_lock
*prev
,
1849 struct held_lock
*next
,
1850 enum lock_usage_bit bit1
,
1851 enum lock_usage_bit bit2
,
1852 const char *irqclass
)
1854 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
1858 pr_warn("=====================================================\n");
1859 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1860 irqclass
, irqclass
);
1861 print_kernel_ident();
1862 pr_warn("-----------------------------------------------------\n");
1863 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1864 curr
->comm
, task_pid_nr(curr
),
1865 curr
->hardirq_context
, hardirq_count() >> HARDIRQ_SHIFT
,
1866 curr
->softirq_context
, softirq_count() >> SOFTIRQ_SHIFT
,
1867 curr
->hardirqs_enabled
,
1868 curr
->softirqs_enabled
);
1871 pr_warn("\nand this task is already holding:\n");
1873 pr_warn("which would create a new lock dependency:\n");
1874 print_lock_name(hlock_class(prev
));
1876 print_lock_name(hlock_class(next
));
1879 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1881 print_lock_name(backwards_entry
->class);
1882 pr_warn("\n... which became %s-irq-safe at:\n", irqclass
);
1884 print_stack_trace(backwards_entry
->class->usage_traces
+ bit1
, 1);
1886 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass
);
1887 print_lock_name(forwards_entry
->class);
1888 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass
);
1891 print_stack_trace(forwards_entry
->class->usage_traces
+ bit2
, 1);
1893 pr_warn("\nother info that might help us debug this:\n\n");
1894 print_irq_lock_scenario(backwards_entry
, forwards_entry
,
1895 hlock_class(prev
), hlock_class(next
));
1897 lockdep_print_held_locks(curr
);
1899 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass
);
1900 if (!save_trace(&prev_root
->trace
))
1902 print_shortest_lock_dependencies(backwards_entry
, prev_root
);
1904 pr_warn("\nthe dependencies between the lock to be acquired");
1905 pr_warn(" and %s-irq-unsafe lock:\n", irqclass
);
1906 if (!save_trace(&next_root
->trace
))
1908 print_shortest_lock_dependencies(forwards_entry
, next_root
);
1910 pr_warn("\nstack backtrace:\n");
1917 check_usage(struct task_struct
*curr
, struct held_lock
*prev
,
1918 struct held_lock
*next
, enum lock_usage_bit bit_backwards
,
1919 enum lock_usage_bit bit_forwards
, const char *irqclass
)
1922 struct lock_list
this, that
;
1923 struct lock_list
*uninitialized_var(target_entry
);
1924 struct lock_list
*uninitialized_var(target_entry1
);
1928 this.class = hlock_class(prev
);
1929 ret
= find_usage_backwards(&this, bit_backwards
, &target_entry
);
1931 return print_bfs_bug(ret
);
1936 that
.class = hlock_class(next
);
1937 ret
= find_usage_forwards(&that
, bit_forwards
, &target_entry1
);
1939 return print_bfs_bug(ret
);
1943 return print_bad_irq_dependency(curr
, &this, &that
,
1944 target_entry
, target_entry1
,
1946 bit_backwards
, bit_forwards
, irqclass
);
1949 static const char *state_names
[] = {
1950 #define LOCKDEP_STATE(__STATE) \
1951 __stringify(__STATE),
1952 #include "lockdep_states.h"
1953 #undef LOCKDEP_STATE
1956 static const char *state_rnames
[] = {
1957 #define LOCKDEP_STATE(__STATE) \
1958 __stringify(__STATE)"-READ",
1959 #include "lockdep_states.h"
1960 #undef LOCKDEP_STATE
1963 static inline const char *state_name(enum lock_usage_bit bit
)
1965 return (bit
& LOCK_USAGE_READ_MASK
) ? state_rnames
[bit
>> 2] : state_names
[bit
>> 2];
1968 static int exclusive_bit(int new_bit
)
1970 int state
= new_bit
& LOCK_USAGE_STATE_MASK
;
1971 int dir
= new_bit
& LOCK_USAGE_DIR_MASK
;
1974 * keep state, bit flip the direction and strip read.
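        /*
         * For example (assuming the usual bit layout from
         * lockdep_internals.h: bit 0 = READ, bit 1 = direction):
         *
         *	exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
         *	exclusive_bit(LOCK_ENABLED_SOFTIRQ)      == LOCK_USED_IN_SOFTIRQ
         */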
1976 return state
| (dir
^ LOCK_USAGE_DIR_MASK
);
1979 static int check_irq_usage(struct task_struct
*curr
, struct held_lock
*prev
,
1980 struct held_lock
*next
, enum lock_usage_bit bit
)
1983 * Prove that the new dependency does not connect a hardirq-safe
1984 * lock with a hardirq-unsafe lock - to achieve this we search
1985 * the backwards-subgraph starting at <prev>, and the
1986 * forwards-subgraph starting at <next>:
1988 if (!check_usage(curr
, prev
, next
, bit
,
1989 exclusive_bit(bit
), state_name(bit
)))
1995 * Prove that the new dependency does not connect a hardirq-safe-read
1996 * lock with a hardirq-unsafe lock - to achieve this we search
1997 * the backwards-subgraph starting at <prev>, and the
1998 * forwards-subgraph starting at <next>:
2000 if (!check_usage(curr
, prev
, next
, bit
,
2001 exclusive_bit(bit
), state_name(bit
)))
2008 check_prev_add_irq(struct task_struct
*curr
, struct held_lock
*prev
,
2009 struct held_lock
*next
)
2011 #define LOCKDEP_STATE(__STATE) \
2012 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
2014 #include "lockdep_states.h"
2015 #undef LOCKDEP_STATE
2020 static void inc_chains(void)
2022 if (current
->hardirq_context
)
2023 nr_hardirq_chains
++;
2025 if (current
->softirq_context
)
2026 nr_softirq_chains
++;
2028 nr_process_chains
++;
2035 check_prev_add_irq(struct task_struct
*curr
, struct held_lock
*prev
,
2036 struct held_lock
*next
)
2041 static inline void inc_chains(void)
2043 nr_process_chains
++;
2049 print_deadlock_scenario(struct held_lock
*nxt
,
2050 struct held_lock
*prv
)
2052 struct lock_class
*next
= hlock_class(nxt
);
2053 struct lock_class
*prev
= hlock_class(prv
);
2055 printk(" Possible unsafe locking scenario:\n\n");
2059 __print_lock_name(prev
);
2060 printk(KERN_CONT
");\n");
2062 __print_lock_name(next
);
2063 printk(KERN_CONT
");\n");
2064 printk("\n *** DEADLOCK ***\n\n");
2065 printk(" May be due to missing lock nesting notation\n\n");
2069 print_deadlock_bug(struct task_struct
*curr
, struct held_lock
*prev
,
2070 struct held_lock
*next
)
2072 if (!debug_locks_off_graph_unlock() || debug_locks_silent
)
2076 pr_warn("============================================\n");
2077 pr_warn("WARNING: possible recursive locking detected\n");
2078 print_kernel_ident();
2079 pr_warn("--------------------------------------------\n");
2080 pr_warn("%s/%d is trying to acquire lock:\n",
2081 curr
->comm
, task_pid_nr(curr
));
2083 pr_warn("\nbut task is already holding lock:\n");
2086 pr_warn("\nother info that might help us debug this:\n");
2087 print_deadlock_scenario(next
, prev
);
2088 lockdep_print_held_locks(curr
);
2090 pr_warn("\nstack backtrace:\n");
/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
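/*
 * For example: read_lock(&A); read_lock(&A); is allowed (returns 2,
 * recursive read), while spin_lock(&A); spin_lock(&A); from the same
 * context is reported as a deadlock (returns 0) unless the second
 * acquisition names a held nest_lock. (A is a hypothetical lock used
 * only for illustration.)
 */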
2105 check_deadlock(struct task_struct
*curr
, struct held_lock
*next
,
2106 struct lockdep_map
*next_instance
, int read
)
2108 struct held_lock
*prev
;
2109 struct held_lock
*nest
= NULL
;
2112 for (i
= 0; i
< curr
->lockdep_depth
; i
++) {
2113 prev
= curr
->held_locks
+ i
;
2115 if (prev
->instance
== next
->nest_lock
)
2118 if (hlock_class(prev
) != hlock_class(next
))
2122 * Allow read-after-read recursion of the same
2123 * lock class (i.e. read_lock(lock)+read_lock(lock)):
2125 if ((read
== 2) && prev
->read
)
2129 * We're holding the nest_lock, which serializes this lock's
2130 * nesting behaviour.
2135 return print_deadlock_bug(curr
, prev
, next
);
/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
2163 check_prev_add(struct task_struct
*curr
, struct held_lock
*prev
,
2164 struct held_lock
*next
, int distance
, struct stack_trace
*trace
,
2165 int (*save
)(struct stack_trace
*trace
))
2167 struct lock_list
*uninitialized_var(target_entry
);
2168 struct lock_list
*entry
;
2169 struct lock_list
this;
2172 if (!hlock_class(prev
)->key
|| !hlock_class(next
)->key
) {
2174 * The warning statements below may trigger a use-after-free
2175 * of the class name. It is better to trigger a use-after free
2176 * and to have the class name most of the time instead of not
2177 * having the class name available.
2179 WARN_ONCE(!debug_locks_silent
&& !hlock_class(prev
)->key
,
2180 "Detected use-after-free of lock class %px/%s\n",
2182 hlock_class(prev
)->name
);
2183 WARN_ONCE(!debug_locks_silent
&& !hlock_class(next
)->key
,
2184 "Detected use-after-free of lock class %px/%s\n",
2186 hlock_class(next
)->name
);
2191 * Prove that the new <prev> -> <next> dependency would not
2192 * create a circular dependency in the graph. (We do this by
2193 * forward-recursing into the graph starting at <next>, and
2194 * checking whether we can reach <prev>.)
2196 * We are using global variables to control the recursion, to
2197 * keep the stackframe size of the recursive functions low:
2199 this.class = hlock_class(next
);
2201 ret
= check_noncircular(&this, hlock_class(prev
), &target_entry
);
2202 if (unlikely(!ret
)) {
2203 if (!trace
->entries
) {
2205 * If @save fails here, the printing might trigger
2206 * a WARN but because of the !nr_entries it should
2207 * not do bad things.
2211 return print_circular_bug(&this, target_entry
, next
, prev
, trace
);
2213 else if (unlikely(ret
< 0))
2214 return print_bfs_bug(ret
);
2216 if (!check_prev_add_irq(curr
, prev
, next
))
2220 * For recursive read-locks we do all the dependency checks,
2221 * but we dont store read-triggered dependencies (only
2222 * write-triggered dependencies). This ensures that only the
2223 * write-side dependencies matter, and that if for example a
2224 * write-lock never takes any other locks, then the reads are
2225 * equivalent to a NOP.
2227 if (next
->read
== 2 || prev
->read
== 2)
2230 * Is the <prev> -> <next> dependency already present?
2232 * (this may occur even though this is a new chain: consider
2233 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
2234 * chains - the second one will be new, but L1 already has
2235 * L2 added to its dependency list, due to the first chain.)
2237 list_for_each_entry(entry
, &hlock_class(prev
)->locks_after
, entry
) {
2238 if (entry
->class == hlock_class(next
)) {
2240 entry
->distance
= 1;
2246 * Is the <prev> -> <next> link redundant?
2248 this.class = hlock_class(prev
);
2250 ret
= check_redundant(&this, hlock_class(next
), &target_entry
);
2252 debug_atomic_inc(nr_redundant
);
2256 return print_bfs_bug(ret
);
2259 if (!trace
->entries
&& !save(trace
))
2263 * Ok, all validations passed, add the new lock
2264 * to the previous lock's dependency list:
2266 ret
= add_lock_to_list(hlock_class(next
), hlock_class(prev
),
2267 &hlock_class(prev
)->locks_after
,
2268 next
->acquire_ip
, distance
, trace
);
2273 ret
= add_lock_to_list(hlock_class(prev
), hlock_class(next
),
2274 &hlock_class(next
)->locks_before
,
2275 next
->acquire_ip
, distance
, trace
);
/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
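/*
 * For example (hypothetical locks): with held locks, oldest first,
 *
 *	A, B(trylock), C(trylock)
 *
 * and a new lock D being acquired, the dependencies C -> D, B -> D and
 * A -> D are added, and the scan stops at A because it is the first
 * (most recent) non-trylock entry.
 */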
2289 check_prevs_add(struct task_struct
*curr
, struct held_lock
*next
)
2291 int depth
= curr
->lockdep_depth
;
2292 struct held_lock
*hlock
;
2293 struct stack_trace trace
= {
2303 * Depth must not be zero for a non-head lock:
2308 * At least two relevant locks must exist for this
2311 if (curr
->held_locks
[depth
].irq_context
!=
2312 curr
->held_locks
[depth
-1].irq_context
)
2316 int distance
= curr
->lockdep_depth
- depth
+ 1;
2317 hlock
= curr
->held_locks
+ depth
- 1;
2320 * Only non-recursive-read entries get new dependencies
2323 if (hlock
->read
!= 2 && hlock
->check
) {
2324 int ret
= check_prev_add(curr
, hlock
, next
, distance
, &trace
, save_trace
);
2329 * Stop after the first non-trylock entry,
2330 * as non-trylock entries have added their
2331 * own direct dependencies already, so this
2332 * lock is connected to them indirectly:
2334 if (!hlock
->trylock
)
2340 * End of lock-stack?
2345 * Stop the search if we cross into another context:
2347 if (curr
->held_locks
[depth
].irq_context
!=
2348 curr
->held_locks
[depth
-1].irq_context
)
2353 if (!debug_locks_off_graph_unlock())
2357 * Clearly we all shouldn't be here, but since we made it we
2358 * can reliable say we messed up our state. See the above two
2359 * gotos for reasons why we could possibly end up here.
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}

/*
 * Returns the index of the first held_lock of the current chain
 */
static inline int get_first_held_lock(struct task_struct *curr,
					struct held_lock *hlock)
{
	int i;
	struct held_lock *hlock_curr;

	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
		hlock_curr = curr->held_locks + i;
		if (hlock_curr->irq_context != hlock->irq_context)
			break;
	}

	return ++i;
}

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Returns the next chain_key iteration
 */
static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
{
	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);

	printk(" class_idx:%d -> chain_key:%016Lx",
		class_idx,
		(unsigned long long)new_chain_key);
	return new_chain_key;
}

static void
print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
{
	struct held_lock *hlock;
	u64 chain_key = 0;
	int depth = curr->lockdep_depth;
	int i;

	printk("depth: %u\n", depth + 1);
	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
		hlock = curr->held_locks + i;
		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);

		print_lock(hlock);
	}

	print_chain_key_iteration(hlock_next->class_idx, chain_key);
	print_lock(hlock_next);
}

static void print_chain_keys_chain(struct lock_chain *chain)
{
	int i;
	u64 chain_key = 0;
	int class_id;

	printk("depth: %u\n", chain->depth);
	for (i = 0; i < chain->depth; i++) {
		class_id = chain_hlocks[chain->base + i];
		chain_key = print_chain_key_iteration(class_id + 1, chain_key);

		print_lock_name(lock_classes + class_id);
		printk("\n");
	}
}

static void print_collision(struct task_struct *curr,
			struct held_lock *hlock_next,
			struct lock_chain *chain)
{
	pr_warn("\n");
	pr_warn("============================\n");
	pr_warn("WARNING: chain_key collision\n");
	print_kernel_ident();
	pr_warn("----------------------------\n");
	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
	pr_warn("Hash chain already cached but the contents don't match!\n");

	pr_warn("Held locks:");
	print_chain_keys_held_locks(curr, hlock_next);

	pr_warn("Locks in cached chain:");
	print_chain_keys_chain(chain);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
#endif

/*
 * Checks whether the chain and the current held locks are consistent
 * in depth and also in content. If they are not it most likely means
 * that there was a collision during the calculation of the chain_key.
 * Returns: 0 not passed, 1 passed
 */
static int check_no_collision(struct task_struct *curr,
			struct held_lock *hlock,
			struct lock_chain *chain)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	int i, j, id;

	i = get_first_held_lock(curr, hlock);

	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
		print_collision(curr, hlock, chain);
		return 0;
	}

	for (j = 0; j < chain->depth - 1; j++, i++) {
		id = curr->held_locks[i].class_idx - 1;

		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
			print_collision(curr, hlock, chain);
			return 0;
		}
	}
#endif
	return 1;
}

/*
 * Given an index that is >= -1, return the index of the next lock chain.
 * Return -2 if there is no next lock chain.
 */
long lockdep_next_lockchain(long i)
{
	i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
	return i < ARRAY_SIZE(lock_chains) ? i : -2;
}

unsigned long lock_chain_count(void)
{
	return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
}

/* Must be called with the graph lock held. */
static struct lock_chain *alloc_lock_chain(void)
{
	int idx = find_first_zero_bit(lock_chains_in_use,
				      ARRAY_SIZE(lock_chains));

	if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
		return NULL;
	__set_bit(idx, lock_chains_in_use);
	return lock_chains + idx;
}

/*
 * Adds a dependency chain into chain hashtable. And must be called with
 * graph_lock held.
 *
 * Return 0 if fail, and graph_lock is released.
 * Return 1 if succeed, with graph_lock held.
 */
static inline int add_chain_cache(struct task_struct *curr,
				  struct held_lock *hlock,
				  u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct hlist_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;
	int i, j;

	/*
	 * The caller must hold the graph lock, ensure we've got IRQs
	 * disabled to make this an IRQ-safe lock.. for recursion reasons
	 * lockdep won't complain about its own locking errors.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	chain = alloc_lock_chain();
	if (!chain) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
		dump_stack();
		return 0;
	}
	chain->chain_key = chain_key;
	chain->irq_context = hlock->irq_context;
	i = get_first_held_lock(curr, hlock);
	chain->depth = curr->lockdep_depth + 1 - i;

	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));

	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
		chain->base = nr_chain_hlocks;
		for (j = 0; j < chain->depth - 1; j++, i++) {
			int lock_id = curr->held_locks[i].class_idx - 1;
			chain_hlocks[chain->base + j] = lock_id;
		}
		chain_hlocks[chain->base + j] = class - lock_classes;
		nr_chain_hlocks += chain->depth;
	} else {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
		dump_stack();
		return 0;
	}

	hlist_add_head_rcu(&chain->entry, hash_head);
	debug_atomic_inc(chain_lookup_misses);
	inc_chains();

	return 1;
}

/*
 * Look up a dependency chain. Must be called with either the graph lock or
 * the RCU read lock held.
 */
static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
{
	struct hlist_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;

	hlist_for_each_entry_rcu(chain, hash_head, entry) {
		if (READ_ONCE(chain->chain_key) == chain_key) {
			debug_atomic_inc(chain_lookup_hits);
			return chain;
		}
	}
	return NULL;
}

/*
 * If the key is not present yet in dependency chain cache then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On return with 1 graph_lock is held.)
 */
static inline int lookup_chain_cache_add(struct task_struct *curr,
					 struct held_lock *hlock,
					 u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct lock_chain *chain = lookup_chain_cache(chain_key);

	if (chain) {
cache_hit:
		if (!check_no_collision(curr, hlock, chain))
			return 0;

		if (very_verbose(class)) {
			printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%px] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
		}

		return 0;
	}

	if (very_verbose(class)) {
		printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
			(unsigned long long)chain_key, class->key, class->name);
	}

	if (!graph_lock())
		return 0;

	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
	chain = lookup_chain_cache(chain_key);
	if (chain) {
		graph_unlock();
		goto cache_hit;
	}

	if (!add_chain_cache(curr, hlock, chain_key))
		return 0;

	return 1;
}

static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
		struct held_lock *hlock, int chain_head, u64 chain_key)
{
	/*
	 * Trylock needs to maintain the stack of held locks, but it
	 * does not add new dependencies, because trylock can be done
	 * in any order.
	 *
	 * We look up the chain_key and do the O(N^2) check and update of
	 * the dependencies only if this is a new dependency chain.
	 * (If lookup_chain_cache_add() return with 1 it acquires
	 * graph_lock for us)
	 */
	if (!hlock->trylock && hlock->check &&
	    lookup_chain_cache_add(curr, hlock, chain_key)) {
		/*
		 * Check whether last held lock:
		 *
		 * - is irq-safe, if this lock is irq-unsafe
		 * - is softirq-safe, if this lock is hardirq-unsafe
		 *
		 * And check whether the new lock's dependency graph
		 * could lead back to the previous lock.
		 *
		 * any of these scenarios could lead to a deadlock. If
		 * all validations pass, add the new dependency:
		 */
		int ret = check_deadlock(curr, hlock, lock, hlock->read);

		if (!ret)
			return 0;
		/*
		 * Mark recursive read, as we jump over it when
		 * building dependencies (just like we jump over
		 * trylock entries):
		 */
		if (ret == 2)
			hlock->read = 2;
		/*
		 * Add dependency only if this lock is not the head
		 * of the chain, and if it's not a secondary read-lock:
		 */
		if (!chain_head && ret != 2) {
			if (!check_prevs_add(curr, hlock))
				return 0;
		}

		graph_unlock();
	} else {
		/* after lookup_chain_cache_add(): */
		if (unlikely(!debug_locks))
			return 0;
	}

	return 1;
}
#else
static inline int validate_chain(struct task_struct *curr,
		struct lockdep_map *lock, struct held_lock *hlock,
		int chain_head, u64 chain_key)
{
	return 1;
}
#endif

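/*
 * Illustrative sketch, not part of lockdep itself: why trylocks are skipped
 * above. A trylock taken in the "wrong" order backs off instead of blocking,
 * so no dependency edge is recorded for it. Identifiers are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static DEFINE_SPINLOCK(example_x);
static DEFINE_SPINLOCK(example_y);

static void example_trylock_path(void)
{
	spin_lock(&example_y);
	if (spin_trylock(&example_x)) {
		/* no example_y -> example_x dependency is recorded */
		spin_unlock(&example_x);
	}
	spin_unlock(&example_y);
}
#endif
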
/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	struct held_lock *hlock, *prev_hlock = NULL;
	unsigned int i;
	u64 chain_key = 0;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;
		if (chain_key != hlock->prev_chain_key) {
			debug_locks_off();
			/*
			 * We got mighty confused, our chain keys don't match
			 * with what we expect, someone trample on our task state?
			 */
			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
				curr->lockdep_depth, i,
				(unsigned long long)chain_key,
				(unsigned long long)hlock->prev_chain_key);
			return;
		}
		/*
		 * Whoops ran out of static storage again?
		 */
		if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
			return;

		if (prev_hlock && (prev_hlock->irq_context !=
							hlock->irq_context))
			chain_key = 0;
		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
		prev_hlock = hlock;
	}
	if (chain_key != curr->curr_chain_key) {
		debug_locks_off();
		/*
		 * More smoking hash instead of calculating it, damn see these
		 * numbers float.. I bet that a pink elephant stepped on my memory.
		 */
		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
			curr->lockdep_depth, i,
			(unsigned long long)chain_key,
			(unsigned long long)curr->curr_chain_key);
	}
#endif
}

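/*
 * Illustrative sketch, not part of lockdep itself: the general pattern used
 * by check_chain_key() - a rolling hash is maintained incrementally and
 * periodically recomputed from scratch to detect state corruption. It
 * re-uses iterate_chain_key() from earlier in this file; everything else
 * is hypothetical.
 */
#if 0	/* example only, kept out of the build */
static bool example_verify_chain(const int *class_idx, int depth,
				 u64 cached_key)
{
	u64 key = 0;
	int i;

	for (i = 0; i < depth; i++)
		key = iterate_chain_key(key, class_idx[i]);

	/* a mismatch means someone trampled on the cached state */
	return key == cached_key;
}
#endif
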
static void
print_usage_bug_scenario(struct held_lock *lock)
{
	struct lock_class *class = hlock_class(lock);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(class);
	printk(KERN_CONT ");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(class);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("================================\n");
	pr_warn("WARNING: inconsistent lock state\n");
	print_kernel_ident();
	pr_warn("--------------------------------\n");

	pr_warn("inconsistent {%s} -> {%s} usage.\n",
		usage_str[prev_bit], usage_str[new_bit]);

	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
		curr->comm, task_pid_nr(curr),
		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
		trace_hardirqs_enabled(curr),
		trace_softirqs_enabled(curr));

	print_lock(this);
	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);

	print_irqtrace_events(curr);
	pr_warn("\nother info that might help us debug this:\n");
	print_usage_bug_scenario(this);

	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

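/*
 * Illustrative sketch, not part of lockdep itself: the classic inconsistent
 * usage that the report above describes. Taking a lock with interrupts
 * enabled in process context while the same class is also taken from an
 * interrupt handler can deadlock if the interrupt arrives while the lock is
 * held. Identifiers are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static DEFINE_SPINLOCK(example_lock);

static void example_process_context(void)
{
	spin_lock(&example_lock);	/* {HARDIRQ-ON-W}: taken with irqs on */
	/* ... */
	spin_unlock(&example_lock);
}

static irqreturn_t example_irq_handler(int irq, void *dev)
{
	spin_lock(&example_lock);	/* {IN-HARDIRQ-W}: inconsistent usage */
	/* ... */
	spin_unlock(&example_lock);
	return IRQ_HANDLED;
}
#endif
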
/*
 * Print out an error if an invalid bit is set:
 */
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
		return print_usage_bug(curr, this, bad_bit, new_bit);
	return 1;
}

static int mark_lock(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit new_bit);

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)

/*
 * print irq inversion bug:
 */
static int
print_irq_inversion_bug(struct task_struct *curr,
			struct lock_list *root, struct lock_list *other,
			struct held_lock *this, int forwards,
			const char *irqclass)
{
	struct lock_list *entry = other;
	struct lock_list *middle = NULL;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("========================================================\n");
	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
	print_kernel_ident();
	pr_warn("--------------------------------------------------------\n");
	pr_warn("%s/%d just changed the state of lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(this);
	if (forwards)
		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
	else
		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
	print_lock_name(other->class);
	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");

	pr_warn("\nother info that might help us debug this:\n");

	/* Find a middle lock (if one exists) */
	depth = get_lock_depth(other);
	do {
		if (depth == 0 && (entry != root)) {
			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}
		middle = entry;
		entry = get_lock_parent(entry);
		depth--;
	} while (entry && entry != root && (depth >= 0));
	if (forwards)
		print_irq_lock_scenario(root, other,
			middle ? middle->class : root->class, other->class);
	else
		print_irq_lock_scenario(other, root,
			middle ? middle->class : other->class, root->class);

	lockdep_print_held_locks(curr);

	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
	if (!save_trace(&root->trace))
		return 0;
	print_shortest_lock_dependencies(other, root);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Prove that in the forwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit bit, const char *irqclass)
{
	int ret;
	struct lock_list root;
	struct lock_list *uninitialized_var(target_entry);

	root.parent = NULL;
	root.class = hlock_class(this);
	ret = find_usage_forwards(&root, bit, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, &root, target_entry,
					this, 1, irqclass);
}

/*
 * Prove that in the backwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
		      enum lock_usage_bit bit, const char *irqclass)
{
	int ret;
	struct lock_list root;
	struct lock_list *uninitialized_var(target_entry);

	root.parent = NULL;
	root.class = hlock_class(this);
	ret = find_usage_backwards(&root, bit, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, &root, target_entry,
					this, 0, irqclass);
}

void print_irqtrace_events(struct task_struct *curr)
{
	printk("irq event stamp: %u\n", curr->irq_events);
	printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
		(void *)curr->hardirq_enable_ip);
	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
		(void *)curr->hardirq_disable_ip);
	printk("softirqs last enabled at (%u): [<%px>] %pS\n",
		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
		(void *)curr->softirq_enable_ip);
	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
		(void *)curr->softirq_disable_ip);
}

static int HARDIRQ_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int SOFTIRQ_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#define STRICT_READ_CHECKS	1

static int (*state_verbose_f[])(struct lock_class *class) = {
#define LOCKDEP_STATE(__STATE) \
	__STATE##_verbose,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline int state_verbose(enum lock_usage_bit bit,
				struct lock_class *class)
{
	return state_verbose_f[bit >> 2](class);
}

typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
			     enum lock_usage_bit bit, const char *name);

static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int excl_bit = exclusive_bit(new_bit);
	int read = new_bit & LOCK_USAGE_READ_MASK;
	int dir = new_bit & LOCK_USAGE_DIR_MASK;

	/*
	 * mark USED_IN has to look forwards -- to ensure no dependency
	 * has ENABLED state, which would allow recursion deadlocks.
	 *
	 * mark ENABLED has to look backwards -- to ensure no dependee
	 * has USED_IN state, which, again, would allow recursion deadlocks.
	 */
	check_usage_f usage = dir ?
		check_usage_backwards : check_usage_forwards;

	/*
	 * Validate that this particular lock does not have conflicting
	 * usage states.
	 */
	if (!valid_state(curr, this, new_bit, excl_bit))
		return 0;

	/*
	 * Validate that the lock dependencies don't have conflicting usage
	 * states.
	 */
	if ((!read || !dir || STRICT_READ_CHECKS) &&
			!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
		return 0;

	/*
	 * Check for read in write conflicts
	 */
	if (!read) {
		if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
			return 0;

		if (STRICT_READ_CHECKS &&
			!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
				state_name(new_bit + LOCK_USAGE_READ_MASK)))
			return 0;
	}

	if (state_verbose(new_bit, hlock_class(this)))
		return 2;

	return 1;
}

/*
 * Mark all held locks with a usage bit:
 */
static int
mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
{
	struct held_lock *hlock;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		enum lock_usage_bit hlock_bit = base_bit;
		hlock = curr->held_locks + i;

		if (hlock->read)
			hlock_bit += LOCK_USAGE_READ_MASK;

		BUG_ON(hlock_bit >= LOCK_USAGE_STATES);

		if (!hlock->check)
			continue;

		if (!mark_lock(curr, hlock, hlock_bit))
			return 0;
	}

	return 1;
}

/*
 * Hardirqs will be enabled:
 */
static void __trace_hardirqs_on_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	/* we'll do an OFF -> ON transition: */
	curr->hardirqs_enabled = 1;

	/*
	 * We are going to turn hardirqs on, so set the
	 * usage bit for all held locks:
	 */
	if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
		return;
	/*
	 * If we have softirqs enabled, then set the usage
	 * bit for all held locks. (disabled hardirqs prevented
	 * this bit from being set before)
	 */
	if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
			return;

	curr->hardirq_enable_ip = ip;
	curr->hardirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(hardirqs_on_events);
}

void lockdep_hardirqs_on(unsigned long ip)
{
	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (unlikely(current->hardirqs_enabled)) {
		/*
		 * Neither irq nor preemption are disabled here
		 * so this is racy by nature but losing one hit
		 * in a stat is not a big deal.
		 */
		__debug_atomic_inc(redundant_hardirqs_on);
		return;
	}

	/*
	 * We're enabling irqs and according to our state above irqs weren't
	 * already enabled, yet we find the hardware thinks they are in fact
	 * enabled.. someone messed up their IRQ state tracing.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	/*
	 * See the fine text that goes along with this variable definition.
	 */
	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
		return;

	/*
	 * Can't allow enabling interrupts while in an interrupt handler,
	 * that's general bad form and such. Recursion, limited stack etc..
	 */
	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
		return;

	current->lockdep_recursion = 1;
	__trace_hardirqs_on_caller(ip);
	current->lockdep_recursion = 0;
}
NOKPROBE_SYMBOL(lockdep_hardirqs_on);

/*
 * Hardirqs were disabled:
 */
void lockdep_hardirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * So we're supposed to get called after you mask local IRQs, but for
	 * some reason the hardware doesn't quite think you did a proper job.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->hardirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->hardirqs_enabled = 0;
		curr->hardirq_disable_ip = ip;
		curr->hardirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(hardirqs_off_events);
	} else
		debug_atomic_inc(redundant_hardirqs_off);
}
NOKPROBE_SYMBOL(lockdep_hardirqs_off);

/*
 * Softirqs will be enabled:
 */
void trace_softirqs_on(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * We fancy IRQs being disabled here, see softirq.c, avoids
	 * funny state and nesting things.
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		debug_atomic_inc(redundant_softirqs_on);
		return;
	}

	current->lockdep_recursion = 1;
	/*
	 * We'll do an OFF -> ON transition:
	 */
	curr->softirqs_enabled = 1;
	curr->softirq_enable_ip = ip;
	curr->softirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(softirqs_on_events);
	/*
	 * We are going to turn softirqs on, so set the
	 * usage bit for all held locks, if hardirqs are
	 * enabled too:
	 */
	if (curr->hardirqs_enabled)
		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
	current->lockdep_recursion = 0;
}

/*
 * Softirqs were disabled:
 */
void trace_softirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	/*
	 * We fancy IRQs being disabled here, see softirq.c
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->softirqs_enabled = 0;
		curr->softirq_disable_ip = ip;
		curr->softirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(softirqs_off_events);
		/*
		 * Whoops, we wanted softirqs off, so why aren't they?
		 */
		DEBUG_LOCKS_WARN_ON(!softirq_count());
	} else
		debug_atomic_inc(redundant_softirqs_off);
}

static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
	if (!hlock->trylock) {
		if (hlock->read) {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
					return 0;
		}
	}
	if (!hlock->hardirqs_off) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ))
					return 0;
		}
	}

	return 1;
}

static inline unsigned int task_irq_context(struct task_struct *task)
{
	return 2 * !!task->hardirq_context + !!task->softirq_context;
}

static int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	unsigned int depth = curr->lockdep_depth;

	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context)
			return 1;
	}
	return 0;
}

#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */

static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAGS */
	return 1;
}

static inline int mark_irqflags(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 1;
}

static inline unsigned int task_irq_context(struct task_struct *task)
{
	return 0;
}

static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didn't race:
	 */
	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}

	hlock_class(this)->usage_mask |= new_mask;

	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;

	switch (new_bit) {
#define LOCKDEP_STATE(__STATE)			\
	case LOCK_USED_IN_##__STATE:		\
	case LOCK_USED_IN_##__STATE##_READ:	\
	case LOCK_ENABLED_##__STATE:		\
	case LOCK_ENABLED_##__STATE##_READ:
#include "lockdep_states.h"
#undef LOCKDEP_STATE
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
		break;
	case LOCK_USED:
		debug_atomic_dec(nr_unused_locks);
		break;
	default:
		if (!debug_locks_off_graph_unlock())
			return 0;
		WARN_ON(1);
		return 0;
	}

	graph_unlock();

	/*
	 * We must printk outside of the graph_lock:
	 */
	if (ret == 2) {
		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
		print_lock(this);
		print_irqtrace_events(curr);
		dump_stack();
	}

	return ret;
}

/*
 * Initialize a lock instance's lock-class mapping info:
 */
void lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	int i;

	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		lock->class_cache[i] = NULL;

#ifdef CONFIG_LOCK_STAT
	lock->cpu = raw_smp_processor_id();
#endif

	/*
	 * Can't be having no nameless bastards around this place!
	 */
	if (DEBUG_LOCKS_WARN_ON(!name)) {
		lock->name = "NULL";
		return;
	}

	lock->name = name;

	/*
	 * No key, no joy, we need to hash something.
	 */
	if (DEBUG_LOCKS_WARN_ON(!key))
		return;
	/*
	 * Sanity check, the lock-class key must either have been allocated
	 * statically or must have been registered as a dynamic key.
	 */
	if (!static_obj(key) && !is_dynamic_key(key)) {
		if (debug_locks)
			printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
		DEBUG_LOCKS_WARN_ON(1);
		return;
	}
	lock->key = key;

	if (unlikely(!debug_locks))
		return;

	if (subclass) {
		unsigned long flags;

		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
			return;

		raw_local_irq_save(flags);
		current->lockdep_recursion = 1;
		register_lock_class(lock, subclass, 1);
		current->lockdep_recursion = 0;
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(lockdep_init_map);

struct lock_class_key __lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);

static int
print_lock_nested_lock_not_held(struct task_struct *curr,
				struct held_lock *hlock,
				unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("==================================\n");
	pr_warn("WARNING: Nested lock was not taken\n");
	print_kernel_ident();
	pr_warn("----------------------------------\n");

	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
	print_lock(hlock);

	pr_warn("\nbut this task is not holding:\n");
	pr_warn("%s\n", hlock->nest_lock->name);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int __lock_is_held(const struct lockdep_map *lock, int read);

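/*
 * Illustrative sketch, not part of lockdep itself: the nest_lock annotation
 * the report above complains about when it is missing. Taking many instances
 * of one class is allowed if an outer lock serializing them is held and is
 * passed via the _nest_lock variant. Identifiers other than the mutex API
 * are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static DEFINE_MUTEX(example_group_lock);

static void example_lock_all(struct mutex *members, int n)
{
	int i;

	mutex_lock(&example_group_lock);
	for (i = 0; i < n; i++)
		mutex_lock_nest_lock(&members[i], &example_group_lock);
}
#endif
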
/*
 * This gets called for every mutex_lock*()/spin_lock*() operation.
 * We maintain the dependency maps and validate the locking attempt:
 *
 * The callers must make sure that IRQs are disabled before calling it,
 * otherwise we could get an interrupt which would want to take locks,
 * which would end up in lockdep again.
 */
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
			  struct lockdep_map *nest_lock, unsigned long ip,
			  int references, int pin_count)
{
	struct task_struct *curr = current;
	struct lock_class *class = NULL;
	struct held_lock *hlock;
	unsigned int depth;
	int chain_head = 0;
	int class_idx;
	u64 chain_key;

	if (unlikely(!debug_locks))
		return 0;

	if (!prove_locking || lock->key == &__lockdep_no_validate__)
		check = 0;

	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		class = lock->class_cache[subclass];
	/*
	 * Not cached?
	 */
	if (unlikely(!class)) {
		class = register_lock_class(lock, subclass, 0);
		if (!class)
			return 0;
	}

	debug_class_ops_inc(class);

	if (very_verbose(class)) {
		printk("\nacquire class [%px] %s", class->key, class->name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		printk(KERN_CONT "\n");
		dump_stack();
	}

	/*
	 * Add the lock to the list of currently held locks.
	 * (we dont increase the depth just yet, up until the
	 * dependency checks are done)
	 */
	depth = curr->lockdep_depth;
	/*
	 * Ran out of static storage for our per-task lock stack again have we?
	 */
	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
		return 0;

	class_idx = class - lock_classes + 1;

	if (depth) {
		hlock = curr->held_locks + depth - 1;
		if (hlock->class_idx == class_idx && nest_lock) {
			if (hlock->references) {
				/*
				 * Check: unsigned int references:12, overflow.
				 */
				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
					return 0;

				hlock->references++;
			} else {
				hlock->references = 2;
			}

			return 1;
		}
	}

	hlock = curr->held_locks + depth;
	/*
	 * Plain impossible, we just registered it and checked it weren't no
	 * NULL like.. I bet this mushroom I ate was good!
	 */
	if (DEBUG_LOCKS_WARN_ON(!class))
		return 0;
	hlock->class_idx = class_idx;
	hlock->acquire_ip = ip;
	hlock->instance = lock;
	hlock->nest_lock = nest_lock;
	hlock->irq_context = task_irq_context(curr);
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
	hlock->hardirqs_off = !!hardirqs_off;
	hlock->references = references;
#ifdef CONFIG_LOCK_STAT
	hlock->waittime_stamp = 0;
	hlock->holdtime_stamp = lockstat_clock();
#endif
	hlock->pin_count = pin_count;

	if (check && !mark_irqflags(curr, hlock))
		return 0;

	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;

	/*
	 * Calculate the chain hash: it's the combined hash of all the
	 * lock keys along the dependency chain. We save the hash value
	 * at every step so that we can get the current hash easily
	 * after unlock. The chain hash is then used to cache dependency
	 * results.
	 *
	 * The 'key ID' is what is the most compact key value to drive
	 * the hash, not class->key.
	 */
	/*
	 * Whoops, we did it again.. ran straight out of our static allocation.
	 */
	if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
		return 0;

	chain_key = curr->curr_chain_key;
	if (!depth) {
		/*
		 * How can we have a chain hash when we ain't got no keys?!
		 */
		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
			return 0;
		chain_head = 1;
	}

	hlock->prev_chain_key = chain_key;
	if (separate_irq_context(curr, hlock)) {
		chain_key = 0;
		chain_head = 1;
	}
	chain_key = iterate_chain_key(chain_key, class_idx);

	if (nest_lock && !__lock_is_held(nest_lock, -1))
		return print_lock_nested_lock_not_held(curr, hlock, ip);

	if (!debug_locks_silent) {
		WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
		WARN_ON_ONCE(!hlock_class(hlock)->key);
	}

	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
		return 0;

	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!debug_locks))
		return 0;
#endif
	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
		debug_locks_off();
		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
		printk(KERN_DEBUG "depth: %i max: %lu!\n",
		       curr->lockdep_depth, MAX_LOCK_DEPTH);

		lockdep_print_held_locks(current);
		debug_show_all_locks();
		dump_stack();

		return 0;
	}

	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
		max_lockdep_depth = curr->lockdep_depth;

	return 1;
}

static int
print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("=====================================\n");
	pr_warn("WARNING: bad unlock balance detected!\n");
	print_kernel_ident();
	pr_warn("-------------------------------------\n");
	pr_warn("%s/%d is trying to release lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	pr_cont(") at:\n");
	print_ip_sym(ip);
	pr_warn("but there are no more locks to release!\n");
	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int match_held_lock(const struct held_lock *hlock,
					const struct lockdep_map *lock)
{
	if (hlock->instance == lock)
		return 1;

	if (hlock->references) {
		const struct lock_class *class = lock->class_cache[0];

		if (!class)
			class = look_up_lock_class(lock, 0);

		/*
		 * If look_up_lock_class() failed to find a class, we're trying
		 * to test if we hold a lock that has never yet been acquired.
		 * Clearly if the lock hasn't been acquired _ever_, we're not
		 * holding it either, so report failure.
		 */
		if (!class)
			return 0;

		/*
		 * References, but not a lock we're actually ref-counting?
		 * State got messed up, follow the sites that change ->references
		 * and try to make sense of it.
		 */
		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
			return 0;

		if (hlock->class_idx == class - lock_classes + 1)
			return 1;
	}

	return 0;
}

/* @depth must not be zero */
static struct held_lock *find_held_lock(struct task_struct *curr,
					struct lockdep_map *lock,
					unsigned int depth, int *idx)
{
	struct held_lock *ret, *hlock, *prev_hlock;
	int i;

	i = depth - 1;
	hlock = curr->held_locks + i;
	ret = hlock;
	if (match_held_lock(hlock, lock))
		goto out;

	ret = NULL;
	for (i--, prev_hlock = hlock--;
	     i >= 0;
	     i--, prev_hlock = hlock--) {
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock->irq_context != hlock->irq_context) {
			ret = NULL;
			break;
		}
		if (match_held_lock(hlock, lock)) {
			ret = hlock;
			break;
		}
	}

out:
	*idx = i;
	return ret;
}

static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
				int idx)
{
	struct held_lock *hlock;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
		if (!__lock_acquire(hlock->instance,
				    hlock_class(hlock)->subclass,
				    hlock->trylock,
				    hlock->read, hlock->check,
				    hlock->hardirqs_off,
				    hlock->nest_lock, hlock->acquire_ip,
				    hlock->references, hlock->pin_count))
			return 1;
	}
	return 0;
}

static int
__lock_set_class(struct lockdep_map *lock, const char *name,
		 struct lock_class_key *key, unsigned int subclass,
		 unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class *class;
	unsigned int depth;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	depth = curr->lockdep_depth;
	/*
	 * This function is about (re)setting the class of a held lock,
	 * yet we're not actually holding any locks. Naughty user!
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock)
		return print_unlock_imbalance_bug(curr, lock, ip);

	lockdep_init_map(lock, name, key, 0);
	class = register_lock_class(lock, subclass, 0);
	hlock->class_idx = class - lock_classes + 1;

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	if (reacquire_held_locks(curr, depth, i))
		return 0;

	/*
	 * I took it apart and put it back together again, except now I have
	 * these 'spare' parts.. where shall I put them.
	 */
	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
		return 0;
	return 1;
}

static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned int depth;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	depth = curr->lockdep_depth;
	/*
	 * This function is about (re)setting the class of a held lock,
	 * yet we're not actually holding any locks. Naughty user!
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock)
		return print_unlock_imbalance_bug(curr, lock, ip);

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	WARN(hlock->read, "downgrading a read lock");
	hlock->read = 1;
	hlock->acquire_ip = ip;

	if (reacquire_held_locks(curr, depth, i))
		return 0;

	/*
	 * I took it apart and put it back together again, except now I have
	 * these 'spare' parts.. where shall I put them.
	 */
	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
		return 0;
	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()).
 *
 * @nested is an hysterical artifact, needs a tree wide cleanup.
 */
static int
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned int depth;
	int i;

	if (unlikely(!debug_locks))
		return 0;

	depth = curr->lockdep_depth;
	/*
	 * So we're all set to release this lock.. wait what lock? We don't
	 * own any locks, you've been drinking again?
	 */
	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
		return print_unlock_imbalance_bug(curr, lock, ip);

	/*
	 * Check whether the lock exists in the current stack
	 * of held locks:
	 */
	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock)
		return print_unlock_imbalance_bug(curr, lock, ip);

	if (hlock->instance == lock)
		lock_release_holdtime(hlock);

	WARN(hlock->pin_count, "releasing a pinned lock\n");

	if (hlock->references) {
		hlock->references--;
		if (hlock->references) {
			/*
			 * We had, and after removing one, still have
			 * references, the current lock stack is still
			 * valid. We're done!
			 */
			return 1;
		}
	}

	/*
	 * We have the right lock to unlock, 'hlock' points to it.
	 * Now we remove it from the stack, and add back the other
	 * entries (if any), recalculating the hash along the way:
	 */

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	/*
	 * The most likely case is when the unlock is on the innermost
	 * lock. In this case, we are done!
	 */
	if (i == depth-1)
		return 1;

	if (reacquire_held_locks(curr, depth, i + 1))
		return 0;

	/*
	 * We had N bottles of beer on the wall, we drank one, but now
	 * there's not N-1 bottles of beer left on the wall...
	 */
	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);

	/*
	 * Since reacquire_held_locks() would have called check_chain_key()
	 * indirectly via __lock_acquire(), we don't need to do it again
	 * on return.
	 */
	return 0;
}

static nokprobe_inline
int __lock_is_held(const struct lockdep_map *lock, int read)
{
	struct task_struct *curr = current;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			if (read == -1 || hlock->read == read)
				return 1;

			return 0;
		}
	}

	return 0;
}

static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
{
	struct pin_cookie cookie = NIL_COOKIE;
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return cookie;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			/*
			 * Grab 16bits of randomness; this is sufficient to not
			 * be guessable and still allows some pin nesting in
			 * our u32 pin_count.
			 */
			cookie.val = 1 + (prandom_u32() >> 16);
			hlock->pin_count += cookie.val;
			return cookie;
		}
	}

	WARN(1, "pinning an unheld lock\n");
	return cookie;
}

static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			hlock->pin_count += cookie.val;
			return;
		}
	}

	WARN(1, "pinning an unheld lock\n");
}

static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	struct task_struct *curr = current;
	int i;

	if (unlikely(!debug_locks))
		return;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (match_held_lock(hlock, lock)) {
			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
				return;

			hlock->pin_count -= cookie.val;

			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
				hlock->pin_count = 0;

			return;
		}
	}

	WARN(1, "unpinning an unheld lock\n");
}

/*
 * Check whether we follow the irq-flags state precisely:
 */
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
    defined(CONFIG_TRACE_IRQFLAGS)
	if (!debug_locks)
		return;

	if (irqs_disabled_flags(flags)) {
		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}

	/*
	 * We dont accurately track softirq state in e.g.
	 * hardirq contexts (such as on 4KSTACKS), so only
	 * check if not in hardirq contexts:
	 */
	if (!hardirq_count()) {
		if (softirq_count()) {
			/* like the above, but with softirqs */
			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
		} else {
			/* lick the above, does it taste good? */
			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
		}
	}

	if (!debug_locks)
		print_irqtrace_events(current);
#endif
}

void lock_set_class(struct lockdep_map *lock, const char *name,
		    struct lock_class_key *key, unsigned int subclass,
		    unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	current->lockdep_recursion = 1;
	check_flags(flags);
	if (__lock_set_class(lock, name, key, subclass, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);

void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	current->lockdep_recursion = 1;
	check_flags(flags);
	if (__lock_downgrade(lock, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_downgrade);

/*
 * We are not always called with irqs disabled - do that here,
 * and also avoid lockdep recursion:
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);

void lock_release(struct lockdep_map *lock, int nested,
			  unsigned long ip)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	trace_lock_release(lock, ip);
	if (__lock_release(lock, nested, ip))
		check_chain_key(current);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);

int lock_is_held_type(const struct lockdep_map *lock, int read)
{
	unsigned long flags;
	int ret = 0;

	if (unlikely(current->lockdep_recursion))
		return 1; /* avoid false negative lockdep_assert_held() */

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	ret = __lock_is_held(lock, read);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(lock_is_held_type);
NOKPROBE_SYMBOL(lock_is_held_type);

struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
{
	struct pin_cookie cookie = NIL_COOKIE;
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return cookie;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	cookie = __lock_pin_lock(lock);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);

	return cookie;
}
EXPORT_SYMBOL_GPL(lock_pin_lock);

void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_repin_lock(lock, cookie);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);

void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	__lock_unpin_lock(lock, cookie);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);

#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	pr_warn("\n");
	pr_warn("=================================\n");
	pr_warn("WARNING: bad contention detected!\n");
	print_kernel_ident();
	pr_warn("---------------------------------\n");
	pr_warn("%s/%d is trying to contend lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	pr_cont(") at:\n");
	print_ip_sym(ip);
	pr_warn("but there are no locks held!\n");
	pr_warn("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	int i, contention_point, contending_point;

	depth = curr->lockdep_depth;
	/*
	 * Whee, we contended on this lock, except it seems we're not
	 * actually trying to acquire anything much at all..
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_lock_contention_bug(curr, lock, ip);
		return;
	}

	if (hlock->instance != lock)
		return;

	hlock->waittime_stamp = lockstat_clock();

	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
	contending_point = lock_point(hlock_class(hlock)->contending_point,
				      lock->ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (contention_point < LOCKSTAT_POINTS)
		stats->contention_point[contention_point]++;
	if (contending_point < LOCKSTAT_POINTS)
		stats->contending_point[contending_point]++;
	if (lock->cpu != smp_processor_id())
		stats->bounces[bounce_contended + !!hlock->read]++;
}

static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	struct lock_class_stats *stats;
	unsigned int depth;
	u64 now, waittime = 0;
	int i, cpu;

	depth = curr->lockdep_depth;
	/*
	 * Yay, we acquired ownership of this lock we didn't try to
	 * acquire, how the heck did that happen?
	 */
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return;

	hlock = find_held_lock(curr, lock, depth, &i);
	if (!hlock) {
		print_lock_contention_bug(curr, lock, _RET_IP_);
		return;
	}

	if (hlock->instance != lock)
		return;

	cpu = smp_processor_id();
	if (hlock->waittime_stamp) {
		now = lockstat_clock();
		waittime = now - hlock->waittime_stamp;
		hlock->holdtime_stamp = now;
	}

	trace_lock_acquired(lock, ip);

	stats = get_lock_stats(hlock_class(hlock));
	if (waittime) {
		if (hlock->read)
			lock_time_inc(&stats->read_waittime, waittime);
		else
			lock_time_inc(&stats->write_waittime, waittime);
	}
	if (lock->cpu != cpu)
		stats->bounces[bounce_acquired + !!hlock->read]++;

	lock->cpu = cpu;
	lock->ip = ip;
}

void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat || !debug_locks))
		return;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	trace_lock_contended(lock, ip);
	__lock_contended(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);

void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat || !debug_locks))
		return;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	__lock_acquired(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
#endif

/*
 * Used by the testsuite, sanitize the validator state
 * after a simulated failure:
 */

void lockdep_reset(void)
{
	unsigned long flags;
	int i;

	raw_local_irq_save(flags);
	current->curr_chain_key = 0;
	current->lockdep_depth = 0;
	current->lockdep_recursion = 0;
	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
	nr_hardirq_chains = 0;
	nr_softirq_chains = 0;
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
		INIT_HLIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}

/* Remove a class from a lock chain. Must be called with the graph lock held. */
static void remove_class_from_lock_chain(struct pending_free *pf,
					 struct lock_chain *chain,
					 struct lock_class *class)
{
#ifdef CONFIG_PROVE_LOCKING
	struct lock_chain *new_chain;
	u64 chain_key;
	int i;

	for (i = chain->base; i < chain->base + chain->depth; i++) {
		if (chain_hlocks[i] != class - lock_classes)
			continue;
		/* The code below leaks one chain_hlock[] entry. */
		if (--chain->depth > 0) {
			memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
				(chain->base + chain->depth - i) *
				sizeof(chain_hlocks[0]));
		}
		/*
		 * Each lock class occurs at most once in a lock chain so once
		 * we found a match we can break out of this loop.
		 */
		goto recalc;
	}
	/* Since the chain has not been modified, return. */
	return;

recalc:
	chain_key = 0;
	for (i = chain->base; i < chain->base + chain->depth; i++)
		chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
	if (chain->depth && chain->chain_key == chain_key)
		return;
	/* Overwrite the chain key for concurrent RCU readers. */
	WRITE_ONCE(chain->chain_key, chain_key);
	/*
	 * Note: calling hlist_del_rcu() from inside a
	 * hlist_for_each_entry_rcu() loop is safe.
	 */
	hlist_del_rcu(&chain->entry);
	__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
	if (chain->depth == 0)
		return;
	/*
	 * If the modified lock chain matches an existing lock chain, drop
	 * the modified lock chain.
	 */
	if (lookup_chain_cache(chain_key))
		return;
	new_chain = alloc_lock_chain();
	if (WARN_ON_ONCE(!new_chain)) {
		debug_locks_off();
		return;
	}
	*new_chain = *chain;
	hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
#endif
}

/* Must be called with the graph lock held. */
static void remove_class_from_lock_chains(struct pending_free *pf,
					  struct lock_class *class)
{
	struct lock_chain *chain;
	struct hlist_head *head;
	int i;

	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
		head = chainhash_table + i;
		hlist_for_each_entry_rcu(chain, head, entry) {
			remove_class_from_lock_chain(pf, chain, class);
		}
	}
}

/*
 * Remove all references to a lock class. The caller must hold the graph lock.
 */
static void zap_class(struct pending_free *pf, struct lock_class *class)
{
	struct lock_list *entry;
	int i;

	WARN_ON_ONCE(!class->key);

	/*
	 * Remove all dependencies this lock is
	 * involved in:
	 */
	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		entry = list_entries + i;
		if (entry->class != class && entry->links_to != class)
			continue;
		__clear_bit(i, list_entries_in_use);
		nr_list_entries--;
		list_del_rcu(&entry->entry);
	}
	if (list_empty(&class->locks_after) &&
	    list_empty(&class->locks_before)) {
		list_move_tail(&class->lock_entry, &pf->zapped);
		hlist_del_rcu(&class->hash_entry);
		WRITE_ONCE(class->key, NULL);
		WRITE_ONCE(class->name, NULL);
		nr_lock_classes--;
	} else {
		WARN_ONCE(true, "%s() failed for class %s\n", __func__,
			  class->name);
	}

	remove_class_from_lock_chains(pf, class);
}

static void reinit_class(struct lock_class *class)
{
	void *const p = class;
	const unsigned int offset = offsetof(struct lock_class, key);

	WARN_ON_ONCE(!class->lock_entry.next);
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	WARN_ON_ONCE(!list_empty(&class->locks_before));
	memset(p + offset, 0, sizeof(*class) - offset);
	WARN_ON_ONCE(!class->lock_entry.next);
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	WARN_ON_ONCE(!list_empty(&class->locks_before));
}

static inline int within(const void *addr, void *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}

static bool inside_selftest(void)
{
	return current == lockdep_selftest_task_struct;
}

/* The caller must hold the graph lock. */
static struct pending_free *get_pending_free(void)
{
	return delayed_free.pf + delayed_free.index;
}

static void free_zapped_rcu(struct rcu_head *cb);

/*
 * Schedule an RCU callback if no RCU callback is pending. Must be called with
 * the graph lock held.
 */
static void call_rcu_zapped(struct pending_free *pf)
{
	WARN_ON_ONCE(inside_selftest());

	if (list_empty(&pf->zapped))
		return;

	if (delayed_free.scheduled)
		return;

	delayed_free.scheduled = true;

	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
	delayed_free.index ^= 1;

	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
}

/* The caller must hold the graph lock. May be called from RCU context. */
static void __free_zapped_classes(struct pending_free *pf)
{
	struct lock_class *class;

	check_data_structures();

	list_for_each_entry(class, &pf->zapped, lock_entry)
		reinit_class(class);

	list_splice_init(&pf->zapped, &free_lock_classes);

#ifdef CONFIG_PROVE_LOCKING
	bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
		      pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
	bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
#endif
}

static void free_zapped_rcu(struct rcu_head *ch)
{
	struct pending_free *pf;
	unsigned long flags;

	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
		return;

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto out_irq;

	/* closed head */
	pf = delayed_free.pf + (delayed_free.index ^ 1);
	__free_zapped_classes(pf);
	delayed_free.scheduled = false;

	/*
	 * If there's anything on the open list, close and start a new callback.
	 */
	call_rcu_zapped(delayed_free.pf + delayed_free.index);

	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);
}

/*
 * Remove all lock classes from the class hash table and from the
 * all_lock_classes list whose key or name is in the address range [start,
 * start + size). Move these lock classes to the zapped_classes list. Must
 * be called with the graph lock held.
 */
static void __lockdep_free_key_range(struct pending_free *pf, void *start,
				     unsigned long size)
{
	struct lock_class *class;
	struct hlist_head *head;
	int i;

	/* Unhash all classes that were created by a module. */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			if (!within(class->key, start, size) &&
			    !within(class->name, start, size))
				continue;
			zap_class(pf, class);
		}
	}
}

/*
 * Used in module.c to remove lock classes from memory that is going to be
 * freed; and possibly re-used by other modules.
 *
 * We will have had one synchronize_rcu() before getting here, so we're
 * guaranteed nobody will look up these exact classes -- they're properly dead
 * but still allocated.
 */
static void lockdep_free_key_range_reg(void *start, unsigned long size)
{
	struct pending_free *pf;
	unsigned long flags;
	int locked;

	init_data_structures_once();

	raw_local_irq_save(flags);
	locked = graph_lock();
	if (!locked)
		goto out_irq;

	pf = get_pending_free();
	__lockdep_free_key_range(pf, start, size);
	call_rcu_zapped(pf);

	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);

	/*
	 * Wait for any possible iterators from look_up_lock_class() to pass
	 * before continuing to free the memory they refer to.
	 */
	synchronize_rcu();
}

/*
 * Free all lockdep keys in the range [start, start+size). Does not sleep.
 * Ignores debug_locks. Must only be used by the lockdep selftests.
 */
static void lockdep_free_key_range_imm(void *start, unsigned long size)
{
	struct pending_free *pf = delayed_free.pf;
	unsigned long flags;

	init_data_structures_once();

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	__lockdep_free_key_range(pf, start, size);
	__free_zapped_classes(pf);
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}

void lockdep_free_key_range(void *start, unsigned long size)
{
	init_data_structures_once();

	if (inside_selftest())
		lockdep_free_key_range_imm(start, size);
	else
		lockdep_free_key_range_reg(start, size);
}

/*
 * Check whether any element of the @lock->class_cache[] array refers to a
 * registered lock class. The caller must hold either the graph lock or the
 * RCU read lock.
 */
static bool lock_class_cache_is_registered(struct lockdep_map *lock)
{
	struct lock_class *class;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
		hlist_for_each_entry_rcu(class, head, hash_entry) {
			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
				if (lock->class_cache[j] == class)
					return true;
		}
	}

	return false;
}
/* The caller must hold the graph lock. Does not sleep. */
static void __lockdep_reset_lock(struct pending_free *pf,
				 struct lockdep_map *lock)
{
	struct lock_class *class;
	int j;

	/*
	 * Remove all classes this lock might have:
	 */
	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
		/*
		 * If the class exists we look it up and zap it:
		 */
		class = look_up_lock_class(lock, j);
		if (class)
			zap_class(pf, class);
	}
	/*
	 * Debug check: in the end all mapped classes should
	 * be gone.
	 */
	if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
		debug_locks_off();
}
/*
 * Remove all information lockdep has about a lock if debug_locks == 1. Free
 * released data structures from RCU context.
 */
static void lockdep_reset_lock_reg(struct lockdep_map *lock)
{
	struct pending_free *pf;
	unsigned long flags;
	int locked;

	raw_local_irq_save(flags);
	locked = graph_lock();
	if (!locked)
		goto out_irq;

	pf = get_pending_free();
	__lockdep_reset_lock(pf, lock);
	call_rcu_zapped(pf);

	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);
}
/*
 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
 * lockdep selftests.
 */
static void lockdep_reset_lock_imm(struct lockdep_map *lock)
{
	struct pending_free *pf = delayed_free.pf;
	unsigned long flags;

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	__lockdep_reset_lock(pf, lock);
	__free_zapped_classes(pf);
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);
}
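/*
 * As with lockdep_free_key_range(), the wrapper below uses the immediate
 * variant inside the lockdep selftests and the RCU-deferred variant
 * otherwise.
 */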
void lockdep_reset_lock(struct lockdep_map *lock)
{
	init_data_structures_once();

	if (inside_selftest())
		lockdep_reset_lock_imm(lock);
	else
		lockdep_reset_lock_reg(lock);
}
/* Unregister a dynamically allocated key. */
void lockdep_unregister_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head = keyhashentry(key);
	struct lock_class_key *k;
	struct pending_free *pf;
	unsigned long flags;
	bool found = false;

	might_sleep();

	if (WARN_ON_ONCE(static_obj(key)))
		return;

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto out_irq;

	pf = get_pending_free();
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			hlist_del_rcu(&k->hash_entry);
			found = true;
			break;
		}
	}
	WARN_ON_ONCE(!found);
	__lockdep_free_key_range(pf, key, 1);
	call_rcu_zapped(pf);
	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);

	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(lockdep_unregister_key);
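/*
 * Illustrative pairing for a dynamically allocated key (a sketch under
 * assumed names, not code from this file): a structure that embeds both a
 * lock and its key registers the key before first use and unregisters it -
 * which may sleep - before freeing the memory:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		struct lock_class_key key;
 *	};
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	lockdep_register_key(&f->key);
 *	spin_lock_init(&f->lock);
 *	lockdep_set_class(&f->lock, &f->key);
 *	...
 *	lockdep_unregister_key(&f->key);
 *	kfree(f);
 */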
void __init lockdep_init(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %zu kB\n",
	       (sizeof(lock_classes) +
		sizeof(classhash_table) +
		sizeof(list_entries) +
		sizeof(list_entries_in_use) +
		sizeof(chainhash_table) +
		sizeof(delayed_free)
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(lock_chains)
		+ sizeof(lock_chains_in_use)
		+ sizeof(chain_hlocks)
#endif
		) / 1024
		);

	printk(" per task-struct memory footprint: %zu bytes\n",
	       sizeof(((struct task_struct *)NULL)->held_locks));
}
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("=========================\n");
	pr_warn("WARNING: held lock freed!\n");
	print_kernel_ident();
	pr_warn("-------------------------\n");
	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
static inline int not_in_range(const void* mem_from, unsigned long mem_len,
				const void* lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}
/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	raw_local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
				 sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
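/*
 * The kind of bug the check above catches (an illustrative sketch, not code
 * from this file): freeing an object while one of its embedded locks is
 * still held.
 *
 *	struct obj *o = kmalloc(sizeof(*o), GFP_KERNEL);
 *
 *	spin_lock(&o->lock);
 *	kfree(o);		<- triggers the "held lock freed!" splat
 *	spin_unlock(&o->lock);
 */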
static void print_held_locks_bug(void)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("====================================\n");
	pr_warn("WARNING: %s/%d still has locks held!\n",
	       current->comm, task_pid_nr(current));
	print_kernel_ident();
	pr_warn("------------------------------------\n");
	lockdep_print_held_locks(current);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
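/*
 * Warn if the current task still holds any lock. Typically called on paths
 * where the task is about to stop executing kernel code for good, for
 * example on task exit (the call sites live outside this file).
 */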
void debug_check_no_locks_held(void)
{
	if (unlikely(current->lockdep_depth > 0))
		print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
void debug_show_all_locks(void)
{
	struct task_struct *g, *p;

	if (unlikely(!debug_locks)) {
		pr_warn("INFO: lockdep is turned off.\n");
		return;
	}
	pr_warn("\nShowing all locks held in the system:\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (!p->lockdep_depth)
			continue;
		lockdep_print_held_locks(p);
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
	}
	rcu_read_unlock();

	pr_warn("\n");
	pr_warn("=============================================\n\n");
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
/*
 * Careful: only use this function if you are sure that
 * the task cannot run in parallel!
 */
void debug_show_held_locks(struct task_struct *task)
{
	if (unlikely(!debug_locks)) {
		printk("INFO: lockdep is turned off.\n");
		return;
	}
	lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
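/*
 * Invoked on the return-to-user-space path (the call sites live in the
 * syscall exit code, outside this file): complain if the task is about to
 * re-enter user space while still holding kernel locks, then wipe the
 * per-syscall lock state.
 */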
asmlinkage __visible void lockdep_sys_exit(void)
{
	struct task_struct *curr = current;

	if (unlikely(curr->lockdep_depth)) {
		if (!debug_locks_off())
			return;
		pr_warn("\n");
		pr_warn("================================================\n");
		pr_warn("WARNING: lock held when returning to user space!\n");
		print_kernel_ident();
		pr_warn("------------------------------------------------\n");
		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
			curr->comm, curr->pid);
		lockdep_print_held_locks(curr);
	}

	/*
	 * The lock history for each syscall should be independent. So wipe the
	 * slate clean on return to userspace.
	 */
	lockdep_invariant_state(false);
}
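/*
 * Print a "suspicious RCU usage" splat. @file and @line identify the call
 * site and @s describes what looked suspicious; RCU's lockdep integration
 * (e.g. the RCU_LOCKDEP_WARN() machinery) reports through this function.
 */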
void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
	struct task_struct *curr = current;

	/* Note: the following can be executed concurrently, so be careful. */
	pr_warn("\n");
	pr_warn("=============================\n");
	pr_warn("WARNING: suspicious RCU usage\n");
	print_kernel_ident();
	pr_warn("-----------------------------\n");
	pr_warn("%s:%d %s!\n", file, line, s);
	pr_warn("\nother info that might help us debug this:\n\n");
	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
	       !rcu_lockdep_current_cpu_online()
			? "RCU used illegally from offline CPU!\n"
			: !rcu_is_watching()
				? "RCU used illegally from idle CPU!\n"
				: "",
	       rcu_scheduler_active, debug_locks);

	/*
	 * If a CPU is in the RCU-free window in idle (ie: in the section
	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
	 * considers that CPU to be in an "extended quiescent state",
	 * which means that RCU will be completely ignoring that CPU.
	 * Therefore, rcu_read_lock() and friends have absolutely no
	 * effect on a CPU running in that state. In other words, even if
	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
	 * delete data structures out from under it. RCU really has no
	 * choice here: we need to keep an RCU-free window in idle where
	 * the CPU may possibly enter into low power mode. This way we can
	 * notice an extended quiescent state to other CPUs that started a grace
	 * period. Otherwise we would delay any grace period as long as we run
	 * in the idle task.
	 *
	 * So complain bitterly if someone does call rcu_read_lock(),
	 * rcu_read_lock_bh() and so on from extended quiescent states.
	 */
	if (!rcu_is_watching())
		pr_warn("RCU used illegally from extended quiescent state!\n");

	lockdep_print_held_locks(curr);
	pr_warn("\nstack backtrace:\n");
	dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);