locking/lockdep: Iterate lock_classes directly when reading lockdep files
author    Waiman Long <longman@redhat.com>
Fri, 11 Feb 2022 03:55:26 +0000 (22:55 -0500)
committer Stefan Bader <stefan.bader@canonical.com>
Fri, 20 May 2022 12:39:46 +0000 (14:39 +0200)
BugLink: https://bugs.launchpad.net/bugs/1969110
[ Upstream commit fb7275acd6fb988313dddd8d3d19efa70d9015ad ]

When dumping lock_classes information via /proc/lockdep, we can't take
the lockdep lock as the lock hold time is indeterminate. Iterating
over all_lock_classes without holding the lock is dangerous: there is
a slight chance that the iteration may branch off into another list,
leading to an infinite loop, or even access invalid memory if the
all_lock_classes list is modified in parallel.
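
The failure mode, in miniature: the userspace sketch below uses a
hand-rolled stand-in for the kernel's list.h (all names and the
10-step safety bound are illustrative, not from the patch). A lockless
reader parked on a node resumes its walk after a writer has moved that
node onto another list; the termination test against the original list
head can then never succeed.

#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_del(struct list_head *e)
{
        e->next->prev = e->prev;
        e->prev->next = e->next;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
}

int main(void)
{
        struct list_head all_classes  = LIST_HEAD_INIT(all_classes);
        struct list_head free_classes = LIST_HEAD_INIT(free_classes);
        struct list_head a, b;
        struct list_head *cur;
        int steps = 0;

        list_add_tail(&a, &all_classes);
        list_add_tail(&b, &all_classes);

        cur = all_classes.next->next;   /* lockless reader parked at 'b' */

        /* Writer "zaps" b, i.e. list_move_tail(&b, &free_classes). */
        list_del(&b);
        list_add_tail(&b, &free_classes);

        /*
         * Reader resumes. It now walks free_classes, so the test
         * against &all_classes never fires; only the safety bound
         * stops what would otherwise be an infinite loop.
         */
        while (cur != &all_classes && steps < 10) {
                cur = cur->next;
                steps++;
        }
        printf("%d steps, %s\n", steps, cur == &all_classes ?
               "terminated normally" : "stuck on the wrong list");
        return 0;
}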

To avoid this problem, iteration of lock classes is now done directly
on the lock_classes array itself. The lock_classes_in_use bitmap is
checked to see whether a lock class is in use. To avoid scanning the
full array every time, a new max_lock_class_idx value is added to
track the highest lock_classes index currently in use.
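
The replacement scheme, again as a userspace sketch: the stub type and
bit helpers below are stand-ins for the kernel's, and only the
iteration macro mirrors the iterate_lock_classes() added by the patch
itself. A fixed array plus an in-use bitmap plus a high-water mark
replaces pointer chasing, so a reader indexes the array and can never
leave it regardless of what writers do to the lists in parallel.

#include <stdio.h>
#include <limits.h>

#define MAX_KEYS 8192
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct lock_class_stub { const char *name; };

static struct lock_class_stub lock_classes[MAX_KEYS];
static unsigned long lock_classes_in_use[MAX_KEYS / BITS_PER_LONG];
static unsigned long max_lock_class_idx;

static int test_bit(unsigned long nr, const unsigned long *map)
{
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static void set_bit(unsigned long nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Same shape as the patch's iterate_lock_classes() macro. */
#define iterate_lock_classes(idx, class)                                \
        for (idx = 0, class = lock_classes; idx <= max_lock_class_idx;  \
             idx++, class++)

int main(void)
{
        unsigned long idx;
        struct lock_class_stub *class;

        /* Register classes in slots 0, 3 and 5; 5 is the high-water mark. */
        lock_classes[0].name = "a"; set_bit(0, lock_classes_in_use);
        lock_classes[3].name = "b"; set_bit(3, lock_classes_in_use);
        lock_classes[5].name = "c"; set_bit(5, lock_classes_in_use);
        max_lock_class_idx = 5;

        iterate_lock_classes(idx, class) {
                if (!test_bit(idx, lock_classes_in_use))
                        continue;       /* zapped or never-used slot */
                printf("%lu: %s\n", idx, class->name);
        }
        return 0;
}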

We could, in theory, take the lockdep lock when iterating
all_lock_classes for the other lockdep files (lockdep_stats and
lock_stat), as their lock hold times are shorter. For consistency,
they are also modified to iterate the lock_classes array directly.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220211035526.1329503-2-longman@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
(cherry picked from commit 1388c10b325773a36cba7c257843dd40cafd646f)
Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
kernel/locking/lockdep.c
kernel/locking/lockdep_internals.h
kernel/locking/lockdep_proc.c

index f278815d1d2b80f10a41a62a609d09a71f2a86d6..409c7d1a68036428cd44f5abea7aaa9ecd84e1d5 100644 (file)
@@ -183,11 +183,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
 static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
 unsigned long nr_lock_classes;
 unsigned long nr_zapped_classes;
-#ifndef CONFIG_DEBUG_LOCKDEP
-static
-#endif
+unsigned long max_lock_class_idx;
 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
-static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
+DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
 
 inline struct lock_class *lockdep_hlock_class(struct held_lock *hlock)
 {
@@ -340,7 +338,7 @@ static inline void lock_release_holdtime(struct held_lock *hlock)
  * elements. These elements are linked together by the lock_entry member in
  * struct lock_class.
  */
-LIST_HEAD(all_lock_classes);
+static LIST_HEAD(all_lock_classes);
 static LIST_HEAD(free_lock_classes);
 
 /**
@@ -1242,6 +1240,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        struct lockdep_subclass_key *key;
        struct hlist_head *hash_head;
        struct lock_class *class;
+       int idx;
 
        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
@@ -1307,6 +1306,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
         * of classes.
         */
        list_move_tail(&class->lock_entry, &all_lock_classes);
+       idx = class - lock_classes;
+       if (idx > max_lock_class_idx)
+               max_lock_class_idx = idx;
 
        if (verbose(class)) {
                graph_unlock();
@@ -5988,6 +5990,8 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
                WRITE_ONCE(class->name, NULL);
                nr_lock_classes--;
                __clear_bit(class - lock_classes, lock_classes_in_use);
+               if (class - lock_classes == max_lock_class_idx)
+                       max_lock_class_idx--;
        } else {
                WARN_ONCE(true, "%s() failed for class %s\n", __func__,
                          class->name);
index ecb8662e7a4ed52c7935b19d280e79458af6c30b..bbe9000260d02ac56ce080fd0b7858623f071c4d 100644 (file)
@@ -121,7 +121,6 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 
 #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
 
-extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
 #define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
@@ -151,6 +150,10 @@ extern unsigned int nr_large_chain_blocks;
 
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_bfs_queue_depth;
+extern unsigned long max_lock_class_idx;
+
+extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+extern unsigned long lock_classes_in_use[];
 
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
@@ -205,7 +208,6 @@ struct lockdep_stats {
 };
 
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
-extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 #define __debug_atomic_inc(ptr)                                        \
        this_cpu_inc(lockdep_stats.ptr);
index b8d9a050c337a76a4e970921339120c74178f13e..15fdc7fa5c688aca59572cc8ac27d71b5f076376 100644 (file)
@@ -24,14 +24,33 @@
 
 #include "lockdep_internals.h"
 
+/*
+ * Since iteration of lock_classes is done without holding the lockdep lock,
+ * it is not safe to iterate all_lock_classes list directly as the iteration
+ * may branch off to free_lock_classes or the zapped list. Iteration is done
+ * directly on the lock_classes array by checking the lock_classes_in_use
+ * bitmap and max_lock_class_idx.
+ */
+#define iterate_lock_classes(idx, class)                               \
+       for (idx = 0, class = lock_classes; idx <= max_lock_class_idx;  \
+            idx++, class++)
+
 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       return seq_list_next(v, &all_lock_classes, pos);
+       struct lock_class *class = v;
+
+       ++class;
+       *pos = class - lock_classes;
+       return (*pos > max_lock_class_idx) ? NULL : class;
 }
 
 static void *l_start(struct seq_file *m, loff_t *pos)
 {
-       return seq_list_start_head(&all_lock_classes, *pos);
+       unsigned long idx = *pos;
+
+       if (idx > max_lock_class_idx)
+               return NULL;
+       return lock_classes + idx;
 }
 
 static void l_stop(struct seq_file *m, void *v)
@@ -57,14 +76,16 @@ static void print_name(struct seq_file *m, struct lock_class *class)
 
 static int l_show(struct seq_file *m, void *v)
 {
-       struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
+       struct lock_class *class = v;
        struct lock_list *entry;
        char usage[LOCK_USAGE_CHARS];
+       int idx = class - lock_classes;
 
-       if (v == &all_lock_classes) {
+       if (v == lock_classes)
                seq_printf(m, "all lock classes:\n");
+
+       if (!test_bit(idx, lock_classes_in_use))
                return 0;
-       }
 
        seq_printf(m, "%p", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -220,8 +241,11 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 
 #ifdef CONFIG_PROVE_LOCKING
        struct lock_class *class;
+       unsigned long idx;
 
-       list_for_each_entry(class, &all_lock_classes, lock_entry) {
+       iterate_lock_classes(idx, class) {
+               if (!test_bit(idx, lock_classes_in_use))
+                       continue;
 
                if (class->usage_mask == 0)
                        nr_unused++;
@@ -254,6 +278,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 
                sum_forward_deps += lockdep_count_forward_deps(class);
        }
+
 #ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
 #endif
@@ -345,6 +370,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
        seq_printf(m, " max bfs queue depth:           %11u\n",
                        max_bfs_queue_depth);
 #endif
+       seq_printf(m, " max lock class index:          %11lu\n",
+                       max_lock_class_idx);
        lockdep_stats_debug_show(m);
        seq_printf(m, " debug_locks:                   %11u\n",
                        debug_locks);
@@ -622,12 +649,16 @@ static int lock_stat_open(struct inode *inode, struct file *file)
        if (!res) {
                struct lock_stat_data *iter = data->stats;
                struct seq_file *m = file->private_data;
+               unsigned long idx;
 
-               list_for_each_entry(class, &all_lock_classes, lock_entry) {
+               iterate_lock_classes(idx, class) {
+                       if (!test_bit(idx, lock_classes_in_use))
+                               continue;
                        iter->class = class;
                        iter->stats = lock_stats(class);
                        iter++;
                }
+
                data->iter_end = iter;
 
                sort(data->stats, data->iter_end - data->stats,
@@ -645,6 +676,7 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf,
                               size_t count, loff_t *ppos)
 {
        struct lock_class *class;
+       unsigned long idx;
        char c;
 
        if (count) {
@@ -654,8 +686,11 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf,
                if (c != '0')
                        return count;
 
-               list_for_each_entry(class, &all_lock_classes, lock_entry)
+               iterate_lock_classes(idx, class) {
+                       if (!test_bit(idx, lock_classes_in_use))
+                               continue;
                        clear_lock_stats(class);
+               }
        }
        return count;
 }
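
For reference, the new l_start()/l_next()/l_show() trio follows the
usual seq_file contract. The userspace simulation below is a sketch
(the driver loop is simplified; the real seq_file core also handles
buffer overflow and restarts via *pos, and the NULL slot stands in for
a cleared lock_classes_in_use bit). It shows the one subtlety of the
patch: unused array slots are not skipped in l_next() but filtered in
l_show(), which emits nothing for them and still returns success.

#include <stdio.h>

#define MAX_KEYS 16

/* A NULL slot stands in for a zapped class (!test_bit in the patch). */
static const char *names[MAX_KEYS] = { "lock_a", "lock_b", NULL, "lock_d" };
static unsigned long max_idx = 3;       /* the high-water mark */

/* Mirrors l_start(): a *pos past the high-water mark ends the walk. */
static const char **l_start(unsigned long *pos)
{
        return (*pos > max_idx) ? NULL : &names[*pos];
}

/* Mirrors l_next(): advance, recompute *pos, re-check the bound. */
static const char **l_next(const char **v, unsigned long *pos)
{
        ++v;
        *pos = v - names;
        return (*pos > max_idx) ? NULL : v;
}

/*
 * Mirrors l_show(): an unused slot prints nothing but still returns
 * success, so holes in the array are skipped rather than being
 * treated as the end of the sequence.
 */
static int l_show(const char **v)
{
        if (*v)
                printf("%s\n", *v);
        return 0;
}

int main(void)
{
        unsigned long pos = 0;
        const char **v;

        /* Simplified version of the loop the seq_file core runs. */
        for (v = l_start(&pos); v; v = l_next(v, &pos))
                l_show(v);
        return 0;
}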