]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
lib/dump_stack: move cpu lock to printk.c
authorJohn Ogness <john.ogness@linutronix.de>
Thu, 17 Jun 2021 09:50:50 +0000 (11:56 +0206)
committerPetr Mladek <pmladek@suse.com>
Tue, 22 Jun 2021 07:56:10 +0000 (09:56 +0200)
dump_stack() implements its own cpu-reentrant spinning lock to
best-effort serialize stack traces in the printk log. However,
there are other functions (such as show_regs()) that can also
benefit from this serialization.

Move the cpu-reentrant spinning lock (cpu lock) into new helper
functions printk_cpu_lock_irqsave()/printk_cpu_unlock_irqrestore()
so that it is available for others as well. For !CONFIG_SMP the
cpu lock is a NOP.

Note that having multiple cpu locks in the system can easily
lead to deadlock. Code needing a cpu lock should use the
printk cpu lock, since the printk cpu lock could be acquired
from any code and any context.

Also note that it is not necessary for a cpu lock to disable
interrupts. However, in upcoming work this cpu lock will be used
for emergency tasks (for example, atomic consoles during kernel
crashes) and any interruptions while holding the cpu lock should
be avoided if possible.

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
[pmladek@suse.com: Backported on top of 5.13-rc1.]
Signed-off-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20210617095051.4808-2-john.ogness@linutronix.de
include/linux/printk.h
kernel/printk/printk.c
lib/dump_stack.c

index fe7eb2351610d0181caa8c42d0bccaa12bd3f790..1790a5521fd9dbd96672e64d2e0aac15678de56b 100644 (file)
@@ -282,6 +282,47 @@ static inline void printk_safe_flush_on_panic(void)
 }
 #endif
 
#ifdef CONFIG_SMP
extern int __printk_cpu_trylock(void);
extern void __printk_wait_on_cpu_lock(void);
extern void __printk_cpu_unlock(void);

/**
 * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
 *                             lock and disable interrupts.
 * @flags: Stack-allocated storage for saving local interrupt state,
 *         to be passed to printk_cpu_unlock_irqrestore().
 *
 * If the lock is owned by another CPU, spin until it becomes available.
 * Interrupts are restored while spinning.
 *
 * Wrapped in do { } while (0) so the macro plus the caller's trailing
 * semicolon expand to exactly one statement; the original bare
 * for (;;) { } form broke "if (x) printk_cpu_lock_irqsave(f); else ..."
 * because the extra semicolon orphaned the else.  The acquire sequence
 * (save irqs, trylock, restore+wait on failure, retry) is unchanged.
 */
#define printk_cpu_lock_irqsave(flags)			\
	do {						\
		local_irq_save(flags);			\
		while (!__printk_cpu_trylock()) {	\
			local_irq_restore(flags);	\
			__printk_wait_on_cpu_lock();	\
			local_irq_save(flags);		\
		}					\
	} while (0)

/**
 * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
 *                                  lock and restore interrupts.
 * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
 *
 * Note: the stray line-continuation backslash after "while (0)" was
 * removed; it silently glued the macro definition to the following
 * source line.
 */
#define printk_cpu_unlock_irqrestore(flags)	\
	do {					\
		__printk_cpu_unlock();		\
		local_irq_restore(flags);	\
	} while (0)

#else

/* For !CONFIG_SMP the cpu lock is a NOP; still consume @flags. */
#define printk_cpu_lock_irqsave(flags) ((void)(flags))
#define printk_cpu_unlock_irqrestore(flags) ((void)(flags))

#endif /* CONFIG_SMP */
+
 extern int kptr_restrict;
 
 /**
index 421c35571797e4e12a717fc10bf46a7d7d0d03d9..9dfad0efb67fec26fb48c81ab938d393e40b7789 100644 (file)
@@ -3531,3 +3531,72 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 
 #endif
+
+#ifdef CONFIG_SMP
+/* CPU number of the current lock owner, or -1 when the lock is free. */
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+/* Re-entrancy depth: acquisitions by the owner beyond the first one. */
+static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+
+/**
+ * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ *                               spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+void __printk_wait_on_cpu_lock(void)
+{
+       /*
+        * NOTE(review): this is a relaxed read with no acquire ordering;
+        * the lock is only actually taken by the atomic_cmpxchg() in
+        * __printk_cpu_trylock() afterwards.  Mainline later added
+        * explicit memory ordering to this lock -- confirm whether that
+        * follow-up is needed for this backport.
+        */
+       do {
+               cpu_relax();
+       } while (atomic_read(&printk_cpulock_owner) != -1);
+}
+EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+
+/**
+ * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ *                          spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+int __printk_cpu_trylock(void)
+{
+       int cpu;
+       int old;
+
+       /* Caller has interrupts disabled, so the CPU cannot change under us. */
+       cpu = smp_processor_id();
+
+       /*
+        * Claim the lock only if it is currently free (-1).
+        * NOTE(review): a successful atomic_cmpxchg() is fully ordered;
+        * mainline later reworked this lock to use explicit
+        * acquire/release ordering -- confirm for this backport.
+        */
+       old = atomic_cmpxchg(&printk_cpulock_owner, -1, cpu);
+       if (old == -1) {
+               /* This CPU is now the owner. */
+               return 1;
+       } else if (old == cpu) {
+               /* This CPU is already the owner. */
+               atomic_inc(&printk_cpulock_nested);
+               return 1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(__printk_cpu_trylock);
+
+/**
+ * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+void __printk_cpu_unlock(void)
+{
+       /* A nested unlock only drops the depth; the lock itself stays held. */
+       if (atomic_read(&printk_cpulock_nested)) {
+               atomic_dec(&printk_cpulock_nested);
+               return;
+       }
+
+       /*
+        * NOTE(review): plain atomic_set() provides no release ordering,
+        * so stores made inside the critical section may become visible
+        * after the lock already appears free on weakly ordered
+        * architectures.  Mainline later gave this lock explicit release
+        * semantics -- confirm whether that fix should be backported too.
+        */
+       atomic_set(&printk_cpulock_owner, -1);
+}
+EXPORT_SYMBOL(__printk_cpu_unlock);
+#endif /* CONFIG_SMP */
index f5a33b6f773f7f5725dc92fab7d597abc8dd1abc..5ebf4375fa8c92704484dd1d13205774e5e10dae 100644 (file)
@@ -84,50 +84,16 @@ static void __dump_stack(void)
  *
  * Architectures can override this implementation by implementing its own.
  */
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
 asmlinkage __visible void dump_stack(void)
 {
        unsigned long flags;
-       int was_locked;
-       int old;
-       int cpu;
 
        /*
         * Permit this cpu to perform nested stack dumps while serialising
         * against other CPUs
         */
-retry:
-       local_irq_save(flags);
-       cpu = smp_processor_id();
-       old = atomic_cmpxchg(&dump_lock, -1, cpu);
-       if (old == -1) {
-               was_locked = 0;
-       } else if (old == cpu) {
-               was_locked = 1;
-       } else {
-               local_irq_restore(flags);
-               /*
-                * Wait for the lock to release before jumping to
-                * atomic_cmpxchg() in order to mitigate the thundering herd
-                * problem.
-                */
-               do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
-               goto retry;
-       }
-
-       __dump_stack();
-
-       if (!was_locked)
-               atomic_set(&dump_lock, -1);
-
-       local_irq_restore(flags);
-}
-#else
-asmlinkage __visible void dump_stack(void)
-{
+       /* Serialization (and irq disabling) now via the shared printk cpu lock. */
+       printk_cpu_lock_irqsave(flags);
        __dump_stack();
+       printk_cpu_unlock_irqrestore(flags);
 }
-#endif
 EXPORT_SYMBOL(dump_stack);