watchdog/core, powerpc: Lock cpus across reconfiguration
author     Thomas Gleixner <tglx@linutronix.de>
           Tue, 3 Oct 2017 14:37:53 +0000 (16:37 +0200)
committer  Thomas Gleixner <tglx@linutronix.de>
           Wed, 4 Oct 2017 08:53:54 +0000 (10:53 +0200)
Instead of dropping the cpu hotplug lock after stopping the NMI watchdog and
the watchdog threads and reacquiring it for the restart, the code and the
protection rules become more obvious when the cpu hotplug lock is held across
the full reconfiguration.

Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1710022105570.2114@nanos
arch/powerpc/kernel/watchdog.c
kernel/smpboot.c
kernel/watchdog.c
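
For orientation before the hunks: a condensed sketch (bodies elided, all
identifiers taken from the diffs below) of the kernel/watchdog.c
reconfiguration path as it looks after this commit. The cpu hotplug read
lock is now held across the entire stop/reconfigure/start sequence, and
only the perf cleanup runs outside it:

	/* Condensed sketch of softlockup_reconfigure_threads() after this
	 * commit; see the third diff below for the actual change. */
	static void softlockup_reconfigure_threads(void)
	{
		cpus_read_lock();
		watchdog_nmi_stop();		/* callees no longer take the lock */
		softlockup_park_all_threads();
		set_sample_period();
		lockup_detector_update_enable();
		if (watchdog_enabled && watchdog_thresh)
			softlockup_unpark_threads();
		watchdog_nmi_start();
		cpus_read_unlock();
		/*
		 * Must be called outside the cpus locked section to prevent
		 * recursive locking in the perf code.
		 */
		__lockup_detector_cleanup();
	}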

arch/powerpc/kernel/watchdog.c
index 2673ec8bec008707c535081e51d30545ec5988fd..f9b4c6352d24a72f7a45500cfa9991b3287d999f 100644
@@ -359,21 +359,17 @@ void watchdog_nmi_stop(void)
 {
        int cpu;
 
-       cpus_read_lock();
        for_each_cpu(cpu, &wd_cpus_enabled)
                stop_wd_on_cpu(cpu);
-       cpus_read_unlock();
 }
 
 void watchdog_nmi_start(void)
 {
        int cpu;
 
-       cpus_read_lock();
        watchdog_calc_timeouts();
        for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
                start_wd_on_cpu(cpu);
-       cpus_read_unlock();
 }
 
 /*
kernel/smpboot.c
index ed7507b69b4801d0f6cee27b9ded1866509b393e..5043e7433f4b15879a6498ed3d1ca6cfa2876f83 100644
@@ -351,7 +351,7 @@ void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread
        static struct cpumask tmp;
        unsigned int cpu;
 
-       get_online_cpus();
+       lockdep_assert_cpus_held();
        mutex_lock(&smpboot_threads_lock);
 
        /* Park threads that were exclusively enabled on the old mask. */
@@ -367,7 +367,6 @@ void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread
        cpumask_copy(old, new);
 
        mutex_unlock(&smpboot_threads_lock);
-       put_online_cpus();
 }
 
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
kernel/watchdog.c
index 6ad6226535d0e62e549113bd345cc087db023372..fff90fe100071bccf232c782e7bd12632ab84fdc 100644
@@ -535,7 +535,6 @@ static void softlockup_update_smpboot_threads(void)
 
        smpboot_update_cpumask_percpu_thread(&watchdog_threads,
                                             &watchdog_allowed_mask);
-       __lockup_detector_cleanup();
 }
 
 /* Temporarily park all watchdog threads */
@@ -554,6 +553,7 @@ static void softlockup_unpark_threads(void)
 
 static void softlockup_reconfigure_threads(void)
 {
+       cpus_read_lock();
        watchdog_nmi_stop();
        softlockup_park_all_threads();
        set_sample_period();
@@ -561,6 +561,12 @@ static void softlockup_reconfigure_threads(void)
        if (watchdog_enabled && watchdog_thresh)
                softlockup_unpark_threads();
        watchdog_nmi_start();
+       cpus_read_unlock();
+       /*
+        * Must be called outside the cpus locked section to prevent
+        * recursive locking in the perf code.
+        */
+       __lockup_detector_cleanup();
 }
 
 /*
@@ -605,9 +611,11 @@ static inline void watchdog_disable_all_cpus(void) { }
 static inline void softlockup_init_threads(void) { }
 static void softlockup_reconfigure_threads(void)
 {
+       cpus_read_lock();
        watchdog_nmi_stop();
        lockup_detector_update_enable();
        watchdog_nmi_start();
+       cpus_read_unlock();
 }
 #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
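
The callee side of the same contract, condensed from the kernel/smpboot.c
hunk above: the function no longer brackets its work with
get_online_cpus()/put_online_cpus() itself, it asserts that the caller
already holds the cpu hotplug lock. (Sketch only; the cpumask juggling in
the real function is elided.)

	void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
						  const struct cpumask *new)
	{
		lockdep_assert_cpus_held();	/* caller holds cpus_read_lock() */
		mutex_lock(&smpboot_threads_lock);
		/* ... park threads enabled only in the old mask, unpark
		 * threads enabled only in the new mask, copy new to old ... */
		mutex_unlock(&smpboot_threads_lock);
	}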