{
	int cpu;

-	cpus_read_lock();
	for_each_cpu(cpu, &wd_cpus_enabled)
		stop_wd_on_cpu(cpu);
-	cpus_read_unlock();
}

void watchdog_nmi_start(void)
{
	int cpu;

-	cpus_read_lock();
	watchdog_calc_timeouts();
	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
		start_wd_on_cpu(cpu);
-	cpus_read_unlock();
}
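/*
 * Illustrative sketch, not part of the patch: with the lock/unlock pair
 * removed, these powerpc helpers rely on the caller (the watchdog core
 * reconfiguration path shown further below) already holding the CPU
 * hotplug lock. One way to make that calling convention explicit is a
 * lockdep assertion, mirroring what the smpboot hunk below does; the
 * symbols reused here (wd_cpus_enabled, stop_wd_on_cpu) come from the
 * hunk above.
 */
#include <linux/cpu.h>	/* cpus_read_lock(), lockdep_assert_cpus_held() */

void watchdog_nmi_stop(void)
{
	int cpu;

	/* Caller must hold cpus_read_lock() across the reconfiguration. */
	lockdep_assert_cpus_held();

	for_each_cpu(cpu, &wd_cpus_enabled)
		stop_wd_on_cpu(cpu);
}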
	static struct cpumask tmp;
	unsigned int cpu;

-	get_online_cpus();
+	lockdep_assert_cpus_held();
	mutex_lock(&smpboot_threads_lock);

	/* Park threads that were exclusively enabled on the old mask. */
	cpumask_copy(old, new);
	mutex_unlock(&smpboot_threads_lock);
-	put_online_cpus();
}

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
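/*
 * Illustrative caller pattern, not part of the patch (the hunk above
 * shows only the top and bottom of smpboot_update_cpumask_percpu_thread();
 * the body in the middle is elided). Since the function now only asserts
 * that the CPU hotplug lock is held instead of taking it, callers are
 * expected to provide the lock themselves, e.g.:
 */
#include <linux/cpu.h>
#include <linux/smpboot.h>

/* Hypothetical wrapper, modeled on the watchdog caller in the next hunk. */
static void example_update_watchdog_cpumask(void)
{
	cpus_read_lock();
	smpboot_update_cpumask_percpu_thread(&watchdog_threads,
					     &watchdog_allowed_mask);
	cpus_read_unlock();
}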
	smpboot_update_cpumask_percpu_thread(&watchdog_threads,
					     &watchdog_allowed_mask);
-	__lockup_detector_cleanup();
}
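/*
 * Note, not part of the patch: __lockup_detector_cleanup() is not
 * dropped here, it is moved. The update path above now runs with the
 * CPU hotplug lock held (it is reached from softlockup_reconfigure_threads()
 * via the park/unpark helpers), and the patch's own comment below states
 * that the cleanup must run outside the cpus locked section to avoid
 * recursive locking in the perf code. The call is therefore re-issued at
 * the end of softlockup_reconfigure_threads(), after cpus_read_unlock().
 */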
static void softlockup_reconfigure_threads(void)
{
+	cpus_read_lock();
	watchdog_nmi_stop();
	softlockup_park_all_threads();
	set_sample_period();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_unpark_threads();
	watchdog_nmi_start();
+	cpus_read_unlock();
+	/*
+	 * Must be called outside the cpus locked section to prevent
+	 * recursive locking in the perf code.
+	 */
+	__lockup_detector_cleanup();
}
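/*
 * Minimal sketch of the pattern this hunk establishes, illustrative
 * only and not part of the patch: the whole stop/reconfigure/start
 * sequence runs inside a single cpus_read_lock() section, so the set
 * of online CPUs cannot change while the watchdogs are torn down and
 * brought back up, and anything that may itself take the hotplug lock
 * (here the perf cleanup) is deferred until after the unlock. All
 * example_* helpers are hypothetical stand-ins.
 */
#include <linux/cpu.h>

static void example_stop_on_cpu(int cpu) { }		/* like stop_wd_on_cpu() */
static void example_start_on_cpu(int cpu) { }		/* like start_wd_on_cpu() */
static void example_update_parameters(void) { }		/* like set_sample_period() */
static void example_cleanup(void) { }			/* like __lockup_detector_cleanup() */

static void example_reconfigure(void)
{
	int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		example_stop_on_cpu(cpu);
	example_update_parameters();
	for_each_online_cpu(cpu)
		example_start_on_cpu(cpu);
	cpus_read_unlock();

	/* Work that may take the hotplug lock itself must run here. */
	example_cleanup();
}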
static inline void softlockup_init_threads(void) { }

static void softlockup_reconfigure_threads(void)
{
+	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
+	cpus_read_unlock();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */