tick/broadcast: Prevent deadlock on tick_broadcast_lock
author Mike Galbraith <efault@gmx.de>
Mon, 13 Feb 2017 02:31:55 +0000 (03:31 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Mon, 13 Feb 2017 08:49:31 +0000 (09:49 +0100)
tick_broadcast_lock is taken from interrupt context, but the following call
chain takes the lock without disabling interrupts:

[   12.703736]  _raw_spin_lock+0x3b/0x50
[   12.703738]  tick_broadcast_control+0x5a/0x1a0
[   12.703742]  intel_idle_cpu_online+0x22/0x100
[   12.703744]  cpuhp_invoke_callback+0x245/0x9d0
[   12.703752]  cpuhp_thread_fun+0x52/0x110
[   12.703754]  smpboot_thread_fn+0x276/0x320

So the following deadlock can happen:

   lock(tick_broadcast_lock);
   <Interrupt>
      lock(tick_broadcast_lock);
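
For illustration only (not part of this patch), the following sketch shows
the locking pattern that avoids this class of deadlock. The names demo_lock,
demo_irq_handler() and demo_update() are hypothetical; the point is that the
process-context path must take the lock with interrupts disabled, because the
same lock is also taken from interrupt context on the same CPU:

   #include <linux/spinlock.h>

   static DEFINE_RAW_SPINLOCK(demo_lock);

   /* Interrupt context: interrupts are already disabled here. */
   static void demo_irq_handler(void)
   {
           raw_spin_lock(&demo_lock);
           /* ... touch shared state ... */
           raw_spin_unlock(&demo_lock);
   }

   /* Process context: disable interrupts while holding the lock. */
   static void demo_update(void)
   {
           unsigned long flags;

           raw_spin_lock_irqsave(&demo_lock, flags);
           /* ... touch shared state ... */
           raw_spin_unlock_irqrestore(&demo_lock, flags);
   }

A plain raw_spin_lock() in demo_update() would reproduce the scenario above:
an interrupt arriving while the lock is held spins on it forever on the same
CPU.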

intel_idle_cpu_online() is the only place which violates the calling
convention of tick_broadcast_control(). This was caused by the removal of
the SMP function call in the course of the CPU hotplug rework.

Instead of slapping local_irq_disable/enable() at the call site, we can
relax the calling convention and handle it in the core code, which makes
the whole machinery more robust.
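
To make the trade-off concrete, a sketch (not code from this patch) of the
rejected call-site workaround in intel_idle_cpu_online() would be:

   unsigned long flags;

   /* Hypothetical workaround: paper over the violation at the call site. */
   local_irq_save(flags);
   tick_broadcast_enable();   /* ends up in tick_broadcast_control() */
   local_irq_restore(flags);

Doing it in tick_broadcast_control() itself, as the hunks below do, covers
every present and future caller instead of just this one.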

Fixes: 29d7bbada98e ("intel_idle: Remove superfluous SMP fuction call")
Reported-by: Gabriel C <nix.or.die@gmail.com>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Cc: Ruslan Ruslichenko <rruslich@cisco.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: lwn@lwn.net
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Anna-Maria Gleixner <anna-maria@linutronix.de>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: stable <stable@vger.kernel.org>
Link: http://lkml.kernel.org/r/1486953115.5912.4.camel@gmx.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/time/tick-broadcast.c

index 3109204c87cca83704d10326f8b179671577f3d6..17ac99b60ee58e78975d415f34ddc4ce5a2e5342 100644
@@ -347,17 +347,16 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
  *
  * Called when the system enters a state where affected tick devices
  * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
- *
- * Called with interrupts disabled, so clockevents_lock is not
- * required here because the local clock event device cannot go away
- * under us.
  */
 void tick_broadcast_control(enum tick_broadcast_mode mode)
 {
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        int cpu, bc_stopped;
+       unsigned long flags;
 
+       /* Protects also the local clockevent device. */
+       raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        td = this_cpu_ptr(&tick_cpu_device);
        dev = td->evtdev;
 
@@ -365,12 +364,11 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
         * Is the device not affected by the powerstate ?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
-               return;
+               goto out;
 
        if (!tick_device_is_functional(dev))
-               return;
+               goto out;
 
-       raw_spin_lock(&tick_broadcast_lock);
        cpu = smp_processor_id();
        bc = tick_broadcast_device.evtdev;
        bc_stopped = cpumask_empty(tick_broadcast_mask);
@@ -420,7 +418,8 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
                                tick_broadcast_setup_oneshot(bc);
                }
        }
-       raw_spin_unlock(&tick_broadcast_lock);
+out:
+       raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 EXPORT_SYMBOL_GPL(tick_broadcast_control);
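
With the relaxed convention, tick_broadcast_control() is safe to call with
interrupts either enabled or disabled. A hotplug callback, for example, can
simply do (illustrative sketch, using the existing TICK_BROADCAST_ON mode
from include/linux/tick.h):

   /* Safe from both IRQ-enabled and IRQ-disabled context after this change. */
   tick_broadcast_control(TICK_BROADCAST_ON);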