workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
author     Tejun Heo <tj@kernel.org>
           Tue, 21 Aug 2012 20:18:24 +0000 (13:18 -0700)
committer  Tejun Heo <tj@kernel.org>
           Tue, 21 Aug 2012 20:18:24 +0000 (13:18 -0700)
cancel_delayed_work() can't be called from IRQ handlers due to its use
of del_timer_sync() and can't cancel work items that have already been
transferred from the timer to the worklist.
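
For illustration, a hypothetical caller (wq, dwork and stop_polling()
are assumed names, not from this patch) runs into both limitations:

  #include <linux/workqueue.h>

  static struct workqueue_struct *wq;   /* created elsewhere (assumed) */
  static struct delayed_work dwork;

  static void stop_polling(void)
  {
          /*
           * del_timer_sync() may wait for a running timer handler, so
           * this must not be called from IRQ context.  It also returns
           * false once the timer has fired, even if the work item has
           * merely moved to the worklist and will still run.
           */
          if (!cancel_delayed_work(&dwork))
                  cancel_work_sync(&dwork.work);  /* may sleep */
  }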

Also, unlike other flush and cancel functions, a canceled delayed_work
would still point to the last associated cpu_workqueue.  If that
workqueue is destroyed afterwards and the work item is re-used on a
different workqueue, the queueing code can oops trying to dereference
the already-freed cpu_workqueue.
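
A hypothetical sequence consistent with that oops (wq_a, wq_b and
dwork are assumed names):

  queue_delayed_work(wq_a, &dwork, HZ);
  cancel_delayed_work(&dwork);          /* dwork still records wq_a's cpu_workqueue */
  destroy_workqueue(wq_a);              /* the recorded cpu_workqueue is freed */
  queue_delayed_work(wq_b, &dwork, HZ); /* queueing may dereference the stale pointer */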

This patch reimplements cancel_delayed_work() using
try_to_grab_pending() and set_work_cpu_and_clear_pending().  This
allows the function to be called from IRQ handlers and makes its
behavior consistent with other flush / cancel functions.
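
With the new implementation, cancellation becomes usable from hard IRQ
context; a minimal sketch (my_dev, poll_work and my_irq_handler() are
assumed names):

  #include <linux/interrupt.h>
  #include <linux/workqueue.h>

  struct my_dev {
          struct delayed_work poll_work;
  };

  static irqreturn_t my_irq_handler(int irq, void *dev_id)
  {
          struct my_dev *dev = dev_id;

          /*
           * Safe here: try_to_grab_pending() never sleeps, and it also
           * steals items already transferred from timer to worklist.
           */
          cancel_delayed_work(&dev->poll_work);
          return IRQ_HANDLED;
  }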

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
include/linux/workqueue.h
kernel/workqueue.c

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d86b320319e010923c5ee551566c6fce2c0fab8c..4898289564abbc6ff6d899750be20b859e813615 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -420,6 +420,7 @@ extern bool flush_work(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -428,22 +429,6 @@ extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
 extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
-/*
- * Kill off a pending schedule_delayed_work().  Note that the work callback
- * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * cancel_work_sync() to wait on it.
- */
-static inline bool cancel_delayed_work(struct delayed_work *work)
-{
-       bool ret;
-
-       ret = del_timer_sync(&work->timer);
-       if (ret)
-               work_clear_pending(&work->work);
-       return ret;
-}
-
 /*
  * Like above, but uses del_timer() instead of del_timer_sync(). This means,
  * if it returns 0 the timer function may be running and the queueing is in
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b394df8beaee6ea6288c351fbabb806208a62cb7..039d0fae171a3d462e4f4a119761b3350112bc30 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2948,6 +2948,36 @@ bool flush_delayed_work(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
+/**
+ * cancel_delayed_work - cancel a delayed work
+ * @dwork: delayed_work to cancel
+ *
+ * Kill off a pending delayed_work.  Returns %true if @dwork was pending
+ * and canceled; %false if it wasn't pending.  Note that the work callback
+ * function may still be running on return, unless it returns %true and the
+ * work doesn't re-arm itself.  Explicitly flush or use
+ * cancel_delayed_work_sync() to wait on it.
+ *
+ * This function is safe to call from any context including IRQ handler.
+ */
+bool cancel_delayed_work(struct delayed_work *dwork)
+{
+       unsigned long flags;
+       int ret;
+
+       do {
+               ret = try_to_grab_pending(&dwork->work, true, &flags);
+       } while (unlikely(ret == -EAGAIN));
+
+       if (unlikely(ret < 0))
+               return false;
+
+       set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+       local_irq_restore(flags);
+       return true;
+}
+EXPORT_SYMBOL(cancel_delayed_work);
+
 /**
  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work to cancel