iio:trigger: Fix use_count race condition
author    Lars-Peter Clausen <lars@metafoo.de>
          Tue, 16 Jul 2013 14:28:00 +0000 (15:28 +0100)
committer Jonathan Cameron <jic23@kernel.org>
          Sat, 20 Jul 2013 09:18:53 +0000 (10:18 +0100)
When more than one trigger consumer is in use, multiple threads can perform a
read-modify-update cycle on 'use_count' concurrently. This can cause updates to
be lost and use_count to get stuck at a non-zero value, in which case the IIO
core assumes that at least one thread is still running and waits for it to
finish before running any trigger handlers again. This effectively leaves the
trigger disabled, and a reboot is necessary before it can be used again. To fix
this, make use_count an atomic variable. Also set it to the number of consumers
before starting the first consumer; otherwise use_count might drop to 0 even
though not all consumers have run yet.
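
A simplified sketch of the pattern the patch applies follows. It is
illustrative only, not the driver code itself: start_poll(), run_consumer()
and consumer_enabled() are hypothetical placeholders, while atomic_read(),
atomic_set() and atomic_dec_and_test() are the kernel atomic_t helpers used
in the diff below.

	#include <linux/atomic.h>
	#include <linux/types.h>

	static atomic_t use_count = ATOMIC_INIT(0);

	/*
	 * Consumer side: the decrement and the zero test are one atomic
	 * operation, so concurrent consumers cannot lose each other's
	 * updates the way racing "use_count--" statements can.
	 */
	static void consumer_done(void)
	{
		if (atomic_dec_and_test(&use_count)) {
			/* last consumer finished; trigger may be re-enabled */
		}
	}

	/* placeholder stubs standing in for the per-consumer dispatch */
	static bool consumer_enabled(int i) { return true; }
	static void run_consumer(int i)     { consumer_done(); }

	/*
	 * Producer side: start a new cycle only if the previous one has
	 * fully drained.  The counter is charged for all consumers up
	 * front, so it cannot reach zero before every consumer (or its
	 * disabled placeholder) has called consumer_done().
	 */
	static void start_poll(int nr_consumers)
	{
		int i;

		if (!atomic_read(&use_count)) {
			atomic_set(&use_count, nr_consumers);
			for (i = 0; i < nr_consumers; i++) {
				if (consumer_enabled(i))
					run_consumer(i);
				else
					consumer_done();
			}
		}
	}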

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Tested-by: Denis Ciocca <denis.ciocca@st.com>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
drivers/iio/industrialio-trigger.c
include/linux/iio/trigger.h

index ea8a4146620de5c6ad551644facb278654ab1c1c..0dd9bb8731301e755dc29ef2851371f70deb819d 100644 (file)
@@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name,
 void iio_trigger_poll(struct iio_trigger *trig, s64 time)
 {
        int i;
-       if (!trig->use_count)
-               for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
-                       if (trig->subirqs[i].enabled) {
-                               trig->use_count++;
+
+       if (!atomic_read(&trig->use_count)) {
+               atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+               for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+                       if (trig->subirqs[i].enabled)
                                generic_handle_irq(trig->subirq_base + i);
-                       }
+                       else
+                               iio_trigger_notify_done(trig);
+               }
+       }
 }
 EXPORT_SYMBOL(iio_trigger_poll);
 
@@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
 void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
 {
        int i;
-       if (!trig->use_count)
-               for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
-                       if (trig->subirqs[i].enabled) {
-                               trig->use_count++;
+
+       if (!atomic_read(&trig->use_count)) {
+               atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+               for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+                       if (trig->subirqs[i].enabled)
                                handle_nested_irq(trig->subirq_base + i);
-                       }
+                       else
+                               iio_trigger_notify_done(trig);
+               }
+       }
 }
 EXPORT_SYMBOL(iio_trigger_poll_chained);
 
 void iio_trigger_notify_done(struct iio_trigger *trig)
 {
-       trig->use_count--;
-       if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
+       if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+               trig->ops->try_reenable)
                if (trig->ops->try_reenable(trig))
                        /* Missed an interrupt so launch new poll now */
                        iio_trigger_poll(trig, 0);
index 3869c525b052171fff8bbe3000d61d5999bae7ee..369cf2cd51448b690cb812e6310cbd450ab83386 100644 (file)
@@ -8,6 +8,7 @@
  */
 #include <linux/irq.h>
 #include <linux/module.h>
+#include <linux/atomic.h>
 
 #ifndef _IIO_TRIGGER_H_
 #define _IIO_TRIGGER_H_
@@ -61,7 +62,7 @@ struct iio_trigger {
 
        struct list_head                list;
        struct list_head                alloc_list;
-       int use_count;
+       atomic_t                        use_count;
 
        struct irq_chip                 subirq_chip;
        int                             subirq_base;