eventfd: track eventfd_signal() recursion depth
author	Jens Axboe <axboe@kernel.dk>
	Sun, 2 Feb 2020 15:23:03 +0000 (08:23 -0700)
committer	Khalid Elmously <khalid.elmously@canonical.com>
	Fri, 13 Mar 2020 04:31:00 +0000 (00:31 -0400)
BugLink: https://bugs.launchpad.net/bugs/1866678
commit b5e683d5cab8cd433b06ae178621f083cabd4f63 upstream.

eventfd use cases from aio and io_uring can deadlock due to circular
or recursive calling, when eventfd_signal() tries to grab the waitqueue
lock. On top of that, it's also possible to construct notification
chains that are deep enough that we could blow the stack.

Add a percpu counter that tracks the recursion depth, and warn if we
exceed it. The counter is also exposed so that users of eventfd_signal()
can do the right thing if it's non-zero in the context where it is
called.
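
For illustration, a minimal sketch of the deferral pattern a caller
could use; my_ctx, my_notify and my_signal_work_fn are hypothetical
names for this example, not code from this patch:

  #include <linux/eventfd.h>
  #include <linux/workqueue.h>

  /* Hypothetical caller state; illustrative only. */
  struct my_ctx {
  	struct eventfd_ctx *cq_ev_fd;
  	struct work_struct signal_work;
  };

  static void my_signal_work_fn(struct work_struct *work)
  {
  	struct my_ctx *ctx = container_of(work, struct my_ctx,
  					  signal_work);

  	/* Process context: fresh stack, no waitqueue locks held. */
  	eventfd_signal(ctx->cq_ev_fd, 1);
  }

  static void my_notify(struct my_ctx *ctx)
  {
  	/*
  	 * If this CPU is already inside an eventfd wakeup, calling
  	 * eventfd_signal() again could recurse on the waitqueue lock
  	 * or blow the stack; punt to a workqueue instead.
  	 */
  	if (eventfd_signal_count())
  		schedule_work(&ctx->signal_work);
  	else
  		eventfd_signal(ctx->cq_ev_fd, 1);
  }

A per-cpu counter is sufficient here because eventfd_signal()
increments it under spin_lock_irqsave(), so the task cannot migrate
for the duration of the critical section and any nested wakeup handler
observes the count on the CPU that set it.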

Cc: stable@vger.kernel.org # 4.19+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
fs/eventfd.c
include/linux/eventfd.h

diff --git a/fs/eventfd.c b/fs/eventfd.c
index 2fb4eadaa1181e4c4f4178b7269c20f58b03a376..aaaa3c82f7f41946e27f124e1b7c4005ffc718c3 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -22,6 +22,8 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 struct eventfd_ctx {
        struct kref kref;
        wait_queue_head_t wqh;
@@ -55,12 +57,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
        unsigned long flags;
 
+       /*
+        * Deadlock or stack overflow issues can happen if we recurse here
+        * through waitqueue wakeup handlers. If the caller uses potentially
+        * nested waitqueues with custom wakeup handlers, then it should
+        * check eventfd_signal_count() before calling this function. If
+        * it returns true, the eventfd_signal() call should be deferred to a
+        * safe context.
+        */
+       if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+               return 0;
+
        spin_lock_irqsave(&ctx->wqh.lock, flags);
+       this_cpu_inc(eventfd_wake_count);
        if (ULLONG_MAX - ctx->count < n)
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, POLLIN);
+       this_cpu_dec(eventfd_wake_count);
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
        return n;
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 60b2985e8a184764ba7c5031985052c1a2f66f52..d74ac8f7783923d23c77e1d959531ebbd7d5fd46 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -11,6 +11,8 @@
 
 #include <linux/fcntl.h>
 #include <linux/wait.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -41,6 +43,13 @@ ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                  __u64 *cnt);
 
+DECLARE_PER_CPU(int, eventfd_wake_count);
+
+static inline bool eventfd_signal_count(void)
+{
+       return this_cpu_read(eventfd_wake_count);
+}
+
 #else /* CONFIG_EVENTFD */
 
 /*
@@ -79,6 +88,11 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
        return -ENOSYS;
 }
 
+static inline bool eventfd_signal_count(void)
+{
+       return false;
+}
+
 #endif
 
 #endif /* _LINUX_EVENTFD_H */
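
To complete the hypothetical sketch above, the work item would be
wired up once when the caller's context is created (again,
illustrative names only, not part of this patch):

  static void my_ctx_init(struct my_ctx *ctx, struct eventfd_ctx *ev)
  {
  	ctx->cq_ev_fd = ev;
  	INIT_WORK(&ctx->signal_work, my_signal_work_fn);
  }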