io_uring: fix UAF due to missing POLLFREE handling
author     Pavel Begunkov <asml.silence@gmail.com>
           Mon, 19 Sep 2022 17:58:00 +0000 (19:58 +0200)
committer  Stefan Bader <stefan.bader@canonical.com>
           Tue, 20 Sep 2022 09:08:28 +0000 (11:08 +0200)
[ upstream commit 791f3465c4afde02d7f16cf7424ca87070b69396 ]

Fixes a problem described in 50252e4b5e989
("aio: fix use-after-free due to missing POLLFREE handling")
and copies the approach used there.

In short, we have to forcibly eject a poll entry when we meet POLLFREE.
We can't rely on io_poll_get_ownership() as we can't wait for potentially
running task_work (tw) handlers, so we use the fact that the waitqueues are
RCU-freed. See Eric's patch and comments for more details.
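
As a rough illustration (a hedged userspace sketch, not the kernel code; all
fake_* types and helper names below are made up), the pattern boils down to an
acquire/release handshake on poll->head combined with RCU: the POLLFREE side
unlinks the entry and only then clears the head with a release store, while the
removal side takes an acquire load and may still lock a non-NULL head because
wake_up_pollfree() callers delay the actual free past an RCU grace period. C11
atomics stand in for smp_load_acquire()/smp_store_release(); the RCU lifetime
guarantee is only described in comments.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

/* Stand-in for struct wait_queue_head.  In the kernel, wake_up_pollfree()
 * users only reclaim this memory after an RCU grace period. */
struct fake_wq {
	pthread_mutex_t lock;
};

/* Stand-in for struct io_poll_iocb. */
struct fake_poll {
	_Atomic(struct fake_wq *) head;
};

/* POLLFREE side, mirroring the new branch in io_poll_wake(): the entry is
 * unlinked first, and clearing "head" is the *last* step, done with release
 * semantics (like smp_store_release()), because once head is NULL the
 * request may be completed and freed by someone else. */
static void pollfree_detach(struct fake_poll *poll)
{
	/* list_del_init(&poll->wait.entry) would happen here */
	atomic_store_explicit(&poll->head, NULL, memory_order_release);
}

/* Removal side, mirroring io_poll_remove_entry(): only touch the queue if
 * an acquire load (like smp_load_acquire()) still sees a head.  In the
 * kernel this runs inside rcu_read_lock(), which is what keeps a non-NULL
 * head safe to lock even while the queue is being torn down. */
static void remove_entry(struct fake_poll *poll)
{
	struct fake_wq *head = atomic_load_explicit(&poll->head,
						    memory_order_acquire);
	if (head) {
		pthread_mutex_lock(&head->lock);
		/* list_del_init(&poll->wait.entry) would happen here */
		pthread_mutex_unlock(&head->lock);
	}
}

int main(void)
{
	struct fake_wq wq;
	struct fake_poll poll;

	pthread_mutex_init(&wq.lock, NULL);
	atomic_init(&poll.head, &wq);

	pollfree_detach(&poll);	/* the waker saw POLLFREE */
	remove_entry(&poll);	/* the remover now sees NULL and skips the lock */

	pthread_mutex_destroy(&wq.lock);
	return 0;
}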

Reported-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20211209010455.42744-6-ebiggers@kernel.org
Reported-and-tested-by: syzbot+5426c7ed6868c705ca14@syzkaller.appspotmail.com
Fixes: 221c5eb233823 ("io_uring: add support for IORING_OP_POLL")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4ed56b6f548f7ea337603a82315750449412748a.1642161259.git.asml.silence@gmail.com
[axboe: drop non-functional change from patch]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
[pavel: backport]
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit e9d7ca0c4640cbebe6840ee3bac66a25a9bacaf5 linux-5.15.y)
CVE-2022-3176
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5ce0254d2cbecb2d6aff955d3b80b2cbd2ab9d64..51d461b2e0291cf6c6d0a1aaf2bccdf24247b585 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5369,12 +5369,14 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
 
 static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
 {
-       struct wait_queue_head *head = poll->head;
+       struct wait_queue_head *head = smp_load_acquire(&poll->head);
 
-       spin_lock_irq(&head->lock);
-       list_del_init(&poll->wait.entry);
-       poll->head = NULL;
-       spin_unlock_irq(&head->lock);
+       if (head) {
+               spin_lock_irq(&head->lock);
+               list_del_init(&poll->wait.entry);
+               poll->head = NULL;
+               spin_unlock_irq(&head->lock);
+       }
 }
 
 static void io_poll_remove_entries(struct io_kiocb *req)
@@ -5382,10 +5384,26 @@ static void io_poll_remove_entries(struct io_kiocb *req)
        struct io_poll_iocb *poll = io_poll_get_single(req);
        struct io_poll_iocb *poll_double = io_poll_get_double(req);
 
-       if (poll->head)
-               io_poll_remove_entry(poll);
-       if (poll_double && poll_double->head)
+       /*
+        * While we hold the waitqueue lock and the waitqueue is nonempty,
+        * wake_up_pollfree() will wait for us.  However, taking the waitqueue
+        * lock in the first place can race with the waitqueue being freed.
+        *
+        * We solve this as eventpoll does: by taking advantage of the fact that
+        * all users of wake_up_pollfree() will RCU-delay the actual free.  If
+        * we enter rcu_read_lock() and see that the pointer to the queue is
+        * non-NULL, we can then lock it without the memory being freed out from
+        * under us.
+        *
+        * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+        * case the caller deletes the entry from the queue, leaving it empty.
+        * In that case, only RCU prevents the queue memory from being freed.
+        */
+       rcu_read_lock();
+       io_poll_remove_entry(poll);
+       if (poll_double)
                io_poll_remove_entry(poll_double);
+       rcu_read_unlock();
 }
 
 /*
@@ -5523,6 +5541,30 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                                                 wait);
        __poll_t mask = key_to_poll(key);
 
+       if (unlikely(mask & POLLFREE)) {
+               io_poll_mark_cancelled(req);
+               /* we have to kick tw in case it's not already */
+               io_poll_execute(req, 0);
+
+               /*
+                * If the waitqueue is being freed early but someone already
+                * holds ownership over it, we have to tear down the request as
+                * best we can. That means immediately removing the request from
+                * its waitqueue and preventing all further accesses to the
+                * waitqueue via the request.
+                */
+               list_del_init(&poll->wait.entry);
+
+               /*
+                * Careful: this *must* be the last step, since as soon
+                * as req->head is NULL'ed out, the request can be
+                * completed and freed, since aio_poll_complete_work()
+                * will no longer need to take the waitqueue lock.
+                */
+               smp_store_release(&poll->head, NULL);
+               return 1;
+       }
+
        /* for instances that support it check for an event match first */
        if (mask && !(mask & poll->events))
                return 0;
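
For completeness, the comment added to io_poll_remove_entries() leans on the
contract of wake_up_pollfree(): a waitqueue owner that tears the queue down
early must deliver POLLFREE to every waiter and must not reclaim the queue
memory until an RCU grace period has passed (this is how the existing callers,
signalfd and binder, use it). Below is a hedged, single-threaded userspace
sketch of that owner-side ordering; the grace period is crudely modelled by an
atomic reader count, and all identifiers (owner_free_queue, reader_side, ...)
are illustrative rather than kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Crude stand-in for an RCU grace period: readers bump a counter around
 * their read-side section and the owner waits for it to drain.  Real code
 * uses rcu_read_lock()/synchronize_rcu(), which also provide the memory
 * ordering this toy counter does not. */
static _Atomic int readers;

/* Stand-in for the waitqueue the owner wants to tear down early. */
struct fake_wq {
	bool valid;
};

static struct fake_wq the_wq = { .valid = true };
static _Atomic(struct fake_wq *) poll_head = &the_wq;

/* Remover side, like io_poll_remove_entry() under rcu_read_lock(): a
 * non-NULL head may still be used because the owner below does not
 * reclaim the queue until the reader count drains. */
static void reader_side(void)
{
	atomic_fetch_add(&readers, 1);		/* "rcu_read_lock()"   */
	struct fake_wq *head = atomic_load(&poll_head);
	if (head)
		printf("queue still valid: %d\n", head->valid);
	atomic_fetch_sub(&readers, 1);		/* "rcu_read_unlock()" */
}

/* Owner side, like a wake_up_pollfree() caller: wake waiters with
 * POLLFREE, make sure no new lookups find the queue, then wait out the
 * "grace period" before actually reclaiming the memory. */
static void owner_free_queue(void)
{
	/* wake_up_pollfree(&the_wq) would deliver POLLFREE here */
	atomic_store(&poll_head, NULL);
	while (atomic_load(&readers) != 0)	/* "synchronize_rcu()" */
		;
	the_wq.valid = false;			/* now safe to free */
}

int main(void)
{
	reader_side();		/* sees a valid queue */
	owner_free_queue();
	reader_side();		/* sees NULL and leaves the queue alone */
	return 0;
}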