git.proxmox.com Git - pve-kernel.git/blob - patches/kernel/0014-io_uring-fix-race-between-timeout-flush-and-removal.patch
backport "io_uring: fix race between timeout flush and removal"
[pve-kernel.git] / patches / kernel / 0014-io_uring-fix-race-between-timeout-flush-and-removal.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Jens Axboe <axboe@kernel.dk>
3 Date: Fri, 8 Apr 2022 11:08:58 -0600
4 Subject: [PATCH] io_uring: fix race between timeout flush and removal
5
6 commit e677edbcabee849bfdd43f1602bccbecf736a646 upstream.
7
8 io_flush_timeouts() assumes the timeout isn't in progress of triggering
9 or being removed/canceled, so it unconditionally removes it from the
10 timeout list and attempts to cancel it.
11
12 Leave it on the list and let the normal timeout cancellation take care
13 of it.
14
15 Cc: stable@vger.kernel.org # 5.5+
16 Signed-off-by: Jens Axboe <axboe@kernel.dk>
17 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
18 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
19 ---
20 fs/io_uring.c | 7 +++----
21 1 file changed, 3 insertions(+), 4 deletions(-)
22
23 diff --git a/fs/io_uring.c b/fs/io_uring.c
24 index 156c54ebb62b..367b7ba2fcb0 100644
25 --- a/fs/io_uring.c
26 +++ b/fs/io_uring.c
27 @@ -1546,12 +1546,11 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
28 __must_hold(&ctx->completion_lock)
29 {
30 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
31 + struct io_kiocb *req, *tmp;
32
33 spin_lock_irq(&ctx->timeout_lock);
34 - while (!list_empty(&ctx->timeout_list)) {
35 + list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
36 u32 events_needed, events_got;
37 - struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
38 - struct io_kiocb, timeout.list);
39
40 if (io_is_timeout_noseq(req))
41 break;
42 @@ -1568,7 +1567,6 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
43 if (events_got < events_needed)
44 break;
45
46 - list_del_init(&req->timeout.list);
47 io_kill_timeout(req, 0);
48 }
49 ctx->cq_last_tm_flush = seq;
50 @@ -6209,6 +6207,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
51 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
52 return -EFAULT;
53
54 + INIT_LIST_HEAD(&req->timeout.list);
55 data->mode = io_translate_timeout_mode(flags);
56 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
57