// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

struct io_cancel {
        struct file                     *file;
        u64                             addr;
        u32                             flags;
        s32                             fd;
};

#define CANCEL_FLAGS    (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
                         IORING_ASYNC_CANCEL_ANY)

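/*
 * Match callback for io-wq cancelation. Matching is scoped to the ring the
 * cancel request was issued on: ANY matches every request on that ring, FD
 * matches on the request's file, and the default matches on user_data. For
 * ALL/ANY, the cancel sequence number prevents matching the same request
 * twice within one cancelation pass.
 */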
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_cancel_data *cd = data;

        if (req->ctx != cd->ctx)
                return false;
        if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
                ;
        } else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
                if (req->file != cd->file)
                        return false;
        } else {
                if (req->cqe.user_data != cd->data)
                        return false;
        }
        if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
                if (cd->seq == req->work.cancel_seq)
                        return false;
                req->work.cancel_seq = cd->seq;
        }
        return true;
}

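/*
 * Attempt cancelation through the given task's io-wq. Returns 0 if a pending
 * request was canceled, -EALREADY if a matching request is already running,
 * and -ENOENT if nothing matched.
 */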
static int io_async_cancel_one(struct io_uring_task *tctx,
                               struct io_cancel_data *cd)
{
        enum io_wq_cancel cancel_ret;
        int ret = 0;
        bool all;

        if (!tctx || !tctx->io_wq)
                return -ENOENT;

        all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
        cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
        switch (cancel_ret) {
        case IO_WQ_CANCEL_OK:
                ret = 0;
                break;
        case IO_WQ_CANCEL_RUNNING:
                ret = -EALREADY;
                break;
        case IO_WQ_CANCEL_NOTFOUND:
                ret = -ENOENT;
                break;
        }

        return ret;
}

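/*
 * Try the cancelation paths in order: the task's io-wq first, then armed
 * poll requests, then timeouts (unless matching by fd). Timeout cancelation
 * runs under ->completion_lock.
 */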
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
{
        struct io_ring_ctx *ctx = req->ctx;
        int ret;

        WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);

        ret = io_async_cancel_one(req->task->io_uring, cd);
        /*
         * Fall-through even for -EALREADY, as we may have a poll armed
         * that needs unarming.
         */
        if (!ret)
                return 0;

        ret = io_poll_cancel(ctx, cd);
        if (ret != -ENOENT)
                goto out;
        spin_lock(&ctx->completion_lock);
        if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
                ret = io_timeout_cancel(ctx, cd);
        spin_unlock(&ctx->completion_lock);
out:
        return ret;
}

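/*
 * Validate and read the cancel SQE. ANY and FD matching are mutually
 * exclusive; the fd is only read when FD matching is requested.
 */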
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_cancel *cancel = io_kiocb_to_cmd(req);

        if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;
        if (sqe->off || sqe->len || sqe->splice_fd_in)
                return -EINVAL;

        cancel->addr = READ_ONCE(sqe->addr);
        cancel->flags = READ_ONCE(sqe->cancel_flags);
        if (cancel->flags & ~CANCEL_FLAGS)
                return -EINVAL;
        if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
                if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
                        return -EINVAL;
                cancel->fd = READ_ONCE(sqe->fd);
        }

        return 0;
}

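/*
 * Core cancelation loop. The issuing task's context is tried first; on a
 * miss (or when ALL/ANY wants more matches), every task attached to the
 * ring is tried under the submit lock. For ALL/ANY the return value is the
 * number of requests canceled, otherwise the cancelation result itself.
 */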
static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
                             unsigned int issue_flags)
{
        bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
        struct io_ring_ctx *ctx = cd->ctx;
        struct io_tctx_node *node;
        int ret, nr = 0;

        do {
                ret = io_try_cancel(req, cd);
                if (ret == -ENOENT)
                        break;
                if (!all)
                        return ret;
                nr++;
        } while (1);

        /* slow path, try all io-wq's */
        io_ring_submit_lock(ctx, issue_flags);
        ret = -ENOENT;
        list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
                struct io_uring_task *tctx = node->task->io_uring;

                ret = io_async_cancel_one(tctx, cd);
                if (ret != -ENOENT) {
                        if (!all)
                                break;
                        nr++;
                }
        }
        io_ring_submit_unlock(ctx, issue_flags);
        return all ? nr : ret;
}

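/*
 * IORING_OP_ASYNC_CANCEL issue handler. Resolves the target file for FD
 * matching (fixed or normal file table) and stamps a fresh cancel sequence
 * number before running the cancelation.
 */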
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_cancel *cancel = io_kiocb_to_cmd(req);
        struct io_cancel_data cd = {
                .ctx    = req->ctx,
                .data   = cancel->addr,
                .flags  = cancel->flags,
                .seq    = atomic_inc_return(&req->ctx->cancel_seq),
        };
        int ret;

        if (cd.flags & IORING_ASYNC_CANCEL_FD) {
                if (req->flags & REQ_F_FIXED_FILE)
                        req->file = io_file_get_fixed(req, cancel->fd,
                                                      issue_flags);
                else
                        req->file = io_file_get_normal(req, cancel->fd);
                if (!req->file) {
                        ret = -EBADF;
                        goto done;
                }
                cd.file = req->file;
        }

        ret = __io_async_cancel(&cd, req, issue_flags);
done:
        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
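
/*
 * Initialize a hash table of io_hash_bucket entries: one spinlock-protected
 * hlist per bucket.
 */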
void init_hash_table(struct io_hash_bucket *hash_table, unsigned size)
{
        unsigned int i;

        for (i = 0; i < size; i++) {
                spin_lock_init(&hash_table[i].lock);
                INIT_HLIST_HEAD(&hash_table[i].list);
        }
}