]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - fs/io_uring.c
Bluetooth: btmtksdio: fix use-after-free at btmtksdio_recv_event
[mirror_ubuntu-jammy-kernel.git] / fs / io_uring.c
CommitLineData
2b188cc1
JA
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
1e84b97b
SB
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
d068b506 14 * through a control-dependency in io_get_cqe (smp_store_release to
1e84b97b
SB
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
2b188cc1
JA
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
c992fe29 40 * Copyright (c) 2018-2019 Christoph Hellwig
2b188cc1
JA
41 */
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
52de1fe1 47#include <net/compat.h>
2b188cc1
JA
48#include <linux/refcount.h>
49#include <linux/uio.h>
6b47ee6e 50#include <linux/bits.h>
2b188cc1
JA
51
52#include <linux/sched/signal.h>
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/fdtable.h>
56#include <linux/mm.h>
57#include <linux/mman.h>
2b188cc1
JA
58#include <linux/percpu.h>
59#include <linux/slab.h>
2b188cc1 60#include <linux/blkdev.h>
edafccee 61#include <linux/bvec.h>
2b188cc1
JA
62#include <linux/net.h>
63#include <net/sock.h>
64#include <net/af_unix.h>
6b06314c 65#include <net/scm.h>
2b188cc1
JA
66#include <linux/anon_inodes.h>
67#include <linux/sched/mm.h>
68#include <linux/uaccess.h>
69#include <linux/nospec.h>
edafccee
JA
70#include <linux/sizes.h>
71#include <linux/hugetlb.h>
aa4c3967 72#include <linux/highmem.h>
15b71abe
JA
73#include <linux/namei.h>
74#include <linux/fsnotify.h>
4840e418 75#include <linux/fadvise.h>
3e4827b0 76#include <linux/eventpoll.h>
7d67af2c 77#include <linux/splice.h>
b41e9852 78#include <linux/task_work.h>
bcf5a063 79#include <linux/pagemap.h>
0f212204 80#include <linux/io_uring.h>
ef98eb04 81#include <linux/tracehook.h>
2b188cc1 82
c826bd7a
DD
83#define CREATE_TRACE_POINTS
84#include <trace/events/io_uring.h>
85
2b188cc1
JA
86#include <uapi/linux/io_uring.h>
87
88#include "internal.h"
561fb04a 89#include "io-wq.h"
2b188cc1 90
5277deaa 91#define IORING_MAX_ENTRIES 32768
33a107f0 92#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
4ce8ad95 93#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
65e19f54 94
187f08c1 95/* only define max */
042b0d85 96#define IORING_MAX_FIXED_FILES (1U << 15)
21b55dbc
SG
97#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
98 IORING_REGISTER_LAST + IORING_OP_LAST)
2b188cc1 99
187f08c1 100#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
2d091d62
PB
101#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT)
102#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
103
489809e2
PB
104#define IORING_MAX_REG_BUFFERS (1U << 14)
105
b16fed66
PB
106#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
107 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
108 IOSQE_BUFFER_SELECT)
c854357b
PB
109#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
110 REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)
b16fed66 111
09899b19
PB
112#define IO_TCTX_REFS_CACHE_NR (1U << 10)
113
2b188cc1
JA
114struct io_uring {
115 u32 head ____cacheline_aligned_in_smp;
116 u32 tail ____cacheline_aligned_in_smp;
117};
118
1e84b97b 119/*
75b28aff
HV
120 * This data is shared with the application through the mmap at offsets
121 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
1e84b97b
SB
122 *
123 * The offsets to the member fields are published through struct
124 * io_sqring_offsets when calling io_uring_setup.
125 */
75b28aff 126struct io_rings {
1e84b97b
SB
127 /*
128 * Head and tail offsets into the ring; the offsets need to be
129 * masked to get valid indices.
130 *
75b28aff
HV
131 * The kernel controls head of the sq ring and the tail of the cq ring,
132 * and the application controls tail of the sq ring and the head of the
133 * cq ring.
1e84b97b 134 */
75b28aff 135 struct io_uring sq, cq;
1e84b97b 136 /*
75b28aff 137 * Bitmasks to apply to head and tail offsets (constant, equals
1e84b97b
SB
138 * ring_entries - 1)
139 */
75b28aff
HV
140 u32 sq_ring_mask, cq_ring_mask;
141 /* Ring sizes (constant, power of 2) */
142 u32 sq_ring_entries, cq_ring_entries;
1e84b97b
SB
143 /*
144 * Number of invalid entries dropped by the kernel due to
145 * invalid index stored in array
146 *
147 * Written by the kernel, shouldn't be modified by the
148 * application (i.e. get number of "new events" by comparing to
149 * cached value).
150 *
151 * After a new SQ head value was read by the application this
152 * counter includes all submissions that were dropped reaching
153 * the new SQ head (and possibly more).
154 */
75b28aff 155 u32 sq_dropped;
1e84b97b 156 /*
0d9b5b3a 157 * Runtime SQ flags
1e84b97b
SB
158 *
159 * Written by the kernel, shouldn't be modified by the
160 * application.
161 *
162 * The application needs a full memory barrier before checking
163 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
164 */
75b28aff 165 u32 sq_flags;
0d9b5b3a
SG
166 /*
167 * Runtime CQ flags
168 *
169 * Written by the application, shouldn't be modified by the
170 * kernel.
171 */
fe7e3257 172 u32 cq_flags;
1e84b97b
SB
173 /*
174 * Number of completion events lost because the queue was full;
175 * this should be avoided by the application by making sure
0b4295b5 176 * there are not more requests pending than there is space in
1e84b97b
SB
177 * the completion queue.
178 *
179 * Written by the kernel, shouldn't be modified by the
180 * application (i.e. get number of "new events" by comparing to
181 * cached value).
182 *
183 * As completion events come in out of order this counter is not
184 * ordered with any other data.
185 */
75b28aff 186 u32 cq_overflow;
1e84b97b
SB
187 /*
188 * Ring buffer of completion events.
189 *
190 * The kernel writes completion events fresh every time they are
191 * produced, so the application is allowed to modify pending
192 * entries.
193 */
75b28aff 194 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
2b188cc1
JA
195};
196
45d189c6
PB
197enum io_uring_cmd_flags {
198 IO_URING_F_NONBLOCK = 1,
889fca73 199 IO_URING_F_COMPLETE_DEFER = 2,
45d189c6
PB
200};
201
edafccee
JA
202struct io_mapped_ubuf {
203 u64 ubuf;
4751f53d 204 u64 ubuf_end;
edafccee 205 unsigned int nr_bvecs;
de293938 206 unsigned long acct_pages;
41edf1a5 207 struct bio_vec bvec[];
edafccee
JA
208};
209
50238531
BM
210struct io_ring_ctx;
211
6c2450ae
PB
212struct io_overflow_cqe {
213 struct io_uring_cqe cqe;
214 struct list_head list;
215};
216
a04b0ac0
PB
217struct io_fixed_file {
218 /* file * with additional FFS_* flags */
219 unsigned long file_ptr;
220};
221
269bbe5f
BM
222struct io_rsrc_put {
223 struct list_head list;
b60c8dce 224 u64 tag;
50238531
BM
225 union {
226 void *rsrc;
227 struct file *file;
bd54b6fe 228 struct io_mapped_ubuf *buf;
50238531 229 };
269bbe5f
BM
230};
231
aeca241b 232struct io_file_table {
042b0d85 233 struct io_fixed_file *files;
31b51510
JA
234};
235
b895c9a6 236struct io_rsrc_node {
05589553
XW
237 struct percpu_ref refs;
238 struct list_head node;
269bbe5f 239 struct list_head rsrc_list;
b895c9a6 240 struct io_rsrc_data *rsrc_data;
4a38aed2 241 struct llist_node llist;
e297822b 242 bool done;
05589553
XW
243};
244
40ae0ff7
PB
245typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
246
b895c9a6 247struct io_rsrc_data {
05f3fb3c
JA
248 struct io_ring_ctx *ctx;
249
2d091d62
PB
250 u64 **tags;
251 unsigned int nr;
40ae0ff7 252 rsrc_put_fn *do_put;
3e942498 253 atomic_t refs;
05f3fb3c 254 struct completion done;
8bad28d8 255 bool quiesce;
05f3fb3c
JA
256};
257
5a2e745d
JA
258struct io_buffer {
259 struct list_head list;
260 __u64 addr;
d1f82808 261 __u32 len;
5a2e745d
JA
262 __u16 bid;
263};
264
21b55dbc
SG
265struct io_restriction {
266 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
267 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
268 u8 sqe_flags_allowed;
269 u8 sqe_flags_required;
7e84e1c7 270 bool registered;
21b55dbc
SG
271};
272
37d1e2e3
JA
273enum {
274 IO_SQ_THREAD_SHOULD_STOP = 0,
275 IO_SQ_THREAD_SHOULD_PARK,
276};
277
534ca6d6
JA
278struct io_sq_data {
279 refcount_t refs;
9e138a48 280 atomic_t park_pending;
09a6f4ef 281 struct mutex lock;
69fb2131
JA
282
283 /* ctx's that are using this sqd */
284 struct list_head ctx_list;
69fb2131 285
534ca6d6
JA
286 struct task_struct *thread;
287 struct wait_queue_head wait;
08369246
XW
288
289 unsigned sq_thread_idle;
37d1e2e3
JA
290 int sq_cpu;
291 pid_t task_pid;
5c2469e0 292 pid_t task_tgid;
37d1e2e3
JA
293
294 unsigned long state;
37d1e2e3 295 struct completion exited;
534ca6d6
JA
296};
297
6dd0be1e 298#define IO_COMPL_BATCH 32
6ff119a6 299#define IO_REQ_CACHE_SIZE 32
bf019da7 300#define IO_REQ_ALLOC_BATCH 8
258b29a9 301
a1ab7b35
PB
302struct io_submit_link {
303 struct io_kiocb *head;
304 struct io_kiocb *last;
305};
306
258b29a9
PB
307struct io_submit_state {
308 struct blk_plug plug;
a1ab7b35 309 struct io_submit_link link;
258b29a9
PB
310
311 /*
312 * io_kiocb alloc cache
313 */
bf019da7 314 void *reqs[IO_REQ_CACHE_SIZE];
258b29a9
PB
315 unsigned int free_reqs;
316
317 bool plug_started;
318
319 /*
320 * Batch completion logic
321 */
cd0ca2e0
PB
322 struct io_kiocb *compl_reqs[IO_COMPL_BATCH];
323 unsigned int compl_nr;
324 /* inline/task_work completion list, under ->uring_lock */
325 struct list_head free_list;
258b29a9 326
258b29a9
PB
327 unsigned int ios_left;
328};
329
2b188cc1 330struct io_ring_ctx {
b52ecf8c 331 /* const or read-mostly hot data */
2b188cc1
JA
332 struct {
333 struct percpu_ref refs;
2b188cc1 334
b52ecf8c 335 struct io_rings *rings;
2b188cc1 336 unsigned int flags;
e1d85334 337 unsigned int compat: 1;
e1d85334
RD
338 unsigned int drain_next: 1;
339 unsigned int eventfd_async: 1;
21b55dbc 340 unsigned int restricted: 1;
f18ee4cf 341 unsigned int off_timeout_used: 1;
10c66904 342 unsigned int drain_active: 1;
b52ecf8c 343 } ____cacheline_aligned_in_smp;
2b188cc1 344
7f1129d2 345 /* submission data */
b52ecf8c 346 struct {
0499e582
PB
347 struct mutex uring_lock;
348
75b28aff
HV
349 /*
350 * Ring buffer of indices into array of io_uring_sqe, which is
351 * mmapped by the application using the IORING_OFF_SQES offset.
352 *
353 * This indirection could e.g. be used to assign fixed
354 * io_uring_sqe entries to operations and only submit them to
355 * the queue when needed.
356 *
357 * The kernel modifies neither the indices array nor the entries
358 * array.
359 */
360 u32 *sq_array;
c7af47cf 361 struct io_uring_sqe *sq_sqes;
2b188cc1
JA
362 unsigned cached_sq_head;
363 unsigned sq_entries;
de0617e4 364 struct list_head defer_list;
7f1129d2
PB
365
366 /*
367 * Fixed resources fast path, should be accessed only under
368 * uring_lock, and updated through io_uring_register(2)
369 */
370 struct io_rsrc_node *rsrc_node;
371 struct io_file_table file_table;
372 unsigned nr_user_files;
373 unsigned nr_user_bufs;
374 struct io_mapped_ubuf **user_bufs;
375
376 struct io_submit_state submit_state;
5262f567 377 struct list_head timeout_list;
ef9dd637 378 struct list_head ltimeout_list;
1d7bb1d5 379 struct list_head cq_overflow_list;
7f1129d2
PB
380 struct xarray io_buffers;
381 struct xarray personalities;
382 u32 pers_next;
383 unsigned sq_thread_idle;
2b188cc1
JA
384 } ____cacheline_aligned_in_smp;
385
d0acdee2
PB
386 /* IRQ completion list, under ->completion_lock */
387 struct list_head locked_free_list;
388 unsigned int locked_free_nr;
3c1a2ead 389
7c30f36a 390 const struct cred *sq_creds; /* cred used for __io_sq_thread() */
534ca6d6
JA
391 struct io_sq_data *sq_data; /* if using sq thread polling */
392
90554200 393 struct wait_queue_head sqo_sq_wait;
69fb2131 394 struct list_head sqd_list;
75b28aff 395
5ed7a37d
PB
396 unsigned long check_cq_overflow;
397
206aefde
JA
398 struct {
399 unsigned cached_cq_tail;
400 unsigned cq_entries;
0499e582 401 struct eventfd_ctx *cq_ev_fd;
311997b3 402 struct wait_queue_head poll_wait;
0499e582
PB
403 struct wait_queue_head cq_wait;
404 unsigned cq_extra;
405 atomic_t cq_timeouts;
0499e582 406 unsigned cq_last_tm_flush;
206aefde 407 } ____cacheline_aligned_in_smp;
2b188cc1 408
2b188cc1
JA
409 struct {
410 spinlock_t completion_lock;
e94f141b 411
89850fce
JA
412 spinlock_t timeout_lock;
413
def596e9 414 /*
540e32a0 415 * ->iopoll_list is protected by the ctx->uring_lock for
def596e9
JA
416 * io_uring instances that don't use IORING_SETUP_SQPOLL.
417 * For SQPOLL, only the single threaded io_sq_thread() will
418 * manipulate the list, hence no extra locking is needed there.
419 */
540e32a0 420 struct list_head iopoll_list;
78076bb6
JA
421 struct hlist_head *cancel_hash;
422 unsigned cancel_hash_bits;
915b3dde 423 bool poll_multi_queue;
2b188cc1 424 } ____cacheline_aligned_in_smp;
85faa7b8 425
21b55dbc 426 struct io_restriction restrictions;
3c1a2ead 427
b13a8918
PB
428 /* slow path rsrc auxilary data, used by update/register */
429 struct {
430 struct io_rsrc_node *rsrc_backup_node;
431 struct io_mapped_ubuf *dummy_ubuf;
432 struct io_rsrc_data *file_data;
433 struct io_rsrc_data *buf_data;
434
435 struct delayed_work rsrc_put_work;
436 struct llist_head rsrc_put_llist;
437 struct list_head rsrc_ref_list;
438 spinlock_t rsrc_ref_lock;
439 };
440
3c1a2ead 441 /* Keep this last, we don't need it for the fast path */
b986af7e
PB
442 struct {
443 #if defined(CONFIG_UNIX)
444 struct socket *ring_sock;
445 #endif
446 /* hashed buffered write serialization */
447 struct io_wq_hash *hash_map;
448
449 /* Only used for accounting purposes */
450 struct user_struct *user;
451 struct mm_struct *mm_account;
452
453 /* ctx exit and cancelation */
9011bf9a
PB
454 struct llist_head fallback_llist;
455 struct delayed_work fallback_work;
b986af7e
PB
456 struct work_struct exit_work;
457 struct list_head tctx_list;
458 struct completion ref_comp;
e139a1ec
PB
459 u32 iowq_limits[2];
460 bool iowq_limits_set;
b986af7e 461 };
2b188cc1
JA
462};
463
53e043b2
SM
464struct io_uring_task {
465 /* submission side */
09899b19 466 int cached_refs;
53e043b2
SM
467 struct xarray xa;
468 struct wait_queue_head wait;
ee53fb2b
SM
469 const struct io_ring_ctx *last;
470 struct io_wq *io_wq;
53e043b2 471 struct percpu_counter inflight;
b303fe2e 472 atomic_t inflight_tracked;
53e043b2 473 atomic_t in_idle;
53e043b2
SM
474
475 spinlock_t task_lock;
476 struct io_wq_work_list task_list;
53e043b2 477 struct callback_head task_work;
6294f368 478 bool task_running;
53e043b2
SM
479};
480
09bb8394
JA
481/*
482 * First field must be the file pointer in all the
483 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
484 */
221c5eb2
JA
485struct io_poll_iocb {
486 struct file *file;
018043be 487 struct wait_queue_head *head;
221c5eb2 488 __poll_t events;
8c838788 489 bool done;
221c5eb2 490 bool canceled;
392edb45 491 struct wait_queue_entry wait;
221c5eb2
JA
492};
493
9d805892 494struct io_poll_update {
018043be 495 struct file *file;
9d805892
PB
496 u64 old_user_data;
497 u64 new_user_data;
498 __poll_t events;
b69de288
JA
499 bool update_events;
500 bool update_user_data;
018043be
PB
501};
502
b5dba59e
JA
503struct io_close {
504 struct file *file;
b5dba59e 505 int fd;
7df778be 506 u32 file_slot;
b5dba59e
JA
507};
508
ad8a48ac
JA
509struct io_timeout_data {
510 struct io_kiocb *req;
511 struct hrtimer timer;
512 struct timespec64 ts;
513 enum hrtimer_mode mode;
50c1df2b 514 u32 flags;
ad8a48ac
JA
515};
516
8ed8d3c3
JA
517struct io_accept {
518 struct file *file;
519 struct sockaddr __user *addr;
520 int __user *addr_len;
521 int flags;
aaa4db12 522 u32 file_slot;
09952e3e 523 unsigned long nofile;
8ed8d3c3
JA
524};
525
526struct io_sync {
527 struct file *file;
528 loff_t len;
529 loff_t off;
530 int flags;
d63d1b5e 531 int mode;
8ed8d3c3
JA
532};
533
fbf23849
JA
534struct io_cancel {
535 struct file *file;
536 u64 addr;
537};
538
b29472ee
JA
539struct io_timeout {
540 struct file *file;
bfe68a22
PB
541 u32 off;
542 u32 target_seq;
135fcde8 543 struct list_head list;
90cd7e42
PB
544 /* head of the link, used by linked timeouts only */
545 struct io_kiocb *head;
89b263f6
JA
546 /* for linked completions */
547 struct io_kiocb *prev;
b29472ee
JA
548};
549
0bdf7a2d
PB
550struct io_timeout_rem {
551 struct file *file;
552 u64 addr;
9c8e11b3
PB
553
554 /* timeout update */
555 struct timespec64 ts;
556 u32 flags;
f1042b6c 557 bool ltimeout;
0bdf7a2d
PB
558};
559
9adbd45d
JA
560struct io_rw {
561 /* NOTE: kiocb has the file as the first member, so don't do it here */
562 struct kiocb kiocb;
563 u64 addr;
564 u64 len;
565};
566
3fbb51c1
JA
567struct io_connect {
568 struct file *file;
569 struct sockaddr __user *addr;
570 int addr_len;
571};
572
e47293fd
JA
573struct io_sr_msg {
574 struct file *file;
fddaface 575 union {
4af3417a
PB
576 struct compat_msghdr __user *umsg_compat;
577 struct user_msghdr __user *umsg;
578 void __user *buf;
fddaface 579 };
e47293fd 580 int msg_flags;
bcda7baa 581 int bgid;
fddaface 582 size_t len;
bcda7baa 583 struct io_buffer *kbuf;
e47293fd
JA
584};
585
15b71abe
JA
586struct io_open {
587 struct file *file;
588 int dfd;
b9445598 589 u32 file_slot;
15b71abe 590 struct filename *filename;
c12cedf2 591 struct open_how how;
4022e7af 592 unsigned long nofile;
15b71abe
JA
593};
594
269bbe5f 595struct io_rsrc_update {
05f3fb3c
JA
596 struct file *file;
597 u64 arg;
598 u32 nr_args;
599 u32 offset;
600};
601
4840e418
JA
602struct io_fadvise {
603 struct file *file;
604 u64 offset;
605 u32 len;
606 u32 advice;
607};
608
c1ca757b
JA
609struct io_madvise {
610 struct file *file;
611 u64 addr;
612 u32 len;
613 u32 advice;
614};
615
3e4827b0
JA
616struct io_epoll {
617 struct file *file;
618 int epfd;
619 int op;
620 int fd;
621 struct epoll_event event;
e47293fd
JA
622};
623
7d67af2c
PB
624struct io_splice {
625 struct file *file_out;
7d67af2c
PB
626 loff_t off_out;
627 loff_t off_in;
628 u64 len;
dfadddfc 629 int splice_fd_in;
7d67af2c
PB
630 unsigned int flags;
631};
632
ddf0322d
JA
633struct io_provide_buf {
634 struct file *file;
635 __u64 addr;
38134ada 636 __u32 len;
ddf0322d
JA
637 __u32 bgid;
638 __u16 nbufs;
639 __u16 bid;
640};
641
1d9e1288
BM
642struct io_statx {
643 struct file *file;
644 int dfd;
645 unsigned int mask;
646 unsigned int flags;
e62753e4 647 const char __user *filename;
1d9e1288
BM
648 struct statx __user *buffer;
649};
650
36f4fa68
JA
651struct io_shutdown {
652 struct file *file;
653 int how;
654};
655
80a261fd
JA
656struct io_rename {
657 struct file *file;
658 int old_dfd;
659 int new_dfd;
660 struct filename *oldpath;
661 struct filename *newpath;
662 int flags;
663};
664
14a1143b
JA
665struct io_unlink {
666 struct file *file;
667 int dfd;
668 int flags;
669 struct filename *filename;
670};
671
e34a02dc
DK
672struct io_mkdir {
673 struct file *file;
674 int dfd;
675 umode_t mode;
676 struct filename *filename;
677};
678
7a8721f8
DK
679struct io_symlink {
680 struct file *file;
681 int new_dfd;
682 struct filename *oldpath;
683 struct filename *newpath;
684};
685
cf30da90
DK
686struct io_hardlink {
687 struct file *file;
688 int old_dfd;
689 int new_dfd;
690 struct filename *oldpath;
691 struct filename *newpath;
692 int flags;
693};
694
3ca405eb
PB
695struct io_completion {
696 struct file *file;
8c3f9cd1 697 u32 cflags;
3ca405eb
PB
698};
699
f499a021
JA
700struct io_async_connect {
701 struct sockaddr_storage address;
702};
703
03b1230c
JA
704struct io_async_msghdr {
705 struct iovec fast_iov[UIO_FASTIOV];
257e84a5
PB
706 /* points to an allocated iov, if NULL we use fast_iov instead */
707 struct iovec *free_iov;
03b1230c
JA
708 struct sockaddr __user *uaddr;
709 struct msghdr msg;
b537916c 710 struct sockaddr_storage addr;
03b1230c
JA
711};
712
f67676d1
JA
713struct io_async_rw {
714 struct iovec fast_iov[UIO_FASTIOV];
ff6165b2
JA
715 const struct iovec *free_iovec;
716 struct iov_iter iter;
cd658695 717 struct iov_iter_state iter_state;
227c0c96 718 size_t bytes_done;
bcf5a063 719 struct wait_page_queue wpq;
f67676d1
JA
720};
721
6b47ee6e
PB
722enum {
723 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
724 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
725 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
726 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
727 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
bcda7baa 728 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
6b47ee6e 729
dddca226 730 /* first byte is taken by user flags, shift it to not overlap */
93d2bcd2 731 REQ_F_FAIL_BIT = 8,
6b47ee6e
PB
732 REQ_F_INFLIGHT_BIT,
733 REQ_F_CUR_POS_BIT,
734 REQ_F_NOWAIT_BIT,
6b47ee6e 735 REQ_F_LINK_TIMEOUT_BIT,
99bc4c38 736 REQ_F_NEED_CLEANUP_BIT,
d7718a9d 737 REQ_F_POLLED_BIT,
bcda7baa 738 REQ_F_BUFFER_SELECTED_BIT,
e342c807 739 REQ_F_COMPLETE_INLINE_BIT,
230d50d4 740 REQ_F_REISSUE_BIT,
b8e64b53 741 REQ_F_CREDS_BIT,
20e60a38 742 REQ_F_REFCOUNT_BIT,
4d13d1a4 743 REQ_F_ARM_LTIMEOUT_BIT,
7b29f92d 744 /* keep async read/write and isreg together and in order */
b191e2df
PB
745 REQ_F_NOWAIT_READ_BIT,
746 REQ_F_NOWAIT_WRITE_BIT,
7b29f92d 747 REQ_F_ISREG_BIT,
84557871
JA
748
749 /* not a real bit, just to check we're not overflowing the space */
750 __REQ_F_LAST_BIT,
6b47ee6e
PB
751};
752
753enum {
754 /* ctx owns file */
755 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
756 /* drain existing IO first */
757 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
758 /* linked sqes */
759 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
760 /* doesn't sever on completion < 0 */
761 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
762 /* IOSQE_ASYNC */
763 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
bcda7baa
JA
764 /* IOSQE_BUFFER_SELECT */
765 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
6b47ee6e 766
6b47ee6e 767 /* fail rest of links */
93d2bcd2 768 REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
b05a1bcd 769 /* on inflight list, should be cancelled and waited on exit reliably */
6b47ee6e
PB
770 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
771 /* read/write uses file position */
772 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
773 /* must not punt to workers */
774 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
900fad45 775 /* has or had linked timeout */
6b47ee6e 776 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
99bc4c38
PB
777 /* needs cleanup */
778 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
d7718a9d
JA
779 /* already went through poll handler */
780 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
bcda7baa
JA
781 /* buffer already selected */
782 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
e342c807
PB
783 /* completion is deferred through io_comp_state */
784 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
230d50d4
JA
785 /* caller should reissue async */
786 REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
7b29f92d 787 /* supports async reads */
b191e2df 788 REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT),
7b29f92d 789 /* supports async writes */
b191e2df 790 REQ_F_NOWAIT_WRITE = BIT(REQ_F_NOWAIT_WRITE_BIT),
7b29f92d
JA
791 /* regular file */
792 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
b8e64b53
PB
793 /* has creds assigned */
794 REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
20e60a38
PB
795 /* skip refcounting if not set */
796 REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
4d13d1a4
PB
797 /* there is a linked timeout that has to be armed */
798 REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
d7718a9d
JA
799};
800
801struct async_poll {
802 struct io_poll_iocb poll;
807abcb0 803 struct io_poll_iocb *double_poll;
6b47ee6e
PB
804};
805
f237c30a 806typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
5b0a6acc 807
7cbf1722 808struct io_task_work {
5b0a6acc
PB
809 union {
810 struct io_wq_work_node node;
811 struct llist_node fallback_node;
812 };
813 io_req_tw_func_t func;
7cbf1722
JA
814};
815
992da01a
PB
816enum {
817 IORING_RSRC_FILE = 0,
818 IORING_RSRC_BUFFER = 1,
819};
820
09bb8394
JA
821/*
822 * NOTE! Each of the iocb union members has the file pointer
823 * as the first entry in their struct definition. So you can
824 * access the file pointer through any of the sub-structs,
825 * or directly as just 'ki_filp' in this struct.
826 */
2b188cc1 827struct io_kiocb {
221c5eb2 828 union {
09bb8394 829 struct file *file;
9adbd45d 830 struct io_rw rw;
221c5eb2 831 struct io_poll_iocb poll;
9d805892 832 struct io_poll_update poll_update;
8ed8d3c3
JA
833 struct io_accept accept;
834 struct io_sync sync;
fbf23849 835 struct io_cancel cancel;
b29472ee 836 struct io_timeout timeout;
0bdf7a2d 837 struct io_timeout_rem timeout_rem;
3fbb51c1 838 struct io_connect connect;
e47293fd 839 struct io_sr_msg sr_msg;
15b71abe 840 struct io_open open;
b5dba59e 841 struct io_close close;
269bbe5f 842 struct io_rsrc_update rsrc_update;
4840e418 843 struct io_fadvise fadvise;
c1ca757b 844 struct io_madvise madvise;
3e4827b0 845 struct io_epoll epoll;
7d67af2c 846 struct io_splice splice;
ddf0322d 847 struct io_provide_buf pbuf;
1d9e1288 848 struct io_statx statx;
36f4fa68 849 struct io_shutdown shutdown;
80a261fd 850 struct io_rename rename;
14a1143b 851 struct io_unlink unlink;
e34a02dc 852 struct io_mkdir mkdir;
7a8721f8 853 struct io_symlink symlink;
cf30da90 854 struct io_hardlink hardlink;
3ca405eb
PB
855 /* use only after cleaning per-op data, see io_clean_op() */
856 struct io_completion compl;
221c5eb2 857 };
2b188cc1 858
e8c2bc1f
JA
859 /* opcode allocated if it needs to store data for async defer */
860 void *async_data;
d625c6ee 861 u8 opcode;
65a6543d
XW
862 /* polled IO has completed */
863 u8 iopoll_completed;
2b188cc1 864
4f4eeba8 865 u16 buf_index;
9cf7c104 866 u32 result;
4f4eeba8 867
010e8e6b
PB
868 struct io_ring_ctx *ctx;
869 unsigned int flags;
abc54d63 870 atomic_t refs;
010e8e6b
PB
871 struct task_struct *task;
872 u64 user_data;
d7718a9d 873
f2f87370 874 struct io_kiocb *link;
269bbe5f 875 struct percpu_ref *fixed_rsrc_refs;
fcb323cc 876
b303fe2e 877 /* used with ctx->iopoll_list with reads/writes */
010e8e6b 878 struct list_head inflight_entry;
5b0a6acc 879 struct io_task_work io_task_work;
010e8e6b
PB
880 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
881 struct hlist_node hash_node;
882 struct async_poll *apoll;
883 struct io_wq_work work;
fe7e3257 884 const struct cred *creds;
c10d1f98 885
eae071c9
PB
886 /* store used ubuf, so we can prevent reloading */
887 struct io_mapped_ubuf *imu;
2b188cc1 888};
05589553 889
13bf43f5
PB
890struct io_tctx_node {
891 struct list_head ctx_node;
892 struct task_struct *task;
13bf43f5
PB
893 struct io_ring_ctx *ctx;
894};
895
27dc8338
PB
896struct io_defer_entry {
897 struct list_head list;
898 struct io_kiocb *req;
9cf7c104 899 u32 seq;
2b188cc1
JA
900};
901
d3656344 902struct io_op_def {
d3656344
JA
903 /* needs req->file assigned */
904 unsigned needs_file : 1;
d3656344
JA
905 /* hash wq insertion if file is a regular file */
906 unsigned hash_reg_file : 1;
907 /* unbound wq insertion if file is a non-regular file */
908 unsigned unbound_nonreg_file : 1;
66f4af93
JA
909 /* opcode is not supported by this kernel */
910 unsigned not_supported : 1;
8a72758c
JA
911 /* set if opcode supports polled "wait" */
912 unsigned pollin : 1;
913 unsigned pollout : 1;
bcda7baa
JA
914 /* op supports buffer selection */
915 unsigned buffer_select : 1;
26f0505a
PB
916 /* do prep async if is going to be punted */
917 unsigned needs_async_setup : 1;
27926b68
JA
918 /* should block plug */
919 unsigned plug : 1;
e8c2bc1f
JA
920 /* size of async data needed, if any */
921 unsigned short async_size;
d3656344
JA
922};
923
0918682b 924static const struct io_op_def io_op_defs[] = {
0463b6c5
PB
925 [IORING_OP_NOP] = {},
926 [IORING_OP_READV] = {
d3656344
JA
927 .needs_file = 1,
928 .unbound_nonreg_file = 1,
8a72758c 929 .pollin = 1,
4d954c25 930 .buffer_select = 1,
26f0505a 931 .needs_async_setup = 1,
27926b68 932 .plug = 1,
e8c2bc1f 933 .async_size = sizeof(struct io_async_rw),
d3656344 934 },
0463b6c5 935 [IORING_OP_WRITEV] = {
d3656344
JA
936 .needs_file = 1,
937 .hash_reg_file = 1,
938 .unbound_nonreg_file = 1,
8a72758c 939 .pollout = 1,
26f0505a 940 .needs_async_setup = 1,
27926b68 941 .plug = 1,
e8c2bc1f 942 .async_size = sizeof(struct io_async_rw),
d3656344 943 },
0463b6c5 944 [IORING_OP_FSYNC] = {
d3656344
JA
945 .needs_file = 1,
946 },
0463b6c5 947 [IORING_OP_READ_FIXED] = {
d3656344
JA
948 .needs_file = 1,
949 .unbound_nonreg_file = 1,
8a72758c 950 .pollin = 1,
27926b68 951 .plug = 1,
e8c2bc1f 952 .async_size = sizeof(struct io_async_rw),
d3656344 953 },
0463b6c5 954 [IORING_OP_WRITE_FIXED] = {
d3656344
JA
955 .needs_file = 1,
956 .hash_reg_file = 1,
957 .unbound_nonreg_file = 1,
8a72758c 958 .pollout = 1,
27926b68 959 .plug = 1,
e8c2bc1f 960 .async_size = sizeof(struct io_async_rw),
d3656344 961 },
0463b6c5 962 [IORING_OP_POLL_ADD] = {
d3656344
JA
963 .needs_file = 1,
964 .unbound_nonreg_file = 1,
965 },
0463b6c5
PB
966 [IORING_OP_POLL_REMOVE] = {},
967 [IORING_OP_SYNC_FILE_RANGE] = {
d3656344
JA
968 .needs_file = 1,
969 },
0463b6c5 970 [IORING_OP_SENDMSG] = {
d3656344
JA
971 .needs_file = 1,
972 .unbound_nonreg_file = 1,
8a72758c 973 .pollout = 1,
26f0505a 974 .needs_async_setup = 1,
e8c2bc1f 975 .async_size = sizeof(struct io_async_msghdr),
d3656344 976 },
0463b6c5 977 [IORING_OP_RECVMSG] = {
d3656344
JA
978 .needs_file = 1,
979 .unbound_nonreg_file = 1,
8a72758c 980 .pollin = 1,
52de1fe1 981 .buffer_select = 1,
26f0505a 982 .needs_async_setup = 1,
e8c2bc1f 983 .async_size = sizeof(struct io_async_msghdr),
d3656344 984 },
0463b6c5 985 [IORING_OP_TIMEOUT] = {
e8c2bc1f 986 .async_size = sizeof(struct io_timeout_data),
d3656344 987 },
9c8e11b3
PB
988 [IORING_OP_TIMEOUT_REMOVE] = {
989 /* used by timeout updates' prep() */
9c8e11b3 990 },
0463b6c5 991 [IORING_OP_ACCEPT] = {
d3656344
JA
992 .needs_file = 1,
993 .unbound_nonreg_file = 1,
8a72758c 994 .pollin = 1,
d3656344 995 },
0463b6c5
PB
996 [IORING_OP_ASYNC_CANCEL] = {},
997 [IORING_OP_LINK_TIMEOUT] = {
e8c2bc1f 998 .async_size = sizeof(struct io_timeout_data),
d3656344 999 },
0463b6c5 1000 [IORING_OP_CONNECT] = {
d3656344
JA
1001 .needs_file = 1,
1002 .unbound_nonreg_file = 1,
8a72758c 1003 .pollout = 1,
26f0505a 1004 .needs_async_setup = 1,
e8c2bc1f 1005 .async_size = sizeof(struct io_async_connect),
d3656344 1006 },
0463b6c5 1007 [IORING_OP_FALLOCATE] = {
d3656344 1008 .needs_file = 1,
d3656344 1009 },
44526bed
JA
1010 [IORING_OP_OPENAT] = {},
1011 [IORING_OP_CLOSE] = {},
1012 [IORING_OP_FILES_UPDATE] = {},
1013 [IORING_OP_STATX] = {},
0463b6c5 1014 [IORING_OP_READ] = {
3a6820f2
JA
1015 .needs_file = 1,
1016 .unbound_nonreg_file = 1,
8a72758c 1017 .pollin = 1,
bcda7baa 1018 .buffer_select = 1,
27926b68 1019 .plug = 1,
e8c2bc1f 1020 .async_size = sizeof(struct io_async_rw),
3a6820f2 1021 },
0463b6c5 1022 [IORING_OP_WRITE] = {
3a6820f2 1023 .needs_file = 1,
7b3188e7 1024 .hash_reg_file = 1,
3a6820f2 1025 .unbound_nonreg_file = 1,
8a72758c 1026 .pollout = 1,
27926b68 1027 .plug = 1,
e8c2bc1f 1028 .async_size = sizeof(struct io_async_rw),
3a6820f2 1029 },
0463b6c5 1030 [IORING_OP_FADVISE] = {
4840e418 1031 .needs_file = 1,
c1ca757b 1032 },
44526bed 1033 [IORING_OP_MADVISE] = {},
0463b6c5 1034 [IORING_OP_SEND] = {
fddaface
JA
1035 .needs_file = 1,
1036 .unbound_nonreg_file = 1,
8a72758c 1037 .pollout = 1,
fddaface 1038 },
0463b6c5 1039 [IORING_OP_RECV] = {
fddaface
JA
1040 .needs_file = 1,
1041 .unbound_nonreg_file = 1,
8a72758c 1042 .pollin = 1,
bcda7baa 1043 .buffer_select = 1,
fddaface 1044 },
0463b6c5 1045 [IORING_OP_OPENAT2] = {
cebdb986 1046 },
3e4827b0
JA
1047 [IORING_OP_EPOLL_CTL] = {
1048 .unbound_nonreg_file = 1,
3e4827b0 1049 },
7d67af2c
PB
1050 [IORING_OP_SPLICE] = {
1051 .needs_file = 1,
1052 .hash_reg_file = 1,
1053 .unbound_nonreg_file = 1,
ddf0322d
JA
1054 },
1055 [IORING_OP_PROVIDE_BUFFERS] = {},
067524e9 1056 [IORING_OP_REMOVE_BUFFERS] = {},
f2a8d5c7
PB
1057 [IORING_OP_TEE] = {
1058 .needs_file = 1,
1059 .hash_reg_file = 1,
1060 .unbound_nonreg_file = 1,
1061 },
36f4fa68
JA
1062 [IORING_OP_SHUTDOWN] = {
1063 .needs_file = 1,
1064 },
44526bed
JA
1065 [IORING_OP_RENAMEAT] = {},
1066 [IORING_OP_UNLINKAT] = {},
e34a02dc 1067 [IORING_OP_MKDIRAT] = {},
7a8721f8 1068 [IORING_OP_SYMLINKAT] = {},
cf30da90 1069 [IORING_OP_LINKAT] = {},
d3656344
JA
1070};
1071
0756a869
PB
1072/* requests with any of those set should undergo io_disarm_next() */
1073#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
1074
7a612350 1075static bool io_disarm_next(struct io_kiocb *req);
eef51daa 1076static void io_uring_del_tctx_node(unsigned long index);
9936c7c2
PB
1077static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1078 struct task_struct *task,
3dd0c97a 1079 bool cancel_all);
78cc687b 1080static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
1ffc5422 1081
d4d19c19
PB
1082static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1083 long res, unsigned int cflags);
ec9c02ad 1084static void io_put_req(struct io_kiocb *req);
91c2f697 1085static void io_put_req_deferred(struct io_kiocb *req);
c7dae4ba 1086static void io_dismantle_req(struct io_kiocb *req);
94ae5e77 1087static void io_queue_linked_timeout(struct io_kiocb *req);
fdecb662 1088static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
c3bdad02 1089 struct io_uring_rsrc_update2 *up,
98f0b3b4 1090 unsigned nr_args);
68fb8979 1091static void io_clean_op(struct io_kiocb *req);
ac177053 1092static struct file *io_file_get(struct io_ring_ctx *ctx,
8371adf5 1093 struct io_kiocb *req, int fd, bool fixed);
c5eef2b9 1094static void __io_queue_sqe(struct io_kiocb *req);
269bbe5f 1095static void io_rsrc_put_work(struct work_struct *work);
de0617e4 1096
907d1df3 1097static void io_req_task_queue(struct io_kiocb *req);
2a2758f2 1098static void io_submit_flush_completions(struct io_ring_ctx *ctx);
179ae0d1 1099static int io_req_prep_async(struct io_kiocb *req);
de0617e4 1100
b9445598
PB
1101static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1102 unsigned int issue_flags, u32 slot_index);
7df778be
PB
1103static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1104
f1042b6c 1105static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
b9445598 1106
2b188cc1
JA
1107static struct kmem_cache *req_cachep;
1108
0918682b 1109static const struct file_operations io_uring_fops;
2b188cc1
JA
1110
1111struct sock *io_uring_get_socket(struct file *file)
1112{
1113#if defined(CONFIG_UNIX)
1114 if (file->f_op == &io_uring_fops) {
1115 struct io_ring_ctx *ctx = file->private_data;
1116
1117 return ctx->ring_sock->sk;
1118 }
1119#endif
1120 return NULL;
1121}
1122EXPORT_SYMBOL(io_uring_get_socket);
1123
f237c30a
PB
1124static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
1125{
1126 if (!*locked) {
1127 mutex_lock(&ctx->uring_lock);
1128 *locked = true;
1129 }
1130}
1131
f2f87370
PB
1132#define io_for_each_link(pos, head) \
1133 for (pos = (head); pos; pos = pos->link)
1134
21c843d5
PB
1135/*
1136 * Shamelessly stolen from the mm implementation of page reference checking,
1137 * see commit f958d7b528b1 for details.
1138 */
1139#define req_ref_zero_or_close_to_overflow(req) \
1140 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1141
1142static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1143{
20e60a38 1144 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
21c843d5
PB
1145 return atomic_inc_not_zero(&req->refs);
1146}
1147
21c843d5
PB
1148static inline bool req_ref_put_and_test(struct io_kiocb *req)
1149{
20e60a38
PB
1150 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1151 return true;
1152
21c843d5
PB
1153 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1154 return atomic_dec_and_test(&req->refs);
1155}
1156
1157static inline void req_ref_put(struct io_kiocb *req)
1158{
20e60a38 1159 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
21c843d5
PB
1160 WARN_ON_ONCE(req_ref_put_and_test(req));
1161}
1162
1163static inline void req_ref_get(struct io_kiocb *req)
1164{
20e60a38 1165 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
21c843d5
PB
1166 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1167 atomic_inc(&req->refs);
1168}
1169
48dcd38d 1170static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
20e60a38
PB
1171{
1172 if (!(req->flags & REQ_F_REFCOUNT)) {
1173 req->flags |= REQ_F_REFCOUNT;
48dcd38d 1174 atomic_set(&req->refs, nr);
20e60a38
PB
1175 }
1176}
1177
48dcd38d
PB
1178static inline void io_req_set_refcount(struct io_kiocb *req)
1179{
1180 __io_req_set_refcount(req, 1);
1181}
1182
b895c9a6 1183static inline void io_req_set_rsrc_node(struct io_kiocb *req)
36f72fe2
PB
1184{
1185 struct io_ring_ctx *ctx = req->ctx;
1186
269bbe5f 1187 if (!req->fixed_rsrc_refs) {
a7f0ed5a 1188 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
269bbe5f 1189 percpu_ref_get(req->fixed_rsrc_refs);
36f72fe2
PB
1190 }
1191}
1192
f70865db
PB
1193static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1194{
1195 bool got = percpu_ref_tryget(ref);
1196
1197 /* already at zero, wait for ->release() */
1198 if (!got)
1199 wait_for_completion(compl);
1200 percpu_ref_resurrect(ref);
1201 if (got)
1202 percpu_ref_put(ref);
1203}
1204
3dd0c97a
PB
1205static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1206 bool cancel_all)
f0baed8e 1207 __must_hold(&req->ctx->timeout_lock)
08d23634
PB
1208{
1209 struct io_kiocb *req;
1210
68207680 1211 if (task && head->task != task)
08d23634 1212 return false;
3dd0c97a 1213 if (cancel_all)
08d23634
PB
1214 return true;
1215
1216 io_for_each_link(req, head) {
b05a1bcd 1217 if (req->flags & REQ_F_INFLIGHT)
02a13674 1218 return true;
08d23634
PB
1219 }
1220 return false;
1221}
1222
f0baed8e
PB
1223static bool io_match_linked(struct io_kiocb *head)
1224{
1225 struct io_kiocb *req;
1226
1227 io_for_each_link(req, head) {
1228 if (req->flags & REQ_F_INFLIGHT)
1229 return true;
1230 }
1231 return false;
1232}
1233
1234/*
1235 * As io_match_task() but protected against racing with linked timeouts.
1236 * User must not hold timeout_lock.
1237 */
1238static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1239 bool cancel_all)
1240{
1241 bool matched;
1242
1243 if (task && head->task != task)
1244 return false;
1245 if (cancel_all)
1246 return true;
1247
1248 if (head->flags & REQ_F_LINK_TIMEOUT) {
1249 struct io_ring_ctx *ctx = head->ctx;
1250
1251 /* protect against races with linked timeouts */
1252 spin_lock_irq(&ctx->timeout_lock);
1253 matched = io_match_linked(head);
1254 spin_unlock_irq(&ctx->timeout_lock);
1255 } else {
1256 matched = io_match_linked(head);
1257 }
1258 return matched;
1259}
1260
93d2bcd2 1261static inline void req_set_fail(struct io_kiocb *req)
c40f6379 1262{
93d2bcd2 1263 req->flags |= REQ_F_FAIL;
c40f6379 1264}
4a38aed2 1265
a8295b98
HX
1266static inline void req_fail_link_node(struct io_kiocb *req, int res)
1267{
1268 req_set_fail(req);
1269 req->result = res;
1270}
1271
2b188cc1
JA
1272static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1273{
1274 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1275
0f158b4c 1276 complete(&ctx->ref_comp);
2b188cc1
JA
1277}
1278
8eb7e2d0
PB
1279static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1280{
1281 return !req->timeout.off;
1282}
1283
f56165e6
PB
1284static void io_fallback_req_func(struct work_struct *work)
1285{
1286 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1287 fallback_work.work);
1288 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1289 struct io_kiocb *req, *tmp;
f237c30a 1290 bool locked = false;
f56165e6
PB
1291
1292 percpu_ref_get(&ctx->refs);
1293 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
f237c30a 1294 req->io_task_work.func(req, &locked);
5636c00d 1295
f237c30a
PB
1296 if (locked) {
1297 if (ctx->submit_state.compl_nr)
1298 io_submit_flush_completions(ctx);
1299 mutex_unlock(&ctx->uring_lock);
1300 }
f56165e6 1301 percpu_ref_put(&ctx->refs);
f237c30a 1302
f56165e6
PB
1303}
1304
2b188cc1
JA
1305static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1306{
1307 struct io_ring_ctx *ctx;
78076bb6 1308 int hash_bits;
2b188cc1
JA
1309
1310 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1311 if (!ctx)
1312 return NULL;
1313
78076bb6
JA
1314 /*
1315 * Use 5 bits less than the max cq entries, that should give us around
1316 * 32 entries per hash list if totally full and uniformly spread.
1317 */
1318 hash_bits = ilog2(p->cq_entries);
1319 hash_bits -= 5;
1320 if (hash_bits <= 0)
1321 hash_bits = 1;
1322 ctx->cancel_hash_bits = hash_bits;
1323 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1324 GFP_KERNEL);
1325 if (!ctx->cancel_hash)
1326 goto err;
1327 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1328
6224843d
PB
1329 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1330 if (!ctx->dummy_ubuf)
1331 goto err;
1332 /* set invalid range, so io_import_fixed() fails meeting it */
1333 ctx->dummy_ubuf->ubuf = -1UL;
1334
21482896 1335 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
206aefde
JA
1336 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1337 goto err;
2b188cc1
JA
1338
1339 ctx->flags = p->flags;
90554200 1340 init_waitqueue_head(&ctx->sqo_sq_wait);
69fb2131 1341 INIT_LIST_HEAD(&ctx->sqd_list);
311997b3 1342 init_waitqueue_head(&ctx->poll_wait);
1d7bb1d5 1343 INIT_LIST_HEAD(&ctx->cq_overflow_list);
0f158b4c 1344 init_completion(&ctx->ref_comp);
9e15c3a0 1345 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
61cf9370 1346 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
2b188cc1 1347 mutex_init(&ctx->uring_lock);
311997b3 1348 init_waitqueue_head(&ctx->cq_wait);
2b188cc1 1349 spin_lock_init(&ctx->completion_lock);
89850fce 1350 spin_lock_init(&ctx->timeout_lock);
540e32a0 1351 INIT_LIST_HEAD(&ctx->iopoll_list);
de0617e4 1352 INIT_LIST_HEAD(&ctx->defer_list);
5262f567 1353 INIT_LIST_HEAD(&ctx->timeout_list);
ef9dd637 1354 INIT_LIST_HEAD(&ctx->ltimeout_list);
d67d2263
BM
1355 spin_lock_init(&ctx->rsrc_ref_lock);
1356 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
269bbe5f
BM
1357 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1358 init_llist_head(&ctx->rsrc_put_llist);
13bf43f5 1359 INIT_LIST_HEAD(&ctx->tctx_list);
cd0ca2e0 1360 INIT_LIST_HEAD(&ctx->submit_state.free_list);
d0acdee2 1361 INIT_LIST_HEAD(&ctx->locked_free_list);
9011bf9a 1362 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
2b188cc1 1363 return ctx;
206aefde 1364err:
6224843d 1365 kfree(ctx->dummy_ubuf);
78076bb6 1366 kfree(ctx->cancel_hash);
206aefde
JA
1367 kfree(ctx);
1368 return NULL;
2b188cc1
JA
1369}
1370
8f6ed49a
PB
1371static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1372{
1373 struct io_rings *r = ctx->rings;
1374
1375 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1376 ctx->cq_extra--;
1377}
1378
9cf7c104 1379static bool req_need_defer(struct io_kiocb *req, u32 seq)
7adf4eaf 1380{
2bc9930e
JA
1381 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1382 struct io_ring_ctx *ctx = req->ctx;
a197f664 1383
8f6ed49a 1384 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
2bc9930e 1385 }
de0617e4 1386
9d858b21 1387 return false;
de0617e4
JA
1388}
1389
c97d8a0f
PB
1390#define FFS_ASYNC_READ 0x1UL
1391#define FFS_ASYNC_WRITE 0x2UL
1392#ifdef CONFIG_64BIT
1393#define FFS_ISREG 0x4UL
1394#else
1395#define FFS_ISREG 0x0UL
1396#endif
1397#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
1398
1399static inline bool io_req_ffs_set(struct io_kiocb *req)
1400{
1401 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1402}
1403
ce3d5aae
PB
1404static void io_req_track_inflight(struct io_kiocb *req)
1405{
ce3d5aae 1406 if (!(req->flags & REQ_F_INFLIGHT)) {
ce3d5aae 1407 req->flags |= REQ_F_INFLIGHT;
b303fe2e 1408 atomic_inc(&current->io_uring->inflight_tracked);
ce3d5aae
PB
1409 }
1410}
1411
fd08e530
PB
1412static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1413{
906c6caa
PB
1414 if (WARN_ON_ONCE(!req->link))
1415 return NULL;
1416
4d13d1a4
PB
1417 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1418 req->flags |= REQ_F_LINK_TIMEOUT;
fd08e530
PB
1419
1420 /* linked timeouts should have two refs once prep'ed */
48dcd38d 1421 io_req_set_refcount(req);
4d13d1a4
PB
1422 __io_req_set_refcount(req->link, 2);
1423 return req->link;
fd08e530
PB
1424}
1425
1426static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1427{
4d13d1a4 1428 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
fd08e530
PB
1429 return NULL;
1430 return __io_prep_linked_timeout(req);
1431}
1432
1e6fa521
JA
1433static void io_prep_async_work(struct io_kiocb *req)
1434{
1435 const struct io_op_def *def = &io_op_defs[req->opcode];
1e6fa521
JA
1436 struct io_ring_ctx *ctx = req->ctx;
1437
b8e64b53
PB
1438 if (!(req->flags & REQ_F_CREDS)) {
1439 req->flags |= REQ_F_CREDS;
c10d1f98 1440 req->creds = get_current_cred();
b8e64b53 1441 }
003e8dcc 1442
e1d675df
PB
1443 req->work.list.next = NULL;
1444 req->work.flags = 0;
feaadc4f
PB
1445 if (req->flags & REQ_F_FORCE_ASYNC)
1446 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1447
1e6fa521
JA
1448 if (req->flags & REQ_F_ISREG) {
1449 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1450 io_wq_hash_work(&req->work, file_inode(req->file));
4b982bd0 1451 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
1e6fa521
JA
1452 if (def->unbound_nonreg_file)
1453 req->work.flags |= IO_WQ_WORK_UNBOUND;
1454 }
561fb04a 1455}
cccf0ee8 1456
cbdcb435 1457static void io_prep_async_link(struct io_kiocb *req)
561fb04a 1458{
cbdcb435 1459 struct io_kiocb *cur;
54a91f3b 1460
44eff40a
PB
1461 if (req->flags & REQ_F_LINK_TIMEOUT) {
1462 struct io_ring_ctx *ctx = req->ctx;
1463
51b348e1 1464 spin_lock_irq(&ctx->timeout_lock);
44eff40a
PB
1465 io_for_each_link(cur, req)
1466 io_prep_async_work(cur);
51b348e1 1467 spin_unlock_irq(&ctx->timeout_lock);
44eff40a
PB
1468 } else {
1469 io_for_each_link(cur, req)
1470 io_prep_async_work(cur);
1471 }
561fb04a
JA
1472}
1473
f237c30a 1474static void io_queue_async_work(struct io_kiocb *req, bool *locked)
561fb04a 1475{
a197f664 1476 struct io_ring_ctx *ctx = req->ctx;
cbdcb435 1477 struct io_kiocb *link = io_prep_linked_timeout(req);
5aa75ed5 1478 struct io_uring_task *tctx = req->task->io_uring;
561fb04a 1479
f237c30a
PB
1480 /* must not take the lock, NULL it as a precaution */
1481 locked = NULL;
1482
3bfe6106
JA
1483 BUG_ON(!tctx);
1484 BUG_ON(!tctx->io_wq);
561fb04a 1485
cbdcb435
PB
1486 /* init ->work of the whole link before punting */
1487 io_prep_async_link(req);
991468dc
JA
1488
1489 /*
1490 * Not expected to happen, but if we do have a bug where this _can_
1491 * happen, catch it here and ensure the request is marked as
1492 * canceled. That will make io-wq go through the usual work cancel
1493 * procedure rather than attempt to run this request (or create a new
1494 * worker for it).
1495 */
1496 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1497 req->work.flags |= IO_WQ_WORK_CANCEL;
1498
d07f1e8a
PB
1499 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1500 &req->work, req->flags);
ebf93667 1501 io_wq_enqueue(tctx->io_wq, &req->work);
7271ef3a
JA
1502 if (link)
1503 io_queue_linked_timeout(link);
cbdcb435
PB
1504}
1505
1ee4160c 1506static void io_kill_timeout(struct io_kiocb *req, int status)
8c855885 1507 __must_hold(&req->ctx->completion_lock)
89850fce 1508 __must_hold(&req->ctx->timeout_lock)
5262f567 1509{
e8c2bc1f 1510 struct io_timeout_data *io = req->async_data;
5262f567 1511
fd9c7bc5 1512 if (hrtimer_try_to_cancel(&io->timer) != -1) {
2ae2eb9d
PB
1513 if (status)
1514 req_set_fail(req);
01cec8c1
PB
1515 atomic_set(&req->ctx->cq_timeouts,
1516 atomic_read(&req->ctx->cq_timeouts) + 1);
135fcde8 1517 list_del_init(&req->timeout.list);
d4d19c19 1518 io_cqring_fill_event(req->ctx, req->user_data, status, 0);
91c2f697 1519 io_put_req_deferred(req);
5262f567
JA
1520 }
1521}
1522
441b8a78 1523static void io_queue_deferred(struct io_ring_ctx *ctx)
de0617e4 1524{
441b8a78 1525 while (!list_empty(&ctx->defer_list)) {
27dc8338
PB
1526 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1527 struct io_defer_entry, list);
de0617e4 1528
9cf7c104 1529 if (req_need_defer(de->req, de->seq))
04518945 1530 break;
27dc8338 1531 list_del_init(&de->list);
907d1df3 1532 io_req_task_queue(de->req);
27dc8338 1533 kfree(de);
441b8a78 1534 }
04518945
PB
1535}
1536
360428f8 1537static void io_flush_timeouts(struct io_ring_ctx *ctx)
89850fce 1538 __must_hold(&ctx->completion_lock)
de0617e4 1539{
441b8a78 1540 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
7abdd4c7 1541 struct io_kiocb *req, *tmp;
f010505b 1542
79ebeaee 1543 spin_lock_irq(&ctx->timeout_lock);
7abdd4c7 1544 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
f010505b 1545 u32 events_needed, events_got;
de0617e4 1546
8eb7e2d0 1547 if (io_is_timeout_noseq(req))
360428f8 1548 break;
f010505b
MDG
1549
1550 /*
1551 * Since seq can easily wrap around over time, subtract
1552 * the last seq at which timeouts were flushed before comparing.
1553 * Assuming not more than 2^31-1 events have happened since,
1554 * these subtractions won't have wrapped, so we can check if
1555 * target is in [last_seq, current_seq] by comparing the two.
1556 */
1557 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1558 events_got = seq - ctx->cq_last_tm_flush;
1559 if (events_got < events_needed)
360428f8 1560 break;
bfe68a22 1561
1ee4160c 1562 io_kill_timeout(req, 0);
f18ee4cf 1563 }
f010505b 1564 ctx->cq_last_tm_flush = seq;
79ebeaee 1565 spin_unlock_irq(&ctx->timeout_lock);
360428f8 1566}
5262f567 1567
2335f6f5 1568static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
360428f8 1569{
2335f6f5
PB
1570 if (ctx->off_timeout_used)
1571 io_flush_timeouts(ctx);
1572 if (ctx->drain_active)
1573 io_queue_deferred(ctx);
1574}
1575
1576static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1577{
1578 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1579 __io_commit_cqring_flush(ctx);
ec30e04b
PB
1580 /* order cqe stores with ring update */
1581 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
de0617e4
JA
1582}
1583
90554200
JA
1584static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1585{
1586 struct io_rings *r = ctx->rings;
1587
a566c556 1588 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
90554200
JA
1589}
1590
888aae2e
PB
1591static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1592{
1593 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1594}
1595
d068b506 1596static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
2b188cc1 1597{
75b28aff 1598 struct io_rings *rings = ctx->rings;
ea5ab3b5 1599 unsigned tail, mask = ctx->cq_entries - 1;
2b188cc1 1600
115e12e5
SB
1601 /*
1602 * writes to the cq entry need to come after reading head; the
1603 * control dependency is enough as we're using WRITE_ONCE to
1604 * fill the cq entry
1605 */
a566c556 1606 if (__io_cqring_events(ctx) == ctx->cq_entries)
2b188cc1
JA
1607 return NULL;
1608
888aae2e 1609 tail = ctx->cached_cq_tail++;
ea5ab3b5 1610 return &rings->cqes[tail & mask];
2b188cc1
JA
1611}
1612
f2842ab5
JA
1613static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1614{
44c769de 1615 if (likely(!ctx->cq_ev_fd))
f0b493e6 1616 return false;
7e55a19c
SG
1617 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1618 return false;
44c769de 1619 return !ctx->eventfd_async || io_wq_current_is_worker();
f2842ab5
JA
1620}
1621
2c5d763c
JA
1622/*
1623 * This should only get called when at least one event has been posted.
1624 * Some applications rely on the eventfd notification count only changing
1625 * IFF a new CQE has been added to the CQ ring. There's no depedency on
1626 * 1:1 relationship between how many times this function is called (and
1627 * hence the eventfd count) and number of CQEs posted to the CQ ring.
1628 */
b41e9852 1629static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1d7bb1d5 1630{
5fd46178
JA
1631 /*
1632 * wake_up_all() may seem excessive, but io_wake_function() and
1633 * io_should_wake() handle the termination of the loop and only
1634 * wake as many waiters as we need to.
1635 */
1636 if (wq_has_sleeper(&ctx->cq_wait))
1637 wake_up_all(&ctx->cq_wait);
534ca6d6
JA
1638 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1639 wake_up(&ctx->sq_data->wait);
b41e9852 1640 if (io_should_trigger_evfd(ctx))
1d7bb1d5 1641 eventfd_signal(ctx->cq_ev_fd, 1);
3f008385 1642 if (waitqueue_active(&ctx->poll_wait))
311997b3 1643 wake_up_interruptible(&ctx->poll_wait);
1d7bb1d5
JA
1644}
1645
80c18e4a
PB
1646static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1647{
c57a91fb
PB
1648 /* see waitqueue_active() comment */
1649 smp_mb();
1650
80c18e4a 1651 if (ctx->flags & IORING_SETUP_SQPOLL) {
c57a91fb 1652 if (waitqueue_active(&ctx->cq_wait))
5fd46178 1653 wake_up_all(&ctx->cq_wait);
80c18e4a
PB
1654 }
1655 if (io_should_trigger_evfd(ctx))
1656 eventfd_signal(ctx->cq_ev_fd, 1);
3f008385 1657 if (waitqueue_active(&ctx->poll_wait))
311997b3 1658 wake_up_interruptible(&ctx->poll_wait);
80c18e4a
PB
1659}
1660
c4a2ed72 1661/* Returns true if there are no backlogged entries after the flush */
6c2450ae 1662static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1d7bb1d5 1663{
b18032bb 1664 bool all_flushed, posted;
1d7bb1d5 1665
a566c556 1666 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
e23de15f 1667 return false;
1d7bb1d5 1668
b18032bb 1669 posted = false;
79ebeaee 1670 spin_lock(&ctx->completion_lock);
6c2450ae 1671 while (!list_empty(&ctx->cq_overflow_list)) {
d068b506 1672 struct io_uring_cqe *cqe = io_get_cqe(ctx);
6c2450ae 1673 struct io_overflow_cqe *ocqe;
e6c8aa9a 1674
1d7bb1d5
JA
1675 if (!cqe && !force)
1676 break;
6c2450ae
PB
1677 ocqe = list_first_entry(&ctx->cq_overflow_list,
1678 struct io_overflow_cqe, list);
1679 if (cqe)
1680 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1681 else
8f6ed49a
PB
1682 io_account_cq_overflow(ctx);
1683
b18032bb 1684 posted = true;
6c2450ae
PB
1685 list_del(&ocqe->list);
1686 kfree(ocqe);
1d7bb1d5
JA
1687 }
1688
09e88404
PB
1689 all_flushed = list_empty(&ctx->cq_overflow_list);
1690 if (all_flushed) {
5ed7a37d 1691 clear_bit(0, &ctx->check_cq_overflow);
20c0b380
NA
1692 WRITE_ONCE(ctx->rings->sq_flags,
1693 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
09e88404 1694 }
46930143 1695
b18032bb
JA
1696 if (posted)
1697 io_commit_cqring(ctx);
79ebeaee 1698 spin_unlock(&ctx->completion_lock);
b18032bb
JA
1699 if (posted)
1700 io_cqring_ev_posted(ctx);
09e88404 1701 return all_flushed;
1d7bb1d5
JA
1702}
1703
90f67366 1704static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
6c503150 1705{
ca0a2651
JA
1706 bool ret = true;
1707
5ed7a37d 1708 if (test_bit(0, &ctx->check_cq_overflow)) {
6c503150
PB
1709 /* iopoll syncs against uring_lock, not completion_lock */
1710 if (ctx->flags & IORING_SETUP_IOPOLL)
1711 mutex_lock(&ctx->uring_lock);
90f67366 1712 ret = __io_cqring_overflow_flush(ctx, false);
6c503150
PB
1713 if (ctx->flags & IORING_SETUP_IOPOLL)
1714 mutex_unlock(&ctx->uring_lock);
1715 }
ca0a2651
JA
1716
1717 return ret;
6c503150
PB
1718}
1719
6a290a14
PB
1720/* must to be called somewhat shortly after putting a request */
1721static inline void io_put_task(struct task_struct *task, int nr)
1722{
1723 struct io_uring_task *tctx = task->io_uring;
1724
e98e49b2
PB
1725 if (likely(task == current)) {
1726 tctx->cached_refs += nr;
1727 } else {
1728 percpu_counter_sub(&tctx->inflight, nr);
1729 if (unlikely(atomic_read(&tctx->in_idle)))
1730 wake_up(&tctx->wait);
1731 put_task_struct_many(task, nr);
1732 }
6a290a14
PB
1733}
1734
9a10867a
PB
1735static void io_task_refs_refill(struct io_uring_task *tctx)
1736{
1737 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
1738
1739 percpu_counter_add(&tctx->inflight, refill);
1740 refcount_add(refill, &current->usage);
1741 tctx->cached_refs += refill;
1742}
1743
1744static inline void io_get_task_refs(int nr)
1745{
1746 struct io_uring_task *tctx = current->io_uring;
1747
1748 tctx->cached_refs -= nr;
1749 if (unlikely(tctx->cached_refs < 0))
1750 io_task_refs_refill(tctx);
1751}
1752
abdebba9
PB
1753static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
1754{
1755 struct io_uring_task *tctx = task->io_uring;
1756 unsigned int refs = tctx->cached_refs;
1757
1758 if (refs) {
1759 tctx->cached_refs = 0;
1760 percpu_counter_sub(&tctx->inflight, refs);
1761 put_task_struct_many(task, refs);
1762 }
1763}
1764
d4d19c19
PB
1765static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
1766 long res, unsigned int cflags)
2b188cc1 1767{
cce4b8b0 1768 struct io_overflow_cqe *ocqe;
2b188cc1 1769
cce4b8b0
PB
1770 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1771 if (!ocqe) {
1772 /*
1773 * If we're in ring overflow flush mode, or in task cancel mode,
1774 * or cannot allocate an overflow entry, then we need to drop it
1775 * on the floor.
1776 */
8f6ed49a 1777 io_account_cq_overflow(ctx);
cce4b8b0 1778 return false;
2b188cc1 1779 }
cce4b8b0 1780 if (list_empty(&ctx->cq_overflow_list)) {
5ed7a37d 1781 set_bit(0, &ctx->check_cq_overflow);
20c0b380
NA
1782 WRITE_ONCE(ctx->rings->sq_flags,
1783 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1784
cce4b8b0 1785 }
d4d19c19 1786 ocqe->cqe.user_data = user_data;
cce4b8b0
PB
1787 ocqe->cqe.res = res;
1788 ocqe->cqe.flags = cflags;
1789 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1790 return true;
2b188cc1
JA
1791}
1792
d4d19c19
PB
1793static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1794 long res, unsigned int cflags)
2b188cc1
JA
1795{
1796 struct io_uring_cqe *cqe;
1797
d4d19c19 1798 trace_io_uring_complete(ctx, user_data, res, cflags);
51c3ff62 1799
2b188cc1
JA
1800 /*
1801 * If we can't get a cq entry, userspace overflowed the
1802 * submission (by quite a lot). Increment the overflow count in
1803 * the ring.
1804 */
d068b506 1805 cqe = io_get_cqe(ctx);
1d7bb1d5 1806 if (likely(cqe)) {
d4d19c19 1807 WRITE_ONCE(cqe->user_data, user_data);
2b188cc1 1808 WRITE_ONCE(cqe->res, res);
bcda7baa 1809 WRITE_ONCE(cqe->flags, cflags);
8d13326e 1810 return true;
2b188cc1 1811 }
d4d19c19 1812 return io_cqring_event_overflow(ctx, user_data, res, cflags);
2b188cc1
JA
1813}
1814
8d13326e 1815/* not as hot to bloat with inlining */
d4d19c19
PB
1816static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1817 long res, unsigned int cflags)
bcda7baa 1818{
d4d19c19 1819 return __io_cqring_fill_event(ctx, user_data, res, cflags);
bcda7baa
JA
1820}
1821
7a612350
PB
1822static void io_req_complete_post(struct io_kiocb *req, long res,
1823 unsigned int cflags)
2b188cc1 1824{
78e19bbe 1825 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 1826
79ebeaee 1827 spin_lock(&ctx->completion_lock);
d4d19c19 1828 __io_cqring_fill_event(ctx, req->user_data, res, cflags);
c7dae4ba
JA
1829 /*
1830 * If we're the last reference to this request, add to our locked
1831 * free_list cache.
1832 */
de9b4cca 1833 if (req_ref_put_and_test(req)) {
7a612350 1834 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
0756a869 1835 if (req->flags & IO_DISARM_MASK)
7a612350
PB
1836 io_disarm_next(req);
1837 if (req->link) {
1838 io_req_task_queue(req->link);
1839 req->link = NULL;
1840 }
1841 }
c7dae4ba
JA
1842 io_dismantle_req(req);
1843 io_put_task(req->task, 1);
bb943b82 1844 list_add(&req->inflight_entry, &ctx->locked_free_list);
d0acdee2 1845 ctx->locked_free_nr++;
180f829f
PB
1846 } else {
1847 if (!percpu_ref_tryget(&ctx->refs))
1848 req = NULL;
1849 }
7a612350 1850 io_commit_cqring(ctx);
79ebeaee 1851 spin_unlock(&ctx->completion_lock);
7a612350 1852
180f829f
PB
1853 if (req) {
1854 io_cqring_ev_posted(ctx);
c7dae4ba 1855 percpu_ref_put(&ctx->refs);
180f829f 1856 }
229a7b63
JA
1857}
1858
4e3d9ff9
JA
1859static inline bool io_req_needs_clean(struct io_kiocb *req)
1860{
c854357b 1861 return req->flags & IO_REQ_CLEAN_FLAGS;
4e3d9ff9
JA
1862}
1863
a38d68db 1864static void io_req_complete_state(struct io_kiocb *req, long res,
889fca73 1865 unsigned int cflags)
229a7b63 1866{
4e3d9ff9 1867 if (io_req_needs_clean(req))
68fb8979 1868 io_clean_op(req);
a38d68db
PB
1869 req->result = res;
1870 req->compl.cflags = cflags;
e342c807 1871 req->flags |= REQ_F_COMPLETE_INLINE;
e1e16097
JA
1872}
1873
889fca73
PB
1874static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1875 long res, unsigned cflags)
bcda7baa 1876{
889fca73
PB
1877 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1878 io_req_complete_state(req, res, cflags);
a38d68db 1879 else
c7dae4ba 1880 io_req_complete_post(req, res, cflags);
bcda7baa
JA
1881}
1882
a38d68db 1883static inline void io_req_complete(struct io_kiocb *req, long res)
0ddf92e8 1884{
889fca73 1885 __io_req_complete(req, 0, res, 0);
0ddf92e8
JA
1886}
1887
f41db273
PB
1888static void io_req_complete_failed(struct io_kiocb *req, long res)
1889{
93d2bcd2 1890 req_set_fail(req);
f41db273
PB
1891 io_req_complete_post(req, res, 0);
1892}
1893
c6d3d9cb
PB
1894static void io_req_complete_fail_submit(struct io_kiocb *req)
1895{
1896 /*
1897 * We don't submit, fail them all, for that replace hardlinks with
1898 * normal links. Extra REQ_F_LINK is tolerated.
1899 */
1900 req->flags &= ~REQ_F_HARDLINK;
1901 req->flags |= REQ_F_LINK;
1902 io_req_complete_failed(req, req->result);
1903}
1904
864ea921
PB
1905/*
1906 * Don't initialise the fields below on every allocation, but do that in
1907 * advance and keep them valid across allocations.
1908 */
1909static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1910{
1911 req->ctx = ctx;
1912 req->link = NULL;
1913 req->async_data = NULL;
1914 /* not necessary, but safer to zero */
1915 req->result = 0;
1916}
1917
dac7a098 1918static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
cd0ca2e0 1919 struct io_submit_state *state)
dac7a098 1920{
79ebeaee 1921 spin_lock(&ctx->completion_lock);
cd0ca2e0 1922 list_splice_init(&ctx->locked_free_list, &state->free_list);
d0acdee2 1923 ctx->locked_free_nr = 0;
79ebeaee 1924 spin_unlock(&ctx->completion_lock);
dac7a098
PB
1925}
1926
dd78f492 1927/* Returns true IFF there are requests in the cache */
c7dae4ba 1928static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
0ddf92e8 1929{
c7dae4ba 1930 struct io_submit_state *state = &ctx->submit_state;
dd78f492 1931 int nr;
0ddf92e8 1932
c7dae4ba
JA
1933 /*
1934 * If we have more than a batch's worth of requests in our IRQ side
1935 * locked cache, grab the lock and move them over to our submission
1936 * side cache.
1937 */
d0acdee2 1938 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
cd0ca2e0 1939 io_flush_cached_locked_reqs(ctx, state);
0ddf92e8 1940
dd78f492 1941 nr = state->free_reqs;
cd0ca2e0
PB
1942 while (!list_empty(&state->free_list)) {
1943 struct io_kiocb *req = list_first_entry(&state->free_list,
bb943b82 1944 struct io_kiocb, inflight_entry);
dd78f492 1945
bb943b82 1946 list_del(&req->inflight_entry);
dd78f492
PB
1947 state->reqs[nr++] = req;
1948 if (nr == ARRAY_SIZE(state->reqs))
e5d1bc0a 1949 break;
1b4c351f
JA
1950 }
1951
dd78f492
PB
1952 state->free_reqs = nr;
1953 return nr != 0;
0ddf92e8
JA
1954}
1955
5d5901a3
PB
1956/*
1957 * A request might get retired back into the request caches even before opcode
1958 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1959 * Because of that, io_alloc_req() should be called only under ->uring_lock
1960 * and with extra caution to not get a request that is still worked on.
1961 */
e5d1bc0a 1962static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
5d5901a3 1963 __must_hold(&ctx->uring_lock)
2b188cc1 1964{
e5d1bc0a 1965 struct io_submit_state *state = &ctx->submit_state;
864ea921
PB
1966 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1967 int ret, i;
e5d1bc0a 1968
fe7e3257 1969 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
e5d1bc0a 1970
864ea921
PB
1971 if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1972 goto got_req;
e5d1bc0a 1973
864ea921
PB
1974 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1975 state->reqs);
fd6fab2c 1976
864ea921
PB
1977 /*
1978 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1979 * retry single alloc to be on the safe side.
1980 */
1981 if (unlikely(ret <= 0)) {
1982 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1983 if (!state->reqs[0])
1984 return NULL;
1985 ret = 1;
2b188cc1 1986 }
864ea921
PB
1987
1988 for (i = 0; i < ret; i++)
1989 io_preinit_req(state->reqs[i], ctx);
1990 state->free_reqs = ret;
e5d1bc0a 1991got_req:
291b2821
PB
1992 state->free_reqs--;
1993 return state->reqs[state->free_reqs];
2b188cc1
JA
1994}
1995
e1d767f0 1996static inline void io_put_file(struct file *file)
8da11c19 1997{
e1d767f0 1998 if (file)
8da11c19
PB
1999 fput(file);
2000}
2001
4edf20f9 2002static void io_dismantle_req(struct io_kiocb *req)
2b188cc1 2003{
094bae49 2004 unsigned int flags = req->flags;
929a3af9 2005
3a0a6902
PB
2006 if (io_req_needs_clean(req))
2007 io_clean_op(req);
e1d767f0
PB
2008 if (!(flags & REQ_F_FIXED_FILE))
2009 io_put_file(req->file);
269bbe5f
BM
2010 if (req->fixed_rsrc_refs)
2011 percpu_ref_put(req->fixed_rsrc_refs);
99ebe4ef 2012 if (req->async_data) {
094bae49 2013 kfree(req->async_data);
99ebe4ef
PB
2014 req->async_data = NULL;
2015 }
e65ef56d
JA
2016}
2017
216578e5 2018static void __io_free_req(struct io_kiocb *req)
c6ca97b3 2019{
51a4cc11 2020 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 2021
216578e5 2022 io_dismantle_req(req);
7c660731 2023 io_put_task(req->task, 1);
c6ca97b3 2024
79ebeaee 2025 spin_lock(&ctx->completion_lock);
bb943b82 2026 list_add(&req->inflight_entry, &ctx->locked_free_list);
c34b025f 2027 ctx->locked_free_nr++;
79ebeaee 2028 spin_unlock(&ctx->completion_lock);
c34b025f 2029
ecfc5177 2030 percpu_ref_put(&ctx->refs);
e65ef56d
JA
2031}
2032
f2f87370
PB
2033static inline void io_remove_next_linked(struct io_kiocb *req)
2034{
2035 struct io_kiocb *nxt = req->link;
2036
2037 req->link = nxt->link;
2038 nxt->link = NULL;
2039}
2040
33cc89a9
PB
2041static bool io_kill_linked_timeout(struct io_kiocb *req)
2042 __must_hold(&req->ctx->completion_lock)
89b263f6 2043 __must_hold(&req->ctx->timeout_lock)
2665abfd 2044{
33cc89a9 2045 struct io_kiocb *link = req->link;
f2f87370 2046
b97e736a 2047 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
c9abd7ad 2048 struct io_timeout_data *io = link->async_data;
7c86ffee 2049
f2f87370 2050 io_remove_next_linked(req);
90cd7e42 2051 link->timeout.head = NULL;
fd9c7bc5 2052 if (hrtimer_try_to_cancel(&io->timer) != -1) {
ef9dd637 2053 list_del(&link->timeout.list);
d4d19c19
PB
2054 io_cqring_fill_event(link->ctx, link->user_data,
2055 -ECANCELED, 0);
91c2f697 2056 io_put_req_deferred(link);
d4729fbd 2057 return true;
c9abd7ad
PB
2058 }
2059 }
d4729fbd 2060 return false;
7c86ffee
PB
2061}
2062
d148ca4b 2063static void io_fail_links(struct io_kiocb *req)
33cc89a9 2064 __must_hold(&req->ctx->completion_lock)
9e645e11 2065{
33cc89a9 2066 struct io_kiocb *nxt, *link = req->link;
9e645e11 2067
f2f87370 2068 req->link = NULL;
f2f87370 2069 while (link) {
a8295b98
HX
2070 long res = -ECANCELED;
2071
2072 if (link->flags & REQ_F_FAIL)
2073 res = link->result;
2074
f2f87370
PB
2075 nxt = link->link;
2076 link->link = NULL;
2665abfd 2077
f2f87370 2078 trace_io_uring_fail_link(req, link);
a8295b98 2079 io_cqring_fill_event(link->ctx, link->user_data, res, 0);
91c2f697 2080 io_put_req_deferred(link);
f2f87370 2081 link = nxt;
9e645e11 2082 }
33cc89a9 2083}
9e645e11 2084
33cc89a9
PB
2085static bool io_disarm_next(struct io_kiocb *req)
2086 __must_hold(&req->ctx->completion_lock)
2087{
2088 bool posted = false;
2089
0756a869
PB
2090 if (req->flags & REQ_F_ARM_LTIMEOUT) {
2091 struct io_kiocb *link = req->link;
2092
906c6caa 2093 req->flags &= ~REQ_F_ARM_LTIMEOUT;
0756a869
PB
2094 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2095 io_remove_next_linked(req);
2096 io_cqring_fill_event(link->ctx, link->user_data,
2097 -ECANCELED, 0);
2098 io_put_req_deferred(link);
2099 posted = true;
2100 }
2101 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
89b263f6
JA
2102 struct io_ring_ctx *ctx = req->ctx;
2103
2104 spin_lock_irq(&ctx->timeout_lock);
33cc89a9 2105 posted = io_kill_linked_timeout(req);
89b263f6
JA
2106 spin_unlock_irq(&ctx->timeout_lock);
2107 }
93d2bcd2 2108 if (unlikely((req->flags & REQ_F_FAIL) &&
e4335ed3 2109 !(req->flags & REQ_F_HARDLINK))) {
33cc89a9
PB
2110 posted |= (req->link != NULL);
2111 io_fail_links(req);
2112 }
2113 return posted;
9e645e11
JA
2114}
2115
3fa5e0f3 2116static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
c69f8dbe 2117{
33cc89a9 2118 struct io_kiocb *nxt;
944e58bf 2119
9e645e11
JA
2120 /*
2121 * If LINK is set, we have dependent requests in this chain. If we
2122 * didn't fail this request, queue the first one up, moving any other
2123 * dependencies to the next request. In case of failure, fail the rest
2124 * of the chain.
2125 */
0756a869 2126 if (req->flags & IO_DISARM_MASK) {
33cc89a9 2127 struct io_ring_ctx *ctx = req->ctx;
33cc89a9
PB
2128 bool posted;
2129
79ebeaee 2130 spin_lock(&ctx->completion_lock);
33cc89a9
PB
2131 posted = io_disarm_next(req);
2132 if (posted)
2133 io_commit_cqring(req->ctx);
79ebeaee 2134 spin_unlock(&ctx->completion_lock);
33cc89a9
PB
2135 if (posted)
2136 io_cqring_ev_posted(ctx);
f2f87370 2137 }
33cc89a9
PB
2138 nxt = req->link;
2139 req->link = NULL;
2140 return nxt;
4d7dd462 2141}
9e645e11 2142
f2f87370 2143static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
3fa5e0f3 2144{
cdbff982 2145 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
3fa5e0f3
PB
2146 return NULL;
2147 return __io_req_find_next(req);
2148}
2149
f237c30a 2150static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
2c32395d
PB
2151{
2152 if (!ctx)
2153 return;
f237c30a 2154 if (*locked) {
99c8bc52
HX
2155 if (ctx->submit_state.compl_nr)
2156 io_submit_flush_completions(ctx);
2c32395d 2157 mutex_unlock(&ctx->uring_lock);
f237c30a 2158 *locked = false;
2c32395d
PB
2159 }
2160 percpu_ref_put(&ctx->refs);
2161}
2162
7cbf1722 2163static void tctx_task_work(struct callback_head *cb)
c40f6379 2164{
f237c30a 2165 bool locked = false;
ebd0df2e 2166 struct io_ring_ctx *ctx = NULL;
3f18407d
PB
2167 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
2168 task_work);
c40f6379 2169
16f72070 2170 while (1) {
3f18407d
PB
2171 struct io_wq_work_node *node;
2172
8d4ad41e
PB
2173 if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr)
2174 io_submit_flush_completions(ctx);
2175
3f18407d 2176 spin_lock_irq(&tctx->task_lock);
c6538be9 2177 node = tctx->task_list.first;
3f18407d 2178 INIT_WQ_LIST(&tctx->task_list);
6294f368
PB
2179 if (!node)
2180 tctx->task_running = false;
3f18407d 2181 spin_unlock_irq(&tctx->task_lock);
6294f368
PB
2182 if (!node)
2183 break;
3f18407d 2184
6294f368 2185 do {
3f18407d
PB
2186 struct io_wq_work_node *next = node->next;
2187 struct io_kiocb *req = container_of(node, struct io_kiocb,
2188 io_task_work.node);
2189
2190 if (req->ctx != ctx) {
f237c30a 2191 ctx_flush_and_put(ctx, &locked);
3f18407d 2192 ctx = req->ctx;
126180b9
PB
2193 /* if not contended, grab and improve batching */
2194 locked = mutex_trylock(&ctx->uring_lock);
3f18407d
PB
2195 percpu_ref_get(&ctx->refs);
2196 }
f237c30a 2197 req->io_task_work.func(req, &locked);
3f18407d 2198 node = next;
6294f368
PB
2199 } while (node);
2200
7cbf1722 2201 cond_resched();
3f18407d 2202 }
ebd0df2e 2203
f237c30a 2204 ctx_flush_and_put(ctx, &locked);
abdebba9
PB
2205
2206 /* relaxed read is enough as only the task itself sets ->in_idle */
2207 if (unlikely(atomic_read(&tctx->in_idle)))
2208 io_uring_drop_tctx_refs(current);
7cbf1722
JA
2209}
2210
e09ee510 2211static void io_req_task_work_add(struct io_kiocb *req)
7cbf1722 2212{
c15b79de 2213 struct task_struct *tsk = req->task;
7cbf1722 2214 struct io_uring_task *tctx = tsk->io_uring;
c15b79de 2215 enum task_work_notify_mode notify;
e09ee510 2216 struct io_wq_work_node *node;
0b81e80c 2217 unsigned long flags;
6294f368 2218 bool running;
7cbf1722
JA
2219
2220 WARN_ON_ONCE(!tctx);
2221
0b81e80c 2222 spin_lock_irqsave(&tctx->task_lock, flags);
7cbf1722 2223 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
6294f368
PB
2224 running = tctx->task_running;
2225 if (!running)
2226 tctx->task_running = true;
0b81e80c 2227 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
2228
2229 /* task_work already pending, we're done */
6294f368 2230 if (running)
e09ee510 2231 return;
7cbf1722 2232
c15b79de
PB
2233 /*
2234 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2235 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2236 * processing task_work. There's no reliable way to tell if TWA_RESUME
2237 * will do the job.
2238 */
2239 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
c15b79de
PB
2240 if (!task_work_add(tsk, &tctx->task_work, notify)) {
2241 wake_up_process(tsk);
e09ee510 2242 return;
c15b79de 2243 }
2215bed9 2244
0b81e80c 2245 spin_lock_irqsave(&tctx->task_lock, flags);
6294f368 2246 tctx->task_running = false;
e09ee510
PB
2247 node = tctx->task_list.first;
2248 INIT_WQ_LIST(&tctx->task_list);
0b81e80c 2249 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722 2250
e09ee510
PB
2251 while (node) {
2252 req = container_of(node, struct io_kiocb, io_task_work.node);
2253 node = node->next;
2254 if (llist_add(&req->io_task_work.fallback_node,
2255 &req->ctx->fallback_llist))
2256 schedule_delayed_work(&req->ctx->fallback_work, 1);
2257 }
eab30c4d
PB
2258}
2259
f237c30a 2260static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
c40f6379 2261{
87ceb6a6 2262 struct io_ring_ctx *ctx = req->ctx;
c40f6379 2263
b18a1a45 2264 /* not needed for normal modes, but SQPOLL depends on it */
f237c30a 2265 io_tw_lock(ctx, locked);
2593553a 2266 io_req_complete_failed(req, req->result);
c40f6379
JA
2267}
2268
f237c30a 2269static void io_req_task_submit(struct io_kiocb *req, bool *locked)
c40f6379
JA
2270{
2271 struct io_ring_ctx *ctx = req->ctx;
2272
f237c30a 2273 io_tw_lock(ctx, locked);
316319e8 2274 /* req->task == current here, checking PF_EXITING is safe */
af066f31 2275 if (likely(!(req->task->flags & PF_EXITING)))
c5eef2b9 2276 __io_queue_sqe(req);
81b6d05c 2277 else
2593553a 2278 io_req_complete_failed(req, -EFAULT);
c40f6379
JA
2279}
2280
2c4b8eb6 2281static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
c40f6379 2282{
2c4b8eb6 2283 req->result = ret;
5b0a6acc 2284 req->io_task_work.func = io_req_task_cancel;
e09ee510 2285 io_req_task_work_add(req);
c40f6379
JA
2286}
2287
2c4b8eb6 2288static void io_req_task_queue(struct io_kiocb *req)
a3df7698 2289{
5b0a6acc 2290 req->io_task_work.func = io_req_task_submit;
e09ee510 2291 io_req_task_work_add(req);
a3df7698
PB
2292}
2293
773af691
JA
2294static void io_req_task_queue_reissue(struct io_kiocb *req)
2295{
2296 req->io_task_work.func = io_queue_async_work;
2297 io_req_task_work_add(req);
2298}
2299
f2f87370 2300static inline void io_queue_next(struct io_kiocb *req)
c69f8dbe 2301{
9b5f7bd9 2302 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf
PB
2303
2304 if (nxt)
906a8c3f 2305 io_req_task_queue(nxt);
c69f8dbe
JL
2306}
2307
c3524383 2308static void io_free_req(struct io_kiocb *req)
7a743e22 2309{
c3524383
PB
2310 io_queue_next(req);
2311 __io_free_req(req);
2312}
8766dd51 2313
f237c30a
PB
2314static void io_free_req_work(struct io_kiocb *req, bool *locked)
2315{
2316 io_free_req(req);
2317}
2318
2d6500d4 2319struct req_batch {
5af1d13e
PB
2320 struct task_struct *task;
2321 int task_refs;
1b4c351f 2322 int ctx_refs;
2d6500d4
PB
2323};
2324
5af1d13e
PB
2325static inline void io_init_req_batch(struct req_batch *rb)
2326{
5af1d13e 2327 rb->task_refs = 0;
9ae72463 2328 rb->ctx_refs = 0;
5af1d13e
PB
2329 rb->task = NULL;
2330}
2331
2d6500d4
PB
2332static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2333 struct req_batch *rb)
2334{
9ae72463
PB
2335 if (rb->ctx_refs)
2336 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
e98e49b2 2337 if (rb->task)
e9dbe221 2338 io_put_task(rb->task, rb->task_refs);
2d6500d4
PB
2339}
2340
6ff119a6
PB
2341static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2342 struct io_submit_state *state)
2d6500d4 2343{
f2f87370 2344 io_queue_next(req);
96670657 2345 io_dismantle_req(req);
2d6500d4 2346
e3bc8e9d 2347 if (req->task != rb->task) {
7c660731
PB
2348 if (rb->task)
2349 io_put_task(rb->task, rb->task_refs);
e3bc8e9d
JA
2350 rb->task = req->task;
2351 rb->task_refs = 0;
5af1d13e 2352 }
e3bc8e9d 2353 rb->task_refs++;
9ae72463 2354 rb->ctx_refs++;
5af1d13e 2355
bd759045 2356 if (state->free_reqs != ARRAY_SIZE(state->reqs))
6ff119a6 2357 state->reqs[state->free_reqs++] = req;
bd759045 2358 else
cd0ca2e0 2359 list_add(&req->inflight_entry, &state->free_list);
7a743e22
PB
2360}
2361
2a2758f2 2362static void io_submit_flush_completions(struct io_ring_ctx *ctx)
a141dd89 2363 __must_hold(&ctx->uring_lock)
905c172f 2364{
cd0ca2e0
PB
2365 struct io_submit_state *state = &ctx->submit_state;
2366 int i, nr = state->compl_nr;
905c172f
PB
2367 struct req_batch rb;
2368
79ebeaee 2369 spin_lock(&ctx->completion_lock);
905c172f 2370 for (i = 0; i < nr; i++) {
cd0ca2e0 2371 struct io_kiocb *req = state->compl_reqs[i];
5182ed2e 2372
d4d19c19
PB
2373 __io_cqring_fill_event(ctx, req->user_data, req->result,
2374 req->compl.cflags);
905c172f
PB
2375 }
2376 io_commit_cqring(ctx);
79ebeaee 2377 spin_unlock(&ctx->completion_lock);
905c172f 2378 io_cqring_ev_posted(ctx);
5182ed2e
PB
2379
2380 io_init_req_batch(&rb);
905c172f 2381 for (i = 0; i < nr; i++) {
cd0ca2e0 2382 struct io_kiocb *req = state->compl_reqs[i];
905c172f 2383
91c2f697 2384 if (req_ref_put_and_test(req))
6ff119a6 2385 io_req_free_batch(&rb, req, &ctx->submit_state);
905c172f
PB
2386 }
2387
2388 io_req_free_batch_finish(ctx, &rb);
cd0ca2e0 2389 state->compl_nr = 0;
7a743e22
PB
2390}
2391
ba816ad6
JA
2392/*
2393 * Drop reference to request, return next in chain (if there is one) if this
2394 * was the last reference to this request.
2395 */
0d85035a 2396static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 2397{
9b5f7bd9
PB
2398 struct io_kiocb *nxt = NULL;
2399
de9b4cca 2400 if (req_ref_put_and_test(req)) {
9b5f7bd9 2401 nxt = io_req_find_next(req);
4d7dd462 2402 __io_free_req(req);
2a44f467 2403 }
9b5f7bd9 2404 return nxt;
2b188cc1
JA
2405}
2406
0d85035a 2407static inline void io_put_req(struct io_kiocb *req)
e65ef56d 2408{
de9b4cca 2409 if (req_ref_put_and_test(req))
e65ef56d 2410 io_free_req(req);
2b188cc1
JA
2411}
2412
91c2f697 2413static inline void io_put_req_deferred(struct io_kiocb *req)
216578e5 2414{
91c2f697 2415 if (req_ref_put_and_test(req)) {
f237c30a 2416 req->io_task_work.func = io_free_req_work;
543af3a1
PB
2417 io_req_task_work_add(req);
2418 }
216578e5
PB
2419}
2420
6c503150 2421static unsigned io_cqring_events(struct io_ring_ctx *ctx)
a3a0e43f
JA
2422{
2423 /* See comment at the top of this file */
2424 smp_rmb();
e23de15f 2425 return __io_cqring_events(ctx);
a3a0e43f
JA
2426}
2427
fb5ccc98
PB
2428static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2429{
2430 struct io_rings *rings = ctx->rings;
2431
2432 /* make sure SQ entry isn't read before tail */
2433 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2434}
2435
8ff069bf 2436static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
e94f141b 2437{
8ff069bf 2438 unsigned int cflags;
e94f141b 2439
bcda7baa
JA
2440 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2441 cflags |= IORING_CQE_F_BUFFER;
0e1b6fe3 2442 req->flags &= ~REQ_F_BUFFER_SELECTED;
bcda7baa
JA
2443 kfree(kbuf);
2444 return cflags;
e94f141b
JA
2445}
2446
8ff069bf 2447static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
bcda7baa 2448{
4d954c25 2449 struct io_buffer *kbuf;
bcda7baa 2450
ae421d93
PB
2451 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
2452 return 0;
4d954c25 2453 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
8ff069bf
PB
2454 return io_put_kbuf(req, kbuf);
2455}
2456
4c6e277c
JA
2457static inline bool io_run_task_work(void)
2458{
ef98eb04 2459 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
4c6e277c 2460 __set_current_state(TASK_RUNNING);
ef98eb04 2461 tracehook_notify_signal();
4c6e277c
JA
2462 return true;
2463 }
2464
2465 return false;
bcda7baa
JA
2466}
2467
def596e9
JA
2468/*
2469 * Find and free completed poll iocbs
2470 */
2471static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
a8576af9 2472 struct list_head *done)
def596e9 2473{
8237e045 2474 struct req_batch rb;
def596e9 2475 struct io_kiocb *req;
bbde017a
XW
2476
2477 /* order with ->result store in io_complete_rw_iopoll() */
2478 smp_rmb();
def596e9 2479
5af1d13e 2480 io_init_req_batch(&rb);
def596e9 2481 while (!list_empty(done)) {
d21ffe7e 2482 req = list_first_entry(done, struct io_kiocb, inflight_entry);
f161340d
PB
2483 list_del(&req->inflight_entry);
2484
ae421d93
PB
2485 __io_cqring_fill_event(ctx, req->user_data, req->result,
2486 io_put_rw_kbuf(req));
def596e9
JA
2487 (*nr_events)++;
2488
de9b4cca 2489 if (req_ref_put_and_test(req))
6ff119a6 2490 io_req_free_batch(&rb, req, &ctx->submit_state);
def596e9 2491 }
def596e9 2492
09bb8394 2493 io_commit_cqring(ctx);
80c18e4a 2494 io_cqring_ev_posted_iopoll(ctx);
2d6500d4 2495 io_req_free_batch_finish(ctx, &rb);
581f9810
BM
2496}
2497
def596e9 2498static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
a8576af9 2499 long min)
def596e9
JA
2500{
2501 struct io_kiocb *req, *tmp;
2502 LIST_HEAD(done);
2503 bool spin;
def596e9
JA
2504
2505 /*
2506 * Only spin for completions if we don't have multiple devices hanging
2507 * off our complete list, and we're under the requested amount.
2508 */
915b3dde 2509 spin = !ctx->poll_multi_queue && *nr_events < min;
def596e9 2510
d21ffe7e 2511 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
9adbd45d 2512 struct kiocb *kiocb = &req->rw.kiocb;
a2416e1e 2513 int ret;
def596e9
JA
2514
2515 /*
581f9810
BM
2516 * Move completed and retryable entries to our local lists.
2517 * If we find a request that requires polling, break out
2518 * and complete those lists first, if we have entries there.
def596e9 2519 */
65a6543d 2520 if (READ_ONCE(req->iopoll_completed)) {
d21ffe7e 2521 list_move_tail(&req->inflight_entry, &done);
def596e9
JA
2522 continue;
2523 }
2524 if (!list_empty(&done))
2525 break;
2526
2527 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
a2416e1e
PB
2528 if (unlikely(ret < 0))
2529 return ret;
2530 else if (ret)
2531 spin = false;
def596e9 2532
3aadc23e
PB
2533 /* iopoll may have completed current req */
2534 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2535 list_move_tail(&req->inflight_entry, &done);
def596e9
JA
2536 }
2537
2538 if (!list_empty(&done))
a8576af9 2539 io_iopoll_complete(ctx, nr_events, &done);
def596e9 2540
a2416e1e 2541 return 0;
def596e9
JA
2542}
2543
def596e9
JA
2544/*
2545 * We can't just wait for polled events to come to us, we have to actively
2546 * find and complete them.
2547 */
b2edc0a7 2548static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
2549{
2550 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2551 return;
2552
2553 mutex_lock(&ctx->uring_lock);
540e32a0 2554 while (!list_empty(&ctx->iopoll_list)) {
def596e9
JA
2555 unsigned int nr_events = 0;
2556
a8576af9 2557 io_do_iopoll(ctx, &nr_events, 0);
08f5439f 2558
b2edc0a7
PB
2559 /* let it sleep and repeat later if can't complete a request */
2560 if (nr_events == 0)
2561 break;
08f5439f
JA
2562 /*
2563 * Ensure we allow local-to-the-cpu processing to take place,
2564 * in this case we need to ensure that we reap all events.
3fcee5a6 2565 * Also let task_work, etc. to progress by releasing the mutex
08f5439f 2566 */
3fcee5a6
PB
2567 if (need_resched()) {
2568 mutex_unlock(&ctx->uring_lock);
2569 cond_resched();
2570 mutex_lock(&ctx->uring_lock);
2571 }
def596e9
JA
2572 }
2573 mutex_unlock(&ctx->uring_lock);
2574}
2575
7668b92a 2576static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 2577{
7668b92a 2578 unsigned int nr_events = 0;
e9979b36 2579 int ret = 0;
500f9fba 2580
c7849be9
XW
2581 /*
2582 * We disallow the app entering submit/complete with polling, but we
2583 * still need to lock the ring to prevent racing with polled issue
2584 * that got punted to a workqueue.
2585 */
2586 mutex_lock(&ctx->uring_lock);
f39c8a5b
PB
2587 /*
2588 * Don't enter poll loop if we already have events pending.
2589 * If we do, we can potentially be spinning for commands that
2590 * already triggered a CQE (eg in error).
2591 */
5ed7a37d 2592 if (test_bit(0, &ctx->check_cq_overflow))
f39c8a5b
PB
2593 __io_cqring_overflow_flush(ctx, false);
2594 if (io_cqring_events(ctx))
2595 goto out;
def596e9 2596 do {
500f9fba
JA
2597 /*
2598 * If a submit got punted to a workqueue, we can have the
2599 * application entering polling for a command before it gets
2600 * issued. That app will hold the uring_lock for the duration
2601 * of the poll right here, so we need to take a breather every
2602 * now and then to ensure that the issue has a chance to add
2603 * the poll to the issued list. Otherwise we can spin here
2604 * forever, while the workqueue is stuck trying to acquire the
2605 * very same mutex.
2606 */
e9979b36 2607 if (list_empty(&ctx->iopoll_list)) {
8f487ef2
PB
2608 u32 tail = ctx->cached_cq_tail;
2609
500f9fba 2610 mutex_unlock(&ctx->uring_lock);
4c6e277c 2611 io_run_task_work();
500f9fba 2612 mutex_lock(&ctx->uring_lock);
def596e9 2613
8f487ef2
PB
2614 /* some requests don't go through iopoll_list */
2615 if (tail != ctx->cached_cq_tail ||
2616 list_empty(&ctx->iopoll_list))
e9979b36 2617 break;
500f9fba 2618 }
a8576af9 2619 ret = io_do_iopoll(ctx, &nr_events, min);
f39c8a5b
PB
2620 } while (!ret && nr_events < min && !need_resched());
2621out:
500f9fba 2622 mutex_unlock(&ctx->uring_lock);
def596e9
JA
2623 return ret;
2624}
2625
491381ce 2626static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 2627{
491381ce
JA
2628 /*
2629 * Tell lockdep we inherited freeze protection from submission
2630 * thread.
2631 */
2632 if (req->flags & REQ_F_ISREG) {
1c98679d 2633 struct super_block *sb = file_inode(req->file)->i_sb;
2b188cc1 2634
1c98679d
PB
2635 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2636 sb_end_write(sb);
2b188cc1
JA
2637 }
2638}
2639
b63534c4 2640#ifdef CONFIG_BLOCK
dc2a6e9a 2641static bool io_resubmit_prep(struct io_kiocb *req)
b63534c4 2642{
ab454438 2643 struct io_async_rw *rw = req->async_data;
b63534c4 2644
ab454438
PB
2645 if (!rw)
2646 return !io_req_prep_async(req);
cd658695 2647 iov_iter_restore(&rw->iter, &rw->iter_state);
ab454438 2648 return true;
b63534c4 2649}
b63534c4 2650
3e6a0d3c 2651static bool io_rw_should_reissue(struct io_kiocb *req)
b63534c4 2652{
355afaeb 2653 umode_t mode = file_inode(req->file)->i_mode;
3e6a0d3c 2654 struct io_ring_ctx *ctx = req->ctx;
b63534c4 2655
355afaeb
JA
2656 if (!S_ISBLK(mode) && !S_ISREG(mode))
2657 return false;
3e6a0d3c
JA
2658 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2659 !(ctx->flags & IORING_SETUP_IOPOLL)))
b63534c4 2660 return false;
7c977a58
JA
2661 /*
2662 * If ref is dying, we might be running poll reap from the exit work.
2663 * Don't attempt to reissue from that path, just let it fail with
2664 * -EAGAIN.
2665 */
3e6a0d3c
JA
2666 if (percpu_ref_is_dying(&ctx->refs))
2667 return false;
ef046888
JA
2668 /*
2669 * Play it safe and assume not safe to re-import and reissue if we're
2670 * not in the original thread group (or in task context).
2671 */
2672 if (!same_thread_group(req->task, current) || !in_task())
2673 return false;
3e6a0d3c
JA
2674 return true;
2675}
e82ad485 2676#else
a1ff1e3f 2677static bool io_resubmit_prep(struct io_kiocb *req)
e82ad485
JA
2678{
2679 return false;
2680}
e82ad485 2681static bool io_rw_should_reissue(struct io_kiocb *req)
3e6a0d3c 2682{
b63534c4
JA
2683 return false;
2684}
3e6a0d3c 2685#endif
b63534c4 2686
8ef12efe 2687static bool __io_complete_rw_common(struct io_kiocb *req, long res)
a1d7c393 2688{
48a904e2 2689 if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
b65c128f 2690 kiocb_end_write(req);
48a904e2
JA
2691 fsnotify_modify(req->file);
2692 } else {
2693 fsnotify_access(req->file);
2694 }
9532b99b
PB
2695 if (res != req->result) {
2696 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2697 io_rw_should_reissue(req)) {
2698 req->flags |= REQ_F_REISSUE;
8ef12efe 2699 return true;
9532b99b 2700 }
93d2bcd2 2701 req_set_fail(req);
8ef12efe 2702 req->result = res;
9532b99b 2703 }
8ef12efe
JA
2704 return false;
2705}
2706
f237c30a 2707static void io_req_task_complete(struct io_kiocb *req, bool *locked)
8ef12efe 2708{
126180b9
PB
2709 unsigned int cflags = io_put_rw_kbuf(req);
2710 long res = req->result;
2711
2712 if (*locked) {
2713 struct io_ring_ctx *ctx = req->ctx;
2714 struct io_submit_state *state = &ctx->submit_state;
2715
2716 io_req_complete_state(req, res, cflags);
2717 state->compl_reqs[state->compl_nr++] = req;
2718 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
2719 io_submit_flush_completions(ctx);
2720 } else {
2721 io_req_complete_post(req, res, cflags);
2722 }
8ef12efe
JA
2723}
2724
2725static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2726 unsigned int issue_flags)
2727{
2728 if (__io_complete_rw_common(req, res))
2729 return;
63637853 2730 __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
ba816ad6
JA
2731}
2732
2733static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2734{
9adbd45d 2735 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 2736
8ef12efe
JA
2737 if (__io_complete_rw_common(req, res))
2738 return;
2739 req->result = res;
2740 req->io_task_work.func = io_req_task_complete;
2741 io_req_task_work_add(req);
2b188cc1
JA
2742}
2743
def596e9
JA
2744static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2745{
9adbd45d 2746 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 2747
491381ce
JA
2748 if (kiocb->ki_flags & IOCB_WRITE)
2749 kiocb_end_write(req);
9532b99b 2750 if (unlikely(res != req->result)) {
b66ceaf3
PB
2751 if (res == -EAGAIN && io_rw_should_reissue(req)) {
2752 req->flags |= REQ_F_REISSUE;
2753 return;
9532b99b 2754 }
8c130827 2755 }
bbde017a
XW
2756
2757 WRITE_ONCE(req->result, res);
b9b0e0d3 2758 /* order with io_iopoll_complete() checking ->result */
cd664b0e
PB
2759 smp_wmb();
2760 WRITE_ONCE(req->iopoll_completed, 1);
def596e9
JA
2761}
2762
2763/*
2764 * After the iocb has been issued, it's safe to be found on the poll list.
2765 * Adding the kiocb to the list AFTER submission ensures that we don't
f39c8a5b 2766 * find it from a io_do_iopoll() thread before the issuer is done
def596e9
JA
2767 * accessing the kiocb cookie.
2768 */
cb3d8972 2769static void io_iopoll_req_issued(struct io_kiocb *req)
def596e9
JA
2770{
2771 struct io_ring_ctx *ctx = req->ctx;
cb3d8972
PB
2772 const bool in_async = io_wq_current_is_worker();
2773
2774 /* workqueue context doesn't hold uring_lock, grab it now */
2775 if (unlikely(in_async))
2776 mutex_lock(&ctx->uring_lock);
def596e9
JA
2777
2778 /*
2779 * Track whether we have multiple files in our lists. This will impact
2780 * how we do polling eventually, not spinning if we're on potentially
2781 * different devices.
2782 */
540e32a0 2783 if (list_empty(&ctx->iopoll_list)) {
915b3dde
HX
2784 ctx->poll_multi_queue = false;
2785 } else if (!ctx->poll_multi_queue) {
def596e9 2786 struct io_kiocb *list_req;
915b3dde 2787 unsigned int queue_num0, queue_num1;
def596e9 2788
540e32a0 2789 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
d21ffe7e 2790 inflight_entry);
915b3dde
HX
2791
2792 if (list_req->file != req->file) {
2793 ctx->poll_multi_queue = true;
2794 } else {
2795 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2796 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2797 if (queue_num0 != queue_num1)
2798 ctx->poll_multi_queue = true;
2799 }
def596e9
JA
2800 }
2801
2802 /*
2803 * For fast devices, IO may have already completed. If it has, add
2804 * it to the front so we find it first.
2805 */
65a6543d 2806 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2807 list_add(&req->inflight_entry, &ctx->iopoll_list);
def596e9 2808 else
d21ffe7e 2809 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
bdcd3eab 2810
cb3d8972
PB
2811 if (unlikely(in_async)) {
2812 /*
2813 * If IORING_SETUP_SQPOLL is enabled, sqes are either handle
2814 * in sq thread task context or in io worker task context. If
2815 * current task context is sq thread, we don't need to check
2816 * whether should wake up sq thread.
2817 */
2818 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2819 wq_has_sleeper(&ctx->sq_data->wait))
2820 wake_up(&ctx->sq_data->wait);
2821
2822 mutex_unlock(&ctx->uring_lock);
2823 }
def596e9
JA
2824}
2825
4503b767
JA
2826static bool io_bdev_nowait(struct block_device *bdev)
2827{
9ba0d0c8 2828 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
4503b767
JA
2829}
2830
2b188cc1
JA
2831/*
2832 * If we tracked the file through the SCM inflight mechanism, we could support
2833 * any file. For now, just ensure that anything potentially problematic is done
2834 * inline.
2835 */
b191e2df 2836static bool __io_file_supports_nowait(struct file *file, int rw)
2b188cc1
JA
2837{
2838 umode_t mode = file_inode(file)->i_mode;
2839
4503b767 2840 if (S_ISBLK(mode)) {
4e7b5671
CH
2841 if (IS_ENABLED(CONFIG_BLOCK) &&
2842 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
4503b767
JA
2843 return true;
2844 return false;
2845 }
976517f1 2846 if (S_ISSOCK(mode))
2b188cc1 2847 return true;
4503b767 2848 if (S_ISREG(mode)) {
4e7b5671
CH
2849 if (IS_ENABLED(CONFIG_BLOCK) &&
2850 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
4503b767
JA
2851 file->f_op != &io_uring_fops)
2852 return true;
2853 return false;
2854 }
2b188cc1 2855
c5b85625
JA
2856 /* any ->read/write should understand O_NONBLOCK */
2857 if (file->f_flags & O_NONBLOCK)
2858 return true;
2859
af197f50
JA
2860 if (!(file->f_mode & FMODE_NOWAIT))
2861 return false;
2862
2863 if (rw == READ)
2864 return file->f_op->read_iter != NULL;
2865
2866 return file->f_op->write_iter != NULL;
2b188cc1
JA
2867}
2868
b191e2df 2869static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
7b29f92d 2870{
b191e2df 2871 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
7b29f92d 2872 return true;
b191e2df 2873 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
7b29f92d
JA
2874 return true;
2875
b191e2df 2876 return __io_file_supports_nowait(req->file, rw);
7b29f92d
JA
2877}
2878
5d329e12
JA
2879static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2880 int rw)
2b188cc1 2881{
def596e9 2882 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2883 struct kiocb *kiocb = &req->rw.kiocb;
75c668cd 2884 struct file *file = req->file;
09bb8394
JA
2885 unsigned ioprio;
2886 int ret;
2b188cc1 2887
c97d8a0f 2888 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
491381ce
JA
2889 req->flags |= REQ_F_ISREG;
2890
2b188cc1 2891 kiocb->ki_pos = READ_ONCE(sqe->off);
0c840c47
JA
2892 if (kiocb->ki_pos == -1) {
2893 if (!(file->f_mode & FMODE_STREAM)) {
2894 req->flags |= REQ_F_CUR_POS;
2895 kiocb->ki_pos = file->f_pos;
2896 } else {
2897 kiocb->ki_pos = 0;
2898 }
ba04291e 2899 }
2b188cc1 2900 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2901 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2902 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2903 if (unlikely(ret))
2904 return ret;
2b188cc1 2905
5d329e12
JA
2906 /*
2907 * If the file is marked O_NONBLOCK, still allow retry for it if it
2908 * supports async. Otherwise it's impossible to use O_NONBLOCK files
2909 * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
2910 */
2911 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
2912 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
75c668cd
PB
2913 req->flags |= REQ_F_NOWAIT;
2914
2b188cc1
JA
2915 ioprio = READ_ONCE(sqe->ioprio);
2916 if (ioprio) {
2917 ret = ioprio_check_cap(ioprio);
2918 if (ret)
09bb8394 2919 return ret;
2b188cc1
JA
2920
2921 kiocb->ki_ioprio = ioprio;
2922 } else
2923 kiocb->ki_ioprio = get_current_ioprio();
2924
def596e9 2925 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2926 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2927 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2928 return -EOPNOTSUPP;
2b188cc1 2929
394918eb 2930 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
def596e9 2931 kiocb->ki_complete = io_complete_rw_iopoll;
65a6543d 2932 req->iopoll_completed = 0;
def596e9 2933 } else {
09bb8394
JA
2934 if (kiocb->ki_flags & IOCB_HIPRI)
2935 return -EINVAL;
def596e9
JA
2936 kiocb->ki_complete = io_complete_rw;
2937 }
9adbd45d 2938
bf1aa2d2
PB
2939 /* used for fixed read/write too - just read unconditionally */
2940 req->buf_index = READ_ONCE(sqe->buf_index);
2941 req->imu = NULL;
2942
eae071c9
PB
2943 if (req->opcode == IORING_OP_READ_FIXED ||
2944 req->opcode == IORING_OP_WRITE_FIXED) {
bf1aa2d2
PB
2945 struct io_ring_ctx *ctx = req->ctx;
2946 u16 index;
2947
2948 if (unlikely(req->buf_index >= ctx->nr_user_bufs))
2949 return -EFAULT;
2950 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
2951 req->imu = ctx->user_bufs[index];
eae071c9
PB
2952 io_req_set_rsrc_node(req);
2953 }
2954
3529d8c2
JA
2955 req->rw.addr = READ_ONCE(sqe->addr);
2956 req->rw.len = READ_ONCE(sqe->len);
2b188cc1 2957 return 0;
2b188cc1
JA
2958}
2959
2960static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2961{
2962 switch (ret) {
2963 case -EIOCBQUEUED:
2964 break;
2965 case -ERESTARTSYS:
2966 case -ERESTARTNOINTR:
2967 case -ERESTARTNOHAND:
2968 case -ERESTART_RESTARTBLOCK:
2969 /*
2970 * We can't just restart the syscall, since previously
2971 * submitted sqes may already be in progress. Just fail this
2972 * IO with EINTR.
2973 */
2974 ret = -EINTR;
df561f66 2975 fallthrough;
2b188cc1
JA
2976 default:
2977 kiocb->ki_complete(kiocb, ret, 0);
2978 }
2979}
2980
a1d7c393 2981static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
889fca73 2982 unsigned int issue_flags)
ba816ad6 2983{
ba04291e 2984 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
e8c2bc1f 2985 struct io_async_rw *io = req->async_data;
ba04291e 2986
227c0c96 2987 /* add previously done IO, if any */
e8c2bc1f 2988 if (io && io->bytes_done > 0) {
227c0c96 2989 if (ret < 0)
e8c2bc1f 2990 ret = io->bytes_done;
227c0c96 2991 else
e8c2bc1f 2992 ret += io->bytes_done;
227c0c96
JA
2993 }
2994
ba04291e
JA
2995 if (req->flags & REQ_F_CUR_POS)
2996 req->file->f_pos = kiocb->ki_pos;
b66ceaf3 2997 if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
889fca73 2998 __io_complete_rw(req, ret, 0, issue_flags);
ba816ad6
JA
2999 else
3000 io_rw_done(kiocb, ret);
97284637 3001
b66ceaf3 3002 if (req->flags & REQ_F_REISSUE) {
97284637 3003 req->flags &= ~REQ_F_REISSUE;
a7be7c23 3004 if (io_resubmit_prep(req)) {
773af691 3005 io_req_task_queue_reissue(req);
8c130827 3006 } else {
b66ceaf3
PB
3007 unsigned int cflags = io_put_rw_kbuf(req);
3008 struct io_ring_ctx *ctx = req->ctx;
3009
93d2bcd2 3010 req_set_fail(req);
14cfbb7a 3011 if (!(issue_flags & IO_URING_F_NONBLOCK)) {
b66ceaf3
PB
3012 mutex_lock(&ctx->uring_lock);
3013 __io_req_complete(req, issue_flags, ret, cflags);
3014 mutex_unlock(&ctx->uring_lock);
3015 } else {
3016 __io_req_complete(req, issue_flags, ret, cflags);
3017 }
97284637
PB
3018 }
3019 }
ba816ad6
JA
3020}
3021
eae071c9
PB
3022static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3023 struct io_mapped_ubuf *imu)
edafccee 3024{
9adbd45d 3025 size_t len = req->rw.len;
75769e3f 3026 u64 buf_end, buf_addr = req->rw.addr;
edafccee 3027 size_t offset;
edafccee 3028
75769e3f 3029 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
edafccee
JA
3030 return -EFAULT;
3031 /* not inside the mapped region */
4751f53d 3032 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
edafccee
JA
3033 return -EFAULT;
3034
3035 /*
3036 * May not be a start of buffer, set size appropriately
3037 * and advance us to the beginning.
3038 */
3039 offset = buf_addr - imu->ubuf;
3040 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
3041
3042 if (offset) {
3043 /*
3044 * Don't use iov_iter_advance() here, as it's really slow for
3045 * using the latter parts of a big fixed buffer - it iterates
3046 * over each segment manually. We can cheat a bit here, because
3047 * we know that:
3048 *
3049 * 1) it's a BVEC iter, we set it up
3050 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3051 * first and last bvec
3052 *
3053 * So just find our index, and adjust the iterator afterwards.
3054 * If the offset is within the first bvec (or the whole first
3055 * bvec, just use iov_iter_advance(). This makes it easier
3056 * since we can just skip the first segment, which may not
3057 * be PAGE_SIZE aligned.
3058 */
3059 const struct bio_vec *bvec = imu->bvec;
3060
3061 if (offset <= bvec->bv_len) {
3062 iov_iter_advance(iter, offset);
3063 } else {
3064 unsigned long seg_skip;
3065
3066 /* skip first vec */
3067 offset -= bvec->bv_len;
3068 seg_skip = 1 + (offset >> PAGE_SHIFT);
3069
3070 iter->bvec = bvec + seg_skip;
3071 iter->nr_segs -= seg_skip;
99c79f66 3072 iter->count -= bvec->bv_len + offset;
bd11b3a3 3073 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
3074 }
3075 }
3076
847595de 3077 return 0;
edafccee
JA
3078}
3079
eae071c9
PB
3080static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
3081{
bf1aa2d2
PB
3082 if (WARN_ON_ONCE(!req->imu))
3083 return -EFAULT;
3084 return __io_import_fixed(req, rw, iter, req->imu);
eae071c9
PB
3085}
3086
bcda7baa
JA
3087static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3088{
3089 if (needs_lock)
3090 mutex_unlock(&ctx->uring_lock);
3091}
3092
3093static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3094{
3095 /*
3096 * "Normal" inline submissions always hold the uring_lock, since we
3097 * grab it from the system call. Same is true for the SQPOLL offload.
3098 * The only exception is when we've detached the request and issue it
3099 * from an async worker thread, grab the lock for that case.
3100 */
3101 if (needs_lock)
3102 mutex_lock(&ctx->uring_lock);
3103}
3104
3105static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3106 int bgid, struct io_buffer *kbuf,
3107 bool needs_lock)
3108{
3109 struct io_buffer *head;
3110
3111 if (req->flags & REQ_F_BUFFER_SELECTED)
3112 return kbuf;
3113
3114 io_ring_submit_lock(req->ctx, needs_lock);
3115
3116 lockdep_assert_held(&req->ctx->uring_lock);
3117
9e15c3a0 3118 head = xa_load(&req->ctx->io_buffers, bgid);
bcda7baa
JA
3119 if (head) {
3120 if (!list_empty(&head->list)) {
3121 kbuf = list_last_entry(&head->list, struct io_buffer,
3122 list);
3123 list_del(&kbuf->list);
3124 } else {
3125 kbuf = head;
9e15c3a0 3126 xa_erase(&req->ctx->io_buffers, bgid);
bcda7baa
JA
3127 }
3128 if (*len > kbuf->len)
3129 *len = kbuf->len;
3130 } else {
3131 kbuf = ERR_PTR(-ENOBUFS);
3132 }
3133
3134 io_ring_submit_unlock(req->ctx, needs_lock);
3135
3136 return kbuf;
3137}
3138
4d954c25
JA
3139static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3140 bool needs_lock)
3141{
3142 struct io_buffer *kbuf;
4f4eeba8 3143 u16 bgid;
4d954c25
JA
3144
3145 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
4f4eeba8 3146 bgid = req->buf_index;
4d954c25
JA
3147 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3148 if (IS_ERR(kbuf))
3149 return kbuf;
3150 req->rw.addr = (u64) (unsigned long) kbuf;
3151 req->flags |= REQ_F_BUFFER_SELECTED;
3152 return u64_to_user_ptr(kbuf->addr);
3153}
3154
3155#ifdef CONFIG_COMPAT
3156static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3157 bool needs_lock)
3158{
3159 struct compat_iovec __user *uiov;
3160 compat_ssize_t clen;
3161 void __user *buf;
3162 ssize_t len;
3163
3164 uiov = u64_to_user_ptr(req->rw.addr);
3165 if (!access_ok(uiov, sizeof(*uiov)))
3166 return -EFAULT;
3167 if (__get_user(clen, &uiov->iov_len))
3168 return -EFAULT;
3169 if (clen < 0)
3170 return -EINVAL;
3171
3172 len = clen;
3173 buf = io_rw_buffer_select(req, &len, needs_lock);
3174 if (IS_ERR(buf))
3175 return PTR_ERR(buf);
3176 iov[0].iov_base = buf;
3177 iov[0].iov_len = (compat_size_t) len;
3178 return 0;
3179}
3180#endif
3181
3182static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3183 bool needs_lock)
3184{
3185 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3186 void __user *buf;
3187 ssize_t len;
3188
3189 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3190 return -EFAULT;
3191
3192 len = iov[0].iov_len;
3193 if (len < 0)
3194 return -EINVAL;
3195 buf = io_rw_buffer_select(req, &len, needs_lock);
3196 if (IS_ERR(buf))
3197 return PTR_ERR(buf);
3198 iov[0].iov_base = buf;
3199 iov[0].iov_len = len;
3200 return 0;
3201}
3202
3203static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3204 bool needs_lock)
3205{
dddb3e26
JA
3206 if (req->flags & REQ_F_BUFFER_SELECTED) {
3207 struct io_buffer *kbuf;
3208
3209 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3210 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3211 iov[0].iov_len = kbuf->len;
4d954c25 3212 return 0;
dddb3e26 3213 }
dd201662 3214 if (req->rw.len != 1)
4d954c25
JA
3215 return -EINVAL;
3216
3217#ifdef CONFIG_COMPAT
3218 if (req->ctx->compat)
3219 return io_compat_import(req, iov, needs_lock);
3220#endif
3221
3222 return __io_iov_buffer_select(req, iov, needs_lock);
3223}
3224
847595de
PB
3225static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3226 struct iov_iter *iter, bool needs_lock)
2b188cc1 3227{
9adbd45d
JA
3228 void __user *buf = u64_to_user_ptr(req->rw.addr);
3229 size_t sqe_len = req->rw.len;
847595de 3230 u8 opcode = req->opcode;
4d954c25 3231 ssize_t ret;
edafccee 3232
7d009165 3233 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 3234 *iovec = NULL;
9adbd45d 3235 return io_import_fixed(req, rw, iter);
edafccee 3236 }
2b188cc1 3237
bcda7baa 3238 /* buffer index only valid with fixed read/write, or buffer select */
4f4eeba8 3239 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
3240 return -EINVAL;
3241
3a6820f2 3242 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 3243 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25 3244 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
867a23ea 3245 if (IS_ERR(buf))
4d954c25 3246 return PTR_ERR(buf);
3f9d6441 3247 req->rw.len = sqe_len;
bcda7baa
JA
3248 }
3249
3a6820f2
JA
3250 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3251 *iovec = NULL;
10fc72e4 3252 return ret;
3a6820f2
JA
3253 }
3254
4d954c25
JA
3255 if (req->flags & REQ_F_BUFFER_SELECT) {
3256 ret = io_iov_buffer_select(req, *iovec, needs_lock);
847595de
PB
3257 if (!ret)
3258 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
4d954c25
JA
3259 *iovec = NULL;
3260 return ret;
3261 }
3262
89cd35c5
CH
3263 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3264 req->ctx->compat);
2b188cc1
JA
3265}
3266
0fef9483
JA
3267static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3268{
5b09e37e 3269 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
0fef9483
JA
3270}
3271
31b51510 3272/*
32960613
JA
3273 * For files that don't have ->read_iter() and ->write_iter(), handle them
3274 * by looping over ->read() or ->write() manually.
31b51510 3275 */
4017eb91 3276static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
32960613 3277{
4017eb91
JA
3278 struct kiocb *kiocb = &req->rw.kiocb;
3279 struct file *file = req->file;
32960613
JA
3280 ssize_t ret = 0;
3281
3282 /*
3283 * Don't support polled IO through this interface, and we can't
3284 * support non-blocking either. For the latter, this just causes
3285 * the kiocb to be handled from an async context.
3286 */
3287 if (kiocb->ki_flags & IOCB_HIPRI)
3288 return -EOPNOTSUPP;
3289 if (kiocb->ki_flags & IOCB_NOWAIT)
3290 return -EAGAIN;
3291
3292 while (iov_iter_count(iter)) {
311ae9e1 3293 struct iovec iovec;
32960613
JA
3294 ssize_t nr;
3295
311ae9e1
PB
3296 if (!iov_iter_is_bvec(iter)) {
3297 iovec = iov_iter_iovec(iter);
3298 } else {
4017eb91
JA
3299 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3300 iovec.iov_len = req->rw.len;
311ae9e1
PB
3301 }
3302
32960613
JA
3303 if (rw == READ) {
3304 nr = file->f_op->read(file, iovec.iov_base,
0fef9483 3305 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3306 } else {
3307 nr = file->f_op->write(file, iovec.iov_base,
0fef9483 3308 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3309 }
3310
3311 if (nr < 0) {
3312 if (!ret)
3313 ret = nr;
3314 break;
3315 }
cf15ae0a 3316 ret += nr;
16c8d2df
JA
3317 if (!iov_iter_is_bvec(iter)) {
3318 iov_iter_advance(iter, nr);
3319 } else {
16c8d2df 3320 req->rw.addr += nr;
cf15ae0a
JA
3321 req->rw.len -= nr;
3322 if (!req->rw.len)
3323 break;
16c8d2df 3324 }
32960613
JA
3325 if (nr != iovec.iov_len)
3326 break;
32960613
JA
3327 }
3328
3329 return ret;
3330}
3331
ff6165b2
JA
3332static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3333 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 3334{
e8c2bc1f 3335 struct io_async_rw *rw = req->async_data;
b64e3444 3336
ff6165b2 3337 memcpy(&rw->iter, iter, sizeof(*iter));
afb87658 3338 rw->free_iovec = iovec;
227c0c96 3339 rw->bytes_done = 0;
ff6165b2 3340 /* can only be fixed buffers, no need to do anything */
9c3a205c 3341 if (iov_iter_is_bvec(iter))
ff6165b2 3342 return;
b64e3444 3343 if (!iovec) {
ff6165b2
JA
3344 unsigned iov_off = 0;
3345
3346 rw->iter.iov = rw->fast_iov;
3347 if (iter->iov != fast_iov) {
3348 iov_off = iter->iov - fast_iov;
3349 rw->iter.iov += iov_off;
3350 }
3351 if (rw->fast_iov != fast_iov)
3352 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
45097dae 3353 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
3354 } else {
3355 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
3356 }
3357}
3358
6cb78689 3359static inline int io_alloc_async_data(struct io_kiocb *req)
3d9932a8 3360{
e8c2bc1f
JA
3361 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3362 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3363 return req->async_data == NULL;
3d9932a8
XW
3364}
3365
ff6165b2
JA
3366static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3367 const struct iovec *fast_iov,
227c0c96 3368 struct iov_iter *iter, bool force)
b7bb4f7d 3369{
26f0505a 3370 if (!force && !io_op_defs[req->opcode].needs_async_setup)
74566df3 3371 return 0;
e8c2bc1f 3372 if (!req->async_data) {
cd658695
JA
3373 struct io_async_rw *iorw;
3374
6cb78689 3375 if (io_alloc_async_data(req)) {
6bf985dc 3376 kfree(iovec);
5d204bcf 3377 return -ENOMEM;
6bf985dc 3378 }
b7bb4f7d 3379
ff6165b2 3380 io_req_map_rw(req, iovec, fast_iov, iter);
cd658695
JA
3381 iorw = req->async_data;
3382 /* we've copied and mapped the iter, ensure state is saved */
3383 iov_iter_save_state(&iorw->iter, &iorw->iter_state);
5d204bcf 3384 }
b7bb4f7d 3385 return 0;
f67676d1
JA
3386}
3387
73debe68 3388static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
c3e330a4 3389{
e8c2bc1f 3390 struct io_async_rw *iorw = req->async_data;
f4bff104 3391 struct iovec *iov = iorw->fast_iov;
847595de 3392 int ret;
c3e330a4 3393
2846c481 3394 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
c3e330a4
PB
3395 if (unlikely(ret < 0))
3396 return ret;
3397
ab0b196c
PB
3398 iorw->bytes_done = 0;
3399 iorw->free_iovec = iov;
3400 if (iov)
3401 req->flags |= REQ_F_NEED_CLEANUP;
cd658695 3402 iov_iter_save_state(&iorw->iter, &iorw->iter_state);
c3e330a4
PB
3403 return 0;
3404}
3405
73debe68 3406static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 3407{
3529d8c2
JA
3408 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3409 return -EBADF;
5d329e12 3410 return io_prep_rw(req, sqe, READ);
f67676d1
JA
3411}
3412
c1dd91d1
JA
3413/*
3414 * This is our waitqueue callback handler, registered through lock_page_async()
3415 * when we initially tried to do the IO with the iocb armed our waitqueue.
3416 * This gets called when the page is unlocked, and we generally expect that to
3417 * happen when the page IO is completed and the page is now uptodate. This will
3418 * queue a task_work based retry of the operation, attempting to copy the data
3419 * again. If the latter fails because the page was NOT uptodate, then we will
3420 * do a thread based blocking retry of the operation. That's the unexpected
3421 * slow path.
3422 */
bcf5a063
JA
3423static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3424 int sync, void *arg)
3425{
3426 struct wait_page_queue *wpq;
3427 struct io_kiocb *req = wait->private;
bcf5a063 3428 struct wait_page_key *key = arg;
bcf5a063
JA
3429
3430 wpq = container_of(wait, struct wait_page_queue, wait);
3431
cdc8fcb4
LT
3432 if (!wake_page_match(wpq, key))
3433 return 0;
3434
c8d317aa 3435 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063 3436 list_del_init(&wait->entry);
921b9054 3437 io_req_task_queue(req);
bcf5a063
JA
3438 return 1;
3439}
3440
c1dd91d1
JA
3441/*
3442 * This controls whether a given IO request should be armed for async page
3443 * based retry. If we return false here, the request is handed to the async
3444 * worker threads for retry. If we're doing buffered reads on a regular file,
3445 * we prepare a private wait_page_queue entry and retry the operation. This
3446 * will either succeed because the page is now uptodate and unlocked, or it
3447 * will register a callback when the page is unlocked at IO completion. Through
3448 * that callback, io_uring uses task_work to setup a retry of the operation.
3449 * That retry will attempt the buffered read again. The retry will generally
3450 * succeed, or in rare cases where it fails, we then fall back to using the
3451 * async worker threads for a blocking retry.
3452 */
227c0c96 3453static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 3454{
e8c2bc1f
JA
3455 struct io_async_rw *rw = req->async_data;
3456 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 3457 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 3458
bcf5a063
JA
3459 /* never retry for NOWAIT, we just complete with -EAGAIN */
3460 if (req->flags & REQ_F_NOWAIT)
3461 return false;
f67676d1 3462
227c0c96 3463 /* Only for buffered IO */
3b2a4439 3464 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 3465 return false;
3b2a4439 3466
bcf5a063
JA
3467 /*
3468 * just use poll if we can, and don't attempt if the fs doesn't
3469 * support callback based unlocks
3470 */
3471 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3472 return false;
f67676d1 3473
3b2a4439
JA
3474 wait->wait.func = io_async_buf_func;
3475 wait->wait.private = req;
3476 wait->wait.flags = 0;
3477 INIT_LIST_HEAD(&wait->wait.entry);
3478 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 3479 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 3480 kiocb->ki_waitq = wait;
3b2a4439 3481 return true;
bcf5a063
JA
3482}
3483
aeab9506 3484static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
bcf5a063
JA
3485{
3486 if (req->file->f_op->read_iter)
3487 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d 3488 else if (req->file->f_op->read)
4017eb91 3489 return loop_rw_iter(READ, req, iter);
2dd2111d
GH
3490 else
3491 return -EINVAL;
f67676d1
JA
3492}
3493
7db30437
ML
3494static bool need_read_all(struct io_kiocb *req)
3495{
3496 return req->flags & REQ_F_ISREG ||
3497 S_ISBLK(file_inode(req->file)->i_mode);
3498}
3499
889fca73 3500static int io_read(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3501{
3502 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3503 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3504 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3505 struct io_async_rw *rw = req->async_data;
45d189c6 3506 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
cd658695
JA
3507 struct iov_iter_state __state, *state;
3508 ssize_t ret, ret2;
ff6165b2 3509
2846c481 3510 if (rw) {
e8c2bc1f 3511 iter = &rw->iter;
cd658695
JA
3512 state = &rw->iter_state;
3513 /*
3514 * We come here from an earlier attempt, restore our state to
3515 * match in case it doesn't. It's cheap enough that we don't
3516 * need to make this conditional.
3517 */
3518 iov_iter_restore(iter, state);
2846c481
PB
3519 iovec = NULL;
3520 } else {
3521 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3522 if (ret < 0)
3523 return ret;
cd658695
JA
3524 state = &__state;
3525 iov_iter_save_state(iter, state);
2846c481 3526 }
cd658695 3527 req->result = iov_iter_count(iter);
2b188cc1 3528
fd6c2e4c
JA
3529 /* Ensure we clear previously set non-block flag */
3530 if (!force_nonblock)
29de5f6a 3531 kiocb->ki_flags &= ~IOCB_NOWAIT;
a88fc400
PB
3532 else
3533 kiocb->ki_flags |= IOCB_NOWAIT;
3534
24c74678 3535 /* If the file doesn't support async, just async punt */
b191e2df 3536 if (force_nonblock && !io_file_supports_nowait(req, READ)) {
6713e7a6 3537 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
6bf985dc 3538 return ret ?: -EAGAIN;
6713e7a6 3539 }
9e645e11 3540
cd658695 3541 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
5ea5dd45
PB
3542 if (unlikely(ret)) {
3543 kfree(iovec);
3544 return ret;
3545 }
2b188cc1 3546
227c0c96 3547 ret = io_iter_do_read(req, iter);
32960613 3548
230d50d4 3549 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
6ad7f233 3550 req->flags &= ~REQ_F_REISSUE;
eefdf30f
JA
3551 /* IOPOLL retry should happen for io-wq threads */
3552 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
f91daf56 3553 goto done;
75c668cd
PB
3554 /* no retry on NONBLOCK nor RWF_NOWAIT */
3555 if (req->flags & REQ_F_NOWAIT)
355afaeb 3556 goto done;
f38c7e3a 3557 ret = 0;
230d50d4
JA
3558 } else if (ret == -EIOCBQUEUED) {
3559 goto out_free;
cd658695 3560 } else if (ret <= 0 || ret == req->result || !force_nonblock ||
7db30437 3561 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
7335e3bf 3562 /* read all, failed, already did sync or don't want to retry */
00d23d51 3563 goto done;
227c0c96
JA
3564 }
3565
cd658695
JA
3566 /*
3567 * Don't depend on the iter state matching what was consumed, or being
3568 * untouched in case of error. Restore it and we'll advance it
3569 * manually if we need to.
3570 */
3571 iov_iter_restore(iter, state);
3572
227c0c96 3573 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
6bf985dc
PB
3574 if (ret2)
3575 return ret2;
3576
fe1cdd55 3577 iovec = NULL;
e8c2bc1f 3578 rw = req->async_data;
cd658695
JA
3579 /*
3580 * Now use our persistent iterator and state, if we aren't already.
3581 * We've restored and mapped the iter to match.
3582 */
3583 if (iter != &rw->iter) {
3584 iter = &rw->iter;
3585 state = &rw->iter_state;
3586 }
227c0c96 3587
b23df91b 3588 do {
cd658695
JA
3589 /*
3590 * We end up here because of a partial read, either from
3591 * above or inside this loop. Advance the iter by the bytes
3592 * that were consumed.
3593 */
3594 iov_iter_advance(iter, ret);
3595 if (!iov_iter_count(iter))
3596 break;
b23df91b 3597 rw->bytes_done += ret;
cd658695
JA
3598 iov_iter_save_state(iter, state);
3599
b23df91b
PB
3600 /* if we can retry, do so with the callbacks armed */
3601 if (!io_rw_should_retry(req)) {
3602 kiocb->ki_flags &= ~IOCB_WAITQ;
3603 return -EAGAIN;
3604 }
3605
3606 /*
3607 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3608 * we get -EIOCBQUEUED, then we'll get a notification when the
3609 * desired page gets unlocked. We can also get a partial read
3610 * here, and if we do, then just retry at the new offset.
3611 */
3612 ret = io_iter_do_read(req, iter);
3613 if (ret == -EIOCBQUEUED)
3614 return 0;
227c0c96 3615 /* we got some bytes, but not all. retry. */
b5b0ecb7 3616 kiocb->ki_flags &= ~IOCB_WAITQ;
cd658695
JA
3617 iov_iter_restore(iter, state);
3618 } while (ret > 0);
227c0c96 3619done:
889fca73 3620 kiocb_done(kiocb, ret, issue_flags);
fe1cdd55
PB
3621out_free:
3622 /* it's faster to check here then delegate to kfree */
3623 if (iovec)
3624 kfree(iovec);
5ea5dd45 3625 return 0;
2b188cc1
JA
3626}
3627
73debe68 3628static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 3629{
3529d8c2
JA
3630 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3631 return -EBADF;
5d329e12 3632 return io_prep_rw(req, sqe, WRITE);
f67676d1
JA
3633}
3634
889fca73 3635static int io_write(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3636{
3637 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3638 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3639 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3640 struct io_async_rw *rw = req->async_data;
45d189c6 3641 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
cd658695
JA
3642 struct iov_iter_state __state, *state;
3643 ssize_t ret, ret2;
2b188cc1 3644
2846c481 3645 if (rw) {
e8c2bc1f 3646 iter = &rw->iter;
cd658695
JA
3647 state = &rw->iter_state;
3648 iov_iter_restore(iter, state);
2846c481
PB
3649 iovec = NULL;
3650 } else {
3651 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3652 if (ret < 0)
3653 return ret;
cd658695
JA
3654 state = &__state;
3655 iov_iter_save_state(iter, state);
2846c481 3656 }
cd658695 3657 req->result = iov_iter_count(iter);
2b188cc1 3658
fd6c2e4c
JA
3659 /* Ensure we clear previously set non-block flag */
3660 if (!force_nonblock)
a88fc400
PB
3661 kiocb->ki_flags &= ~IOCB_NOWAIT;
3662 else
3663 kiocb->ki_flags |= IOCB_NOWAIT;
fd6c2e4c 3664
24c74678 3665 /* If the file doesn't support async, just async punt */
b191e2df 3666 if (force_nonblock && !io_file_supports_nowait(req, WRITE))
f67676d1 3667 goto copy_iov;
31b51510 3668
10d59345
JA
3669 /* file path doesn't support NOWAIT for non-direct_IO */
3670 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3671 (req->flags & REQ_F_ISREG))
f67676d1 3672 goto copy_iov;
31b51510 3673
cd658695 3674 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
fa15bafb
PB
3675 if (unlikely(ret))
3676 goto out_free;
4ed734b0 3677
fa15bafb
PB
3678 /*
3679 * Open-code file_start_write here to grab freeze protection,
3680 * which will be released by another thread in
3681 * io_complete_rw(). Fool lockdep by telling it the lock got
3682 * released so that it doesn't complain about the held lock when
3683 * we return to userspace.
3684 */
3685 if (req->flags & REQ_F_ISREG) {
8a3c84b6 3686 sb_start_write(file_inode(req->file)->i_sb);
fa15bafb
PB
3687 __sb_writers_release(file_inode(req->file)->i_sb,
3688 SB_FREEZE_WRITE);
3689 }
3690 kiocb->ki_flags |= IOCB_WRITE;
4ed734b0 3691
fa15bafb 3692 if (req->file->f_op->write_iter)
ff6165b2 3693 ret2 = call_write_iter(req->file, kiocb, iter);
2dd2111d 3694 else if (req->file->f_op->write)
4017eb91 3695 ret2 = loop_rw_iter(WRITE, req, iter);
2dd2111d
GH
3696 else
3697 ret2 = -EINVAL;
4ed734b0 3698
6ad7f233
PB
3699 if (req->flags & REQ_F_REISSUE) {
3700 req->flags &= ~REQ_F_REISSUE;
230d50d4 3701 ret2 = -EAGAIN;
6ad7f233 3702 }
230d50d4 3703
fa15bafb
PB
3704 /*
3705 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3706 * retry them without IOCB_NOWAIT.
3707 */
3708 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3709 ret2 = -EAGAIN;
75c668cd
PB
3710 /* no retry on NONBLOCK nor RWF_NOWAIT */
3711 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
355afaeb 3712 goto done;
fa15bafb 3713 if (!force_nonblock || ret2 != -EAGAIN) {
eefdf30f
JA
3714 /* IOPOLL retry should happen for io-wq threads */
3715 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3716 goto copy_iov;
355afaeb 3717done:
889fca73 3718 kiocb_done(kiocb, ret2, issue_flags);
fa15bafb 3719 } else {
f67676d1 3720copy_iov:
cd658695 3721 iov_iter_restore(iter, state);
227c0c96 3722 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
6bf985dc 3723 return ret ?: -EAGAIN;
2b188cc1 3724 }
31b51510 3725out_free:
f261c168 3726 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 3727 if (iovec)
6f2cc166 3728 kfree(iovec);
2b188cc1
JA
3729 return ret;
3730}
3731
80a261fd
JA
3732static int io_renameat_prep(struct io_kiocb *req,
3733 const struct io_uring_sqe *sqe)
3734{
3735 struct io_rename *ren = &req->rename;
3736 const char __user *oldf, *newf;
3737
ed7eb259
JA
3738 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3739 return -EINVAL;
26578cda 3740 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
ed7eb259 3741 return -EINVAL;
80a261fd
JA
3742 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3743 return -EBADF;
3744
3745 ren->old_dfd = READ_ONCE(sqe->fd);
3746 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3747 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3748 ren->new_dfd = READ_ONCE(sqe->len);
3749 ren->flags = READ_ONCE(sqe->rename_flags);
3750
3751 ren->oldpath = getname(oldf);
3752 if (IS_ERR(ren->oldpath))
3753 return PTR_ERR(ren->oldpath);
3754
3755 ren->newpath = getname(newf);
3756 if (IS_ERR(ren->newpath)) {
3757 putname(ren->oldpath);
3758 return PTR_ERR(ren->newpath);
3759 }
3760
3761 req->flags |= REQ_F_NEED_CLEANUP;
3762 return 0;
3763}
3764
45d189c6 3765static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
80a261fd
JA
3766{
3767 struct io_rename *ren = &req->rename;
3768 int ret;
3769
45d189c6 3770 if (issue_flags & IO_URING_F_NONBLOCK)
80a261fd
JA
3771 return -EAGAIN;
3772
3773 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3774 ren->newpath, ren->flags);
3775
3776 req->flags &= ~REQ_F_NEED_CLEANUP;
3777 if (ret < 0)
93d2bcd2 3778 req_set_fail(req);
80a261fd
JA
3779 io_req_complete(req, ret);
3780 return 0;
3781}
3782
14a1143b
JA
3783static int io_unlinkat_prep(struct io_kiocb *req,
3784 const struct io_uring_sqe *sqe)
3785{
3786 struct io_unlink *un = &req->unlink;
3787 const char __user *fname;
3788
22634bc5
JA
3789 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3790 return -EINVAL;
26578cda
PB
3791 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
3792 sqe->splice_fd_in)
22634bc5 3793 return -EINVAL;
14a1143b
JA
3794 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3795 return -EBADF;
3796
3797 un->dfd = READ_ONCE(sqe->fd);
3798
3799 un->flags = READ_ONCE(sqe->unlink_flags);
3800 if (un->flags & ~AT_REMOVEDIR)
3801 return -EINVAL;
3802
3803 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3804 un->filename = getname(fname);
3805 if (IS_ERR(un->filename))
3806 return PTR_ERR(un->filename);
3807
3808 req->flags |= REQ_F_NEED_CLEANUP;
3809 return 0;
3810}
3811
45d189c6 3812static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
14a1143b
JA
3813{
3814 struct io_unlink *un = &req->unlink;
3815 int ret;
3816
45d189c6 3817 if (issue_flags & IO_URING_F_NONBLOCK)
14a1143b
JA
3818 return -EAGAIN;
3819
3820 if (un->flags & AT_REMOVEDIR)
3821 ret = do_rmdir(un->dfd, un->filename);
3822 else
3823 ret = do_unlinkat(un->dfd, un->filename);
3824
3825 req->flags &= ~REQ_F_NEED_CLEANUP;
3826 if (ret < 0)
93d2bcd2 3827 req_set_fail(req);
14a1143b
JA
3828 io_req_complete(req, ret);
3829 return 0;
3830}
3831
e34a02dc
DK
3832static int io_mkdirat_prep(struct io_kiocb *req,
3833 const struct io_uring_sqe *sqe)
3834{
3835 struct io_mkdir *mkd = &req->mkdir;
3836 const char __user *fname;
3837
3838 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3839 return -EINVAL;
3840 if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
3841 sqe->splice_fd_in)
3842 return -EINVAL;
3843 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3844 return -EBADF;
3845
3846 mkd->dfd = READ_ONCE(sqe->fd);
3847 mkd->mode = READ_ONCE(sqe->len);
3848
3849 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3850 mkd->filename = getname(fname);
3851 if (IS_ERR(mkd->filename))
3852 return PTR_ERR(mkd->filename);
3853
3854 req->flags |= REQ_F_NEED_CLEANUP;
3855 return 0;
3856}
3857
3858static int io_mkdirat(struct io_kiocb *req, int issue_flags)
3859{
3860 struct io_mkdir *mkd = &req->mkdir;
3861 int ret;
3862
3863 if (issue_flags & IO_URING_F_NONBLOCK)
3864 return -EAGAIN;
3865
3866 ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
3867
3868 req->flags &= ~REQ_F_NEED_CLEANUP;
3869 if (ret < 0)
3870 req_set_fail(req);
3871 io_req_complete(req, ret);
3872 return 0;
3873}
3874
7a8721f8
DK
3875static int io_symlinkat_prep(struct io_kiocb *req,
3876 const struct io_uring_sqe *sqe)
3877{
3878 struct io_symlink *sl = &req->symlink;
3879 const char __user *oldpath, *newpath;
3880
3881 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3882 return -EINVAL;
3883 if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
3884 sqe->splice_fd_in)
3885 return -EINVAL;
3886 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3887 return -EBADF;
3888
3889 sl->new_dfd = READ_ONCE(sqe->fd);
3890 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
3891 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3892
3893 sl->oldpath = getname(oldpath);
3894 if (IS_ERR(sl->oldpath))
3895 return PTR_ERR(sl->oldpath);
3896
3897 sl->newpath = getname(newpath);
3898 if (IS_ERR(sl->newpath)) {
3899 putname(sl->oldpath);
3900 return PTR_ERR(sl->newpath);
3901 }
3902
3903 req->flags |= REQ_F_NEED_CLEANUP;
3904 return 0;
3905}
3906
3907static int io_symlinkat(struct io_kiocb *req, int issue_flags)
3908{
3909 struct io_symlink *sl = &req->symlink;
3910 int ret;
3911
3912 if (issue_flags & IO_URING_F_NONBLOCK)
3913 return -EAGAIN;
3914
3915 ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
3916
3917 req->flags &= ~REQ_F_NEED_CLEANUP;
3918 if (ret < 0)
3919 req_set_fail(req);
3920 io_req_complete(req, ret);
3921 return 0;
3922}
3923
cf30da90
DK
3924static int io_linkat_prep(struct io_kiocb *req,
3925 const struct io_uring_sqe *sqe)
3926{
3927 struct io_hardlink *lnk = &req->hardlink;
3928 const char __user *oldf, *newf;
3929
3930 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3931 return -EINVAL;
3932 if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
3933 return -EINVAL;
3934 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3935 return -EBADF;
3936
3937 lnk->old_dfd = READ_ONCE(sqe->fd);
3938 lnk->new_dfd = READ_ONCE(sqe->len);
3939 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3940 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3941 lnk->flags = READ_ONCE(sqe->hardlink_flags);
3942
3943 lnk->oldpath = getname(oldf);
3944 if (IS_ERR(lnk->oldpath))
3945 return PTR_ERR(lnk->oldpath);
3946
3947 lnk->newpath = getname(newf);
3948 if (IS_ERR(lnk->newpath)) {
3949 putname(lnk->oldpath);
3950 return PTR_ERR(lnk->newpath);
3951 }
3952
3953 req->flags |= REQ_F_NEED_CLEANUP;
3954 return 0;
3955}
3956
3957static int io_linkat(struct io_kiocb *req, int issue_flags)
3958{
3959 struct io_hardlink *lnk = &req->hardlink;
3960 int ret;
3961
3962 if (issue_flags & IO_URING_F_NONBLOCK)
3963 return -EAGAIN;
3964
3965 ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
3966 lnk->newpath, lnk->flags);
3967
3968 req->flags &= ~REQ_F_NEED_CLEANUP;
3969 if (ret < 0)
3970 req_set_fail(req);
3971 io_req_complete(req, ret);
3972 return 0;
3973}
3974
36f4fa68
JA
3975static int io_shutdown_prep(struct io_kiocb *req,
3976 const struct io_uring_sqe *sqe)
3977{
3978#if defined(CONFIG_NET)
3979 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3980 return -EINVAL;
26578cda
PB
3981 if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3982 sqe->buf_index || sqe->splice_fd_in))
36f4fa68
JA
3983 return -EINVAL;
3984
3985 req->shutdown.how = READ_ONCE(sqe->len);
3986 return 0;
3987#else
3988 return -EOPNOTSUPP;
3989#endif
3990}
3991
45d189c6 3992static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
36f4fa68
JA
3993{
3994#if defined(CONFIG_NET)
3995 struct socket *sock;
3996 int ret;
3997
45d189c6 3998 if (issue_flags & IO_URING_F_NONBLOCK)
36f4fa68
JA
3999 return -EAGAIN;
4000
48aba79b 4001 sock = sock_from_file(req->file);
36f4fa68 4002 if (unlikely(!sock))
48aba79b 4003 return -ENOTSOCK;
36f4fa68
JA
4004
4005 ret = __sys_shutdown_sock(sock, req->shutdown.how);
a146468d 4006 if (ret < 0)
93d2bcd2 4007 req_set_fail(req);
36f4fa68
JA
4008 io_req_complete(req, ret);
4009 return 0;
4010#else
4011 return -EOPNOTSUPP;
4012#endif
4013}
4014
f2a8d5c7
PB
4015static int __io_splice_prep(struct io_kiocb *req,
4016 const struct io_uring_sqe *sqe)
7d67af2c 4017{
fe7e3257 4018 struct io_splice *sp = &req->splice;
7d67af2c 4019 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
7d67af2c 4020
3232dd02
PB
4021 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4022 return -EINVAL;
7d67af2c 4023
7d67af2c
PB
4024 sp->len = READ_ONCE(sqe->len);
4025 sp->flags = READ_ONCE(sqe->splice_flags);
7d67af2c
PB
4026 if (unlikely(sp->flags & ~valid_flags))
4027 return -EINVAL;
dfadddfc 4028 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
7d67af2c
PB
4029 return 0;
4030}
4031
f2a8d5c7
PB
4032static int io_tee_prep(struct io_kiocb *req,
4033 const struct io_uring_sqe *sqe)
4034{
4035 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
4036 return -EINVAL;
4037 return __io_splice_prep(req, sqe);
4038}
4039
45d189c6 4040static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
f2a8d5c7
PB
4041{
4042 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
4043 struct file *out = sp->file_out;
4044 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
dfadddfc 4045 struct file *in;
f2a8d5c7
PB
4046 long ret = 0;
4047
45d189c6 4048 if (issue_flags & IO_URING_F_NONBLOCK)
f2a8d5c7 4049 return -EAGAIN;
dfadddfc
JA
4050
4051 in = io_file_get(req->ctx, req, sp->splice_fd_in,
4052 (sp->flags & SPLICE_F_FD_IN_FIXED));
4053 if (!in) {
4054 ret = -EBADF;
4055 goto done;
4056 }
4057
f2a8d5c7
PB
4058 if (sp->len)
4059 ret = do_tee(in, out, sp->len, flags);
4060
e1d767f0
PB
4061 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4062 io_put_file(in);
dfadddfc 4063done:
f2a8d5c7 4064 if (ret != sp->len)
93d2bcd2 4065 req_set_fail(req);
e1e16097 4066 io_req_complete(req, ret);
f2a8d5c7
PB
4067 return 0;
4068}
4069
4070static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4071{
fe7e3257 4072 struct io_splice *sp = &req->splice;
f2a8d5c7
PB
4073
4074 sp->off_in = READ_ONCE(sqe->splice_off_in);
4075 sp->off_out = READ_ONCE(sqe->off);
4076 return __io_splice_prep(req, sqe);
4077}
4078
45d189c6 4079static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
7d67af2c
PB
4080{
4081 struct io_splice *sp = &req->splice;
7d67af2c
PB
4082 struct file *out = sp->file_out;
4083 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
4084 loff_t *poff_in, *poff_out;
dfadddfc 4085 struct file *in;
c9687426 4086 long ret = 0;
7d67af2c 4087
45d189c6 4088 if (issue_flags & IO_URING_F_NONBLOCK)
2fb3e822 4089 return -EAGAIN;
7d67af2c 4090
dfadddfc
JA
4091 in = io_file_get(req->ctx, req, sp->splice_fd_in,
4092 (sp->flags & SPLICE_F_FD_IN_FIXED));
4093 if (!in) {
4094 ret = -EBADF;
4095 goto done;
4096 }
4097
7d67af2c
PB
4098 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
4099 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 4100
948a7749 4101 if (sp->len)
c9687426 4102 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c 4103
e1d767f0
PB
4104 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4105 io_put_file(in);
dfadddfc 4106done:
7d67af2c 4107 if (ret != sp->len)
93d2bcd2 4108 req_set_fail(req);
e1e16097 4109 io_req_complete(req, ret);
7d67af2c
PB
4110 return 0;
4111}
4112
2b188cc1
JA
4113/*
4114 * IORING_OP_NOP just posts a completion event, nothing else.
4115 */
889fca73 4116static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
4117{
4118 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 4119
def596e9
JA
4120 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4121 return -EINVAL;
4122
889fca73 4123 __io_req_complete(req, issue_flags, 0, 0);
2b188cc1
JA
4124 return 0;
4125}
4126
1155c76a 4127static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 4128{
6b06314c 4129 struct io_ring_ctx *ctx = req->ctx;
c992fe29 4130
6b06314c 4131 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 4132 return -EINVAL;
26578cda
PB
4133 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4134 sqe->splice_fd_in))
c992fe29
CH
4135 return -EINVAL;
4136
8ed8d3c3
JA
4137 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4138 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4139 return -EINVAL;
4140
4141 req->sync.off = READ_ONCE(sqe->off);
4142 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
4143 return 0;
4144}
4145
45d189c6 4146static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 4147{
8ed8d3c3 4148 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
4149 int ret;
4150
ac45abc0 4151 /* fsync always requires a blocking context */
45d189c6 4152 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
4153 return -EAGAIN;
4154
9adbd45d 4155 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
4156 end > 0 ? end : LLONG_MAX,
4157 req->sync.flags & IORING_FSYNC_DATASYNC);
4158 if (ret < 0)
93d2bcd2 4159 req_set_fail(req);
e1e16097 4160 io_req_complete(req, ret);
c992fe29
CH
4161 return 0;
4162}
4163
d63d1b5e
JA
4164static int io_fallocate_prep(struct io_kiocb *req,
4165 const struct io_uring_sqe *sqe)
4166{
26578cda
PB
4167 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
4168 sqe->splice_fd_in)
d63d1b5e 4169 return -EINVAL;
3232dd02
PB
4170 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4171 return -EINVAL;
d63d1b5e
JA
4172
4173 req->sync.off = READ_ONCE(sqe->off);
4174 req->sync.len = READ_ONCE(sqe->addr);
4175 req->sync.mode = READ_ONCE(sqe->len);
4176 return 0;
4177}
4178
45d189c6 4179static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
5d17b4a4 4180{
ac45abc0
PB
4181 int ret;
4182
d63d1b5e 4183 /* fallocate always requiring blocking context */
45d189c6 4184 if (issue_flags & IO_URING_F_NONBLOCK)
5d17b4a4 4185 return -EAGAIN;
ac45abc0
PB
4186 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4187 req->sync.len);
ac45abc0 4188 if (ret < 0)
93d2bcd2 4189 req_set_fail(req);
48a904e2
JA
4190 else
4191 fsnotify_modify(req->file);
e1e16097 4192 io_req_complete(req, ret);
5d17b4a4
JA
4193 return 0;
4194}
4195
ec65fea5 4196static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 4197{
f8748881 4198 const char __user *fname;
15b71abe 4199 int ret;
b7bb4f7d 4200
d3fddf6d
PB
4201 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4202 return -EINVAL;
b9445598 4203 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 4204 return -EINVAL;
ec65fea5 4205 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 4206 return -EBADF;
03b1230c 4207
ec65fea5
PB
4208 /* open.how should be already initialised */
4209 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 4210 req->open.how.flags |= O_LARGEFILE;
3529d8c2 4211
25e72d10
PB
4212 req->open.dfd = READ_ONCE(sqe->fd);
4213 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 4214 req->open.filename = getname(fname);
15b71abe
JA
4215 if (IS_ERR(req->open.filename)) {
4216 ret = PTR_ERR(req->open.filename);
4217 req->open.filename = NULL;
4218 return ret;
4219 }
b9445598
PB
4220
4221 req->open.file_slot = READ_ONCE(sqe->file_index);
4222 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
4223 return -EINVAL;
4224
4022e7af 4225 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 4226 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 4227 return 0;
03b1230c
JA
4228}
4229
ec65fea5
PB
4230static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4231{
d3fddf6d
PB
4232 u64 mode = READ_ONCE(sqe->len);
4233 u64 flags = READ_ONCE(sqe->open_flags);
ec65fea5 4234
ec65fea5
PB
4235 req->open.how = build_open_how(flags, mode);
4236 return __io_openat_prep(req, sqe);
4237}
4238
cebdb986 4239static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 4240{
cebdb986 4241 struct open_how __user *how;
cebdb986 4242 size_t len;
0fa03c62
JA
4243 int ret;
4244
cebdb986
JA
4245 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4246 len = READ_ONCE(sqe->len);
cebdb986
JA
4247 if (len < OPEN_HOW_SIZE_VER0)
4248 return -EINVAL;
3529d8c2 4249
cebdb986
JA
4250 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4251 len);
4252 if (ret)
4253 return ret;
3529d8c2 4254
ec65fea5 4255 return __io_openat_prep(req, sqe);
cebdb986
JA
4256}
4257
45d189c6 4258static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
15b71abe
JA
4259{
4260 struct open_flags op;
15b71abe 4261 struct file *file;
b9445598
PB
4262 bool resolve_nonblock, nonblock_set;
4263 bool fixed = !!req->open.file_slot;
15b71abe
JA
4264 int ret;
4265
cebdb986 4266 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
4267 if (ret)
4268 goto err;
3a81fd02
JA
4269 nonblock_set = op.open_flag & O_NONBLOCK;
4270 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
45d189c6 4271 if (issue_flags & IO_URING_F_NONBLOCK) {
3a81fd02
JA
4272 /*
4273 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
4274 * it'll always -EAGAIN
4275 */
4276 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4277 return -EAGAIN;
4278 op.lookup_flags |= LOOKUP_CACHED;
4279 op.open_flag |= O_NONBLOCK;
4280 }
15b71abe 4281
b9445598
PB
4282 if (!fixed) {
4283 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4284 if (ret < 0)
4285 goto err;
4286 }
15b71abe
JA
4287
4288 file = do_filp_open(req->open.dfd, req->open.filename, &op);
12dcb58a 4289 if (IS_ERR(file)) {
944d1444 4290 /*
12dcb58a
PB
4291 * We could hang on to this 'fd' on retrying, but seems like
4292 * marginal gain for something that is now known to be a slower
4293 * path. So just put it, and we'll get a new one when we retry.
944d1444 4294 */
b9445598
PB
4295 if (!fixed)
4296 put_unused_fd(ret);
3a81fd02 4297
15b71abe 4298 ret = PTR_ERR(file);
12dcb58a
PB
4299 /* only retry if RESOLVE_CACHED wasn't already set by application */
4300 if (ret == -EAGAIN &&
4301 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
4302 return -EAGAIN;
4303 goto err;
15b71abe 4304 }
12dcb58a
PB
4305
4306 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
4307 file->f_flags &= ~O_NONBLOCK;
4308 fsnotify_open(file);
b9445598
PB
4309
4310 if (!fixed)
4311 fd_install(ret, file);
4312 else
4313 ret = io_install_fixed_file(req, file, issue_flags,
4314 req->open.file_slot - 1);
15b71abe
JA
4315err:
4316 putname(req->open.filename);
8fef80bf 4317 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe 4318 if (ret < 0)
93d2bcd2 4319 req_set_fail(req);
0bdf3398 4320 __io_req_complete(req, issue_flags, ret, 0);
15b71abe
JA
4321 return 0;
4322}
4323
45d189c6 4324static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
cebdb986 4325{
e45cff58 4326 return io_openat2(req, issue_flags);
cebdb986
JA
4327}
4328
067524e9
JA
4329static int io_remove_buffers_prep(struct io_kiocb *req,
4330 const struct io_uring_sqe *sqe)
4331{
4332 struct io_provide_buf *p = &req->pbuf;
4333 u64 tmp;
4334
26578cda
PB
4335 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
4336 sqe->splice_fd_in)
067524e9
JA
4337 return -EINVAL;
4338
4339 tmp = READ_ONCE(sqe->fd);
4340 if (!tmp || tmp > USHRT_MAX)
4341 return -EINVAL;
4342
4343 memset(p, 0, sizeof(*p));
4344 p->nbufs = tmp;
4345 p->bgid = READ_ONCE(sqe->buf_group);
4346 return 0;
4347}
4348
4349static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4350 int bgid, unsigned nbufs)
4351{
4352 unsigned i = 0;
4353
4354 /* shouldn't happen */
4355 if (!nbufs)
4356 return 0;
4357
4358 /* the head kbuf is the list itself */
4359 while (!list_empty(&buf->list)) {
4360 struct io_buffer *nxt;
4361
4362 nxt = list_first_entry(&buf->list, struct io_buffer, list);
4363 list_del(&nxt->list);
4364 kfree(nxt);
4365 if (++i == nbufs)
4366 return i;
07edfd19 4367 cond_resched();
067524e9
JA
4368 }
4369 i++;
4370 kfree(buf);
9e15c3a0 4371 xa_erase(&ctx->io_buffers, bgid);
067524e9
JA
4372
4373 return i;
4374}
4375
889fca73 4376static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
067524e9
JA
4377{
4378 struct io_provide_buf *p = &req->pbuf;
4379 struct io_ring_ctx *ctx = req->ctx;
4380 struct io_buffer *head;
4381 int ret = 0;
45d189c6 4382 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
067524e9
JA
4383
4384 io_ring_submit_lock(ctx, !force_nonblock);
4385
4386 lockdep_assert_held(&ctx->uring_lock);
4387
4388 ret = -ENOENT;
9e15c3a0 4389 head = xa_load(&ctx->io_buffers, p->bgid);
067524e9
JA
4390 if (head)
4391 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
067524e9 4392 if (ret < 0)
93d2bcd2 4393 req_set_fail(req);
067524e9 4394
9fb8cb49
PB
4395 /* complete before unlock, IOPOLL may need the lock */
4396 __io_req_complete(req, issue_flags, ret, 0);
4397 io_ring_submit_unlock(ctx, !force_nonblock);
067524e9
JA
4398 return 0;
4399}
4400
ddf0322d
JA
4401static int io_provide_buffers_prep(struct io_kiocb *req,
4402 const struct io_uring_sqe *sqe)
4403{
38134ada 4404 unsigned long size, tmp_check;
ddf0322d
JA
4405 struct io_provide_buf *p = &req->pbuf;
4406 u64 tmp;
4407
26578cda 4408 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
ddf0322d
JA
4409 return -EINVAL;
4410
4411 tmp = READ_ONCE(sqe->fd);
4412 if (!tmp || tmp > USHRT_MAX)
4413 return -E2BIG;
4414 p->nbufs = tmp;
4415 p->addr = READ_ONCE(sqe->addr);
4416 p->len = READ_ONCE(sqe->len);
4417
38134ada
PB
4418 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4419 &size))
4420 return -EOVERFLOW;
4421 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4422 return -EOVERFLOW;
4423
d81269fe
PB
4424 size = (unsigned long)p->len * p->nbufs;
4425 if (!access_ok(u64_to_user_ptr(p->addr), size))
ddf0322d
JA
4426 return -EFAULT;
4427
4428 p->bgid = READ_ONCE(sqe->buf_group);
4429 tmp = READ_ONCE(sqe->off);
4430 if (tmp > USHRT_MAX)
4431 return -E2BIG;
4432 p->bid = tmp;
4433 return 0;
4434}
4435
4436static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4437{
4438 struct io_buffer *buf;
4439 u64 addr = pbuf->addr;
4440 int i, bid = pbuf->bid;
4441
4442 for (i = 0; i < pbuf->nbufs; i++) {
9990da93 4443 buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
ddf0322d
JA
4444 if (!buf)
4445 break;
4446
4447 buf->addr = addr;
d1f82808 4448 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
ddf0322d
JA
4449 buf->bid = bid;
4450 addr += pbuf->len;
4451 bid++;
4452 if (!*head) {
4453 INIT_LIST_HEAD(&buf->list);
4454 *head = buf;
4455 } else {
4456 list_add_tail(&buf->list, &(*head)->list);
4457 }
2d1829ba 4458 cond_resched();
ddf0322d
JA
4459 }
4460
4461 return i ? i : -ENOMEM;
4462}
4463
889fca73 4464static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
ddf0322d
JA
4465{
4466 struct io_provide_buf *p = &req->pbuf;
4467 struct io_ring_ctx *ctx = req->ctx;
4468 struct io_buffer *head, *list;
4469 int ret = 0;
45d189c6 4470 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ddf0322d
JA
4471
4472 io_ring_submit_lock(ctx, !force_nonblock);
4473
4474 lockdep_assert_held(&ctx->uring_lock);
4475
9e15c3a0 4476 list = head = xa_load(&ctx->io_buffers, p->bgid);
ddf0322d
JA
4477
4478 ret = io_add_buffers(p, &head);
9e15c3a0
JA
4479 if (ret >= 0 && !list) {
4480 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4481 if (ret < 0)
067524e9 4482 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d 4483 }
ddf0322d 4484 if (ret < 0)
93d2bcd2 4485 req_set_fail(req);
9fb8cb49
PB
4486 /* complete before unlock, IOPOLL may need the lock */
4487 __io_req_complete(req, issue_flags, ret, 0);
4488 io_ring_submit_unlock(ctx, !force_nonblock);
ddf0322d 4489 return 0;
cebdb986
JA
4490}
4491
3e4827b0
JA
4492static int io_epoll_ctl_prep(struct io_kiocb *req,
4493 const struct io_uring_sqe *sqe)
4494{
4495#if defined(CONFIG_EPOLL)
26578cda 4496 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
3e4827b0 4497 return -EINVAL;
2d74d042 4498 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4499 return -EINVAL;
3e4827b0
JA
4500
4501 req->epoll.epfd = READ_ONCE(sqe->fd);
4502 req->epoll.op = READ_ONCE(sqe->len);
4503 req->epoll.fd = READ_ONCE(sqe->off);
4504
4505 if (ep_op_has_event(req->epoll.op)) {
4506 struct epoll_event __user *ev;
4507
4508 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4509 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4510 return -EFAULT;
4511 }
4512
4513 return 0;
4514#else
4515 return -EOPNOTSUPP;
4516#endif
4517}
4518
889fca73 4519static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
3e4827b0
JA
4520{
4521#if defined(CONFIG_EPOLL)
4522 struct io_epoll *ie = &req->epoll;
4523 int ret;
45d189c6 4524 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3e4827b0
JA
4525
4526 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4527 if (force_nonblock && ret == -EAGAIN)
4528 return -EAGAIN;
4529
4530 if (ret < 0)
93d2bcd2 4531 req_set_fail(req);
889fca73 4532 __io_req_complete(req, issue_flags, ret, 0);
3e4827b0
JA
4533 return 0;
4534#else
4535 return -EOPNOTSUPP;
4536#endif
4537}
4538
c1ca757b
JA
4539static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4540{
4541#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
26578cda 4542 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
c1ca757b 4543 return -EINVAL;
3232dd02
PB
4544 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4545 return -EINVAL;
c1ca757b
JA
4546
4547 req->madvise.addr = READ_ONCE(sqe->addr);
4548 req->madvise.len = READ_ONCE(sqe->len);
4549 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4550 return 0;
4551#else
4552 return -EOPNOTSUPP;
4553#endif
4554}
4555
45d189c6 4556static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
c1ca757b
JA
4557{
4558#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4559 struct io_madvise *ma = &req->madvise;
4560 int ret;
4561
45d189c6 4562 if (issue_flags & IO_URING_F_NONBLOCK)
c1ca757b
JA
4563 return -EAGAIN;
4564
0726b01e 4565 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
c1ca757b 4566 if (ret < 0)
93d2bcd2 4567 req_set_fail(req);
e1e16097 4568 io_req_complete(req, ret);
c1ca757b
JA
4569 return 0;
4570#else
4571 return -EOPNOTSUPP;
4572#endif
4573}
4574
4840e418
JA
4575static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4576{
26578cda 4577 if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
4840e418 4578 return -EINVAL;
3232dd02
PB
4579 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4580 return -EINVAL;
4840e418
JA
4581
4582 req->fadvise.offset = READ_ONCE(sqe->off);
4583 req->fadvise.len = READ_ONCE(sqe->len);
4584 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4585 return 0;
4586}
4587
45d189c6 4588static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4840e418
JA
4589{
4590 struct io_fadvise *fa = &req->fadvise;
4591 int ret;
4592
45d189c6 4593 if (issue_flags & IO_URING_F_NONBLOCK) {
3e69426d
JA
4594 switch (fa->advice) {
4595 case POSIX_FADV_NORMAL:
4596 case POSIX_FADV_RANDOM:
4597 case POSIX_FADV_SEQUENTIAL:
4598 break;
4599 default:
4600 return -EAGAIN;
4601 }
4602 }
4840e418
JA
4603
4604 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4605 if (ret < 0)
93d2bcd2 4606 req_set_fail(req);
0bdf3398 4607 __io_req_complete(req, issue_flags, ret, 0);
4840e418
JA
4608 return 0;
4609}
4610
eddc7ef5
JA
4611static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4612{
2d74d042 4613 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4614 return -EINVAL;
26578cda 4615 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
eddc7ef5 4616 return -EINVAL;
9c280f90 4617 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4618 return -EBADF;
eddc7ef5 4619
1d9e1288
BM
4620 req->statx.dfd = READ_ONCE(sqe->fd);
4621 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 4622 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
4623 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4624 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5
JA
4625
4626 return 0;
4627}
4628
45d189c6 4629static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
eddc7ef5 4630{
1d9e1288 4631 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
4632 int ret;
4633
59d70013 4634 if (issue_flags & IO_URING_F_NONBLOCK)
eddc7ef5
JA
4635 return -EAGAIN;
4636
e62753e4
BM
4637 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4638 ctx->buffer);
eddc7ef5 4639
eddc7ef5 4640 if (ret < 0)
93d2bcd2 4641 req_set_fail(req);
e1e16097 4642 io_req_complete(req, ret);
eddc7ef5
JA
4643 return 0;
4644}
4645
b5dba59e
JA
4646static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4647{
14587a46 4648 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4649 return -EINVAL;
b5dba59e 4650 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
7df778be 4651 sqe->rw_flags || sqe->buf_index)
b5dba59e 4652 return -EINVAL;
9c280f90 4653 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4654 return -EBADF;
b5dba59e
JA
4655
4656 req->close.fd = READ_ONCE(sqe->fd);
7df778be
PB
4657 req->close.file_slot = READ_ONCE(sqe->file_index);
4658 if (req->close.file_slot && req->close.fd)
4659 return -EINVAL;
4660
b5dba59e 4661 return 0;
b5dba59e
JA
4662}
4663
889fca73 4664static int io_close(struct io_kiocb *req, unsigned int issue_flags)
b5dba59e 4665{
9eac1904 4666 struct files_struct *files = current->files;
3af73b28 4667 struct io_close *close = &req->close;
9eac1904 4668 struct fdtable *fdt;
a1fde923
PB
4669 struct file *file = NULL;
4670 int ret = -EBADF;
b5dba59e 4671
7df778be
PB
4672 if (req->close.file_slot) {
4673 ret = io_close_fixed(req, issue_flags);
4674 goto err;
4675 }
4676
9eac1904
JA
4677 spin_lock(&files->file_lock);
4678 fdt = files_fdtable(files);
4679 if (close->fd >= fdt->max_fds) {
4680 spin_unlock(&files->file_lock);
4681 goto err;
4682 }
4683 file = fdt->fd[close->fd];
a1fde923 4684 if (!file || file->f_op == &io_uring_fops) {
9eac1904
JA
4685 spin_unlock(&files->file_lock);
4686 file = NULL;
4687 goto err;
3af73b28 4688 }
b5dba59e
JA
4689
4690 /* if the file has a flush method, be safe and punt to async */
45d189c6 4691 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
9eac1904 4692 spin_unlock(&files->file_lock);
0bf0eefd 4693 return -EAGAIN;
a2100672 4694 }
b5dba59e 4695
9eac1904
JA
4696 ret = __close_fd_get_file(close->fd, &file);
4697 spin_unlock(&files->file_lock);
4698 if (ret < 0) {
4699 if (ret == -ENOENT)
4700 ret = -EBADF;
4701 goto err;
4702 }
4703
3af73b28 4704 /* No ->flush() or already async, safely close from here */
9eac1904
JA
4705 ret = filp_close(file, current->files);
4706err:
3af73b28 4707 if (ret < 0)
93d2bcd2 4708 req_set_fail(req);
9eac1904
JA
4709 if (file)
4710 fput(file);
889fca73 4711 __io_req_complete(req, issue_flags, ret, 0);
1a417f4e 4712 return 0;
b5dba59e
JA
4713}
4714
1155c76a 4715static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
4716{
4717 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4 4718
5d17b4a4
JA
4719 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4720 return -EINVAL;
26578cda
PB
4721 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4722 sqe->splice_fd_in))
5d17b4a4
JA
4723 return -EINVAL;
4724
8ed8d3c3
JA
4725 req->sync.off = READ_ONCE(sqe->off);
4726 req->sync.len = READ_ONCE(sqe->len);
4727 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
4728 return 0;
4729}
4730
45d189c6 4731static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 4732{
8ed8d3c3
JA
4733 int ret;
4734
ac45abc0 4735 /* sync_file_range always requires a blocking context */
45d189c6 4736 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
4737 return -EAGAIN;
4738
9adbd45d 4739 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
4740 req->sync.flags);
4741 if (ret < 0)
93d2bcd2 4742 req_set_fail(req);
e1e16097 4743 io_req_complete(req, ret);
5d17b4a4
JA
4744 return 0;
4745}
4746
469956e8 4747#if defined(CONFIG_NET)
02d27d89
PB
4748static int io_setup_async_msg(struct io_kiocb *req,
4749 struct io_async_msghdr *kmsg)
4750{
e8c2bc1f
JA
4751 struct io_async_msghdr *async_msg = req->async_data;
4752
4753 if (async_msg)
02d27d89 4754 return -EAGAIN;
e8c2bc1f 4755 if (io_alloc_async_data(req)) {
257e84a5 4756 kfree(kmsg->free_iov);
02d27d89
PB
4757 return -ENOMEM;
4758 }
e8c2bc1f 4759 async_msg = req->async_data;
02d27d89 4760 req->flags |= REQ_F_NEED_CLEANUP;
e8c2bc1f 4761 memcpy(async_msg, kmsg, sizeof(*kmsg));
2a780802 4762 async_msg->msg.msg_name = &async_msg->addr;
257e84a5
PB
4763 /* if were using fast_iov, set it to the new one */
4764 if (!async_msg->free_iov)
4765 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4766
02d27d89
PB
4767 return -EAGAIN;
4768}
4769
2ae523ed
PB
4770static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4771 struct io_async_msghdr *iomsg)
4772{
2ae523ed 4773 iomsg->msg.msg_name = &iomsg->addr;
257e84a5 4774 iomsg->free_iov = iomsg->fast_iov;
2ae523ed 4775 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
257e84a5 4776 req->sr_msg.msg_flags, &iomsg->free_iov);
2ae523ed
PB
4777}
4778
93642ef8
PB
4779static int io_sendmsg_prep_async(struct io_kiocb *req)
4780{
4781 int ret;
4782
93642ef8
PB
4783 ret = io_sendmsg_copy_hdr(req, req->async_data);
4784 if (!ret)
4785 req->flags |= REQ_F_NEED_CLEANUP;
4786 return ret;
4787}
4788
3529d8c2 4789static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 4790{
e47293fd 4791 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 4792
d2b6f48b
PB
4793 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4794 return -EINVAL;
7cb4bf4c
JA
4795 if (unlikely(sqe->addr2 || sqe->file_index))
4796 return -EINVAL;
f55b8a50
JA
4797 if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
4798 return -EINVAL;
d2b6f48b 4799
270a5940 4800 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 4801 sr->len = READ_ONCE(sqe->len);
04411806
PB
4802 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4803 if (sr->msg_flags & MSG_DONTWAIT)
4804 req->flags |= REQ_F_NOWAIT;
3529d8c2 4805
d8768362
JA
4806#ifdef CONFIG_COMPAT
4807 if (req->ctx->compat)
4808 sr->msg_flags |= MSG_CMSG_COMPAT;
4809#endif
93642ef8 4810 return 0;
03b1230c
JA
4811}
4812
889fca73 4813static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 4814{
6b754c8b 4815 struct io_async_msghdr iomsg, *kmsg;
0fa03c62 4816 struct socket *sock;
7a7cacba 4817 unsigned flags;
0031275d 4818 int min_ret = 0;
0fa03c62
JA
4819 int ret;
4820
dba4a925 4821 sock = sock_from_file(req->file);
7a7cacba 4822 if (unlikely(!sock))
dba4a925 4823 return -ENOTSOCK;
3529d8c2 4824
257e84a5
PB
4825 kmsg = req->async_data;
4826 if (!kmsg) {
7a7cacba
PB
4827 ret = io_sendmsg_copy_hdr(req, &iomsg);
4828 if (ret)
4829 return ret;
4830 kmsg = &iomsg;
0fa03c62 4831 }
0fa03c62 4832
04411806
PB
4833 flags = req->sr_msg.msg_flags;
4834 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 4835 flags |= MSG_DONTWAIT;
0031275d
SM
4836 if (flags & MSG_WAITALL)
4837 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4838
7a7cacba 4839 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
45d189c6 4840 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
7a7cacba
PB
4841 return io_setup_async_msg(req, kmsg);
4842 if (ret == -ERESTARTSYS)
4843 ret = -EINTR;
0fa03c62 4844
257e84a5
PB
4845 /* fast path, check for non-NULL to avoid function call */
4846 if (kmsg->free_iov)
4847 kfree(kmsg->free_iov);
99bc4c38 4848 req->flags &= ~REQ_F_NEED_CLEANUP;
0031275d 4849 if (ret < min_ret)
93d2bcd2 4850 req_set_fail(req);
889fca73 4851 __io_req_complete(req, issue_flags, ret, 0);
5d17b4a4 4852 return 0;
03b1230c 4853}
aa1fa28f 4854
889fca73 4855static int io_send(struct io_kiocb *req, unsigned int issue_flags)
fddaface 4856{
7a7cacba
PB
4857 struct io_sr_msg *sr = &req->sr_msg;
4858 struct msghdr msg;
4859 struct iovec iov;
fddaface 4860 struct socket *sock;
7a7cacba 4861 unsigned flags;
0031275d 4862 int min_ret = 0;
fddaface
JA
4863 int ret;
4864
dba4a925 4865 sock = sock_from_file(req->file);
7a7cacba 4866 if (unlikely(!sock))
dba4a925 4867 return -ENOTSOCK;
fddaface 4868
7a7cacba
PB
4869 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4870 if (unlikely(ret))
14db8411 4871 return ret;
fddaface 4872
7a7cacba
PB
4873 msg.msg_name = NULL;
4874 msg.msg_control = NULL;
4875 msg.msg_controllen = 0;
4876 msg.msg_namelen = 0;
fddaface 4877
04411806
PB
4878 flags = req->sr_msg.msg_flags;
4879 if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 4880 flags |= MSG_DONTWAIT;
0031275d
SM
4881 if (flags & MSG_WAITALL)
4882 min_ret = iov_iter_count(&msg.msg_iter);
4883
7a7cacba
PB
4884 msg.msg_flags = flags;
4885 ret = sock_sendmsg(sock, &msg);
45d189c6 4886 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
7a7cacba
PB
4887 return -EAGAIN;
4888 if (ret == -ERESTARTSYS)
4889 ret = -EINTR;
fddaface 4890
0031275d 4891 if (ret < min_ret)
93d2bcd2 4892 req_set_fail(req);
889fca73 4893 __io_req_complete(req, issue_flags, ret, 0);
fddaface 4894 return 0;
fddaface
JA
4895}
4896
1400e697
PB
4897static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4898 struct io_async_msghdr *iomsg)
52de1fe1
JA
4899{
4900 struct io_sr_msg *sr = &req->sr_msg;
4901 struct iovec __user *uiov;
4902 size_t iov_len;
4903 int ret;
4904
1400e697
PB
4905 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4906 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
4907 if (ret)
4908 return ret;
4909
4910 if (req->flags & REQ_F_BUFFER_SELECT) {
4911 if (iov_len > 1)
4912 return -EINVAL;
5476dfed 4913 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
52de1fe1 4914 return -EFAULT;
5476dfed 4915 sr->len = iomsg->fast_iov[0].iov_len;
257e84a5 4916 iomsg->free_iov = NULL;
52de1fe1 4917 } else {
257e84a5 4918 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 4919 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
257e84a5 4920 &iomsg->free_iov, &iomsg->msg.msg_iter,
89cd35c5 4921 false);
52de1fe1
JA
4922 if (ret > 0)
4923 ret = 0;
4924 }
4925
4926 return ret;
4927}
4928
4929#ifdef CONFIG_COMPAT
4930static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 4931 struct io_async_msghdr *iomsg)
52de1fe1 4932{
52de1fe1
JA
4933 struct io_sr_msg *sr = &req->sr_msg;
4934 struct compat_iovec __user *uiov;
4935 compat_uptr_t ptr;
4936 compat_size_t len;
4937 int ret;
4938
4af3417a
PB
4939 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4940 &ptr, &len);
52de1fe1
JA
4941 if (ret)
4942 return ret;
4943
4944 uiov = compat_ptr(ptr);
4945 if (req->flags & REQ_F_BUFFER_SELECT) {
4946 compat_ssize_t clen;
4947
4948 if (len > 1)
4949 return -EINVAL;
4950 if (!access_ok(uiov, sizeof(*uiov)))
4951 return -EFAULT;
4952 if (__get_user(clen, &uiov->iov_len))
4953 return -EFAULT;
4954 if (clen < 0)
4955 return -EINVAL;
2d280bc8 4956 sr->len = clen;
257e84a5 4957 iomsg->free_iov = NULL;
52de1fe1 4958 } else {
257e84a5 4959 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 4960 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
257e84a5 4961 UIO_FASTIOV, &iomsg->free_iov,
89cd35c5 4962 &iomsg->msg.msg_iter, true);
52de1fe1
JA
4963 if (ret < 0)
4964 return ret;
4965 }
4966
4967 return 0;
4968}
4969#endif
4970
1400e697
PB
4971static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4972 struct io_async_msghdr *iomsg)
52de1fe1 4973{
1400e697 4974 iomsg->msg.msg_name = &iomsg->addr;
52de1fe1
JA
4975
4976#ifdef CONFIG_COMPAT
4977 if (req->ctx->compat)
1400e697 4978 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 4979#endif
52de1fe1 4980
1400e697 4981 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
4982}
4983
bcda7baa 4984static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
7fbb1b54 4985 bool needs_lock)
bcda7baa
JA
4986{
4987 struct io_sr_msg *sr = &req->sr_msg;
4988 struct io_buffer *kbuf;
4989
bcda7baa
JA
4990 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4991 if (IS_ERR(kbuf))
4992 return kbuf;
4993
4994 sr->kbuf = kbuf;
4995 req->flags |= REQ_F_BUFFER_SELECTED;
bcda7baa 4996 return kbuf;
fddaface
JA
4997}
4998
7fbb1b54
PB
4999static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
5000{
5001 return io_put_kbuf(req, req->sr_msg.kbuf);
5002}
5003
93642ef8 5004static int io_recvmsg_prep_async(struct io_kiocb *req)
aa1fa28f 5005{
99bc4c38 5006 int ret;
3529d8c2 5007
93642ef8
PB
5008 ret = io_recvmsg_copy_hdr(req, req->async_data);
5009 if (!ret)
5010 req->flags |= REQ_F_NEED_CLEANUP;
5011 return ret;
5012}
5013
5014static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5015{
5016 struct io_sr_msg *sr = &req->sr_msg;
5017
d2b6f48b
PB
5018 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5019 return -EINVAL;
aa9362ee
JA
5020 if (unlikely(sqe->addr2 || sqe->file_index))
5021 return -EINVAL;
f55b8a50
JA
5022 if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
5023 return -EINVAL;
d2b6f48b 5024
270a5940 5025 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 5026 sr->len = READ_ONCE(sqe->len);
bcda7baa 5027 sr->bgid = READ_ONCE(sqe->buf_group);
04411806
PB
5028 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
5029 if (sr->msg_flags & MSG_DONTWAIT)
5030 req->flags |= REQ_F_NOWAIT;
06b76d44 5031
d8768362
JA
5032#ifdef CONFIG_COMPAT
5033 if (req->ctx->compat)
5034 sr->msg_flags |= MSG_CMSG_COMPAT;
5035#endif
93642ef8 5036 return 0;
aa1fa28f
JA
5037}
5038
889fca73 5039static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 5040{
6b754c8b 5041 struct io_async_msghdr iomsg, *kmsg;
03b1230c 5042 struct socket *sock;
7fbb1b54 5043 struct io_buffer *kbuf;
7a7cacba 5044 unsigned flags;
0031275d 5045 int min_ret = 0;
52de1fe1 5046 int ret, cflags = 0;
45d189c6 5047 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
03b1230c 5048
dba4a925 5049 sock = sock_from_file(req->file);
7a7cacba 5050 if (unlikely(!sock))
dba4a925 5051 return -ENOTSOCK;
3529d8c2 5052
257e84a5
PB
5053 kmsg = req->async_data;
5054 if (!kmsg) {
7a7cacba
PB
5055 ret = io_recvmsg_copy_hdr(req, &iomsg);
5056 if (ret)
681fda8d 5057 return ret;
7a7cacba
PB
5058 kmsg = &iomsg;
5059 }
03b1230c 5060
bc02ef33 5061 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 5062 kbuf = io_recv_buffer_select(req, !force_nonblock);
bc02ef33 5063 if (IS_ERR(kbuf))
52de1fe1 5064 return PTR_ERR(kbuf);
7a7cacba 5065 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
5476dfed
PB
5066 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
5067 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
7a7cacba
PB
5068 1, req->sr_msg.len);
5069 }
52de1fe1 5070
04411806
PB
5071 flags = req->sr_msg.msg_flags;
5072 if (force_nonblock)
7a7cacba 5073 flags |= MSG_DONTWAIT;
0031275d
SM
5074 if (flags & MSG_WAITALL)
5075 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5076
7a7cacba
PB
5077 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5078 kmsg->uaddr, flags);
0e1b6fe3
PB
5079 if (force_nonblock && ret == -EAGAIN)
5080 return io_setup_async_msg(req, kmsg);
7a7cacba
PB
5081 if (ret == -ERESTARTSYS)
5082 ret = -EINTR;
03b1230c 5083
7fbb1b54
PB
5084 if (req->flags & REQ_F_BUFFER_SELECTED)
5085 cflags = io_put_recv_kbuf(req);
257e84a5
PB
5086 /* fast path, check for non-NULL to avoid function call */
5087 if (kmsg->free_iov)
5088 kfree(kmsg->free_iov);
99bc4c38 5089 req->flags &= ~REQ_F_NEED_CLEANUP;
0031275d 5090 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
93d2bcd2 5091 req_set_fail(req);
889fca73 5092 __io_req_complete(req, issue_flags, ret, cflags);
03b1230c 5093 return 0;
0fa03c62 5094}
5d17b4a4 5095
889fca73 5096static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
fddaface 5097{
6b754c8b 5098 struct io_buffer *kbuf;
7a7cacba
PB
5099 struct io_sr_msg *sr = &req->sr_msg;
5100 struct msghdr msg;
5101 void __user *buf = sr->buf;
fddaface 5102 struct socket *sock;
7a7cacba
PB
5103 struct iovec iov;
5104 unsigned flags;
0031275d 5105 int min_ret = 0;
bcda7baa 5106 int ret, cflags = 0;
45d189c6 5107 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
fddaface 5108
dba4a925 5109 sock = sock_from_file(req->file);
7a7cacba 5110 if (unlikely(!sock))
dba4a925 5111 return -ENOTSOCK;
fddaface 5112
bc02ef33 5113 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 5114 kbuf = io_recv_buffer_select(req, !force_nonblock);
bcda7baa
JA
5115 if (IS_ERR(kbuf))
5116 return PTR_ERR(kbuf);
7a7cacba 5117 buf = u64_to_user_ptr(kbuf->addr);
bc02ef33 5118 }
bcda7baa 5119
7a7cacba 5120 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
5121 if (unlikely(ret))
5122 goto out_free;
fddaface 5123
7a7cacba
PB
5124 msg.msg_name = NULL;
5125 msg.msg_control = NULL;
5126 msg.msg_controllen = 0;
5127 msg.msg_namelen = 0;
5128 msg.msg_iocb = NULL;
5129 msg.msg_flags = 0;
fddaface 5130
04411806
PB
5131 flags = req->sr_msg.msg_flags;
5132 if (force_nonblock)
7a7cacba 5133 flags |= MSG_DONTWAIT;
0031275d
SM
5134 if (flags & MSG_WAITALL)
5135 min_ret = iov_iter_count(&msg.msg_iter);
5136
7a7cacba
PB
5137 ret = sock_recvmsg(sock, &msg, flags);
5138 if (force_nonblock && ret == -EAGAIN)
5139 return -EAGAIN;
5140 if (ret == -ERESTARTSYS)
5141 ret = -EINTR;
14c32eee 5142out_free:
7fbb1b54
PB
5143 if (req->flags & REQ_F_BUFFER_SELECTED)
5144 cflags = io_put_recv_kbuf(req);
0031275d 5145 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
93d2bcd2 5146 req_set_fail(req);
889fca73 5147 __io_req_complete(req, issue_flags, ret, cflags);
fddaface 5148 return 0;
fddaface
JA
5149}
5150
3529d8c2 5151static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 5152{
8ed8d3c3
JA
5153 struct io_accept *accept = &req->accept;
5154
14587a46 5155 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
17f2fe35 5156 return -EINVAL;
aaa4db12 5157 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
5158 return -EINVAL;
5159
d55e5f5b
JA
5160 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5161 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 5162 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 5163 accept->nofile = rlimit(RLIMIT_NOFILE);
a7083ad5 5164
aaa4db12 5165 accept->file_slot = READ_ONCE(sqe->file_index);
56a8e15e 5166 if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
aaa4db12 5167 return -EINVAL;
a7083ad5
PB
5168 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
5169 return -EINVAL;
5170 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
5171 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
8ed8d3c3 5172 return 0;
8ed8d3c3 5173}
17f2fe35 5174
889fca73 5175static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3
JA
5176{
5177 struct io_accept *accept = &req->accept;
45d189c6 5178 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ac45abc0 5179 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
aaa4db12 5180 bool fixed = !!accept->file_slot;
a7083ad5
PB
5181 struct file *file;
5182 int ret, fd;
8ed8d3c3 5183
e697deed
JX
5184 if (req->file->f_flags & O_NONBLOCK)
5185 req->flags |= REQ_F_NOWAIT;
5186
aaa4db12
PB
5187 if (!fixed) {
5188 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
5189 if (unlikely(fd < 0))
5190 return fd;
5191 }
a7083ad5
PB
5192 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
5193 accept->flags);
5194 if (IS_ERR(file)) {
aaa4db12
PB
5195 if (!fixed)
5196 put_unused_fd(fd);
a7083ad5
PB
5197 ret = PTR_ERR(file);
5198 if (ret == -EAGAIN && force_nonblock)
5199 return -EAGAIN;
ac45abc0
PB
5200 if (ret == -ERESTARTSYS)
5201 ret = -EINTR;
93d2bcd2 5202 req_set_fail(req);
aaa4db12 5203 } else if (!fixed) {
a7083ad5
PB
5204 fd_install(fd, file);
5205 ret = fd;
aaa4db12
PB
5206 } else {
5207 ret = io_install_fixed_file(req, file, issue_flags,
5208 accept->file_slot - 1);
ac45abc0 5209 }
889fca73 5210 __io_req_complete(req, issue_flags, ret, 0);
17f2fe35 5211 return 0;
8ed8d3c3
JA
5212}
5213
93642ef8
PB
5214static int io_connect_prep_async(struct io_kiocb *req)
5215{
5216 struct io_async_connect *io = req->async_data;
5217 struct io_connect *conn = &req->connect;
5218
5219 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
5220}
5221
3529d8c2 5222static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 5223{
3529d8c2 5224 struct io_connect *conn = &req->connect;
f499a021 5225
14587a46 5226 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3fbb51c1 5227 return -EINVAL;
26578cda
PB
5228 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
5229 sqe->splice_fd_in)
3fbb51c1
JA
5230 return -EINVAL;
5231
3529d8c2
JA
5232 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5233 conn->addr_len = READ_ONCE(sqe->addr2);
93642ef8 5234 return 0;
f499a021
JA
5235}
5236
889fca73 5237static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
f8e85cf2 5238{
e8c2bc1f 5239 struct io_async_connect __io, *io;
f8e85cf2 5240 unsigned file_flags;
3fbb51c1 5241 int ret;
45d189c6 5242 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
f8e85cf2 5243
e8c2bc1f
JA
5244 if (req->async_data) {
5245 io = req->async_data;
f499a021 5246 } else {
3529d8c2
JA
5247 ret = move_addr_to_kernel(req->connect.addr,
5248 req->connect.addr_len,
e8c2bc1f 5249 &__io.address);
f499a021
JA
5250 if (ret)
5251 goto out;
5252 io = &__io;
5253 }
5254
3fbb51c1
JA
5255 file_flags = force_nonblock ? O_NONBLOCK : 0;
5256
e8c2bc1f 5257 ret = __sys_connect_file(req->file, &io->address,
3fbb51c1 5258 req->connect.addr_len, file_flags);
87f80d62 5259 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
e8c2bc1f 5260 if (req->async_data)
b7bb4f7d 5261 return -EAGAIN;
e8c2bc1f 5262 if (io_alloc_async_data(req)) {
f499a021
JA
5263 ret = -ENOMEM;
5264 goto out;
5265 }
e8c2bc1f 5266 memcpy(req->async_data, &__io, sizeof(__io));
f8e85cf2 5267 return -EAGAIN;
f499a021 5268 }
f8e85cf2
JA
5269 if (ret == -ERESTARTSYS)
5270 ret = -EINTR;
f499a021 5271out:
4e88d6e7 5272 if (ret < 0)
93d2bcd2 5273 req_set_fail(req);
889fca73 5274 __io_req_complete(req, issue_flags, ret, 0);
f8e85cf2 5275 return 0;
469956e8
Y
5276}
5277#else /* !CONFIG_NET */
99a10081
JA
5278#define IO_NETOP_FN(op) \
5279static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5280{ \
5281 return -EOPNOTSUPP; \
5282}
5283
5284#define IO_NETOP_PREP(op) \
5285IO_NETOP_FN(op) \
5286static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5287{ \
5288 return -EOPNOTSUPP; \
5289} \
5290
5291#define IO_NETOP_PREP_ASYNC(op) \
5292IO_NETOP_PREP(op) \
5293static int io_##op##_prep_async(struct io_kiocb *req) \
5294{ \
5295 return -EOPNOTSUPP; \
5296}
5297
5298IO_NETOP_PREP_ASYNC(sendmsg);
5299IO_NETOP_PREP_ASYNC(recvmsg);
5300IO_NETOP_PREP_ASYNC(connect);
5301IO_NETOP_PREP(accept);
5302IO_NETOP_FN(send);
5303IO_NETOP_FN(recv);
469956e8 5304#endif /* CONFIG_NET */
f8e85cf2 5305
d7718a9d
JA
5306struct io_poll_table {
5307 struct poll_table_struct pt;
5308 struct io_kiocb *req;
68b11e8b 5309 int nr_entries;
d7718a9d
JA
5310 int error;
5311};
ce593a6c 5312
d7718a9d 5313static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
5b0a6acc 5314 __poll_t mask, io_req_tw_func_t func)
d7718a9d 5315{
d7718a9d
JA
5316 /* for instances that support it check for an event match first: */
5317 if (mask && !(mask & poll->events))
5318 return 0;
5319
5320 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5321
5322 list_del_init(&poll->wait.entry);
5323
d7718a9d 5324 req->result = mask;
5b0a6acc 5325 req->io_task_work.func = func;
6d816e08 5326
d7718a9d 5327 /*
e3aabf95
JA
5328 * If this fails, then the task is exiting. When a task exits, the
5329 * work gets canceled, so just cancel this request as well instead
5330 * of executing it. We can't safely execute it anyway, as we may not
5331 * have the needed state needed for it anyway.
d7718a9d 5332 */
e09ee510 5333 io_req_task_work_add(req);
d7718a9d
JA
5334 return 1;
5335}
5336
74ce6ce4
JA
5337static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
5338 __acquires(&req->ctx->completion_lock)
5339{
5340 struct io_ring_ctx *ctx = req->ctx;
5341
316319e8 5342 /* req->task == current here, checking PF_EXITING is safe */
e09ee510
PB
5343 if (unlikely(req->task->flags & PF_EXITING))
5344 WRITE_ONCE(poll->canceled, true);
5345
74ce6ce4
JA
5346 if (!req->result && !READ_ONCE(poll->canceled)) {
5347 struct poll_table_struct pt = { ._key = poll->events };
5348
5349 req->result = vfs_poll(req->file, &pt) & poll->events;
5350 }
5351
79ebeaee 5352 spin_lock(&ctx->completion_lock);
74ce6ce4
JA
5353 if (!req->result && !READ_ONCE(poll->canceled)) {
5354 add_wait_queue(poll->head, &poll->wait);
5355 return true;
5356 }
5357
5358 return false;
5359}
5360
d4e7cd36 5361static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
18bceab1 5362{
e8c2bc1f 5363 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
d4e7cd36 5364 if (req->opcode == IORING_OP_POLL_ADD)
e8c2bc1f 5365 return req->async_data;
d4e7cd36
JA
5366 return req->apoll->double_poll;
5367}
5368
5369static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5370{
5371 if (req->opcode == IORING_OP_POLL_ADD)
5372 return &req->poll;
5373 return &req->apoll->poll;
5374}
5375
5376static void io_poll_remove_double(struct io_kiocb *req)
e07785b0 5377 __must_hold(&req->ctx->completion_lock)
d4e7cd36
JA
5378{
5379 struct io_poll_iocb *poll = io_poll_get_double(req);
18bceab1
JA
5380
5381 lockdep_assert_held(&req->ctx->completion_lock);
5382
5383 if (poll && poll->head) {
5384 struct wait_queue_head *head = poll->head;
5385
79ebeaee 5386 spin_lock_irq(&head->lock);
18bceab1
JA
5387 list_del_init(&poll->wait.entry);
5388 if (poll->wait.private)
de9b4cca 5389 req_ref_put(req);
18bceab1 5390 poll->head = NULL;
79ebeaee 5391 spin_unlock_irq(&head->lock);
18bceab1
JA
5392 }
5393}
5394
31efe48e 5395static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
e07785b0 5396 __must_hold(&req->ctx->completion_lock)
18bceab1
JA
5397{
5398 struct io_ring_ctx *ctx = req->ctx;
88e41cf9 5399 unsigned flags = IORING_CQE_F_MORE;
e27414be 5400 int error;
18bceab1 5401
e27414be 5402 if (READ_ONCE(req->poll.canceled)) {
45ab03b1 5403 error = -ECANCELED;
88e41cf9 5404 req->poll.events |= EPOLLONESHOT;
e27414be 5405 } else {
5082620f 5406 error = mangle_poll(mask);
e27414be 5407 }
b69de288
JA
5408 if (req->poll.events & EPOLLONESHOT)
5409 flags = 0;
a62682f9
HX
5410 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
5411 req->poll.events |= EPOLLONESHOT;
88e41cf9 5412 flags = 0;
a62682f9 5413 }
7b289c38
HX
5414 if (flags & IORING_CQE_F_MORE)
5415 ctx->cq_extra++;
18bceab1 5416
88e41cf9 5417 return !(flags & IORING_CQE_F_MORE);
18bceab1
JA
5418}
5419
31efe48e
XW
5420static inline bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
5421 __must_hold(&req->ctx->completion_lock)
5422{
5423 bool done;
5424
5425 done = __io_poll_complete(req, mask);
5426 io_commit_cqring(req->ctx);
5427 return done;
5428}
5429
f237c30a 5430static void io_poll_task_func(struct io_kiocb *req, bool *locked)
18bceab1
JA
5431{
5432 struct io_ring_ctx *ctx = req->ctx;
dd221f46 5433 struct io_kiocb *nxt;
18bceab1
JA
5434
5435 if (io_poll_rewait(req, &req->poll)) {
79ebeaee 5436 spin_unlock(&ctx->completion_lock);
dd221f46 5437 } else {
f40b964a 5438 bool done;
18bceab1 5439
5b7aa38d
HX
5440 if (req->poll.done) {
5441 spin_unlock(&ctx->completion_lock);
5442 return;
5443 }
31efe48e 5444 done = __io_poll_complete(req, req->result);
88e41cf9 5445 if (done) {
a890d01e 5446 io_poll_remove_double(req);
88e41cf9 5447 hash_del(&req->hash_node);
bd99c71b 5448 req->poll.done = true;
f40b964a 5449 } else {
88e41cf9
JA
5450 req->result = 0;
5451 add_wait_queue(req->poll.head, &req->poll.wait);
5452 }
31efe48e 5453 io_commit_cqring(ctx);
79ebeaee 5454 spin_unlock(&ctx->completion_lock);
dd221f46 5455 io_cqring_ev_posted(ctx);
18bceab1 5456
88e41cf9
JA
5457 if (done) {
5458 nxt = io_put_req_find_next(req);
5459 if (nxt)
f237c30a 5460 io_req_task_submit(nxt, locked);
88e41cf9 5461 }
dd221f46 5462 }
18bceab1
JA
5463}
5464
5465static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
5466 int sync, void *key)
5467{
5468 struct io_kiocb *req = wait->private;
d4e7cd36 5469 struct io_poll_iocb *poll = io_poll_get_single(req);
18bceab1 5470 __poll_t mask = key_to_poll(key);
79ebeaee 5471 unsigned long flags;
18bceab1
JA
5472
5473 /* for instances that support it check for an event match first: */
5474 if (mask && !(mask & poll->events))
5475 return 0;
88e41cf9
JA
5476 if (!(poll->events & EPOLLONESHOT))
5477 return poll->wait.func(&poll->wait, mode, sync, key);
18bceab1 5478
8706e04e
JA
5479 list_del_init(&wait->entry);
5480
9ce85ef2 5481 if (poll->head) {
18bceab1
JA
5482 bool done;
5483
79ebeaee 5484 spin_lock_irqsave(&poll->head->lock, flags);
807abcb0 5485 done = list_empty(&poll->wait.entry);
18bceab1 5486 if (!done)
807abcb0 5487 list_del_init(&poll->wait.entry);
d4e7cd36
JA
5488 /* make sure double remove sees this as being gone */
5489 wait->private = NULL;
79ebeaee 5490 spin_unlock_irqrestore(&poll->head->lock, flags);
c8b5e260
JA
5491 if (!done) {
5492 /* use wait func handler, so it matches the rq type */
5493 poll->wait.func(&poll->wait, mode, sync, key);
5494 }
18bceab1 5495 }
de9b4cca 5496 req_ref_put(req);
18bceab1
JA
5497 return 1;
5498}
5499
5500static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5501 wait_queue_func_t wake_func)
5502{
5503 poll->head = NULL;
5504 poll->done = false;
5505 poll->canceled = false;
464dca61
JA
5506#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5507 /* mask in events that we always want/need */
5508 poll->events = events | IO_POLL_UNMASK;
18bceab1
JA
5509 INIT_LIST_HEAD(&poll->wait.entry);
5510 init_waitqueue_func_entry(&poll->wait, wake_func);
5511}
5512
5513static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
5514 struct wait_queue_head *head,
5515 struct io_poll_iocb **poll_ptr)
18bceab1
JA
5516{
5517 struct io_kiocb *req = pt->req;
5518
5519 /*
68b11e8b
PB
5520 * The file being polled uses multiple waitqueues for poll handling
5521 * (e.g. one for read, one for write). Setup a separate io_poll_iocb
5522 * if this happens.
18bceab1 5523 */
68b11e8b 5524 if (unlikely(pt->nr_entries)) {
58852d4d
PB
5525 struct io_poll_iocb *poll_one = poll;
5526
23a65db8
PB
5527 /* double add on the same waitqueue head, ignore */
5528 if (poll_one->head == head)
5529 return;
18bceab1 5530 /* already have a 2nd entry, fail a third attempt */
807abcb0 5531 if (*poll_ptr) {
23a65db8
PB
5532 if ((*poll_ptr)->head == head)
5533 return;
18bceab1
JA
5534 pt->error = -EINVAL;
5535 return;
5536 }
ea6a693d
JA
5537 /*
5538 * Can't handle multishot for double wait for now, turn it
5539 * into one-shot mode.
5540 */
7a274727
PB
5541 if (!(poll_one->events & EPOLLONESHOT))
5542 poll_one->events |= EPOLLONESHOT;
18bceab1
JA
5543 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5544 if (!poll) {
5545 pt->error = -ENOMEM;
5546 return;
5547 }
58852d4d 5548 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
de9b4cca 5549 req_ref_get(req);
18bceab1 5550 poll->wait.private = req;
807abcb0 5551 *poll_ptr = poll;
18bceab1
JA
5552 }
5553
68b11e8b 5554 pt->nr_entries++;
18bceab1 5555 poll->head = head;
a31eb4a2
JX
5556
5557 if (poll->events & EPOLLEXCLUSIVE)
5558 add_wait_queue_exclusive(head, &poll->wait);
5559 else
5560 add_wait_queue(head, &poll->wait);
18bceab1
JA
5561}
5562
5563static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5564 struct poll_table_struct *p)
5565{
5566 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
807abcb0 5567 struct async_poll *apoll = pt->req->apoll;
18bceab1 5568
807abcb0 5569 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
18bceab1
JA
5570}
5571
f237c30a 5572static void io_async_task_func(struct io_kiocb *req, bool *locked)
d7718a9d 5573{
d7718a9d
JA
5574 struct async_poll *apoll = req->apoll;
5575 struct io_ring_ctx *ctx = req->ctx;
5576
236daeae 5577 trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
d7718a9d 5578
74ce6ce4 5579 if (io_poll_rewait(req, &apoll->poll)) {
79ebeaee 5580 spin_unlock(&ctx->completion_lock);
74ce6ce4 5581 return;
d7718a9d
JA
5582 }
5583
0ea13b44 5584 hash_del(&req->hash_node);
d4e7cd36 5585 io_poll_remove_double(req);
bd99c71b 5586 apoll->poll.done = true;
79ebeaee 5587 spin_unlock(&ctx->completion_lock);
74ce6ce4 5588
0be0b0e3 5589 if (!READ_ONCE(apoll->poll.canceled))
f237c30a 5590 io_req_task_submit(req, locked);
0be0b0e3 5591 else
2593553a 5592 io_req_complete_failed(req, -ECANCELED);
d7718a9d
JA
5593}
5594
5595static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5596 void *key)
5597{
5598 struct io_kiocb *req = wait->private;
5599 struct io_poll_iocb *poll = &req->apoll->poll;
5600
5601 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5602 key_to_poll(key));
5603
5604 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5605}
5606
5607static void io_poll_req_insert(struct io_kiocb *req)
5608{
5609 struct io_ring_ctx *ctx = req->ctx;
5610 struct hlist_head *list;
5611
5612 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5613 hlist_add_head(&req->hash_node, list);
5614}
5615
5616static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5617 struct io_poll_iocb *poll,
5618 struct io_poll_table *ipt, __poll_t mask,
5619 wait_queue_func_t wake_func)
5620 __acquires(&ctx->completion_lock)
5621{
5622 struct io_ring_ctx *ctx = req->ctx;
5623 bool cancel = false;
5624
4d52f338 5625 INIT_HLIST_NODE(&req->hash_node);
18bceab1 5626 io_init_poll_iocb(poll, mask, wake_func);
b90cd197 5627 poll->file = req->file;
18bceab1 5628 poll->wait.private = req;
d7718a9d
JA
5629
5630 ipt->pt._key = mask;
5631 ipt->req = req;
68b11e8b
PB
5632 ipt->error = 0;
5633 ipt->nr_entries = 0;
d7718a9d 5634
d7718a9d 5635 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
68b11e8b
PB
5636 if (unlikely(!ipt->nr_entries) && !ipt->error)
5637 ipt->error = -EINVAL;
d7718a9d 5638
79ebeaee 5639 spin_lock(&ctx->completion_lock);
a890d01e 5640 if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
46fee9ab 5641 io_poll_remove_double(req);
d7718a9d 5642 if (likely(poll->head)) {
79ebeaee 5643 spin_lock_irq(&poll->head->lock);
d7718a9d
JA
5644 if (unlikely(list_empty(&poll->wait.entry))) {
5645 if (ipt->error)
5646 cancel = true;
5647 ipt->error = 0;
5648 mask = 0;
5649 }
88e41cf9 5650 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
d7718a9d
JA
5651 list_del_init(&poll->wait.entry);
5652 else if (cancel)
5653 WRITE_ONCE(poll->canceled, true);
5654 else if (!poll->done) /* actually waiting for an event */
5655 io_poll_req_insert(req);
79ebeaee 5656 spin_unlock_irq(&poll->head->lock);
d7718a9d
JA
5657 }
5658
5659 return mask;
5660}
5661
59b735ae
OL
5662enum {
5663 IO_APOLL_OK,
5664 IO_APOLL_ABORTED,
5665 IO_APOLL_READY
5666};
5667
5668static int io_arm_poll_handler(struct io_kiocb *req)
d7718a9d
JA
5669{
5670 const struct io_op_def *def = &io_op_defs[req->opcode];
5671 struct io_ring_ctx *ctx = req->ctx;
5672 struct async_poll *apoll;
5673 struct io_poll_table ipt;
b2d9c3da 5674 __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
d7718a9d
JA
5675
5676 if (!req->file || !file_can_poll(req->file))
59b735ae 5677 return IO_APOLL_ABORTED;
24c74678 5678 if (req->flags & REQ_F_POLLED)
59b735ae 5679 return IO_APOLL_ABORTED;
b2d9c3da
PB
5680 if (!def->pollin && !def->pollout)
5681 return IO_APOLL_ABORTED;
5682
5683 if (def->pollin) {
b2d9c3da
PB
5684 mask |= POLLIN | POLLRDNORM;
5685
5686 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5687 if ((req->opcode == IORING_OP_RECVMSG) &&
5688 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5689 mask &= ~POLLIN;
5690 } else {
b2d9c3da
PB
5691 mask |= POLLOUT | POLLWRNORM;
5692 }
5693
d7718a9d
JA
5694 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5695 if (unlikely(!apoll))
59b735ae 5696 return IO_APOLL_ABORTED;
807abcb0 5697 apoll->double_poll = NULL;
d7718a9d 5698 req->apoll = apoll;
b2d9c3da 5699 req->flags |= REQ_F_POLLED;
d7718a9d 5700 ipt.pt._qproc = io_async_queue_proc;
48dcd38d 5701 io_req_set_refcount(req);
d7718a9d
JA
5702
5703 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5704 io_async_wake);
79ebeaee 5705 spin_unlock(&ctx->completion_lock);
41a5169c
HX
5706 if (ret || ipt.error)
5707 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
5708
236daeae
OL
5709 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5710 mask, apoll->poll.events);
59b735ae 5711 return IO_APOLL_OK;
d7718a9d
JA
5712}
5713
5714static bool __io_poll_remove_one(struct io_kiocb *req,
b2e720ac 5715 struct io_poll_iocb *poll, bool do_cancel)
e07785b0 5716 __must_hold(&req->ctx->completion_lock)
221c5eb2 5717{
b41e9852 5718 bool do_complete = false;
221c5eb2 5719
5082620f
JA
5720 if (!poll->head)
5721 return false;
79ebeaee 5722 spin_lock_irq(&poll->head->lock);
b2e720ac
JA
5723 if (do_cancel)
5724 WRITE_ONCE(poll->canceled, true);
392edb45
JA
5725 if (!list_empty(&poll->wait.entry)) {
5726 list_del_init(&poll->wait.entry);
b41e9852 5727 do_complete = true;
221c5eb2 5728 }
79ebeaee 5729 spin_unlock_irq(&poll->head->lock);
3bfa5bcb 5730 hash_del(&req->hash_node);
d7718a9d
JA
5731 return do_complete;
5732}
5733
5d709043 5734static bool io_poll_remove_one(struct io_kiocb *req)
e07785b0 5735 __must_hold(&req->ctx->completion_lock)
d7718a9d
JA
5736{
5737 bool do_complete;
5738
d4e7cd36 5739 io_poll_remove_double(req);
e31001a3 5740 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
d4e7cd36 5741
b41e9852 5742 if (do_complete) {
d4d19c19 5743 io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
b41e9852 5744 io_commit_cqring(req->ctx);
93d2bcd2 5745 req_set_fail(req);
91c2f697 5746 io_put_req_deferred(req);
5d709043 5747 }
b41e9852 5748 return do_complete;
221c5eb2
JA
5749}
5750
76e1b642
JA
5751/*
5752 * Returns true if we found and killed one or more poll requests
5753 */
6b81928d 5754static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
3dd0c97a 5755 bool cancel_all)
221c5eb2 5756{
78076bb6 5757 struct hlist_node *tmp;
221c5eb2 5758 struct io_kiocb *req;
8e2e1faf 5759 int posted = 0, i;
221c5eb2 5760
79ebeaee 5761 spin_lock(&ctx->completion_lock);
78076bb6
JA
5762 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5763 struct hlist_head *list;
5764
5765 list = &ctx->cancel_hash[i];
f3606e3a 5766 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
f0baed8e 5767 if (io_match_task_safe(req, tsk, cancel_all))
f3606e3a
JA
5768 posted += io_poll_remove_one(req);
5769 }
221c5eb2 5770 }
79ebeaee 5771 spin_unlock(&ctx->completion_lock);
b41e9852 5772
8e2e1faf
JA
5773 if (posted)
5774 io_cqring_ev_posted(ctx);
76e1b642
JA
5775
5776 return posted != 0;
221c5eb2
JA
5777}
5778
9ba5fac8
PB
5779static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5780 bool poll_only)
e07785b0 5781 __must_hold(&ctx->completion_lock)
47f46768 5782{
78076bb6 5783 struct hlist_head *list;
47f46768
JA
5784 struct io_kiocb *req;
5785
78076bb6
JA
5786 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5787 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
5788 if (sqe_addr != req->user_data)
5789 continue;
9ba5fac8
PB
5790 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5791 continue;
b2cb805f 5792 return req;
47f46768 5793 }
b2cb805f
JA
5794 return NULL;
5795}
5796
9ba5fac8
PB
5797static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5798 bool poll_only)
e07785b0 5799 __must_hold(&ctx->completion_lock)
b2cb805f
JA
5800{
5801 struct io_kiocb *req;
5802
9ba5fac8 5803 req = io_poll_find(ctx, sqe_addr, poll_only);
b2cb805f
JA
5804 if (!req)
5805 return -ENOENT;
5806 if (io_poll_remove_one(req))
5807 return 0;
5808
5809 return -EALREADY;
47f46768
JA
5810}
5811
9096af3e
PB
5812static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5813 unsigned int flags)
5814{
5815 u32 events;
47f46768 5816
9096af3e
PB
5817 events = READ_ONCE(sqe->poll32_events);
5818#ifdef __BIG_ENDIAN
5819 events = swahw32(events);
5820#endif
5821 if (!(flags & IORING_POLL_ADD_MULTI))
5822 events |= EPOLLONESHOT;
5823 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
47f46768
JA
5824}
5825
c5de0036 5826static int io_poll_update_prep(struct io_kiocb *req,
3529d8c2 5827 const struct io_uring_sqe *sqe)
0969e783 5828{
c5de0036
PB
5829 struct io_poll_update *upd = &req->poll_update;
5830 u32 flags;
5831
0969e783
JA
5832 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5833 return -EINVAL;
26578cda 5834 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
c5de0036
PB
5835 return -EINVAL;
5836 flags = READ_ONCE(sqe->len);
5837 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5838 IORING_POLL_ADD_MULTI))
5839 return -EINVAL;
5840 /* meaningless without update */
5841 if (flags == IORING_POLL_ADD_MULTI)
0969e783
JA
5842 return -EINVAL;
5843
c5de0036
PB
5844 upd->old_user_data = READ_ONCE(sqe->addr);
5845 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5846 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
221c5eb2 5847
c5de0036
PB
5848 upd->new_user_data = READ_ONCE(sqe->off);
5849 if (!upd->update_user_data && upd->new_user_data)
5850 return -EINVAL;
5851 if (upd->update_events)
5852 upd->events = io_poll_parse_events(sqe, flags);
5853 else if (sqe->poll32_events)
5854 return -EINVAL;
221c5eb2 5855
221c5eb2
JA
5856 return 0;
5857}
5858
221c5eb2
JA
5859static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5860 void *key)
5861{
c2f2eb7d
JA
5862 struct io_kiocb *req = wait->private;
5863 struct io_poll_iocb *poll = &req->poll;
221c5eb2 5864
d7718a9d 5865 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
5866}
5867
221c5eb2
JA
5868static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5869 struct poll_table_struct *p)
5870{
5871 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5872
e8c2bc1f 5873 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
eac406c6
JA
5874}
5875
3529d8c2 5876static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
5877{
5878 struct io_poll_iocb *poll = &req->poll;
c5de0036 5879 u32 flags;
221c5eb2
JA
5880
5881 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5882 return -EINVAL;
c5de0036 5883 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
88e41cf9
JA
5884 return -EINVAL;
5885 flags = READ_ONCE(sqe->len);
c5de0036 5886 if (flags & ~IORING_POLL_ADD_MULTI)
221c5eb2
JA
5887 return -EINVAL;
5888
48dcd38d 5889 io_req_set_refcount(req);
c5de0036 5890 poll->events = io_poll_parse_events(sqe, flags);
0969e783
JA
5891 return 0;
5892}
5893
61e98203 5894static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
0969e783
JA
5895{
5896 struct io_poll_iocb *poll = &req->poll;
5897 struct io_ring_ctx *ctx = req->ctx;
5898 struct io_poll_table ipt;
0969e783 5899 __poll_t mask;
5b7aa38d 5900 bool done;
0969e783 5901
d7718a9d 5902 ipt.pt._qproc = io_poll_queue_proc;
36703247 5903
d7718a9d
JA
5904 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5905 io_poll_wake);
221c5eb2 5906
8c838788 5907 if (mask) { /* no async, we'd stolen it */
221c5eb2 5908 ipt.error = 0;
5b7aa38d 5909 done = io_poll_complete(req, mask);
221c5eb2 5910 }
79ebeaee 5911 spin_unlock(&ctx->completion_lock);
221c5eb2 5912
8c838788
JA
5913 if (mask) {
5914 io_cqring_ev_posted(ctx);
5b7aa38d 5915 if (done)
88e41cf9 5916 io_put_req(req);
221c5eb2 5917 }
8c838788 5918 return ipt.error;
221c5eb2
JA
5919}
5920
c5de0036 5921static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
b69de288
JA
5922{
5923 struct io_ring_ctx *ctx = req->ctx;
5924 struct io_kiocb *preq;
cb3b200e 5925 bool completing;
b69de288
JA
5926 int ret;
5927
79ebeaee 5928 spin_lock(&ctx->completion_lock);
9ba5fac8 5929 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
b69de288
JA
5930 if (!preq) {
5931 ret = -ENOENT;
5932 goto err;
b69de288 5933 }
cb3b200e 5934
c5de0036
PB
5935 if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5936 completing = true;
5937 ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5938 goto err;
5939 }
5940
cb3b200e
JA
5941 /*
5942 * Don't allow racy completion with singleshot, as we cannot safely
5943 * update those. For multishot, if we're racing with completion, just
5944 * let completion re-add it.
5945 */
4470ec55 5946 io_poll_remove_double(preq);
cb3b200e
JA
5947 completing = !__io_poll_remove_one(preq, &preq->poll, false);
5948 if (completing && (preq->poll.events & EPOLLONESHOT)) {
5949 ret = -EALREADY;
5950 goto err;
b69de288
JA
5951 }
5952 /* we now have a detached poll request. reissue. */
5953 ret = 0;
5954err:
b69de288 5955 if (ret < 0) {
79ebeaee 5956 spin_unlock(&ctx->completion_lock);
93d2bcd2 5957 req_set_fail(req);
b69de288
JA
5958 io_req_complete(req, ret);
5959 return 0;
5960 }
5961 /* only mask one event flags, keep behavior flags */
9d805892 5962 if (req->poll_update.update_events) {
b69de288 5963 preq->poll.events &= ~0xffff;
9d805892 5964 preq->poll.events |= req->poll_update.events & 0xffff;
b69de288
JA
5965 preq->poll.events |= IO_POLL_UNMASK;
5966 }
9d805892
PB
5967 if (req->poll_update.update_user_data)
5968 preq->user_data = req->poll_update.new_user_data;
79ebeaee 5969 spin_unlock(&ctx->completion_lock);
cb3b200e 5970
b69de288
JA
5971 /* complete update request, we're done with it */
5972 io_req_complete(req, ret);
5973
cb3b200e 5974 if (!completing) {
c5de0036 5975 ret = io_poll_add(preq, issue_flags);
cb3b200e 5976 if (ret < 0) {
93d2bcd2 5977 req_set_fail(preq);
cb3b200e
JA
5978 io_req_complete(preq, ret);
5979 }
b69de288
JA
5980 }
5981 return 0;
5982}
5983
f237c30a 5984static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
89850fce 5985{
89850fce 5986 req_set_fail(req);
505657bc 5987 io_req_complete_post(req, -ETIME, 0);
89850fce
JA
5988}
5989
5262f567
JA
5990static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5991{
ad8a48ac
JA
5992 struct io_timeout_data *data = container_of(timer,
5993 struct io_timeout_data, timer);
5994 struct io_kiocb *req = data->req;
5995 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
5996 unsigned long flags;
5997
89850fce 5998 spin_lock_irqsave(&ctx->timeout_lock, flags);
a71976f3 5999 list_del_init(&req->timeout.list);
01cec8c1
PB
6000 atomic_set(&req->ctx->cq_timeouts,
6001 atomic_read(&req->ctx->cq_timeouts) + 1);
89850fce 6002 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
01cec8c1 6003
89850fce
JA
6004 req->io_task_work.func = io_req_task_timeout;
6005 io_req_task_work_add(req);
5262f567
JA
6006 return HRTIMER_NORESTART;
6007}
6008
fbd15848
PB
6009static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
6010 __u64 user_data)
89850fce 6011 __must_hold(&ctx->timeout_lock)
f254ac04 6012{
fbd15848 6013 struct io_timeout_data *io;
47f46768 6014 struct io_kiocb *req;
fd9c7bc5 6015 bool found = false;
f254ac04 6016
135fcde8 6017 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
fd9c7bc5
PB
6018 found = user_data == req->user_data;
6019 if (found)
47f46768 6020 break;
47f46768 6021 }
fd9c7bc5
PB
6022 if (!found)
6023 return ERR_PTR(-ENOENT);
fbd15848
PB
6024
6025 io = req->async_data;
fd9c7bc5 6026 if (hrtimer_try_to_cancel(&io->timer) == -1)
fbd15848 6027 return ERR_PTR(-EALREADY);
a71976f3 6028 list_del_init(&req->timeout.list);
fbd15848
PB
6029 return req;
6030}
47f46768 6031
fbd15848 6032static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
ec3c3d0f 6033 __must_hold(&ctx->completion_lock)
89850fce 6034 __must_hold(&ctx->timeout_lock)
fbd15848
PB
6035{
6036 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6037
6038 if (IS_ERR(req))
6039 return PTR_ERR(req);
f254ac04 6040
93d2bcd2 6041 req_set_fail(req);
d4d19c19 6042 io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
91c2f697 6043 io_put_req_deferred(req);
f254ac04
JA
6044 return 0;
6045}
6046
50c1df2b
JA
6047static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
6048{
6049 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
6050 case IORING_TIMEOUT_BOOTTIME:
6051 return CLOCK_BOOTTIME;
6052 case IORING_TIMEOUT_REALTIME:
6053 return CLOCK_REALTIME;
6054 default:
6055 /* can't happen, vetted at prep time */
6056 WARN_ON_ONCE(1);
6057 fallthrough;
6058 case 0:
6059 return CLOCK_MONOTONIC;
6060 }
6061}
6062
f1042b6c
PB
6063static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6064 struct timespec64 *ts, enum hrtimer_mode mode)
6065 __must_hold(&ctx->timeout_lock)
6066{
6067 struct io_timeout_data *io;
6068 struct io_kiocb *req;
6069 bool found = false;
6070
6071 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
6072 found = user_data == req->user_data;
6073 if (found)
6074 break;
6075 }
6076 if (!found)
6077 return -ENOENT;
6078
6079 io = req->async_data;
6080 if (hrtimer_try_to_cancel(&io->timer) == -1)
6081 return -EALREADY;
6082 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
6083 io->timer.function = io_link_timeout_fn;
6084 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
6085 return 0;
6086}
6087
9c8e11b3
PB
6088static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6089 struct timespec64 *ts, enum hrtimer_mode mode)
89850fce 6090 __must_hold(&ctx->timeout_lock)
47f46768 6091{
9c8e11b3
PB
6092 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6093 struct io_timeout_data *data;
47f46768 6094
9c8e11b3
PB
6095 if (IS_ERR(req))
6096 return PTR_ERR(req);
47f46768 6097
9c8e11b3
PB
6098 req->timeout.off = 0; /* noseq */
6099 data = req->async_data;
6100 list_add_tail(&req->timeout.list, &ctx->timeout_list);
50c1df2b 6101 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
9c8e11b3
PB
6102 data->timer.function = io_timeout_fn;
6103 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
6104 return 0;
47f46768
JA
6105}
6106
3529d8c2
JA
6107static int io_timeout_remove_prep(struct io_kiocb *req,
6108 const struct io_uring_sqe *sqe)
b29472ee 6109{
9c8e11b3
PB
6110 struct io_timeout_rem *tr = &req->timeout_rem;
6111
b29472ee
JA
6112 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6113 return -EINVAL;
61710e43
DA
6114 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6115 return -EINVAL;
26578cda 6116 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
b29472ee
JA
6117 return -EINVAL;
6118
f1042b6c 6119 tr->ltimeout = false;
9c8e11b3
PB
6120 tr->addr = READ_ONCE(sqe->addr);
6121 tr->flags = READ_ONCE(sqe->timeout_flags);
f1042b6c
PB
6122 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
6123 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
6124 return -EINVAL;
6125 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
6126 tr->ltimeout = true;
6127 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
9c8e11b3
PB
6128 return -EINVAL;
6129 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
6130 return -EFAULT;
6131 } else if (tr->flags) {
6132 /* timeout removal doesn't support flags */
b29472ee 6133 return -EINVAL;
9c8e11b3 6134 }
b29472ee 6135
b29472ee
JA
6136 return 0;
6137}
6138
8662daec
PB
6139static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
6140{
6141 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
6142 : HRTIMER_MODE_REL;
6143}
6144
11365043
JA
6145/*
6146 * Remove or update an existing timeout command
6147 */
61e98203 6148static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
11365043 6149{
9c8e11b3 6150 struct io_timeout_rem *tr = &req->timeout_rem;
11365043 6151 struct io_ring_ctx *ctx = req->ctx;
47f46768 6152 int ret;
11365043 6153
ec3c3d0f
PB
6154 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
6155 spin_lock(&ctx->completion_lock);
6156 spin_lock_irq(&ctx->timeout_lock);
9c8e11b3 6157 ret = io_timeout_cancel(ctx, tr->addr);
ec3c3d0f
PB
6158 spin_unlock_irq(&ctx->timeout_lock);
6159 spin_unlock(&ctx->completion_lock);
6160 } else {
f1042b6c
PB
6161 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
6162
ec3c3d0f 6163 spin_lock_irq(&ctx->timeout_lock);
f1042b6c
PB
6164 if (tr->ltimeout)
6165 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
6166 else
6167 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
ec3c3d0f
PB
6168 spin_unlock_irq(&ctx->timeout_lock);
6169 }
11365043 6170
4e88d6e7 6171 if (ret < 0)
93d2bcd2 6172 req_set_fail(req);
505657bc 6173 io_req_complete_post(req, ret, 0);
11365043 6174 return 0;
5262f567
JA
6175}
6176
3529d8c2 6177static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 6178 bool is_timeout_link)
5262f567 6179{
ad8a48ac 6180 struct io_timeout_data *data;
a41525ab 6181 unsigned flags;
56080b02 6182 u32 off = READ_ONCE(sqe->off);
5262f567 6183
ad8a48ac 6184 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 6185 return -EINVAL;
26578cda
PB
6186 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
6187 sqe->splice_fd_in)
a41525ab 6188 return -EINVAL;
56080b02 6189 if (off && is_timeout_link)
2d28390a 6190 return -EINVAL;
a41525ab 6191 flags = READ_ONCE(sqe->timeout_flags);
50c1df2b
JA
6192 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK))
6193 return -EINVAL;
6194 /* more than one clock specified is invalid, obviously */
6195 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
5262f567 6196 return -EINVAL;
bdf20073 6197
ef9dd637 6198 INIT_LIST_HEAD(&req->timeout.list);
bfe68a22 6199 req->timeout.off = off;
f18ee4cf
PB
6200 if (unlikely(off && !req->ctx->off_timeout_used))
6201 req->ctx->off_timeout_used = true;
26a61679 6202
e8c2bc1f 6203 if (!req->async_data && io_alloc_async_data(req))
26a61679
JA
6204 return -ENOMEM;
6205
e8c2bc1f 6206 data = req->async_data;
ad8a48ac 6207 data->req = req;
50c1df2b 6208 data->flags = flags;
ad8a48ac
JA
6209
6210 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
6211 return -EFAULT;
6212
7abdd4c7 6213 INIT_LIST_HEAD(&req->timeout.list);
8662daec 6214 data->mode = io_translate_timeout_mode(flags);
50c1df2b 6215 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
b97e736a
PB
6216
6217 if (is_timeout_link) {
6218 struct io_submit_link *link = &req->ctx->submit_state.link;
6219
6220 if (!link->head)
6221 return -EINVAL;
6222 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
6223 return -EINVAL;
4d13d1a4
PB
6224 req->timeout.head = link->last;
6225 link->last->flags |= REQ_F_ARM_LTIMEOUT;
b97e736a 6226 }
ad8a48ac
JA
6227 return 0;
6228}
6229
61e98203 6230static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
ad8a48ac 6231{
ad8a48ac 6232 struct io_ring_ctx *ctx = req->ctx;
e8c2bc1f 6233 struct io_timeout_data *data = req->async_data;
ad8a48ac 6234 struct list_head *entry;
bfe68a22 6235 u32 tail, off = req->timeout.off;
ad8a48ac 6236
89850fce 6237 spin_lock_irq(&ctx->timeout_lock);
93bd25bb 6238
5262f567
JA
6239 /*
6240 * sqe->off holds how many events that need to occur for this
93bd25bb
JA
6241 * timeout event to be satisfied. If it isn't set, then this is
6242 * a pure timeout request, sequence isn't used.
5262f567 6243 */
8eb7e2d0 6244 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
6245 entry = ctx->timeout_list.prev;
6246 goto add;
6247 }
5262f567 6248
bfe68a22
PB
6249 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
6250 req->timeout.target_seq = tail + off;
5262f567 6251
f010505b
MDG
6252 /* Update the last seq here in case io_flush_timeouts() hasn't.
6253 * This is safe because ->completion_lock is held, and submissions
6254 * and completions are never mixed in the same ->completion_lock section.
6255 */
6256 ctx->cq_last_tm_flush = tail;
6257
5262f567
JA
6258 /*
6259 * Insertion sort, ensuring the first entry in the list is always
6260 * the one we need first.
6261 */
5262f567 6262 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
6263 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
6264 timeout.list);
5262f567 6265
8eb7e2d0 6266 if (io_is_timeout_noseq(nxt))
93bd25bb 6267 continue;
bfe68a22
PB
6268 /* nxt.seq is behind @tail, otherwise would've been completed */
6269 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
6270 break;
6271 }
93bd25bb 6272add:
135fcde8 6273 list_add(&req->timeout.list, entry);
ad8a48ac
JA
6274 data->timer.function = io_timeout_fn;
6275 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
89850fce 6276 spin_unlock_irq(&ctx->timeout_lock);
5262f567
JA
6277 return 0;
6278}
5262f567 6279
f458dd84
PB
6280struct io_cancel_data {
6281 struct io_ring_ctx *ctx;
6282 u64 user_data;
6283};
6284
62755e35
JA
6285static bool io_cancel_cb(struct io_wq_work *work, void *data)
6286{
6287 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f458dd84 6288 struct io_cancel_data *cd = data;
62755e35 6289
f458dd84 6290 return req->ctx == cd->ctx && req->user_data == cd->user_data;
62755e35
JA
6291}
6292
f458dd84
PB
6293static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
6294 struct io_ring_ctx *ctx)
62755e35 6295{
f458dd84 6296 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
62755e35 6297 enum io_wq_cancel cancel_ret;
62755e35
JA
6298 int ret = 0;
6299
f458dd84 6300 if (!tctx || !tctx->io_wq)
5aa75ed5
JA
6301 return -ENOENT;
6302
f458dd84 6303 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
62755e35
JA
6304 switch (cancel_ret) {
6305 case IO_WQ_CANCEL_OK:
6306 ret = 0;
6307 break;
6308 case IO_WQ_CANCEL_RUNNING:
6309 ret = -EALREADY;
6310 break;
6311 case IO_WQ_CANCEL_NOTFOUND:
6312 ret = -ENOENT;
6313 break;
6314 }
6315
e977d6d3
JA
6316 return ret;
6317}
6318
8cb01fac 6319static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
47f46768 6320{
8cb01fac 6321 struct io_ring_ctx *ctx = req->ctx;
47f46768
JA
6322 int ret;
6323
dadebc35 6324 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
8cb01fac 6325
f458dd84 6326 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
df9727af 6327 if (ret != -ENOENT)
8cb01fac 6328 return ret;
505657bc
PB
6329
6330 spin_lock(&ctx->completion_lock);
79ebeaee 6331 spin_lock_irq(&ctx->timeout_lock);
47f46768 6332 ret = io_timeout_cancel(ctx, sqe_addr);
79ebeaee 6333 spin_unlock_irq(&ctx->timeout_lock);
47f46768 6334 if (ret != -ENOENT)
505657bc
PB
6335 goto out;
6336 ret = io_poll_cancel(ctx, sqe_addr, false);
6337out:
6338 spin_unlock(&ctx->completion_lock);
6339 return ret;
47f46768
JA
6340}
6341
3529d8c2
JA
6342static int io_async_cancel_prep(struct io_kiocb *req,
6343 const struct io_uring_sqe *sqe)
e977d6d3 6344{
fbf23849 6345 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3 6346 return -EINVAL;
61710e43
DA
6347 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6348 return -EINVAL;
26578cda
PB
6349 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
6350 sqe->splice_fd_in)
e977d6d3
JA
6351 return -EINVAL;
6352
fbf23849
JA
6353 req->cancel.addr = READ_ONCE(sqe->addr);
6354 return 0;
6355}
6356
61e98203 6357static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
6358{
6359 struct io_ring_ctx *ctx = req->ctx;
58f99373
PB
6360 u64 sqe_addr = req->cancel.addr;
6361 struct io_tctx_node *node;
6362 int ret;
6363
8cb01fac 6364 ret = io_try_cancel_userdata(req, sqe_addr);
58f99373
PB
6365 if (ret != -ENOENT)
6366 goto done;
58f99373
PB
6367
6368 /* slow path, try all io-wq's */
6369 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
6370 ret = -ENOENT;
6371 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
6372 struct io_uring_task *tctx = node->task->io_uring;
fbf23849 6373
58f99373
PB
6374 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
6375 if (ret != -ENOENT)
6376 break;
6377 }
6378 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
58f99373 6379done:
58f99373 6380 if (ret < 0)
93d2bcd2 6381 req_set_fail(req);
505657bc 6382 io_req_complete_post(req, ret, 0);
5262f567
JA
6383 return 0;
6384}
6385
269bbe5f 6386static int io_rsrc_update_prep(struct io_kiocb *req,
05f3fb3c
JA
6387 const struct io_uring_sqe *sqe)
6388{
61710e43
DA
6389 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6390 return -EINVAL;
26578cda 6391 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
05f3fb3c
JA
6392 return -EINVAL;
6393
269bbe5f
BM
6394 req->rsrc_update.offset = READ_ONCE(sqe->off);
6395 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6396 if (!req->rsrc_update.nr_args)
05f3fb3c 6397 return -EINVAL;
269bbe5f 6398 req->rsrc_update.arg = READ_ONCE(sqe->addr);
05f3fb3c
JA
6399 return 0;
6400}
6401
889fca73 6402static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
6403{
6404 struct io_ring_ctx *ctx = req->ctx;
c3bdad02 6405 struct io_uring_rsrc_update2 up;
05f3fb3c 6406 int ret;
fbf23849 6407
269bbe5f
BM
6408 up.offset = req->rsrc_update.offset;
6409 up.data = req->rsrc_update.arg;
c3bdad02
PB
6410 up.nr = 0;
6411 up.tags = 0;
615cee49 6412 up.resv = 0;
c9747fa5 6413 up.resv2 = 0;
05f3fb3c 6414
cdb31c29 6415 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
fdecb662 6416 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
98f0b3b4 6417 &up, req->rsrc_update.nr_args);
cdb31c29 6418 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
05f3fb3c
JA
6419
6420 if (ret < 0)
93d2bcd2 6421 req_set_fail(req);
889fca73 6422 __io_req_complete(req, issue_flags, ret, 0);
5262f567
JA
6423 return 0;
6424}
6425
bfe76559 6426static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 6427{
d625c6ee 6428 switch (req->opcode) {
e781573e 6429 case IORING_OP_NOP:
bfe76559 6430 return 0;
f67676d1
JA
6431 case IORING_OP_READV:
6432 case IORING_OP_READ_FIXED:
3a6820f2 6433 case IORING_OP_READ:
bfe76559 6434 return io_read_prep(req, sqe);
f67676d1
JA
6435 case IORING_OP_WRITEV:
6436 case IORING_OP_WRITE_FIXED:
3a6820f2 6437 case IORING_OP_WRITE:
bfe76559 6438 return io_write_prep(req, sqe);
0969e783 6439 case IORING_OP_POLL_ADD:
bfe76559 6440 return io_poll_add_prep(req, sqe);
0969e783 6441 case IORING_OP_POLL_REMOVE:
c5de0036 6442 return io_poll_update_prep(req, sqe);
8ed8d3c3 6443 case IORING_OP_FSYNC:
1155c76a 6444 return io_fsync_prep(req, sqe);
8ed8d3c3 6445 case IORING_OP_SYNC_FILE_RANGE:
1155c76a 6446 return io_sfr_prep(req, sqe);
03b1230c 6447 case IORING_OP_SENDMSG:
fddaface 6448 case IORING_OP_SEND:
bfe76559 6449 return io_sendmsg_prep(req, sqe);
03b1230c 6450 case IORING_OP_RECVMSG:
fddaface 6451 case IORING_OP_RECV:
bfe76559 6452 return io_recvmsg_prep(req, sqe);
f499a021 6453 case IORING_OP_CONNECT:
bfe76559 6454 return io_connect_prep(req, sqe);
2d28390a 6455 case IORING_OP_TIMEOUT:
bfe76559 6456 return io_timeout_prep(req, sqe, false);
b29472ee 6457 case IORING_OP_TIMEOUT_REMOVE:
bfe76559 6458 return io_timeout_remove_prep(req, sqe);
fbf23849 6459 case IORING_OP_ASYNC_CANCEL:
bfe76559 6460 return io_async_cancel_prep(req, sqe);
2d28390a 6461 case IORING_OP_LINK_TIMEOUT:
bfe76559 6462 return io_timeout_prep(req, sqe, true);
8ed8d3c3 6463 case IORING_OP_ACCEPT:
bfe76559 6464 return io_accept_prep(req, sqe);
d63d1b5e 6465 case IORING_OP_FALLOCATE:
bfe76559 6466 return io_fallocate_prep(req, sqe);
15b71abe 6467 case IORING_OP_OPENAT:
bfe76559 6468 return io_openat_prep(req, sqe);
b5dba59e 6469 case IORING_OP_CLOSE:
bfe76559 6470 return io_close_prep(req, sqe);
05f3fb3c 6471 case IORING_OP_FILES_UPDATE:
269bbe5f 6472 return io_rsrc_update_prep(req, sqe);
eddc7ef5 6473 case IORING_OP_STATX:
bfe76559 6474 return io_statx_prep(req, sqe);
4840e418 6475 case IORING_OP_FADVISE:
bfe76559 6476 return io_fadvise_prep(req, sqe);
c1ca757b 6477 case IORING_OP_MADVISE:
bfe76559 6478 return io_madvise_prep(req, sqe);
cebdb986 6479 case IORING_OP_OPENAT2:
bfe76559 6480 return io_openat2_prep(req, sqe);
3e4827b0 6481 case IORING_OP_EPOLL_CTL:
bfe76559 6482 return io_epoll_ctl_prep(req, sqe);
7d67af2c 6483 case IORING_OP_SPLICE:
bfe76559 6484 return io_splice_prep(req, sqe);
ddf0322d 6485 case IORING_OP_PROVIDE_BUFFERS:
bfe76559 6486 return io_provide_buffers_prep(req, sqe);
067524e9 6487 case IORING_OP_REMOVE_BUFFERS:
bfe76559 6488 return io_remove_buffers_prep(req, sqe);
f2a8d5c7 6489 case IORING_OP_TEE:
bfe76559 6490 return io_tee_prep(req, sqe);
36f4fa68
JA
6491 case IORING_OP_SHUTDOWN:
6492 return io_shutdown_prep(req, sqe);
80a261fd
JA
6493 case IORING_OP_RENAMEAT:
6494 return io_renameat_prep(req, sqe);
14a1143b
JA
6495 case IORING_OP_UNLINKAT:
6496 return io_unlinkat_prep(req, sqe);
e34a02dc
DK
6497 case IORING_OP_MKDIRAT:
6498 return io_mkdirat_prep(req, sqe);
7a8721f8
DK
6499 case IORING_OP_SYMLINKAT:
6500 return io_symlinkat_prep(req, sqe);
cf30da90
DK
6501 case IORING_OP_LINKAT:
6502 return io_linkat_prep(req, sqe);
f67676d1
JA
6503 }
6504
bfe76559
PB
6505 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6506 req->opcode);
bd54b6fe 6507 return -EINVAL;
bfe76559
PB
6508}
6509
93642ef8 6510static int io_req_prep_async(struct io_kiocb *req)
bfe76559 6511{
b7e298d2
PB
6512 if (!io_op_defs[req->opcode].needs_async_setup)
6513 return 0;
6514 if (WARN_ON_ONCE(req->async_data))
6515 return -EFAULT;
6516 if (io_alloc_async_data(req))
6517 return -EAGAIN;
6518
93642ef8
PB
6519 switch (req->opcode) {
6520 case IORING_OP_READV:
93642ef8
PB
6521 return io_rw_prep_async(req, READ);
6522 case IORING_OP_WRITEV:
93642ef8
PB
6523 return io_rw_prep_async(req, WRITE);
6524 case IORING_OP_SENDMSG:
93642ef8
PB
6525 return io_sendmsg_prep_async(req);
6526 case IORING_OP_RECVMSG:
93642ef8
PB
6527 return io_recvmsg_prep_async(req);
6528 case IORING_OP_CONNECT:
6529 return io_connect_prep_async(req);
6530 }
b7e298d2
PB
6531 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6532 req->opcode);
6533 return -EFAULT;
f67676d1
JA
6534}
6535
9cf7c104
PB
6536static u32 io_get_sequence(struct io_kiocb *req)
6537{
a3dbdf54 6538 u32 seq = req->ctx->cached_sq_head;
9cf7c104 6539
a3dbdf54
PB
6540 /* need original cached_sq_head, but it was increased for each req */
6541 io_for_each_link(req, req)
6542 seq--;
6543 return seq;
9cf7c104
PB
6544}
6545
76cc33d7 6546static bool io_drain_req(struct io_kiocb *req)
de0617e4 6547{
3c19966d 6548 struct io_kiocb *pos;
a197f664 6549 struct io_ring_ctx *ctx = req->ctx;
27dc8338 6550 struct io_defer_entry *de;
f67676d1 6551 int ret;
9cf7c104 6552 u32 seq;
de0617e4 6553
b8ce1b9d
PB
6554 if (req->flags & REQ_F_FAIL) {
6555 io_req_complete_fail_submit(req);
6556 return true;
6557 }
6558
3c19966d
PB
6559 /*
6560 * If we need to drain a request in the middle of a link, drain the
6561 * head request and the next request/link after the current link.
6562 * Considering sequential execution of links, IOSQE_IO_DRAIN will be
6563 * maintained for every request of our link.
6564 */
6565 if (ctx->drain_next) {
6566 req->flags |= REQ_F_IO_DRAIN;
6567 ctx->drain_next = false;
6568 }
6569 /* not interested in head, start from the first linked */
6570 io_for_each_link(pos, req->link) {
6571 if (pos->flags & REQ_F_IO_DRAIN) {
6572 ctx->drain_next = true;
6573 req->flags |= REQ_F_IO_DRAIN;
6574 break;
6575 }
6576 }
6577
9d858b21 6578 /* Still need defer if there is pending req in defer list. */
9632410b 6579 spin_lock(&ctx->completion_lock);
9cf7c104 6580 if (likely(list_empty_careful(&ctx->defer_list) &&
10c66904 6581 !(req->flags & REQ_F_IO_DRAIN))) {
9632410b 6582 spin_unlock(&ctx->completion_lock);
10c66904 6583 ctx->drain_active = false;
76cc33d7 6584 return false;
10c66904 6585 }
9632410b 6586 spin_unlock(&ctx->completion_lock);
9cf7c104
PB
6587
6588 seq = io_get_sequence(req);
6589 /* Still a chance to pass the sequence check */
6590 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
76cc33d7 6591 return false;
de0617e4 6592
b7e298d2 6593 ret = io_req_prep_async(req);
be7053b7 6594 if (ret)
1b48773f 6595 goto fail;
cbdcb435 6596 io_prep_async_link(req);
27dc8338 6597 de = kmalloc(sizeof(*de), GFP_KERNEL);
76cc33d7 6598 if (!de) {
1b48773f
PB
6599 ret = -ENOMEM;
6600fail:
6601 io_req_complete_failed(req, ret);
76cc33d7
PB
6602 return true;
6603 }
2d28390a 6604
79ebeaee 6605 spin_lock(&ctx->completion_lock);
9cf7c104 6606 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
79ebeaee 6607 spin_unlock(&ctx->completion_lock);
27dc8338 6608 kfree(de);
f237c30a 6609 io_queue_async_work(req, NULL);
76cc33d7 6610 return true;
de0617e4
JA
6611 }
6612
915967f6 6613 trace_io_uring_defer(ctx, req, req->user_data);
27dc8338 6614 de->req = req;
9cf7c104 6615 de->seq = seq;
27dc8338 6616 list_add_tail(&de->list, &ctx->defer_list);
79ebeaee 6617 spin_unlock(&ctx->completion_lock);
76cc33d7 6618 return true;
de0617e4
JA
6619}
6620
68fb8979 6621static void io_clean_op(struct io_kiocb *req)
99bc4c38 6622{
0e1b6fe3
PB
6623 if (req->flags & REQ_F_BUFFER_SELECTED) {
6624 switch (req->opcode) {
6625 case IORING_OP_READV:
6626 case IORING_OP_READ_FIXED:
6627 case IORING_OP_READ:
bcda7baa 6628 kfree((void *)(unsigned long)req->rw.addr);
0e1b6fe3
PB
6629 break;
6630 case IORING_OP_RECVMSG:
6631 case IORING_OP_RECV:
bcda7baa 6632 kfree(req->sr_msg.kbuf);
0e1b6fe3
PB
6633 break;
6634 }
99bc4c38
PB
6635 }
6636
0e1b6fe3
PB
6637 if (req->flags & REQ_F_NEED_CLEANUP) {
6638 switch (req->opcode) {
6639 case IORING_OP_READV:
6640 case IORING_OP_READ_FIXED:
6641 case IORING_OP_READ:
6642 case IORING_OP_WRITEV:
6643 case IORING_OP_WRITE_FIXED:
e8c2bc1f
JA
6644 case IORING_OP_WRITE: {
6645 struct io_async_rw *io = req->async_data;
1dacb4df
PB
6646
6647 kfree(io->free_iovec);
0e1b6fe3 6648 break;
e8c2bc1f 6649 }
0e1b6fe3 6650 case IORING_OP_RECVMSG:
e8c2bc1f
JA
6651 case IORING_OP_SENDMSG: {
6652 struct io_async_msghdr *io = req->async_data;
257e84a5
PB
6653
6654 kfree(io->free_iov);
0e1b6fe3 6655 break;
e8c2bc1f 6656 }
f3cd4850
JA
6657 case IORING_OP_OPENAT:
6658 case IORING_OP_OPENAT2:
6659 if (req->open.filename)
6660 putname(req->open.filename);
6661 break;
80a261fd
JA
6662 case IORING_OP_RENAMEAT:
6663 putname(req->rename.oldpath);
6664 putname(req->rename.newpath);
6665 break;
14a1143b
JA
6666 case IORING_OP_UNLINKAT:
6667 putname(req->unlink.filename);
6668 break;
e34a02dc
DK
6669 case IORING_OP_MKDIRAT:
6670 putname(req->mkdir.filename);
6671 break;
7a8721f8
DK
6672 case IORING_OP_SYMLINKAT:
6673 putname(req->symlink.oldpath);
6674 putname(req->symlink.newpath);
6675 break;
cf30da90
DK
6676 case IORING_OP_LINKAT:
6677 putname(req->hardlink.oldpath);
6678 putname(req->hardlink.newpath);
6679 break;
0e1b6fe3 6680 }
99bc4c38 6681 }
75652a30
JA
6682 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6683 kfree(req->apoll->double_poll);
6684 kfree(req->apoll);
6685 req->apoll = NULL;
6686 }
3a0a6902
PB
6687 if (req->flags & REQ_F_INFLIGHT) {
6688 struct io_uring_task *tctx = req->task->io_uring;
6689
6690 atomic_dec(&tctx->inflight_tracked);
3a0a6902 6691 }
c854357b 6692 if (req->flags & REQ_F_CREDS)
b8e64b53 6693 put_cred(req->creds);
c854357b
PB
6694
6695 req->flags &= ~IO_REQ_CLEAN_FLAGS;
99bc4c38
PB
6696}
6697
889fca73 6698static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 6699{
a197f664 6700 struct io_ring_ctx *ctx = req->ctx;
5730b27e 6701 const struct cred *creds = NULL;
d625c6ee 6702 int ret;
2b188cc1 6703
b8e64b53 6704 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
c10d1f98 6705 creds = override_creds(req->creds);
5730b27e 6706
d625c6ee 6707 switch (req->opcode) {
2b188cc1 6708 case IORING_OP_NOP:
889fca73 6709 ret = io_nop(req, issue_flags);
2b188cc1
JA
6710 break;
6711 case IORING_OP_READV:
edafccee 6712 case IORING_OP_READ_FIXED:
3a6820f2 6713 case IORING_OP_READ:
889fca73 6714 ret = io_read(req, issue_flags);
edafccee 6715 break;
3529d8c2 6716 case IORING_OP_WRITEV:
edafccee 6717 case IORING_OP_WRITE_FIXED:
3a6820f2 6718 case IORING_OP_WRITE:
889fca73 6719 ret = io_write(req, issue_flags);
2b188cc1 6720 break;
c992fe29 6721 case IORING_OP_FSYNC:
45d189c6 6722 ret = io_fsync(req, issue_flags);
c992fe29 6723 break;
221c5eb2 6724 case IORING_OP_POLL_ADD:
61e98203 6725 ret = io_poll_add(req, issue_flags);
221c5eb2
JA
6726 break;
6727 case IORING_OP_POLL_REMOVE:
c5de0036 6728 ret = io_poll_update(req, issue_flags);
221c5eb2 6729 break;
5d17b4a4 6730 case IORING_OP_SYNC_FILE_RANGE:
45d189c6 6731 ret = io_sync_file_range(req, issue_flags);
5d17b4a4 6732 break;
0fa03c62 6733 case IORING_OP_SENDMSG:
889fca73 6734 ret = io_sendmsg(req, issue_flags);
062d04d7 6735 break;
fddaface 6736 case IORING_OP_SEND:
889fca73 6737 ret = io_send(req, issue_flags);
0fa03c62 6738 break;
aa1fa28f 6739 case IORING_OP_RECVMSG:
889fca73 6740 ret = io_recvmsg(req, issue_flags);
062d04d7 6741 break;
fddaface 6742 case IORING_OP_RECV:
889fca73 6743 ret = io_recv(req, issue_flags);
aa1fa28f 6744 break;
5262f567 6745 case IORING_OP_TIMEOUT:
61e98203 6746 ret = io_timeout(req, issue_flags);
5262f567 6747 break;
11365043 6748 case IORING_OP_TIMEOUT_REMOVE:
61e98203 6749 ret = io_timeout_remove(req, issue_flags);
11365043 6750 break;
17f2fe35 6751 case IORING_OP_ACCEPT:
889fca73 6752 ret = io_accept(req, issue_flags);
17f2fe35 6753 break;
f8e85cf2 6754 case IORING_OP_CONNECT:
889fca73 6755 ret = io_connect(req, issue_flags);
f8e85cf2 6756 break;
62755e35 6757 case IORING_OP_ASYNC_CANCEL:
61e98203 6758 ret = io_async_cancel(req, issue_flags);
62755e35 6759 break;
d63d1b5e 6760 case IORING_OP_FALLOCATE:
45d189c6 6761 ret = io_fallocate(req, issue_flags);
d63d1b5e 6762 break;
15b71abe 6763 case IORING_OP_OPENAT:
45d189c6 6764 ret = io_openat(req, issue_flags);
15b71abe 6765 break;
b5dba59e 6766 case IORING_OP_CLOSE:
889fca73 6767 ret = io_close(req, issue_flags);
b5dba59e 6768 break;
05f3fb3c 6769 case IORING_OP_FILES_UPDATE:
889fca73 6770 ret = io_files_update(req, issue_flags);
05f3fb3c 6771 break;
eddc7ef5 6772 case IORING_OP_STATX:
45d189c6 6773 ret = io_statx(req, issue_flags);
eddc7ef5 6774 break;
4840e418 6775 case IORING_OP_FADVISE:
45d189c6 6776 ret = io_fadvise(req, issue_flags);
4840e418 6777 break;
c1ca757b 6778 case IORING_OP_MADVISE:
45d189c6 6779 ret = io_madvise(req, issue_flags);
c1ca757b 6780 break;
cebdb986 6781 case IORING_OP_OPENAT2:
45d189c6 6782 ret = io_openat2(req, issue_flags);
cebdb986 6783 break;
3e4827b0 6784 case IORING_OP_EPOLL_CTL:
889fca73 6785 ret = io_epoll_ctl(req, issue_flags);
3e4827b0 6786 break;
7d67af2c 6787 case IORING_OP_SPLICE:
45d189c6 6788 ret = io_splice(req, issue_flags);
7d67af2c 6789 break;
ddf0322d 6790 case IORING_OP_PROVIDE_BUFFERS:
889fca73 6791 ret = io_provide_buffers(req, issue_flags);
ddf0322d 6792 break;
067524e9 6793 case IORING_OP_REMOVE_BUFFERS:
889fca73 6794 ret = io_remove_buffers(req, issue_flags);
3e4827b0 6795 break;
f2a8d5c7 6796 case IORING_OP_TEE:
45d189c6 6797 ret = io_tee(req, issue_flags);
f2a8d5c7 6798 break;
36f4fa68 6799 case IORING_OP_SHUTDOWN:
45d189c6 6800 ret = io_shutdown(req, issue_flags);
36f4fa68 6801 break;
80a261fd 6802 case IORING_OP_RENAMEAT:
45d189c6 6803 ret = io_renameat(req, issue_flags);
80a261fd 6804 break;
14a1143b 6805 case IORING_OP_UNLINKAT:
45d189c6 6806 ret = io_unlinkat(req, issue_flags);
14a1143b 6807 break;
e34a02dc
DK
6808 case IORING_OP_MKDIRAT:
6809 ret = io_mkdirat(req, issue_flags);
6810 break;
7a8721f8
DK
6811 case IORING_OP_SYMLINKAT:
6812 ret = io_symlinkat(req, issue_flags);
6813 break;
cf30da90
DK
6814 case IORING_OP_LINKAT:
6815 ret = io_linkat(req, issue_flags);
6816 break;
2b188cc1
JA
6817 default:
6818 ret = -EINVAL;
6819 break;
6820 }
6821
5730b27e
JA
6822 if (creds)
6823 revert_creds(creds);
def596e9
JA
6824 if (ret)
6825 return ret;
b532576e 6826 /* If the op doesn't have a file, we're not polling for it */
cb3d8972
PB
6827 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6828 io_iopoll_req_issued(req);
def596e9
JA
6829
6830 return 0;
2b188cc1
JA
6831}
6832
ebc11b6c
PB
6833static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
6834{
6835 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6836
6837 req = io_put_req_find_next(req);
6838 return req ? &req->work : NULL;
6839}
6840
5280f7e5 6841static void io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
6842{
6843 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6df1db6b 6844 struct io_kiocb *timeout;
561fb04a 6845 int ret = 0;
2b188cc1 6846
48dcd38d
PB
6847 /* one will be dropped by ->io_free_work() after returning to io-wq */
6848 if (!(req->flags & REQ_F_REFCOUNT))
6849 __io_req_set_refcount(req, 2);
6850 else
6851 req_ref_get(req);
5d5901a3 6852
6df1db6b
PB
6853 timeout = io_prep_linked_timeout(req);
6854 if (timeout)
6855 io_queue_linked_timeout(timeout);
d4c81f38 6856
dadebc35 6857 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
4014d943 6858 if (work->flags & IO_WQ_WORK_CANCEL)
561fb04a 6859 ret = -ECANCELED;
31b51510 6860
561fb04a 6861 if (!ret) {
561fb04a 6862 do {
889fca73 6863 ret = io_issue_sqe(req, 0);
561fb04a
JA
6864 /*
6865 * We can get EAGAIN for polled IO even though we're
6866 * forcing a sync submission from here, since we can't
6867 * wait for request slots on the block side.
6868 */
6869 if (ret != -EAGAIN)
6870 break;
6871 cond_resched();
6872 } while (1);
6873 }
31b51510 6874
a3df7698 6875 /* avoid locking problems by failing it from a clean context */
5d5901a3 6876 if (ret)
a3df7698 6877 io_req_task_queue_fail(req, ret);
2b188cc1
JA
6878}
6879
aeca241b 6880static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
042b0d85 6881 unsigned i)
65e19f54 6882{
042b0d85 6883 return &table->files[i];
dafecf19
PB
6884}
6885
65e19f54
JA
6886static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6887 int index)
6888{
aeca241b 6889 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
65e19f54 6890
a04b0ac0 6891 return (struct file *) (slot->file_ptr & FFS_MASK);
65e19f54
JA
6892}
6893
a04b0ac0 6894static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
9a321c98
PB
6895{
6896 unsigned long file_ptr = (unsigned long) file;
6897
b191e2df 6898 if (__io_file_supports_nowait(file, READ))
9a321c98 6899 file_ptr |= FFS_ASYNC_READ;
b191e2df 6900 if (__io_file_supports_nowait(file, WRITE))
9a321c98
PB
6901 file_ptr |= FFS_ASYNC_WRITE;
6902 if (S_ISREG(file_inode(file)->i_mode))
6903 file_ptr |= FFS_ISREG;
a04b0ac0 6904 file_slot->file_ptr = file_ptr;
65e19f54
JA
6905}
6906
ac177053
PB
6907static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
6908 struct io_kiocb *req, int fd)
09bb8394 6909{
8da11c19 6910 struct file *file;
ac177053 6911 unsigned long file_ptr;
09bb8394 6912
ac177053
PB
6913 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6914 return NULL;
6915 fd = array_index_nospec(fd, ctx->nr_user_files);
6916 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
6917 file = (struct file *) (file_ptr & FFS_MASK);
6918 file_ptr &= ~FFS_MASK;
6919 /* mask in overlapping REQ_F and FFS bits */
b191e2df 6920 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
ac177053
PB
6921 io_req_set_rsrc_node(req);
6922 return file;
6923}
d44f554e 6924
ac177053 6925static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
ac177053
PB
6926 struct io_kiocb *req, int fd)
6927{
62906e89 6928 struct file *file = fget(fd);
ac177053
PB
6929
6930 trace_io_uring_file_get(ctx, fd);
09bb8394 6931
ac177053
PB
6932 /* we don't allow fixed io_uring files */
6933 if (file && unlikely(file->f_op == &io_uring_fops))
6934 io_req_track_inflight(req);
8371adf5 6935 return file;
09bb8394
JA
6936}
6937
ac177053 6938static inline struct file *io_file_get(struct io_ring_ctx *ctx,
ac177053
PB
6939 struct io_kiocb *req, int fd, bool fixed)
6940{
6941 if (fixed)
6942 return io_file_get_fixed(ctx, req, fd);
6943 else
62906e89 6944 return io_file_get_normal(ctx, req, fd);
ac177053
PB
6945}
6946
f237c30a 6947static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
89b263f6
JA
6948{
6949 struct io_kiocb *prev = req->timeout.prev;
fc590dd1 6950 int ret = -ENOENT;
89b263f6
JA
6951
6952 if (prev) {
fc590dd1
PB
6953 if (!(req->task->flags & PF_EXITING))
6954 ret = io_try_cancel_userdata(req, prev->user_data);
505657bc 6955 io_req_complete_post(req, ret ?: -ETIME, 0);
89b263f6 6956 io_put_req(prev);
89b263f6
JA
6957 } else {
6958 io_req_complete_post(req, -ETIME, 0);
6959 }
6960}
6961
2665abfd 6962static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 6963{
ad8a48ac
JA
6964 struct io_timeout_data *data = container_of(timer,
6965 struct io_timeout_data, timer);
90cd7e42 6966 struct io_kiocb *prev, *req = data->req;
2665abfd 6967 struct io_ring_ctx *ctx = req->ctx;
2665abfd 6968 unsigned long flags;
2665abfd 6969
89b263f6 6970 spin_lock_irqsave(&ctx->timeout_lock, flags);
90cd7e42
PB
6971 prev = req->timeout.head;
6972 req->timeout.head = NULL;
2665abfd
JA
6973
6974 /*
6975 * We don't expect the list to be empty, that will only happen if we
6976 * race with the completion of the linked work.
6977 */
447c19f3 6978 if (prev) {
f2f87370 6979 io_remove_next_linked(prev);
447c19f3
PB
6980 if (!req_ref_inc_not_zero(prev))
6981 prev = NULL;
6982 }
ef9dd637 6983 list_del(&req->timeout.list);
89b263f6
JA
6984 req->timeout.prev = prev;
6985 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
2665abfd 6986
89b263f6
JA
6987 req->io_task_work.func = io_req_task_link_timeout;
6988 io_req_task_work_add(req);
2665abfd
JA
6989 return HRTIMER_NORESTART;
6990}
6991
de968c18 6992static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 6993{
de968c18
PB
6994 struct io_ring_ctx *ctx = req->ctx;
6995
89b263f6 6996 spin_lock_irq(&ctx->timeout_lock);
76a46e06 6997 /*
f2f87370
PB
6998 * If the back reference is NULL, then our linked request finished
6999 * before we got a chance to setup the timer
76a46e06 7000 */
90cd7e42 7001 if (req->timeout.head) {
e8c2bc1f 7002 struct io_timeout_data *data = req->async_data;
94ae5e77 7003
ad8a48ac
JA
7004 data->timer.function = io_link_timeout_fn;
7005 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
7006 data->mode);
ef9dd637 7007 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
2665abfd 7008 }
89b263f6 7009 spin_unlock_irq(&ctx->timeout_lock);
2665abfd 7010 /* drop submission reference */
76a46e06
JA
7011 io_put_req(req);
7012}
2665abfd 7013
c5eef2b9 7014static void __io_queue_sqe(struct io_kiocb *req)
282cdc86 7015 __must_hold(&req->ctx->uring_lock)
2b188cc1 7016{
906c6caa 7017 struct io_kiocb *linked_timeout;
e0c5c576 7018 int ret;
2b188cc1 7019
59b735ae 7020issue_sqe:
c5eef2b9 7021 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
193155c8 7022
491381ce
JA
7023 /*
7024 * We async punt it if the file wasn't marked NOWAIT, or if the file
7025 * doesn't support non-blocking read/write attempts
7026 */
1840038e 7027 if (likely(!ret)) {
e342c807 7028 if (req->flags & REQ_F_COMPLETE_INLINE) {
c5eef2b9 7029 struct io_ring_ctx *ctx = req->ctx;
cd0ca2e0 7030 struct io_submit_state *state = &ctx->submit_state;
e65ef56d 7031
cd0ca2e0
PB
7032 state->compl_reqs[state->compl_nr++] = req;
7033 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
2a2758f2 7034 io_submit_flush_completions(ctx);
906c6caa 7035 return;
0d63c148 7036 }
906c6caa
PB
7037
7038 linked_timeout = io_prep_linked_timeout(req);
7039 if (linked_timeout)
7040 io_queue_linked_timeout(linked_timeout);
1840038e 7041 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
906c6caa
PB
7042 linked_timeout = io_prep_linked_timeout(req);
7043
59b735ae
OL
7044 switch (io_arm_poll_handler(req)) {
7045 case IO_APOLL_READY:
906c6caa 7046 if (linked_timeout)
4ea672ab 7047 io_queue_linked_timeout(linked_timeout);
59b735ae
OL
7048 goto issue_sqe;
7049 case IO_APOLL_ABORTED:
1840038e
PB
7050 /*
7051 * Queued up for async execution, worker will release
7052 * submit reference when the iocb is actually submitted.
7053 */
f237c30a 7054 io_queue_async_work(req, NULL);
59b735ae 7055 break;
1840038e 7056 }
906c6caa
PB
7057
7058 if (linked_timeout)
7059 io_queue_linked_timeout(linked_timeout);
0d63c148 7060 } else {
f41db273 7061 io_req_complete_failed(req, ret);
9e645e11 7062 }
2b188cc1
JA
7063}
7064
441b8a78 7065static inline void io_queue_sqe(struct io_kiocb *req)
282cdc86 7066 __must_hold(&req->ctx->uring_lock)
4fe2c963 7067{
10c66904 7068 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
76cc33d7 7069 return;
4fe2c963 7070
a8295b98 7071 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
c5eef2b9 7072 __io_queue_sqe(req);
a8295b98 7073 } else if (req->flags & REQ_F_FAIL) {
c6d3d9cb 7074 io_req_complete_fail_submit(req);
76cc33d7
PB
7075 } else {
7076 int ret = io_req_prep_async(req);
7077
7078 if (unlikely(ret))
7079 io_req_complete_failed(req, ret);
7080 else
f237c30a 7081 io_queue_async_work(req, NULL);
ce35a47a 7082 }
4fe2c963
JL
7083}
7084
b16fed66
PB
7085/*
7086 * Check SQE restrictions (opcode and flags).
7087 *
7088 * Returns 'true' if SQE is allowed, 'false' otherwise.
7089 */
7090static inline bool io_check_restriction(struct io_ring_ctx *ctx,
7091 struct io_kiocb *req,
7092 unsigned int sqe_flags)
4fe2c963 7093{
4cfb25bf 7094 if (likely(!ctx->restricted))
b16fed66
PB
7095 return true;
7096
7097 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
7098 return false;
7099
7100 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
7101 ctx->restrictions.sqe_flags_required)
7102 return false;
7103
7104 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
7105 ctx->restrictions.sqe_flags_required))
7106 return false;
7107
7108 return true;
4fe2c963
JL
7109}
7110
b16fed66
PB
7111static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
7112 const struct io_uring_sqe *sqe)
282cdc86 7113 __must_hold(&ctx->uring_lock)
b16fed66
PB
7114{
7115 struct io_submit_state *state;
7116 unsigned int sqe_flags;
003e8dcc 7117 int personality, ret = 0;
b16fed66 7118
864ea921 7119 /* req is partially pre-initialised, see io_preinit_req() */
b16fed66
PB
7120 req->opcode = READ_ONCE(sqe->opcode);
7121 /* same numerical values with corresponding REQ_F_*, safe to copy */
7122 req->flags = sqe_flags = READ_ONCE(sqe->flags);
7123 req->user_data = READ_ONCE(sqe->user_data);
b16fed66 7124 req->file = NULL;
b16fed66 7125 req->fixed_rsrc_refs = NULL;
b16fed66 7126 req->task = current;
b16fed66
PB
7127
7128 /* enforce forwards compatibility on users */
dddca226 7129 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
b16fed66 7130 return -EINVAL;
b16fed66
PB
7131 if (unlikely(req->opcode >= IORING_OP_LAST))
7132 return -EINVAL;
4cfb25bf 7133 if (!io_check_restriction(ctx, req, sqe_flags))
b16fed66
PB
7134 return -EACCES;
7135
7136 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
7137 !io_op_defs[req->opcode].buffer_select)
7138 return -EOPNOTSUPP;
3c19966d
PB
7139 if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
7140 ctx->drain_active = true;
863e0560 7141
003e8dcc
JA
7142 personality = READ_ONCE(sqe->personality);
7143 if (personality) {
c10d1f98
PB
7144 req->creds = xa_load(&ctx->personalities, personality);
7145 if (!req->creds)
003e8dcc 7146 return -EINVAL;
c10d1f98 7147 get_cred(req->creds);
b8e64b53 7148 req->flags |= REQ_F_CREDS;
003e8dcc 7149 }
b16fed66
PB
7150 state = &ctx->submit_state;
7151
7152 /*
7153 * Plug now if we have more than 1 IO left after this, and the target
7154 * is potentially a read/write to block based storage.
7155 */
7156 if (!state->plug_started && state->ios_left > 1 &&
7157 io_op_defs[req->opcode].plug) {
7158 blk_start_plug(&state->plug);
7159 state->plug_started = true;
7160 }
7161
7162 if (io_op_defs[req->opcode].needs_file) {
62906e89 7163 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
ac177053 7164 (sqe_flags & IOSQE_FIXED_FILE));
b16fed66
PB
7165 if (unlikely(!req->file))
7166 ret = -EBADF;
7167 }
7168
7169 state->ios_left--;
7170 return ret;
7171}
7172
a6b8cadc 7173static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
a1ab7b35 7174 const struct io_uring_sqe *sqe)
282cdc86 7175 __must_hold(&ctx->uring_lock)
9e645e11 7176{
a1ab7b35 7177 struct io_submit_link *link = &ctx->submit_state.link;
ef4ff581 7178 int ret;
9e645e11 7179
a6b8cadc
PB
7180 ret = io_init_req(ctx, req, sqe);
7181 if (unlikely(ret)) {
7182fail_req:
a8295b98 7183 /* fail even hard links since we don't submit */
de59bc10 7184 if (link->head) {
a8295b98
HX
7185 /*
7186 * we can judge a link req is failed or cancelled by if
7187 * REQ_F_FAIL is set, but the head is an exception since
7188 * it may be set REQ_F_FAIL because of other req's failure
7189 * so let's leverage req->result to distinguish if a head
7190 * is set REQ_F_FAIL because of its failure or other req's
7191 * failure so that we can set the correct ret code for it.
7192 * init result here to avoid affecting the normal path.
7193 */
7194 if (!(link->head->flags & REQ_F_FAIL))
7195 req_fail_link_node(link->head, -ECANCELED);
7196 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7197 /*
7198 * the current req is a normal req, we should return
7199 * error and thus break the submittion loop.
7200 */
7201 io_req_complete_failed(req, ret);
7202 return ret;
de59bc10 7203 }
a8295b98
HX
7204 req_fail_link_node(req, ret);
7205 } else {
7206 ret = io_req_prep(req, sqe);
7207 if (unlikely(ret))
7208 goto fail_req;
a6b8cadc 7209 }
441b8a78 7210
be7053b7 7211 /* don't need @sqe from now on */
236daeae
OL
7212 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
7213 req->flags, true,
7214 ctx->flags & IORING_SETUP_SQPOLL);
a6b8cadc 7215
9e645e11
JA
7216 /*
7217 * If we already have a head request, queue this one for async
7218 * submittal once the head completes. If we don't have a head but
7219 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
7220 * submitted sync once the chain is complete. If none of those
7221 * conditions are true (normal request), then just queue it.
7222 */
863e0560
PB
7223 if (link->head) {
7224 struct io_kiocb *head = link->head;
4e88d6e7 7225
a8295b98
HX
7226 if (!(req->flags & REQ_F_FAIL)) {
7227 ret = io_req_prep_async(req);
7228 if (unlikely(ret)) {
7229 req_fail_link_node(req, ret);
7230 if (!(head->flags & REQ_F_FAIL))
7231 req_fail_link_node(head, -ECANCELED);
7232 }
7233 }
9d76377f 7234 trace_io_uring_link(ctx, req, head);
f2f87370 7235 link->last->link = req;
863e0560 7236 link->last = req;
32fe525b
PB
7237
7238 /* last request of a link, enqueue the link */
ef4ff581 7239 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
863e0560 7240 link->head = NULL;
5e159204 7241 io_queue_sqe(head);
32fe525b 7242 }
9e645e11 7243 } else {
ef4ff581 7244 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
863e0560
PB
7245 link->head = req;
7246 link->last = req;
711be031 7247 } else {
be7053b7 7248 io_queue_sqe(req);
711be031 7249 }
9e645e11 7250 }
2e6e1fde 7251
1d4240cc 7252 return 0;
9e645e11
JA
7253}
7254
9a56a232
JA
7255/*
7256 * Batched submission is done, ensure local IO is flushed out.
7257 */
ba88ff11
PB
7258static void io_submit_state_end(struct io_submit_state *state,
7259 struct io_ring_ctx *ctx)
9a56a232 7260{
a1ab7b35 7261 if (state->link.head)
de59bc10 7262 io_queue_sqe(state->link.head);
cd0ca2e0 7263 if (state->compl_nr)
2a2758f2 7264 io_submit_flush_completions(ctx);
27926b68
JA
7265 if (state->plug_started)
7266 blk_finish_plug(&state->plug);
9a56a232
JA
7267}
7268
7269/*
7270 * Start submission side cache.
7271 */
7272static void io_submit_state_start(struct io_submit_state *state,
ba88ff11 7273 unsigned int max_ios)
9a56a232 7274{
27926b68 7275 state->plug_started = false;
9a56a232 7276 state->ios_left = max_ios;
a1ab7b35
PB
7277 /* set only head, no need to init link_last in advance */
7278 state->link.head = NULL;
9a56a232
JA
7279}
7280
2b188cc1
JA
7281static void io_commit_sqring(struct io_ring_ctx *ctx)
7282{
75b28aff 7283 struct io_rings *rings = ctx->rings;
2b188cc1 7284
caf582c6
PB
7285 /*
7286 * Ensure any loads from the SQEs are done at this point,
7287 * since once we write the new head, the application could
7288 * write new data to them.
7289 */
7290 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
7291}
7292
2b188cc1 7293/*
dd9ae8a0 7294 * Fetch an sqe, if one is available. Note this returns a pointer to memory
2b188cc1
JA
7295 * that is mapped by userspace. This means that care needs to be taken to
7296 * ensure that reads are stable, as we cannot rely on userspace always
7297 * being a good citizen. If members of the sqe are validated and then later
7298 * used, it's important that those reads are done through READ_ONCE() to
7299 * prevent a re-load down the line.
7300 */
709b302f 7301static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 7302{
ea5ab3b5 7303 unsigned head, mask = ctx->sq_entries - 1;
17d3aeb3 7304 unsigned sq_idx = ctx->cached_sq_head++ & mask;
2b188cc1
JA
7305
7306 /*
7307 * The cached sq head (or cq tail) serves two purposes:
7308 *
7309 * 1) allows us to batch the cost of updating the user visible
7310 * head updates.
7311 * 2) allows the kernel side to track the head on its own, even
7312 * though the application is the one updating it.
7313 */
17d3aeb3 7314 head = READ_ONCE(ctx->sq_array[sq_idx]);
709b302f
PB
7315 if (likely(head < ctx->sq_entries))
7316 return &ctx->sq_sqes[head];
2b188cc1
JA
7317
7318 /* drop invalid entries */
15641e42
PB
7319 ctx->cq_extra--;
7320 WRITE_ONCE(ctx->rings->sq_dropped,
7321 READ_ONCE(ctx->rings->sq_dropped) + 1);
709b302f
PB
7322 return NULL;
7323}
7324
0f212204 7325static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
282cdc86 7326 __must_hold(&ctx->uring_lock)
6c271ce2 7327{
46c4e16a 7328 int submitted = 0;
6c271ce2 7329
ee7d46d9
PB
7330 /* make sure SQ entry isn't read before tail */
7331 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
2b85edfc
PB
7332 if (!percpu_ref_tryget_many(&ctx->refs, nr))
7333 return -EAGAIN;
9a10867a 7334 io_get_task_refs(nr);
6c271ce2 7335
ba88ff11 7336 io_submit_state_start(&ctx->submit_state, nr);
46c4e16a 7337 while (submitted < nr) {
3529d8c2 7338 const struct io_uring_sqe *sqe;
196be95c 7339 struct io_kiocb *req;
fb5ccc98 7340
258b29a9 7341 req = io_alloc_req(ctx);
196be95c
PB
7342 if (unlikely(!req)) {
7343 if (!submitted)
7344 submitted = -EAGAIN;
fb5ccc98 7345 break;
196be95c 7346 }
4fccfcbb
PB
7347 sqe = io_get_sqe(ctx);
7348 if (unlikely(!sqe)) {
0c6e1d7f 7349 list_add(&req->inflight_entry, &ctx->submit_state.free_list);
4fccfcbb
PB
7350 break;
7351 }
d3656344
JA
7352 /* will complete beyond this point, count as submitted */
7353 submitted++;
a1ab7b35 7354 if (io_submit_sqe(ctx, req, sqe))
196be95c 7355 break;
6c271ce2
JA
7356 }
7357
9466f437
PB
7358 if (unlikely(submitted != nr)) {
7359 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
d8a6df10 7360 int unused = nr - ref_used;
9466f437 7361
09899b19 7362 current->io_uring->cached_refs += unused;
d8a6df10 7363 percpu_ref_put_many(&ctx->refs, unused);
9466f437 7364 }
6c271ce2 7365
a1ab7b35 7366 io_submit_state_end(&ctx->submit_state, ctx);
ae9428ca
PB
7367 /* Commit SQ ring head once we've consumed and submitted all SQEs */
7368 io_commit_sqring(ctx);
7369
6c271ce2
JA
7370 return submitted;
7371}
7372
e4b6d902
PB
7373static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
7374{
7375 return READ_ONCE(sqd->state);
7376}
7377
23b3628e
XW
7378static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
7379{
7380 /* Tell userspace we may need a wakeup call */
79ebeaee 7381 spin_lock(&ctx->completion_lock);
20c0b380
NA
7382 WRITE_ONCE(ctx->rings->sq_flags,
7383 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
79ebeaee 7384 spin_unlock(&ctx->completion_lock);
23b3628e
XW
7385}
7386
7387static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
7388{
79ebeaee 7389 spin_lock(&ctx->completion_lock);
20c0b380
NA
7390 WRITE_ONCE(ctx->rings->sq_flags,
7391 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
79ebeaee 7392 spin_unlock(&ctx->completion_lock);
23b3628e
XW
7393}
7394
08369246 7395static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6c271ce2 7396{
c8d1ba58 7397 unsigned int to_submit;
bdcd3eab 7398 int ret = 0;
6c271ce2 7399
c8d1ba58 7400 to_submit = io_sqring_entries(ctx);
e95eee2d 7401 /* if we're handling multiple rings, cap submit size for fairness */
4ce8ad95
OL
7402 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
7403 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
e95eee2d 7404
906a3c6f 7405 if (!list_empty(&ctx->iopoll_list) || to_submit) {
c8d1ba58 7406 unsigned nr_events = 0;
948e1947
PB
7407 const struct cred *creds = NULL;
7408
7409 if (ctx->sq_creds != current_cred())
7410 creds = override_creds(ctx->sq_creds);
a4c0b3de 7411
c8d1ba58 7412 mutex_lock(&ctx->uring_lock);
906a3c6f 7413 if (!list_empty(&ctx->iopoll_list))
a8576af9 7414 io_do_iopoll(ctx, &nr_events, 0);
906a3c6f 7415
3b763ba1
PB
7416 /*
7417 * Don't submit if refs are dying, good for io_uring_register(),
7418 * but also it is relied upon by io_ring_exit_work()
7419 */
0298ef96
PB
7420 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
7421 !(ctx->flags & IORING_SETUP_R_DISABLED))
08369246 7422 ret = io_submit_sqes(ctx, to_submit);
c8d1ba58 7423 mutex_unlock(&ctx->uring_lock);
6c271ce2 7424
acfb381d
PB
7425 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
7426 wake_up(&ctx->sqo_sq_wait);
948e1947
PB
7427 if (creds)
7428 revert_creds(creds);
acfb381d 7429 }
6c271ce2 7430
08369246
XW
7431 return ret;
7432}
6c271ce2 7433
08369246
XW
7434static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
7435{
7436 struct io_ring_ctx *ctx;
7437 unsigned sq_thread_idle = 0;
6c271ce2 7438
c9dca27d
PB
7439 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7440 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
08369246 7441 sqd->sq_thread_idle = sq_thread_idle;
c8d1ba58 7442}
6c271ce2 7443
e4b6d902
PB
7444static bool io_sqd_handle_event(struct io_sq_data *sqd)
7445{
7446 bool did_sig = false;
7447 struct ksignal ksig;
7448
7449 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
7450 signal_pending(current)) {
7451 mutex_unlock(&sqd->lock);
7452 if (signal_pending(current))
7453 did_sig = get_signal(&ksig);
7454 cond_resched();
7455 mutex_lock(&sqd->lock);
7456 }
e4b6d902
PB
7457 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
7458}
7459
c8d1ba58
JA
7460static int io_sq_thread(void *data)
7461{
69fb2131
JA
7462 struct io_sq_data *sqd = data;
7463 struct io_ring_ctx *ctx;
a0d9205f 7464 unsigned long timeout = 0;
37d1e2e3 7465 char buf[TASK_COMM_LEN];
08369246 7466 DEFINE_WAIT(wait);
6c271ce2 7467
696ee88a 7468 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
37d1e2e3 7469 set_task_comm(current, buf);
37d1e2e3
JA
7470
7471 if (sqd->sq_cpu != -1)
7472 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
7473 else
7474 set_cpus_allowed_ptr(current, cpu_online_mask);
7475 current->flags |= PF_NO_SETAFFINITY;
7476
09a6f4ef 7477 mutex_lock(&sqd->lock);
e4b6d902 7478 while (1) {
1a924a80 7479 bool cap_entries, sqt_spin = false;
c1edbf5f 7480
e4b6d902
PB
7481 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
7482 if (io_sqd_handle_event(sqd))
c7d95613 7483 break;
08369246
XW
7484 timeout = jiffies + sqd->sq_thread_idle;
7485 }
e4b6d902 7486
e95eee2d 7487 cap_entries = !list_is_singular(&sqd->ctx_list);
69fb2131 7488 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
948e1947 7489 int ret = __io_sq_thread(ctx, cap_entries);
7c30f36a 7490
08369246
XW
7491 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
7492 sqt_spin = true;
69fb2131 7493 }
dd432ea5
PB
7494 if (io_run_task_work())
7495 sqt_spin = true;
6c271ce2 7496
08369246 7497 if (sqt_spin || !time_after(jiffies, timeout)) {
c8d1ba58 7498 cond_resched();
08369246
XW
7499 if (sqt_spin)
7500 timeout = jiffies + sqd->sq_thread_idle;
7501 continue;
7502 }
7503
08369246 7504 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
dd432ea5 7505 if (!io_sqd_events_pending(sqd) && !current->task_works) {
1a924a80
PB
7506 bool needs_sched = true;
7507
724cb4f9 7508 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
aaa9f0f4
PB
7509 io_ring_set_wakeup_flag(ctx);
7510
724cb4f9
HX
7511 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
7512 !list_empty_careful(&ctx->iopoll_list)) {
7513 needs_sched = false;
7514 break;
7515 }
7516 if (io_sqring_entries(ctx)) {
7517 needs_sched = false;
7518 break;
7519 }
7520 }
7521
7522 if (needs_sched) {
7523 mutex_unlock(&sqd->lock);
7524 schedule();
7525 mutex_lock(&sqd->lock);
7526 }
69fb2131
JA
7527 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7528 io_ring_clear_wakeup_flag(ctx);
6c271ce2 7529 }
08369246
XW
7530
7531 finish_wait(&sqd->wait, &wait);
7532 timeout = jiffies + sqd->sq_thread_idle;
6c271ce2 7533 }
28cea78a 7534
78cc687b 7535 io_uring_cancel_generic(true, sqd);
37d1e2e3 7536 sqd->thread = NULL;
05962f95 7537 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
5f3f26f9 7538 io_ring_set_wakeup_flag(ctx);
521d6a73 7539 io_run_task_work();
734551df
PB
7540 mutex_unlock(&sqd->lock);
7541
37d1e2e3
JA
7542 complete(&sqd->exited);
7543 do_exit(0);
6c271ce2
JA
7544}
7545
bda52162
JA
7546struct io_wait_queue {
7547 struct wait_queue_entry wq;
7548 struct io_ring_ctx *ctx;
5fd46178 7549 unsigned cq_tail;
bda52162
JA
7550 unsigned nr_timeouts;
7551};
7552
6c503150 7553static inline bool io_should_wake(struct io_wait_queue *iowq)
bda52162
JA
7554{
7555 struct io_ring_ctx *ctx = iowq->ctx;
5fd46178 7556 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
bda52162
JA
7557
7558 /*
d195a66e 7559 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
7560 * started waiting. For timeouts, we always want to return to userspace,
7561 * regardless of event count.
7562 */
5fd46178 7563 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
bda52162
JA
7564}
7565
7566static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7567 int wake_flags, void *key)
7568{
7569 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7570 wq);
7571
6c503150
PB
7572 /*
7573 * Cannot safely flush overflowed CQEs from here, ensure we wake up
7574 * the task, and the next invocation will do it.
7575 */
5ed7a37d 7576 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
6c503150
PB
7577 return autoremove_wake_function(curr, mode, wake_flags, key);
7578 return -1;
bda52162
JA
7579}
7580
af9c1a44
JA
7581static int io_run_task_work_sig(void)
7582{
7583 if (io_run_task_work())
7584 return 1;
7585 if (!signal_pending(current))
7586 return 0;
0b8cfa97 7587 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
792ee0f6 7588 return -ERESTARTSYS;
af9c1a44
JA
7589 return -EINTR;
7590}
7591
eeb60b9a
PB
7592/* when returns >0, the caller should retry */
7593static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7594 struct io_wait_queue *iowq,
68668891 7595 ktime_t timeout)
eeb60b9a
PB
7596{
7597 int ret;
7598
7599 /* make sure we run task_work before checking for signals */
7600 ret = io_run_task_work_sig();
7601 if (ret || io_should_wake(iowq))
7602 return ret;
7603 /* let the caller flush overflows, retry */
5ed7a37d 7604 if (test_bit(0, &ctx->check_cq_overflow))
eeb60b9a
PB
7605 return 1;
7606
68668891
JA
7607 if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
7608 return -ETIME;
7609 return 1;
eeb60b9a
PB
7610}
7611
2b188cc1
JA
7612/*
7613 * Wait until events become available, if we don't already have some. The
7614 * application must reap them itself, as they reside on the shared cq ring.
7615 */
7616static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
c73ebb68
HX
7617 const sigset_t __user *sig, size_t sigsz,
7618 struct __kernel_timespec __user *uts)
2b188cc1 7619{
90291099 7620 struct io_wait_queue iowq;
75b28aff 7621 struct io_rings *rings = ctx->rings;
68668891 7622 ktime_t timeout = KTIME_MAX;
c1d5a224 7623 int ret;
2b188cc1 7624
b41e9852 7625 do {
90f67366 7626 io_cqring_overflow_flush(ctx);
6c503150 7627 if (io_cqring_events(ctx) >= min_events)
b41e9852 7628 return 0;
4c6e277c 7629 if (!io_run_task_work())
b41e9852 7630 break;
b41e9852 7631 } while (1);
2b188cc1 7632
44df58d4
XW
7633 if (uts) {
7634 struct timespec64 ts;
7635
7636 if (get_timespec64(&ts, uts))
7637 return -EFAULT;
68668891 7638 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
44df58d4
XW
7639 }
7640
2b188cc1 7641 if (sig) {
9e75ad5d
AB
7642#ifdef CONFIG_COMPAT
7643 if (in_compat_syscall())
7644 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 7645 sigsz);
9e75ad5d
AB
7646 else
7647#endif
b772434b 7648 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 7649
2b188cc1
JA
7650 if (ret)
7651 return ret;
7652 }
7653
90291099
PB
7654 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
7655 iowq.wq.private = current;
7656 INIT_LIST_HEAD(&iowq.wq.entry);
7657 iowq.ctx = ctx;
bda52162 7658 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
5fd46178 7659 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
90291099 7660
c826bd7a 7661 trace_io_uring_cqring_wait(ctx, min_events);
bda52162 7662 do {
ca0a2651 7663 /* if we can't even flush overflow, don't wait for more */
90f67366 7664 if (!io_cqring_overflow_flush(ctx)) {
ca0a2651
JA
7665 ret = -EBUSY;
7666 break;
7667 }
311997b3 7668 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
bda52162 7669 TASK_INTERRUPTIBLE);
68668891 7670 ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
311997b3 7671 finish_wait(&ctx->cq_wait, &iowq.wq);
ca0a2651 7672 cond_resched();
eeb60b9a 7673 } while (ret > 0);
bda52162 7674
b7db41c9 7675 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 7676
75b28aff 7677 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
7678}
7679
9123c8ff 7680static void io_free_page_table(void **table, size_t size)
05f3fb3c 7681{
9123c8ff 7682 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
05f3fb3c 7683
846a4ef2 7684 for (i = 0; i < nr_tables; i++)
9123c8ff
PB
7685 kfree(table[i]);
7686 kfree(table);
7687}
7688
7689static void **io_alloc_page_table(size_t size)
7690{
7691 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7692 size_t init_size = size;
7693 void **table;
7694
0bea96f5 7695 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
9123c8ff
PB
7696 if (!table)
7697 return NULL;
7698
7699 for (i = 0; i < nr_tables; i++) {
27f6b318 7700 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
9123c8ff 7701
0bea96f5 7702 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
9123c8ff
PB
7703 if (!table[i]) {
7704 io_free_page_table(table, init_size);
7705 return NULL;
7706 }
7707 size -= this_size;
7708 }
7709 return table;
05f3fb3c
JA
7710}
7711
28a9fe25 7712static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
1642b445 7713{
28a9fe25
PB
7714 percpu_ref_exit(&ref_node->refs);
7715 kfree(ref_node);
1642b445
PB
7716}
7717
b9bd2bea
PB
7718static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
7719{
7720 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
7721 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
7722 unsigned long flags;
7723 bool first_add = false;
817ab1d1 7724 unsigned long delay = HZ;
b9bd2bea
PB
7725
7726 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
7727 node->done = true;
7728
817ab1d1
DY
7729 /* if we are mid-quiesce then do not delay */
7730 if (node->rsrc_data->quiesce)
7731 delay = 0;
7732
b9bd2bea
PB
7733 while (!list_empty(&ctx->rsrc_ref_list)) {
7734 node = list_first_entry(&ctx->rsrc_ref_list,
7735 struct io_rsrc_node, node);
7736 /* recycle ref nodes in order */
7737 if (!node->done)
7738 break;
7739 list_del(&node->node);
7740 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
7741 }
7742 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
7743
7744 if (first_add)
817ab1d1 7745 mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
b9bd2bea
PB
7746}
7747
7748static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
7749{
7750 struct io_rsrc_node *ref_node;
7751
7752 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7753 if (!ref_node)
7754 return NULL;
7755
7756 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
7757 0, GFP_KERNEL)) {
7758 kfree(ref_node);
7759 return NULL;
7760 }
7761 INIT_LIST_HEAD(&ref_node->node);
7762 INIT_LIST_HEAD(&ref_node->rsrc_list);
7763 ref_node->done = false;
7764 return ref_node;
7765}
7766
a7f0ed5a
PB
7767static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7768 struct io_rsrc_data *data_to_kill)
6b06314c 7769{
a7f0ed5a
PB
7770 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7771 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
6b06314c 7772
a7f0ed5a
PB
7773 if (data_to_kill) {
7774 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
82fbcfa9 7775
a7f0ed5a 7776 rsrc_node->rsrc_data = data_to_kill;
4956b9ea 7777 spin_lock_irq(&ctx->rsrc_ref_lock);
a7f0ed5a 7778 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
4956b9ea 7779 spin_unlock_irq(&ctx->rsrc_ref_lock);
82fbcfa9 7780
3e942498 7781 atomic_inc(&data_to_kill->refs);
a7f0ed5a
PB
7782 percpu_ref_kill(&rsrc_node->refs);
7783 ctx->rsrc_node = NULL;
7784 }
6b06314c 7785
a7f0ed5a
PB
7786 if (!ctx->rsrc_node) {
7787 ctx->rsrc_node = ctx->rsrc_backup_node;
7788 ctx->rsrc_backup_node = NULL;
7789 }
8bad28d8
HX
7790}
7791
a7f0ed5a 7792static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
8dd03afe
PB
7793{
7794 if (ctx->rsrc_backup_node)
7795 return 0;
b895c9a6 7796 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
8dd03afe 7797 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
8bad28d8
HX
7798}
7799
40ae0ff7 7800static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
8bad28d8
HX
7801{
7802 int ret;
05589553 7803
215c3902 7804 /* As we may drop ->uring_lock, other task may have started quiesce */
8bad28d8
HX
7805 if (data->quiesce)
7806 return -ENXIO;
05589553 7807
8bad28d8 7808 data->quiesce = true;
1ffc5422 7809 do {
a7f0ed5a 7810 ret = io_rsrc_node_switch_start(ctx);
8dd03afe 7811 if (ret)
f2303b1f 7812 break;
a7f0ed5a 7813 io_rsrc_node_switch(ctx, data);
f2303b1f 7814
3e942498
PB
7815 /* kill initial ref, already quiesced if zero */
7816 if (atomic_dec_and_test(&data->refs))
7817 break;
c018db4a 7818 mutex_unlock(&ctx->uring_lock);
8bad28d8 7819 flush_delayed_work(&ctx->rsrc_put_work);
1ffc5422 7820 ret = wait_for_completion_interruptible(&data->done);
c018db4a
JA
7821 if (!ret) {
7822 mutex_lock(&ctx->uring_lock);
2bcee8e2
DY
7823 if (atomic_read(&data->refs) > 0) {
7824 /*
7825 * it has been revived by another thread while
7826 * we were unlocked
7827 */
7828 mutex_unlock(&ctx->uring_lock);
7829 } else {
7830 break;
7831 }
c018db4a 7832 }
8bad28d8 7833
3e942498
PB
7834 atomic_inc(&data->refs);
7835 /* wait for all works potentially completing data->done */
7836 flush_delayed_work(&ctx->rsrc_put_work);
cb5e1b81 7837 reinit_completion(&data->done);
8dd03afe 7838
1ffc5422 7839 ret = io_run_task_work_sig();
8bad28d8 7840 mutex_lock(&ctx->uring_lock);
f2303b1f 7841 } while (ret >= 0);
8bad28d8 7842 data->quiesce = false;
05f3fb3c 7843
8bad28d8 7844 return ret;
d7954b2b
BM
7845}
7846
2d091d62
PB
7847static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7848{
7849 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7850 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7851
7852 return &data->tags[table_idx][off];
7853}
7854
44b31f2f 7855static void io_rsrc_data_free(struct io_rsrc_data *data)
1ad555c6 7856{
2d091d62
PB
7857 size_t size = data->nr * sizeof(data->tags[0][0]);
7858
7859 if (data->tags)
7860 io_free_page_table((void **)data->tags, size);
44b31f2f
PB
7861 kfree(data);
7862}
7863
d878c816
PB
7864static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7865 u64 __user *utags, unsigned nr,
7866 struct io_rsrc_data **pdata)
1ad555c6 7867{
b895c9a6 7868 struct io_rsrc_data *data;
2d091d62 7869 int ret = -ENOMEM;
d878c816 7870 unsigned i;
1ad555c6
BM
7871
7872 data = kzalloc(sizeof(*data), GFP_KERNEL);
7873 if (!data)
d878c816 7874 return -ENOMEM;
2d091d62 7875 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
b60c8dce 7876 if (!data->tags) {
1ad555c6 7877 kfree(data);
d878c816
PB
7878 return -ENOMEM;
7879 }
2d091d62
PB
7880
7881 data->nr = nr;
7882 data->ctx = ctx;
7883 data->do_put = do_put;
d878c816 7884 if (utags) {
2d091d62 7885 ret = -EFAULT;
d878c816 7886 for (i = 0; i < nr; i++) {
fdd1dc31
CIK
7887 u64 *tag_slot = io_get_tag_slot(data, i);
7888
7889 if (copy_from_user(tag_slot, &utags[i],
7890 sizeof(*tag_slot)))
2d091d62 7891 goto fail;
d878c816 7892 }
1ad555c6 7893 }
b60c8dce 7894
3e942498 7895 atomic_set(&data->refs, 1);
1ad555c6 7896 init_completion(&data->done);
d878c816
PB
7897 *pdata = data;
7898 return 0;
2d091d62
PB
7899fail:
7900 io_rsrc_data_free(data);
7901 return ret;
1ad555c6
BM
7902}
7903
9123c8ff
PB
7904static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7905{
0bea96f5
PB
7906 table->files = kvcalloc(nr_files, sizeof(table->files[0]),
7907 GFP_KERNEL_ACCOUNT);
9123c8ff
PB
7908 return !!table->files;
7909}
7910
042b0d85 7911static void io_free_file_tables(struct io_file_table *table)
9123c8ff 7912{
042b0d85 7913 kvfree(table->files);
9123c8ff
PB
7914 table->files = NULL;
7915}
7916
fff4db76 7917static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
1ad555c6 7918{
fff4db76
PB
7919#if defined(CONFIG_UNIX)
7920 if (ctx->ring_sock) {
7921 struct sock *sock = ctx->ring_sock->sk;
7922 struct sk_buff *skb;
7923
7924 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7925 kfree_skb(skb);
7926 }
7927#else
7928 int i;
7929
7930 for (i = 0; i < ctx->nr_user_files; i++) {
7931 struct file *file;
7932
7933 file = io_file_from_index(ctx, i);
7934 if (file)
7935 fput(file);
7936 }
7937#endif
042b0d85 7938 io_free_file_tables(&ctx->file_table);
44b31f2f 7939 io_rsrc_data_free(ctx->file_data);
fff4db76
PB
7940 ctx->file_data = NULL;
7941 ctx->nr_user_files = 0;
1ad555c6
BM
7942}
7943
d7954b2b
BM
7944static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7945{
75d51314 7946 unsigned nr = ctx->nr_user_files;
d7954b2b
BM
7947 int ret;
7948
08480400 7949 if (!ctx->file_data)
d7954b2b 7950 return -ENXIO;
75d51314
PB
7951
7952 /*
7953 * Quiesce may unlock ->uring_lock, and while it's not held
7954 * prevent new requests using the table.
7955 */
7956 ctx->nr_user_files = 0;
08480400 7957 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
75d51314 7958 ctx->nr_user_files = nr;
08480400
PB
7959 if (!ret)
7960 __io_sqe_files_unregister(ctx);
7961 return ret;
6b06314c
JA
7962}
7963
37d1e2e3 7964static void io_sq_thread_unpark(struct io_sq_data *sqd)
09a6f4ef 7965 __releases(&sqd->lock)
37d1e2e3 7966{
521d6a73
PB
7967 WARN_ON_ONCE(sqd->thread == current);
7968
9e138a48
PB
7969 /*
7970 * Do the dance but not conditional clear_bit() because it'd race with
7971 * other threads incrementing park_pending and setting the bit.
7972 */
37d1e2e3 7973 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
9e138a48
PB
7974 if (atomic_dec_return(&sqd->park_pending))
7975 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 7976 mutex_unlock(&sqd->lock);
37d1e2e3
JA
7977}
7978
86e0d676 7979static void io_sq_thread_park(struct io_sq_data *sqd)
09a6f4ef 7980 __acquires(&sqd->lock)
37d1e2e3 7981{
521d6a73
PB
7982 WARN_ON_ONCE(sqd->thread == current);
7983
9e138a48 7984 atomic_inc(&sqd->park_pending);
86e0d676 7985 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
09a6f4ef 7986 mutex_lock(&sqd->lock);
05962f95 7987 if (sqd->thread)
86e0d676 7988 wake_up_process(sqd->thread);
37d1e2e3
JA
7989}
7990
7991static void io_sq_thread_stop(struct io_sq_data *sqd)
7992{
521d6a73 7993 WARN_ON_ONCE(sqd->thread == current);
88885f66 7994 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
521d6a73 7995
05962f95 7996 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
88885f66 7997 mutex_lock(&sqd->lock);
e8f98f24
JA
7998 if (sqd->thread)
7999 wake_up_process(sqd->thread);
09a6f4ef 8000 mutex_unlock(&sqd->lock);
05962f95 8001 wait_for_completion(&sqd->exited);
37d1e2e3
JA
8002}
8003
534ca6d6 8004static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 8005{
534ca6d6 8006 if (refcount_dec_and_test(&sqd->refs)) {
9e138a48
PB
8007 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
8008
37d1e2e3
JA
8009 io_sq_thread_stop(sqd);
8010 kfree(sqd);
8011 }
8012}
8013
8014static void io_sq_thread_finish(struct io_ring_ctx *ctx)
8015{
8016 struct io_sq_data *sqd = ctx->sq_data;
8017
8018 if (sqd) {
05962f95 8019 io_sq_thread_park(sqd);
521d6a73 8020 list_del_init(&ctx->sqd_list);
37d1e2e3 8021 io_sqd_update_thread_idle(sqd);
05962f95 8022 io_sq_thread_unpark(sqd);
37d1e2e3
JA
8023
8024 io_put_sq_data(sqd);
8025 ctx->sq_data = NULL;
534ca6d6
JA
8026 }
8027}
8028
aa06165d
JA
8029static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
8030{
8031 struct io_ring_ctx *ctx_attach;
8032 struct io_sq_data *sqd;
8033 struct fd f;
8034
8035 f = fdget(p->wq_fd);
8036 if (!f.file)
8037 return ERR_PTR(-ENXIO);
8038 if (f.file->f_op != &io_uring_fops) {
8039 fdput(f);
8040 return ERR_PTR(-EINVAL);
8041 }
8042
8043 ctx_attach = f.file->private_data;
8044 sqd = ctx_attach->sq_data;
8045 if (!sqd) {
8046 fdput(f);
8047 return ERR_PTR(-EINVAL);
8048 }
5c2469e0
JA
8049 if (sqd->task_tgid != current->tgid) {
8050 fdput(f);
8051 return ERR_PTR(-EPERM);
8052 }
aa06165d
JA
8053
8054 refcount_inc(&sqd->refs);
8055 fdput(f);
8056 return sqd;
8057}
8058
26984fbf
PB
8059static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
8060 bool *attached)
534ca6d6
JA
8061{
8062 struct io_sq_data *sqd;
8063
26984fbf 8064 *attached = false;
5c2469e0
JA
8065 if (p->flags & IORING_SETUP_ATTACH_WQ) {
8066 sqd = io_attach_sq_data(p);
26984fbf
PB
8067 if (!IS_ERR(sqd)) {
8068 *attached = true;
5c2469e0 8069 return sqd;
26984fbf 8070 }
5c2469e0
JA
8071 /* fall through for EPERM case, setup new sqd/task */
8072 if (PTR_ERR(sqd) != -EPERM)
8073 return sqd;
8074 }
aa06165d 8075
534ca6d6
JA
8076 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
8077 if (!sqd)
8078 return ERR_PTR(-ENOMEM);
8079
9e138a48 8080 atomic_set(&sqd->park_pending, 0);
534ca6d6 8081 refcount_set(&sqd->refs, 1);
69fb2131 8082 INIT_LIST_HEAD(&sqd->ctx_list);
09a6f4ef 8083 mutex_init(&sqd->lock);
534ca6d6 8084 init_waitqueue_head(&sqd->wait);
37d1e2e3 8085 init_completion(&sqd->exited);
534ca6d6
JA
8086 return sqd;
8087}
8088
6b06314c 8089#if defined(CONFIG_UNIX)
6b06314c
JA
8090/*
8091 * Ensure the UNIX gc is aware of our file set, so we are certain that
8092 * the io_uring can be safely unregistered on process exit, even if we have
8093 * loops in the file referencing.
8094 */
8095static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
8096{
8097 struct sock *sk = ctx->ring_sock->sk;
8098 struct scm_fp_list *fpl;
8099 struct sk_buff *skb;
08a45173 8100 int i, nr_files;
6b06314c 8101
6b06314c
JA
8102 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
8103 if (!fpl)
8104 return -ENOMEM;
8105
8106 skb = alloc_skb(0, GFP_KERNEL);
8107 if (!skb) {
8108 kfree(fpl);
8109 return -ENOMEM;
8110 }
8111
8112 skb->sk = sk;
6b06314c 8113
08a45173 8114 nr_files = 0;
62e398be 8115 fpl->user = get_uid(current_user());
6b06314c 8116 for (i = 0; i < nr; i++) {
65e19f54
JA
8117 struct file *file = io_file_from_index(ctx, i + offset);
8118
8119 if (!file)
08a45173 8120 continue;
65e19f54 8121 fpl->fp[nr_files] = get_file(file);
08a45173
JA
8122 unix_inflight(fpl->user, fpl->fp[nr_files]);
8123 nr_files++;
6b06314c
JA
8124 }
8125
08a45173
JA
8126 if (nr_files) {
8127 fpl->max = SCM_MAX_FD;
8128 fpl->count = nr_files;
8129 UNIXCB(skb).fp = fpl;
05f3fb3c 8130 skb->destructor = unix_destruct_scm;
08a45173
JA
8131 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
8132 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 8133
40d8cb26
PB
8134 for (i = 0; i < nr; i++) {
8135 struct file *file = io_file_from_index(ctx, i + offset);
8136
8137 if (file)
8138 fput(file);
8139 }
08a45173
JA
8140 } else {
8141 kfree_skb(skb);
54c4d35a 8142 free_uid(fpl->user);
08a45173
JA
8143 kfree(fpl);
8144 }
6b06314c
JA
8145
8146 return 0;
8147}
8148
8149/*
8150 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
8151 * causes regular reference counting to break down. We rely on the UNIX
8152 * garbage collection to take care of this problem for us.
8153 */
8154static int io_sqe_files_scm(struct io_ring_ctx *ctx)
8155{
8156 unsigned left, total;
8157 int ret = 0;
8158
8159 total = 0;
8160 left = ctx->nr_user_files;
8161 while (left) {
8162 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
8163
8164 ret = __io_sqe_files_scm(ctx, this_files, total);
8165 if (ret)
8166 break;
8167 left -= this_files;
8168 total += this_files;
8169 }
8170
8171 if (!ret)
8172 return 0;
8173
8174 while (total < ctx->nr_user_files) {
65e19f54
JA
8175 struct file *file = io_file_from_index(ctx, total);
8176
8177 if (file)
8178 fput(file);
6b06314c
JA
8179 total++;
8180 }
8181
8182 return ret;
8183}
8184#else
8185static int io_sqe_files_scm(struct io_ring_ctx *ctx)
8186{
8187 return 0;
8188}
8189#endif
8190
47e90392 8191static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
05f3fb3c 8192{
50238531 8193 struct file *file = prsrc->file;
05f3fb3c
JA
8194#if defined(CONFIG_UNIX)
8195 struct sock *sock = ctx->ring_sock->sk;
8196 struct sk_buff_head list, *head = &sock->sk_receive_queue;
8197 struct sk_buff *skb;
8198 int i;
8199
8200 __skb_queue_head_init(&list);
8201
8202 /*
8203 * Find the skb that holds this file in its SCM_RIGHTS. When found,
8204 * remove this entry and rearrange the file array.
8205 */
8206 skb = skb_dequeue(head);
8207 while (skb) {
8208 struct scm_fp_list *fp;
8209
8210 fp = UNIXCB(skb).fp;
8211 for (i = 0; i < fp->count; i++) {
8212 int left;
8213
8214 if (fp->fp[i] != file)
8215 continue;
8216
8217 unix_notinflight(fp->user, fp->fp[i]);
8218 left = fp->count - 1 - i;
8219 if (left) {
8220 memmove(&fp->fp[i], &fp->fp[i + 1],
8221 left * sizeof(struct file *));
8222 }
8223 fp->count--;
8224 if (!fp->count) {
8225 kfree_skb(skb);
8226 skb = NULL;
8227 } else {
8228 __skb_queue_tail(&list, skb);
8229 }
8230 fput(file);
8231 file = NULL;
8232 break;
8233 }
8234
8235 if (!file)
8236 break;
8237
8238 __skb_queue_tail(&list, skb);
8239
8240 skb = skb_dequeue(head);
8241 }
8242
8243 if (skb_peek(&list)) {
8244 spin_lock_irq(&head->lock);
8245 while ((skb = __skb_dequeue(&list)) != NULL)
8246 __skb_queue_tail(head, skb);
8247 spin_unlock_irq(&head->lock);
8248 }
8249#else
8250 fput(file);
8251#endif
8252}
8253
b895c9a6 8254static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
65e19f54 8255{
b895c9a6 8256 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
269bbe5f
BM
8257 struct io_ring_ctx *ctx = rsrc_data->ctx;
8258 struct io_rsrc_put *prsrc, *tmp;
05589553 8259
269bbe5f
BM
8260 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
8261 list_del(&prsrc->list);
b60c8dce
PB
8262
8263 if (prsrc->tag) {
8264 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
b60c8dce
PB
8265
8266 io_ring_submit_lock(ctx, lock_ring);
79ebeaee 8267 spin_lock(&ctx->completion_lock);
b60c8dce 8268 io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
2840f710 8269 ctx->cq_extra++;
b60c8dce 8270 io_commit_cqring(ctx);
79ebeaee 8271 spin_unlock(&ctx->completion_lock);
b60c8dce
PB
8272 io_cqring_ev_posted(ctx);
8273 io_ring_submit_unlock(ctx, lock_ring);
8274 }
8275
40ae0ff7 8276 rsrc_data->do_put(ctx, prsrc);
269bbe5f 8277 kfree(prsrc);
65e19f54 8278 }
05589553 8279
28a9fe25 8280 io_rsrc_node_destroy(ref_node);
3e942498
PB
8281 if (atomic_dec_and_test(&rsrc_data->refs))
8282 complete(&rsrc_data->done);
2faf852d 8283}
65e19f54 8284
269bbe5f 8285static void io_rsrc_put_work(struct work_struct *work)
4a38aed2
JA
8286{
8287 struct io_ring_ctx *ctx;
8288 struct llist_node *node;
8289
269bbe5f
BM
8290 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
8291 node = llist_del_all(&ctx->rsrc_put_llist);
4a38aed2
JA
8292
8293 while (node) {
b895c9a6 8294 struct io_rsrc_node *ref_node;
4a38aed2
JA
8295 struct llist_node *next = node->next;
8296
b895c9a6 8297 ref_node = llist_entry(node, struct io_rsrc_node, llist);
269bbe5f 8298 __io_rsrc_put_work(ref_node);
4a38aed2
JA
8299 node = next;
8300 }
8301}
8302
6b06314c 8303static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
792e3582 8304 unsigned nr_args, u64 __user *tags)
6b06314c
JA
8305{
8306 __s32 __user *fds = (__s32 __user *) arg;
05f3fb3c 8307 struct file *file;
f3baed39 8308 int fd, ret;
846a4ef2 8309 unsigned i;
6b06314c 8310
05f3fb3c 8311 if (ctx->file_data)
6b06314c
JA
8312 return -EBUSY;
8313 if (!nr_args)
8314 return -EINVAL;
8315 if (nr_args > IORING_MAX_FIXED_FILES)
8316 return -EMFILE;
3a1b8a4e
PB
8317 if (nr_args > rlimit(RLIMIT_NOFILE))
8318 return -EMFILE;
a7f0ed5a 8319 ret = io_rsrc_node_switch_start(ctx);
f3baed39
PB
8320 if (ret)
8321 return ret;
d878c816
PB
8322 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
8323 &ctx->file_data);
8324 if (ret)
8325 return ret;
6b06314c 8326
f3baed39 8327 ret = -ENOMEM;
aeca241b 8328 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
1ad555c6 8329 goto out_free;
65e19f54 8330
08a45173 8331 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
d878c816 8332 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
600cf3f8
PB
8333 ret = -EFAULT;
8334 goto out_fput;
8335 }
08a45173 8336 /* allow sparse sets */
792e3582
PB
8337 if (fd == -1) {
8338 ret = -EINVAL;
2d091d62 8339 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
792e3582 8340 goto out_fput;
08a45173 8341 continue;
792e3582 8342 }
6b06314c 8343
05f3fb3c 8344 file = fget(fd);
6b06314c 8345 ret = -EBADF;
792e3582 8346 if (unlikely(!file))
600cf3f8 8347 goto out_fput;
05f3fb3c 8348
6b06314c
JA
8349 /*
8350 * Don't allow io_uring instances to be registered. If UNIX
8351 * isn't enabled, then this causes a reference cycle and this
8352 * instance can never get freed. If UNIX is enabled we'll
8353 * handle it just fine, but there's still no point in allowing
8354 * a ring fd as it doesn't support regular read/write anyway.
8355 */
05f3fb3c
JA
8356 if (file->f_op == &io_uring_fops) {
8357 fput(file);
600cf3f8 8358 goto out_fput;
6b06314c 8359 }
aeca241b 8360 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
6b06314c
JA
8361 }
8362
6b06314c 8363 ret = io_sqe_files_scm(ctx);
05589553 8364 if (ret) {
08480400 8365 __io_sqe_files_unregister(ctx);
05589553
XW
8366 return ret;
8367 }
6b06314c 8368
a7f0ed5a 8369 io_rsrc_node_switch(ctx, NULL);
6b06314c 8370 return ret;
600cf3f8
PB
8371out_fput:
8372 for (i = 0; i < ctx->nr_user_files; i++) {
8373 file = io_file_from_index(ctx, i);
8374 if (file)
8375 fput(file);
8376 }
042b0d85 8377 io_free_file_tables(&ctx->file_table);
600cf3f8 8378 ctx->nr_user_files = 0;
600cf3f8 8379out_free:
44b31f2f 8380 io_rsrc_data_free(ctx->file_data);
55cbc256 8381 ctx->file_data = NULL;
6b06314c
JA
8382 return ret;
8383}
8384
c3a31e60
JA
8385static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
8386 int index)
8387{
8388#if defined(CONFIG_UNIX)
8389 struct sock *sock = ctx->ring_sock->sk;
8390 struct sk_buff_head *head = &sock->sk_receive_queue;
8391 struct sk_buff *skb;
8392
8393 /*
8394 * See if we can merge this file into an existing skb SCM_RIGHTS
8395 * file set. If there's no room, fall back to allocating a new skb
8396 * and filling it in.
8397 */
8398 spin_lock_irq(&head->lock);
8399 skb = skb_peek(head);
8400 if (skb) {
8401 struct scm_fp_list *fpl = UNIXCB(skb).fp;
8402
8403 if (fpl->count < SCM_MAX_FD) {
8404 __skb_unlink(skb, head);
8405 spin_unlock_irq(&head->lock);
8406 fpl->fp[fpl->count] = get_file(file);
8407 unix_inflight(fpl->user, fpl->fp[fpl->count]);
8408 fpl->count++;
8409 spin_lock_irq(&head->lock);
8410 __skb_queue_head(head, skb);
8411 } else {
8412 skb = NULL;
8413 }
8414 }
8415 spin_unlock_irq(&head->lock);
8416
8417 if (skb) {
8418 fput(file);
8419 return 0;
8420 }
8421
8422 return __io_sqe_files_scm(ctx, 1, index);
8423#else
8424 return 0;
8425#endif
8426}
8427
9c7b0ba8
PB
8428static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
8429 struct io_rsrc_node *node, void *rsrc)
8430{
c47dfb44 8431 u64 *tag_slot = io_get_tag_slot(data, idx);
9c7b0ba8
PB
8432 struct io_rsrc_put *prsrc;
8433
8434 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
8435 if (!prsrc)
8436 return -ENOMEM;
8437
c47dfb44
PB
8438 prsrc->tag = *tag_slot;
8439 *tag_slot = 0;
9c7b0ba8
PB
8440 prsrc->rsrc = rsrc;
8441 list_add(&prsrc->list, &node->rsrc_list);
8442 return 0;
8443}
8444
b9445598
PB
8445static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
8446 unsigned int issue_flags, u32 slot_index)
8447{
8448 struct io_ring_ctx *ctx = req->ctx;
8449 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
9c7b0ba8 8450 bool needs_switch = false;
b9445598
PB
8451 struct io_fixed_file *file_slot;
8452 int ret = -EBADF;
8453
8454 io_ring_submit_lock(ctx, !force_nonblock);
8455 if (file->f_op == &io_uring_fops)
8456 goto err;
8457 ret = -ENXIO;
8458 if (!ctx->file_data)
8459 goto err;
8460 ret = -EINVAL;
8461 if (slot_index >= ctx->nr_user_files)
8462 goto err;
8463
8464 slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
8465 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
9c7b0ba8
PB
8466
8467 if (file_slot->file_ptr) {
8468 struct file *old_file;
8469
8470 ret = io_rsrc_node_switch_start(ctx);
8471 if (ret)
8472 goto err;
8473
8474 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8475 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
8476 ctx->rsrc_node, old_file);
8477 if (ret)
8478 goto err;
8479 file_slot->file_ptr = 0;
8480 needs_switch = true;
8481 }
b9445598
PB
8482
8483 *io_get_tag_slot(ctx->file_data, slot_index) = 0;
8484 io_fixed_file_set(file_slot, file);
8485 ret = io_sqe_file_register(ctx, file, slot_index);
8486 if (ret) {
8487 file_slot->file_ptr = 0;
8488 goto err;
8489 }
8490
8491 ret = 0;
8492err:
9c7b0ba8
PB
8493 if (needs_switch)
8494 io_rsrc_node_switch(ctx, ctx->file_data);
b9445598
PB
8495 io_ring_submit_unlock(ctx, !force_nonblock);
8496 if (ret)
8497 fput(file);
8498 return ret;
8499}
8500
7df778be
PB
8501static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
8502{
8503 unsigned int offset = req->close.file_slot - 1;
8504 struct io_ring_ctx *ctx = req->ctx;
8505 struct io_fixed_file *file_slot;
8506 struct file *file;
22a99dcb 8507 int ret;
7df778be
PB
8508
8509 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
8510 ret = -ENXIO;
8511 if (unlikely(!ctx->file_data))
8512 goto out;
8513 ret = -EINVAL;
8514 if (offset >= ctx->nr_user_files)
8515 goto out;
8516 ret = io_rsrc_node_switch_start(ctx);
8517 if (ret)
8518 goto out;
8519
22a99dcb
PB
8520 offset = array_index_nospec(offset, ctx->nr_user_files);
8521 file_slot = io_fixed_file_slot(&ctx->file_table, offset);
7df778be
PB
8522 ret = -EBADF;
8523 if (!file_slot->file_ptr)
8524 goto out;
8525
8526 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8527 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
8528 if (ret)
8529 goto out;
8530
8531 file_slot->file_ptr = 0;
8532 io_rsrc_node_switch(ctx, ctx->file_data);
8533 ret = 0;
8534out:
8535 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
8536 return ret;
8537}
8538
05f3fb3c 8539static int __io_sqe_files_update(struct io_ring_ctx *ctx,
c3bdad02 8540 struct io_uring_rsrc_update2 *up,
05f3fb3c
JA
8541 unsigned nr_args)
8542{
c3bdad02 8543 u64 __user *tags = u64_to_user_ptr(up->tags);
98f0b3b4 8544 __s32 __user *fds = u64_to_user_ptr(up->data);
b895c9a6 8545 struct io_rsrc_data *data = ctx->file_data;
a04b0ac0
PB
8546 struct io_fixed_file *file_slot;
8547 struct file *file;
98f0b3b4
PB
8548 int fd, i, err = 0;
8549 unsigned int done;
05589553 8550 bool needs_switch = false;
c3a31e60 8551
98f0b3b4
PB
8552 if (!ctx->file_data)
8553 return -ENXIO;
8554 if (up->offset + nr_args > ctx->nr_user_files)
c3a31e60
JA
8555 return -EINVAL;
8556
67973b93 8557 for (done = 0; done < nr_args; done++) {
c3bdad02
PB
8558 u64 tag = 0;
8559
8560 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
8561 copy_from_user(&fd, &fds[done], sizeof(fd))) {
c3a31e60
JA
8562 err = -EFAULT;
8563 break;
8564 }
c3bdad02
PB
8565 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
8566 err = -EINVAL;
8567 break;
8568 }
4e0377a1 8569 if (fd == IORING_REGISTER_FILES_SKIP)
8570 continue;
8571
67973b93 8572 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
aeca241b 8573 file_slot = io_fixed_file_slot(&ctx->file_table, i);
ea64ec02 8574
a04b0ac0
PB
8575 if (file_slot->file_ptr) {
8576 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
22a99dcb 8577 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
a5318d3c
HD
8578 if (err)
8579 break;
a04b0ac0 8580 file_slot->file_ptr = 0;
05589553 8581 needs_switch = true;
c3a31e60
JA
8582 }
8583 if (fd != -1) {
c3a31e60
JA
8584 file = fget(fd);
8585 if (!file) {
8586 err = -EBADF;
8587 break;
8588 }
8589 /*
8590 * Don't allow io_uring instances to be registered. If
8591 * UNIX isn't enabled, then this causes a reference
8592 * cycle and this instance can never get freed. If UNIX
8593 * is enabled we'll handle it just fine, but there's
8594 * still no point in allowing a ring fd as it doesn't
8595 * support regular read/write anyway.
8596 */
8597 if (file->f_op == &io_uring_fops) {
8598 fput(file);
8599 err = -EBADF;
8600 break;
8601 }
1dc25797 8602 *io_get_tag_slot(data, i) = tag;
9a321c98 8603 io_fixed_file_set(file_slot, file);
c3a31e60 8604 err = io_sqe_file_register(ctx, file, i);
f3bd9dae 8605 if (err) {
a04b0ac0 8606 file_slot->file_ptr = 0;
f3bd9dae 8607 fput(file);
c3a31e60 8608 break;
f3bd9dae 8609 }
c3a31e60 8610 }
05f3fb3c
JA
8611 }
8612
a7f0ed5a
PB
8613 if (needs_switch)
8614 io_rsrc_node_switch(ctx, data);
c3a31e60
JA
8615 return done ? done : err;
8616}
05589553 8617
685fe7fe
JA
8618static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
8619 struct task_struct *task)
24369c2e 8620{
e941894e 8621 struct io_wq_hash *hash;
24369c2e 8622 struct io_wq_data data;
24369c2e 8623 unsigned int concurrency;
24369c2e 8624
362a9e65 8625 mutex_lock(&ctx->uring_lock);
e941894e
JA
8626 hash = ctx->hash_map;
8627 if (!hash) {
8628 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
362a9e65
YY
8629 if (!hash) {
8630 mutex_unlock(&ctx->uring_lock);
e941894e 8631 return ERR_PTR(-ENOMEM);
362a9e65 8632 }
e941894e
JA
8633 refcount_set(&hash->refs, 1);
8634 init_waitqueue_head(&hash->wait);
8635 ctx->hash_map = hash;
24369c2e 8636 }
362a9e65 8637 mutex_unlock(&ctx->uring_lock);
24369c2e 8638
e941894e 8639 data.hash = hash;
685fe7fe 8640 data.task = task;
ebc11b6c 8641 data.free_work = io_wq_free_work;
f5fa38c5 8642 data.do_work = io_wq_submit_work;
24369c2e 8643
d25e3a3d
JA
8644 /* Do QD, or 4 * CPUS, whatever is smallest */
8645 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
24369c2e 8646
5aa75ed5 8647 return io_wq_create(concurrency, &data);
24369c2e
PB
8648}
8649
5aa75ed5
JA
8650static int io_uring_alloc_task_context(struct task_struct *task,
8651 struct io_ring_ctx *ctx)
0f212204
JA
8652{
8653 struct io_uring_task *tctx;
d8a6df10 8654 int ret;
0f212204 8655
09899b19 8656 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
0f212204
JA
8657 if (unlikely(!tctx))
8658 return -ENOMEM;
8659
d8a6df10
JA
8660 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
8661 if (unlikely(ret)) {
8662 kfree(tctx);
8663 return ret;
8664 }
8665
685fe7fe 8666 tctx->io_wq = io_init_wq_offload(ctx, task);
5aa75ed5
JA
8667 if (IS_ERR(tctx->io_wq)) {
8668 ret = PTR_ERR(tctx->io_wq);
8669 percpu_counter_destroy(&tctx->inflight);
8670 kfree(tctx);
8671 return ret;
8672 }
8673
0f212204
JA
8674 xa_init(&tctx->xa);
8675 init_waitqueue_head(&tctx->wait);
fdaf083c 8676 atomic_set(&tctx->in_idle, 0);
b303fe2e 8677 atomic_set(&tctx->inflight_tracked, 0);
0f212204 8678 task->io_uring = tctx;
7cbf1722
JA
8679 spin_lock_init(&tctx->task_lock);
8680 INIT_WQ_LIST(&tctx->task_list);
7cbf1722 8681 init_task_work(&tctx->task_work, tctx_task_work);
0f212204
JA
8682 return 0;
8683}
8684
8685void __io_uring_free(struct task_struct *tsk)
8686{
8687 struct io_uring_task *tctx = tsk->io_uring;
8688
8689 WARN_ON_ONCE(!xa_empty(&tctx->xa));
ef8eaa4e 8690 WARN_ON_ONCE(tctx->io_wq);
09899b19 8691 WARN_ON_ONCE(tctx->cached_refs);
ef8eaa4e 8692
d8a6df10 8693 percpu_counter_destroy(&tctx->inflight);
0f212204
JA
8694 kfree(tctx);
8695 tsk->io_uring = NULL;
8696}
8697
7e84e1c7
SG
8698static int io_sq_offload_create(struct io_ring_ctx *ctx,
8699 struct io_uring_params *p)
2b188cc1
JA
8700{
8701 int ret;
8702
d25e3a3d
JA
8703 /* Retain compatibility with failing for an invalid attach attempt */
8704 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
8705 IORING_SETUP_ATTACH_WQ) {
8706 struct fd f;
8707
8708 f = fdget(p->wq_fd);
8709 if (!f.file)
8710 return -ENXIO;
0cc936f7
JA
8711 if (f.file->f_op != &io_uring_fops) {
8712 fdput(f);
f2a48dd0 8713 return -EINVAL;
0cc936f7
JA
8714 }
8715 fdput(f);
d25e3a3d 8716 }
6c271ce2 8717 if (ctx->flags & IORING_SETUP_SQPOLL) {
46fe18b1 8718 struct task_struct *tsk;
534ca6d6 8719 struct io_sq_data *sqd;
26984fbf 8720 bool attached;
534ca6d6 8721
26984fbf 8722 sqd = io_get_sq_data(p, &attached);
534ca6d6
JA
8723 if (IS_ERR(sqd)) {
8724 ret = PTR_ERR(sqd);
8725 goto err;
8726 }
69fb2131 8727
7c30f36a 8728 ctx->sq_creds = get_current_cred();
534ca6d6 8729 ctx->sq_data = sqd;
917257da
JA
8730 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8731 if (!ctx->sq_thread_idle)
8732 ctx->sq_thread_idle = HZ;
8733
78d7f6ba 8734 io_sq_thread_park(sqd);
de75a3d3
PB
8735 list_add(&ctx->sqd_list, &sqd->ctx_list);
8736 io_sqd_update_thread_idle(sqd);
26984fbf 8737 /* don't attach to a dying SQPOLL thread, would be racy */
f2a48dd0 8738 ret = (attached && !sqd->thread) ? -ENXIO : 0;
78d7f6ba
PB
8739 io_sq_thread_unpark(sqd);
8740
de75a3d3
PB
8741 if (ret < 0)
8742 goto err;
8743 if (attached)
5aa75ed5 8744 return 0;
aa06165d 8745
6c271ce2 8746 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 8747 int cpu = p->sq_thread_cpu;
6c271ce2 8748
917257da 8749 ret = -EINVAL;
f2a48dd0 8750 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
e8f98f24 8751 goto err_sqpoll;
37d1e2e3 8752 sqd->sq_cpu = cpu;
6c271ce2 8753 } else {
37d1e2e3 8754 sqd->sq_cpu = -1;
6c271ce2 8755 }
37d1e2e3
JA
8756
8757 sqd->task_pid = current->pid;
5c2469e0 8758 sqd->task_tgid = current->tgid;
46fe18b1
JA
8759 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8760 if (IS_ERR(tsk)) {
8761 ret = PTR_ERR(tsk);
e8f98f24 8762 goto err_sqpoll;
6c271ce2 8763 }
97a73a0f 8764
46fe18b1 8765 sqd->thread = tsk;
97a73a0f 8766 ret = io_uring_alloc_task_context(tsk, ctx);
46fe18b1 8767 wake_up_new_task(tsk);
0f212204
JA
8768 if (ret)
8769 goto err;
6c271ce2
JA
8770 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8771 /* Can't have SQ_AFF without SQPOLL */
8772 ret = -EINVAL;
8773 goto err;
8774 }
8775
2b188cc1 8776 return 0;
f2a48dd0
PB
8777err_sqpoll:
8778 complete(&ctx->sq_data->exited);
2b188cc1 8779err:
37d1e2e3 8780 io_sq_thread_finish(ctx);
2b188cc1
JA
8781 return ret;
8782}
8783
a087e2b5
BM
8784static inline void __io_unaccount_mem(struct user_struct *user,
8785 unsigned long nr_pages)
2b188cc1
JA
8786{
8787 atomic_long_sub(nr_pages, &user->locked_vm);
8788}
8789
a087e2b5
BM
8790static inline int __io_account_mem(struct user_struct *user,
8791 unsigned long nr_pages)
2b188cc1
JA
8792{
8793 unsigned long page_limit, cur_pages, new_pages;
8794
8795 /* Don't allow more pages than we can safely lock */
8796 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8797
8798 do {
8799 cur_pages = atomic_long_read(&user->locked_vm);
8800 new_pages = cur_pages + nr_pages;
8801 if (new_pages > page_limit)
8802 return -ENOMEM;
8803 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8804 new_pages) != cur_pages);
8805
8806 return 0;
8807}
8808
26bfa89e 8809static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 8810{
62e398be 8811 if (ctx->user)
a087e2b5 8812 __io_unaccount_mem(ctx->user, nr_pages);
30975825 8813
26bfa89e
JA
8814 if (ctx->mm_account)
8815 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
8816}
8817
26bfa89e 8818static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 8819{
30975825
BM
8820 int ret;
8821
62e398be 8822 if (ctx->user) {
30975825
BM
8823 ret = __io_account_mem(ctx->user, nr_pages);
8824 if (ret)
8825 return ret;
8826 }
8827
26bfa89e
JA
8828 if (ctx->mm_account)
8829 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
8830
8831 return 0;
8832}
8833
2b188cc1
JA
8834static void io_mem_free(void *ptr)
8835{
52e04ef4
MR
8836 struct page *page;
8837
8838 if (!ptr)
8839 return;
2b188cc1 8840
52e04ef4 8841 page = virt_to_head_page(ptr);
2b188cc1
JA
8842 if (put_page_testzero(page))
8843 free_compound_page(page);
8844}
8845
8846static void *io_mem_alloc(size_t size)
8847{
44736143 8848 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2b188cc1 8849
44736143 8850 return (void *) __get_free_pages(gfp, get_order(size));
2b188cc1
JA
8851}
8852
75b28aff
HV
8853static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8854 size_t *sq_offset)
8855{
8856 struct io_rings *rings;
8857 size_t off, sq_array_size;
8858
8859 off = struct_size(rings, cqes, cq_entries);
8860 if (off == SIZE_MAX)
8861 return SIZE_MAX;
8862
8863#ifdef CONFIG_SMP
8864 off = ALIGN(off, SMP_CACHE_BYTES);
8865 if (off == 0)
8866 return SIZE_MAX;
8867#endif
8868
b36200f5
DV
8869 if (sq_offset)
8870 *sq_offset = off;
8871
75b28aff
HV
8872 sq_array_size = array_size(sizeof(u32), sq_entries);
8873 if (sq_array_size == SIZE_MAX)
8874 return SIZE_MAX;
8875
8876 if (check_add_overflow(off, sq_array_size, &off))
8877 return SIZE_MAX;
8878
75b28aff
HV
8879 return off;
8880}
8881
41edf1a5 8882static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
7f61a1e9 8883{
41edf1a5 8884 struct io_mapped_ubuf *imu = *slot;
7f61a1e9
PB
8885 unsigned int i;
8886
6224843d
PB
8887 if (imu != ctx->dummy_ubuf) {
8888 for (i = 0; i < imu->nr_bvecs; i++)
8889 unpin_user_page(imu->bvec[i].bv_page);
8890 if (imu->acct_pages)
8891 io_unaccount_mem(ctx, imu->acct_pages);
8892 kvfree(imu);
8893 }
41edf1a5 8894 *slot = NULL;
7f61a1e9
PB
8895}
8896
bd54b6fe 8897static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
edafccee 8898{
634d00df
PB
8899 io_buffer_unmap(ctx, &prsrc->buf);
8900 prsrc->buf = NULL;
bd54b6fe 8901}
edafccee 8902
bd54b6fe
BM
8903static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8904{
8905 unsigned int i;
edafccee 8906
7f61a1e9
PB
8907 for (i = 0; i < ctx->nr_user_bufs; i++)
8908 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
edafccee 8909 kfree(ctx->user_bufs);
bb6659cc 8910 io_rsrc_data_free(ctx->buf_data);
edafccee 8911 ctx->user_bufs = NULL;
bd54b6fe 8912 ctx->buf_data = NULL;
edafccee 8913 ctx->nr_user_bufs = 0;
bd54b6fe
BM
8914}
8915
0a96bbe4 8916static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
edafccee 8917{
44aa2b86 8918 unsigned nr = ctx->nr_user_bufs;
bd54b6fe 8919 int ret;
edafccee 8920
bd54b6fe 8921 if (!ctx->buf_data)
edafccee
JA
8922 return -ENXIO;
8923
44aa2b86
PB
8924 /*
8925 * Quiesce may unlock ->uring_lock, and while it's not held
8926 * prevent new requests using the table.
8927 */
8928 ctx->nr_user_bufs = 0;
bd54b6fe 8929 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
44aa2b86 8930 ctx->nr_user_bufs = nr;
bd54b6fe
BM
8931 if (!ret)
8932 __io_sqe_buffers_unregister(ctx);
8933 return ret;
edafccee
JA
8934}
8935
8936static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8937 void __user *arg, unsigned index)
8938{
8939 struct iovec __user *src;
8940
8941#ifdef CONFIG_COMPAT
8942 if (ctx->compat) {
8943 struct compat_iovec __user *ciovs;
8944 struct compat_iovec ciov;
8945
8946 ciovs = (struct compat_iovec __user *) arg;
8947 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8948 return -EFAULT;
8949
d55e5f5b 8950 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
8951 dst->iov_len = ciov.iov_len;
8952 return 0;
8953 }
8954#endif
8955 src = (struct iovec __user *) arg;
8956 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8957 return -EFAULT;
8958 return 0;
8959}
8960
de293938
JA
8961/*
8962 * Not super efficient, but this is just a registration time. And we do cache
8963 * the last compound head, so generally we'll only do a full search if we don't
8964 * match that one.
8965 *
8966 * We check if the given compound head page has already been accounted, to
8967 * avoid double accounting it. This allows us to account the full size of the
8968 * page, not just the constituent pages of a huge page.
8969 */
8970static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8971 int nr_pages, struct page *hpage)
8972{
8973 int i, j;
8974
8975 /* check current page array */
8976 for (i = 0; i < nr_pages; i++) {
8977 if (!PageCompound(pages[i]))
8978 continue;
8979 if (compound_head(pages[i]) == hpage)
8980 return true;
8981 }
8982
8983 /* check previously registered pages */
8984 for (i = 0; i < ctx->nr_user_bufs; i++) {
41edf1a5 8985 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
de293938
JA
8986
8987 for (j = 0; j < imu->nr_bvecs; j++) {
8988 if (!PageCompound(imu->bvec[j].bv_page))
8989 continue;
8990 if (compound_head(imu->bvec[j].bv_page) == hpage)
8991 return true;
8992 }
8993 }
8994
8995 return false;
8996}
8997
8998static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8999 int nr_pages, struct io_mapped_ubuf *imu,
9000 struct page **last_hpage)
9001{
9002 int i, ret;
9003
216e5835 9004 imu->acct_pages = 0;
de293938
JA
9005 for (i = 0; i < nr_pages; i++) {
9006 if (!PageCompound(pages[i])) {
9007 imu->acct_pages++;
9008 } else {
9009 struct page *hpage;
9010
9011 hpage = compound_head(pages[i]);
9012 if (hpage == *last_hpage)
9013 continue;
9014 *last_hpage = hpage;
9015 if (headpage_already_acct(ctx, pages, i, hpage))
9016 continue;
9017 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
9018 }
9019 }
9020
9021 if (!imu->acct_pages)
9022 return 0;
9023
26bfa89e 9024 ret = io_account_mem(ctx, imu->acct_pages);
de293938
JA
9025 if (ret)
9026 imu->acct_pages = 0;
9027 return ret;
9028}
9029
0a96bbe4 9030static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
41edf1a5 9031 struct io_mapped_ubuf **pimu,
0a96bbe4 9032 struct page **last_hpage)
edafccee 9033{
41edf1a5 9034 struct io_mapped_ubuf *imu = NULL;
edafccee
JA
9035 struct vm_area_struct **vmas = NULL;
9036 struct page **pages = NULL;
0a96bbe4
BM
9037 unsigned long off, start, end, ubuf;
9038 size_t size;
9039 int ret, pret, nr_pages, i;
9040
6224843d
PB
9041 if (!iov->iov_base) {
9042 *pimu = ctx->dummy_ubuf;
9043 return 0;
9044 }
9045
0a96bbe4
BM
9046 ubuf = (unsigned long) iov->iov_base;
9047 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
9048 start = ubuf >> PAGE_SHIFT;
9049 nr_pages = end - start;
9050
41edf1a5 9051 *pimu = NULL;
0a96bbe4
BM
9052 ret = -ENOMEM;
9053
9054 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
9055 if (!pages)
9056 goto done;
9057
9058 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
9059 GFP_KERNEL);
9060 if (!vmas)
9061 goto done;
edafccee 9062
41edf1a5 9063 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
a2b4198c 9064 if (!imu)
0a96bbe4
BM
9065 goto done;
9066
9067 ret = 0;
9068 mmap_read_lock(current->mm);
9069 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
9070 pages, vmas);
9071 if (pret == nr_pages) {
9072 /* don't support file backed memory */
9073 for (i = 0; i < nr_pages; i++) {
9074 struct vm_area_struct *vma = vmas[i];
9075
40dad765
PB
9076 if (vma_is_shmem(vma))
9077 continue;
0a96bbe4
BM
9078 if (vma->vm_file &&
9079 !is_file_hugepages(vma->vm_file)) {
9080 ret = -EOPNOTSUPP;
9081 break;
9082 }
9083 }
9084 } else {
9085 ret = pret < 0 ? pret : -EFAULT;
9086 }
9087 mmap_read_unlock(current->mm);
9088 if (ret) {
9089 /*
9090 * if we did partial map, or found file backed vmas,
9091 * release any pages we did get
9092 */
9093 if (pret > 0)
9094 unpin_user_pages(pages, pret);
0a96bbe4
BM
9095 goto done;
9096 }
9097
9098 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
9099 if (ret) {
9100 unpin_user_pages(pages, pret);
0a96bbe4
BM
9101 goto done;
9102 }
9103
9104 off = ubuf & ~PAGE_MASK;
9105 size = iov->iov_len;
9106 for (i = 0; i < nr_pages; i++) {
9107 size_t vec_len;
9108
9109 vec_len = min_t(size_t, size, PAGE_SIZE - off);
9110 imu->bvec[i].bv_page = pages[i];
9111 imu->bvec[i].bv_len = vec_len;
9112 imu->bvec[i].bv_offset = off;
9113 off = 0;
9114 size -= vec_len;
9115 }
9116 /* store original address for later verification */
9117 imu->ubuf = ubuf;
4751f53d 9118 imu->ubuf_end = ubuf + iov->iov_len;
0a96bbe4 9119 imu->nr_bvecs = nr_pages;
41edf1a5 9120 *pimu = imu;
0a96bbe4
BM
9121 ret = 0;
9122done:
41edf1a5
PB
9123 if (ret)
9124 kvfree(imu);
0a96bbe4
BM
9125 kvfree(pages);
9126 kvfree(vmas);
9127 return ret;
9128}
9129
2b358604 9130static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
0a96bbe4 9131{
87094465
PB
9132 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
9133 return ctx->user_bufs ? 0 : -ENOMEM;
2b358604 9134}
edafccee 9135
2b358604
BM
9136static int io_buffer_validate(struct iovec *iov)
9137{
50e96989
PB
9138 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
9139
2b358604
BM
9140 /*
9141 * Don't impose further limits on the size and buffer
9142 * constraints here, we'll -EINVAL later when IO is
9143 * submitted if they are wrong.
9144 */
6224843d
PB
9145 if (!iov->iov_base)
9146 return iov->iov_len ? -EFAULT : 0;
9147 if (!iov->iov_len)
2b358604 9148 return -EFAULT;
edafccee 9149
2b358604
BM
9150 /* arbitrary limit, but we need something */
9151 if (iov->iov_len > SZ_1G)
9152 return -EFAULT;
edafccee 9153
50e96989
PB
9154 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
9155 return -EOVERFLOW;
9156
2b358604
BM
9157 return 0;
9158}
edafccee 9159
2b358604 9160static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
634d00df 9161 unsigned int nr_args, u64 __user *tags)
2b358604 9162{
bd54b6fe
BM
9163 struct page *last_hpage = NULL;
9164 struct io_rsrc_data *data;
2b358604
BM
9165 int i, ret;
9166 struct iovec iov;
edafccee 9167
87094465
PB
9168 if (ctx->user_bufs)
9169 return -EBUSY;
489809e2 9170 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
87094465 9171 return -EINVAL;
bd54b6fe 9172 ret = io_rsrc_node_switch_start(ctx);
2b358604
BM
9173 if (ret)
9174 return ret;
d878c816
PB
9175 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
9176 if (ret)
9177 return ret;
bd54b6fe
BM
9178 ret = io_buffers_map_alloc(ctx, nr_args);
9179 if (ret) {
bb6659cc 9180 io_rsrc_data_free(data);
bd54b6fe
BM
9181 return ret;
9182 }
edafccee 9183
87094465 9184 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
edafccee
JA
9185 ret = io_copy_iov(ctx, &iov, arg, i);
9186 if (ret)
0a96bbe4 9187 break;
2b358604
BM
9188 ret = io_buffer_validate(&iov);
9189 if (ret)
0a96bbe4 9190 break;
2d091d62 9191 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
cf3770e7
CIK
9192 ret = -EINVAL;
9193 break;
9194 }
edafccee 9195
41edf1a5
PB
9196 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
9197 &last_hpage);
0a96bbe4
BM
9198 if (ret)
9199 break;
edafccee 9200 }
0a96bbe4 9201
bd54b6fe 9202 WARN_ON_ONCE(ctx->buf_data);
0a96bbe4 9203
bd54b6fe
BM
9204 ctx->buf_data = data;
9205 if (ret)
9206 __io_sqe_buffers_unregister(ctx);
9207 else
9208 io_rsrc_node_switch(ctx, NULL);
edafccee
JA
9209 return ret;
9210}
9211
634d00df
PB
9212static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
9213 struct io_uring_rsrc_update2 *up,
9214 unsigned int nr_args)
9215{
9216 u64 __user *tags = u64_to_user_ptr(up->tags);
9217 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
634d00df
PB
9218 struct page *last_hpage = NULL;
9219 bool needs_switch = false;
9220 __u32 done;
9221 int i, err;
9222
9223 if (!ctx->buf_data)
9224 return -ENXIO;
9225 if (up->offset + nr_args > ctx->nr_user_bufs)
9226 return -EINVAL;
9227
9228 for (done = 0; done < nr_args; done++) {
0b8c0e7c
PB
9229 struct io_mapped_ubuf *imu;
9230 int offset = up->offset + done;
634d00df
PB
9231 u64 tag = 0;
9232
9233 err = io_copy_iov(ctx, &iov, iovs, done);
9234 if (err)
9235 break;
9236 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
9237 err = -EFAULT;
9238 break;
9239 }
0b8c0e7c
PB
9240 err = io_buffer_validate(&iov);
9241 if (err)
9242 break;
cf3770e7
CIK
9243 if (!iov.iov_base && tag) {
9244 err = -EINVAL;
9245 break;
9246 }
0b8c0e7c
PB
9247 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
9248 if (err)
9249 break;
634d00df 9250
0b8c0e7c 9251 i = array_index_nospec(offset, ctx->nr_user_bufs);
6224843d 9252 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
22a99dcb 9253 err = io_queue_rsrc_removal(ctx->buf_data, i,
0b8c0e7c
PB
9254 ctx->rsrc_node, ctx->user_bufs[i]);
9255 if (unlikely(err)) {
9256 io_buffer_unmap(ctx, &imu);
634d00df 9257 break;
0b8c0e7c 9258 }
634d00df
PB
9259 ctx->user_bufs[i] = NULL;
9260 needs_switch = true;
9261 }
9262
0b8c0e7c 9263 ctx->user_bufs[i] = imu;
2d091d62 9264 *io_get_tag_slot(ctx->buf_data, offset) = tag;
634d00df
PB
9265 }
9266
9267 if (needs_switch)
9268 io_rsrc_node_switch(ctx, ctx->buf_data);
9269 return done ? done : err;
9270}
9271
9b402849
JA
9272static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
9273{
9274 __s32 __user *fds = arg;
9275 int fd;
9276
9277 if (ctx->cq_ev_fd)
9278 return -EBUSY;
9279
9280 if (copy_from_user(&fd, fds, sizeof(*fds)))
9281 return -EFAULT;
9282
9283 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
9284 if (IS_ERR(ctx->cq_ev_fd)) {
9285 int ret = PTR_ERR(ctx->cq_ev_fd);
fe7e3257 9286
9b402849
JA
9287 ctx->cq_ev_fd = NULL;
9288 return ret;
9289 }
9290
9291 return 0;
9292}
9293
9294static int io_eventfd_unregister(struct io_ring_ctx *ctx)
9295{
9296 if (ctx->cq_ev_fd) {
9297 eventfd_ctx_put(ctx->cq_ev_fd);
9298 ctx->cq_ev_fd = NULL;
9299 return 0;
9300 }
9301
9302 return -ENXIO;
9303}
9304
5a2e745d
JA
9305static void io_destroy_buffers(struct io_ring_ctx *ctx)
9306{
9e15c3a0
JA
9307 struct io_buffer *buf;
9308 unsigned long index;
9309
07edfd19 9310 xa_for_each(&ctx->io_buffers, index, buf)
9e15c3a0 9311 __io_remove_buffers(ctx, buf, index, -1U);
5a2e745d
JA
9312}
9313
7255834e 9314static void io_req_cache_free(struct list_head *list)
1b4c351f 9315{
68e68ee6 9316 struct io_kiocb *req, *nxt;
1b4c351f 9317
bb943b82
PB
9318 list_for_each_entry_safe(req, nxt, list, inflight_entry) {
9319 list_del(&req->inflight_entry);
1b4c351f
JA
9320 kmem_cache_free(req_cachep, req);
9321 }
9322}
9323
4010fec4 9324static void io_req_caches_free(struct io_ring_ctx *ctx)
2b188cc1 9325{
cd0ca2e0 9326 struct io_submit_state *state = &ctx->submit_state;
bf019da7 9327
9a4fdbd8
JA
9328 mutex_lock(&ctx->uring_lock);
9329
cd0ca2e0
PB
9330 if (state->free_reqs) {
9331 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9332 state->free_reqs = 0;
8e5c66c4 9333 }
9a4fdbd8 9334
cd0ca2e0
PB
9335 io_flush_cached_locked_reqs(ctx, state);
9336 io_req_cache_free(&state->free_list);
9a4fdbd8
JA
9337 mutex_unlock(&ctx->uring_lock);
9338}
9339
43597aac 9340static void io_wait_rsrc_data(struct io_rsrc_data *data)
2b188cc1 9341{
43597aac 9342 if (data && !atomic_dec_and_test(&data->refs))
bd54b6fe 9343 wait_for_completion(&data->done);
bd54b6fe 9344}
04fc6c80 9345
2b188cc1
JA
9346static void io_ring_ctx_free(struct io_ring_ctx *ctx)
9347{
37d1e2e3 9348 io_sq_thread_finish(ctx);
2aede0e4 9349
37d1e2e3 9350 if (ctx->mm_account) {
2aede0e4
JA
9351 mmdrop(ctx->mm_account);
9352 ctx->mm_account = NULL;
30975825 9353 }
def596e9 9354
43597aac
PB
9355 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
9356 io_wait_rsrc_data(ctx->buf_data);
9357 io_wait_rsrc_data(ctx->file_data);
9358
8bad28d8 9359 mutex_lock(&ctx->uring_lock);
43597aac 9360 if (ctx->buf_data)
bd54b6fe 9361 __io_sqe_buffers_unregister(ctx);
43597aac 9362 if (ctx->file_data)
08480400 9363 __io_sqe_files_unregister(ctx);
c4ea060e
PB
9364 if (ctx->rings)
9365 __io_cqring_overflow_flush(ctx, true);
8bad28d8 9366 mutex_unlock(&ctx->uring_lock);
9b402849 9367 io_eventfd_unregister(ctx);
5a2e745d 9368 io_destroy_buffers(ctx);
07db298a
PB
9369 if (ctx->sq_creds)
9370 put_cred(ctx->sq_creds);
def596e9 9371
a7f0ed5a
PB
9372 /* there are no registered resources left, nobody uses it */
9373 if (ctx->rsrc_node)
9374 io_rsrc_node_destroy(ctx->rsrc_node);
8dd03afe 9375 if (ctx->rsrc_backup_node)
b895c9a6 9376 io_rsrc_node_destroy(ctx->rsrc_backup_node);
a7f0ed5a
PB
9377 flush_delayed_work(&ctx->rsrc_put_work);
9378
9379 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
9380 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
def596e9 9381
2b188cc1 9382#if defined(CONFIG_UNIX)
355e8d26
EB
9383 if (ctx->ring_sock) {
9384 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 9385 sock_release(ctx->ring_sock);
355e8d26 9386 }
2b188cc1 9387#endif
ef9dd637 9388 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2b188cc1 9389
75b28aff 9390 io_mem_free(ctx->rings);
2b188cc1 9391 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
9392
9393 percpu_ref_exit(&ctx->refs);
2b188cc1 9394 free_uid(ctx->user);
4010fec4 9395 io_req_caches_free(ctx);
e941894e
JA
9396 if (ctx->hash_map)
9397 io_wq_put_hash(ctx->hash_map);
78076bb6 9398 kfree(ctx->cancel_hash);
6224843d 9399 kfree(ctx->dummy_ubuf);
2b188cc1
JA
9400 kfree(ctx);
9401}
9402
9403static __poll_t io_uring_poll(struct file *file, poll_table *wait)
9404{
9405 struct io_ring_ctx *ctx = file->private_data;
9406 __poll_t mask = 0;
9407
311997b3 9408 poll_wait(file, &ctx->poll_wait, wait);
4f7067c3
SB
9409 /*
9410 * synchronizes with barrier from wq_has_sleeper call in
9411 * io_commit_cqring
9412 */
2b188cc1 9413 smp_rmb();
90554200 9414 if (!io_sqring_full(ctx))
2b188cc1 9415 mask |= EPOLLOUT | EPOLLWRNORM;
ed670c3f
HX
9416
9417 /*
9418 * Don't flush cqring overflow list here, just do a simple check.
9419 * Otherwise there could possible be ABBA deadlock:
9420 * CPU0 CPU1
9421 * ---- ----
9422 * lock(&ctx->uring_lock);
9423 * lock(&ep->mtx);
9424 * lock(&ctx->uring_lock);
9425 * lock(&ep->mtx);
9426 *
9427 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
9428 * pushs them to do the flush.
9429 */
5ed7a37d 9430 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
2b188cc1
JA
9431 mask |= EPOLLIN | EPOLLRDNORM;
9432
9433 return mask;
9434}
9435
0bead8cd 9436static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
071698e1 9437{
4379bf8b 9438 const struct cred *creds;
071698e1 9439
61cf9370 9440 creds = xa_erase(&ctx->personalities, id);
4379bf8b
JA
9441 if (creds) {
9442 put_cred(creds);
0bead8cd 9443 return 0;
1e6fa521 9444 }
0bead8cd
YD
9445
9446 return -EINVAL;
9447}
9448
d56d938b
PB
9449struct io_tctx_exit {
9450 struct callback_head task_work;
9451 struct completion completion;
baf186c4 9452 struct io_ring_ctx *ctx;
d56d938b
PB
9453};
9454
9455static void io_tctx_exit_cb(struct callback_head *cb)
9456{
9457 struct io_uring_task *tctx = current->io_uring;
9458 struct io_tctx_exit *work;
9459
9460 work = container_of(cb, struct io_tctx_exit, task_work);
9461 /*
9462 * When @in_idle, we're in cancellation and it's racy to remove the
9463 * node. It'll be removed by the end of cancellation, just ignore it.
9464 */
9465 if (!atomic_read(&tctx->in_idle))
eef51daa 9466 io_uring_del_tctx_node((unsigned long)work->ctx);
d56d938b
PB
9467 complete(&work->completion);
9468}
9469
28090c13
PB
9470static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
9471{
9472 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
9473
9474 return req->ctx == data;
9475}
9476
85faa7b8
JA
9477static void io_ring_exit_work(struct work_struct *work)
9478{
d56d938b 9479 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
b5bb3a24 9480 unsigned long timeout = jiffies + HZ * 60 * 5;
58d3be2c 9481 unsigned long interval = HZ / 20;
d56d938b
PB
9482 struct io_tctx_exit exit;
9483 struct io_tctx_node *node;
9484 int ret;
85faa7b8 9485
56952e91
JA
9486 /*
9487 * If we're doing polled IO and end up having requests being
9488 * submitted async (out-of-line), then completions can come in while
9489 * we're waiting for refs to drop. We need to reap these manually,
9490 * as nobody else will be looking for them.
9491 */
b2edc0a7 9492 do {
3dd0c97a 9493 io_uring_try_cancel_requests(ctx, NULL, true);
28090c13
PB
9494 if (ctx->sq_data) {
9495 struct io_sq_data *sqd = ctx->sq_data;
9496 struct task_struct *tsk;
9497
9498 io_sq_thread_park(sqd);
9499 tsk = sqd->thread;
9500 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
9501 io_wq_cancel_cb(tsk->io_uring->io_wq,
9502 io_cancel_ctx_cb, ctx, true);
9503 io_sq_thread_unpark(sqd);
9504 }
b5bb3a24 9505
58d3be2c
PB
9506 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
9507 /* there is little hope left, don't run it too often */
9508 interval = HZ * 60;
9509 }
9510 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
d56d938b 9511
7f00651a
PB
9512 init_completion(&exit.completion);
9513 init_task_work(&exit.task_work, io_tctx_exit_cb);
9514 exit.ctx = ctx;
89b5066e
PB
9515 /*
9516 * Some may use context even when all refs and requests have been put,
9517 * and they are free to do so while still holding uring_lock or
5b0a6acc 9518 * completion_lock, see io_req_task_submit(). Apart from other work,
89b5066e
PB
9519 * this lock/unlock section also waits them to finish.
9520 */
d56d938b
PB
9521 mutex_lock(&ctx->uring_lock);
9522 while (!list_empty(&ctx->tctx_list)) {
b5bb3a24
PB
9523 WARN_ON_ONCE(time_after(jiffies, timeout));
9524
d56d938b
PB
9525 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
9526 ctx_node);
7f00651a
PB
9527 /* don't spin on a single task if cancellation failed */
9528 list_rotate_left(&ctx->tctx_list);
d56d938b
PB
9529 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
9530 if (WARN_ON_ONCE(ret))
9531 continue;
9532 wake_up_process(node->task);
9533
9534 mutex_unlock(&ctx->uring_lock);
9535 wait_for_completion(&exit.completion);
d56d938b
PB
9536 mutex_lock(&ctx->uring_lock);
9537 }
9538 mutex_unlock(&ctx->uring_lock);
79ebeaee
JA
9539 spin_lock(&ctx->completion_lock);
9540 spin_unlock(&ctx->completion_lock);
d56d938b 9541
85faa7b8
JA
9542 io_ring_ctx_free(ctx);
9543}
9544
80c4cbdb
PB
9545/* Returns true if we found and killed one or more timeouts */
9546static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
3dd0c97a 9547 bool cancel_all)
80c4cbdb
PB
9548{
9549 struct io_kiocb *req, *tmp;
9550 int canceled = 0;
9551
79ebeaee
JA
9552 spin_lock(&ctx->completion_lock);
9553 spin_lock_irq(&ctx->timeout_lock);
80c4cbdb 9554 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
3dd0c97a 9555 if (io_match_task(req, tsk, cancel_all)) {
80c4cbdb
PB
9556 io_kill_timeout(req, -ECANCELED);
9557 canceled++;
9558 }
9559 }
79ebeaee 9560 spin_unlock_irq(&ctx->timeout_lock);
51520426
PB
9561 if (canceled != 0)
9562 io_commit_cqring(ctx);
79ebeaee 9563 spin_unlock(&ctx->completion_lock);
80c4cbdb
PB
9564 if (canceled != 0)
9565 io_cqring_ev_posted(ctx);
9566 return canceled != 0;
9567}
9568
2b188cc1
JA
9569static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
9570{
61cf9370
MWO
9571 unsigned long index;
9572 struct creds *creds;
9573
2b188cc1
JA
9574 mutex_lock(&ctx->uring_lock);
9575 percpu_ref_kill(&ctx->refs);
634578f8 9576 if (ctx->rings)
6c2450ae 9577 __io_cqring_overflow_flush(ctx, true);
61cf9370
MWO
9578 xa_for_each(&ctx->personalities, index, creds)
9579 io_unregister_personality(ctx, index);
2b188cc1
JA
9580 mutex_unlock(&ctx->uring_lock);
9581
3dd0c97a
PB
9582 io_kill_timeouts(ctx, NULL, true);
9583 io_poll_remove_all(ctx, NULL, true);
561fb04a 9584
15dff286 9585 /* if we failed setting up the ctx, we might not have any rings */
b2edc0a7 9586 io_iopoll_try_reap_events(ctx);
309fc03a 9587
85faa7b8 9588 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
9589 /*
9590 * Use system_unbound_wq to avoid spawning tons of event kworkers
9591 * if we're exiting a ton of rings at the same time. It just adds
9592 * noise and overhead, there's no discernable change in runtime
9593 * over using system_wq.
9594 */
9595 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
9596}
9597
9598static int io_uring_release(struct inode *inode, struct file *file)
9599{
9600 struct io_ring_ctx *ctx = file->private_data;
9601
9602 file->private_data = NULL;
9603 io_ring_ctx_wait_and_kill(ctx);
9604 return 0;
9605}
9606
f6edbabb
PB
9607struct io_task_cancel {
9608 struct task_struct *task;
3dd0c97a 9609 bool all;
f6edbabb 9610};
f254ac04 9611
f6edbabb 9612static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
b711d4ea 9613{
9a472ef7 9614 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f6edbabb 9615 struct io_task_cancel *cancel = data;
9a472ef7 9616
f0baed8e 9617 return io_match_task_safe(req, cancel->task, cancel->all);
b711d4ea
JA
9618}
9619
e1915f76 9620static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
3dd0c97a 9621 struct task_struct *task, bool cancel_all)
b7ddce3c 9622{
e1915f76 9623 struct io_defer_entry *de;
b7ddce3c
PB
9624 LIST_HEAD(list);
9625
79ebeaee 9626 spin_lock(&ctx->completion_lock);
b7ddce3c 9627 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
f0baed8e 9628 if (io_match_task_safe(de->req, task, cancel_all)) {
b7ddce3c
PB
9629 list_cut_position(&list, &ctx->defer_list, &de->list);
9630 break;
9631 }
9632 }
79ebeaee 9633 spin_unlock(&ctx->completion_lock);
e1915f76
PB
9634 if (list_empty(&list))
9635 return false;
b7ddce3c
PB
9636
9637 while (!list_empty(&list)) {
9638 de = list_first_entry(&list, struct io_defer_entry, list);
9639 list_del_init(&de->list);
f41db273 9640 io_req_complete_failed(de->req, -ECANCELED);
b7ddce3c
PB
9641 kfree(de);
9642 }
e1915f76 9643 return true;
b7ddce3c
PB
9644}
9645
1b00764f
PB
9646static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
9647{
9648 struct io_tctx_node *node;
9649 enum io_wq_cancel cret;
9650 bool ret = false;
9651
9652 mutex_lock(&ctx->uring_lock);
9653 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
9654 struct io_uring_task *tctx = node->task->io_uring;
9655
9656 /*
9657 * io_wq will stay alive while we hold uring_lock, because it's
9658 * killed after ctx nodes, which requires to take the lock.
9659 */
9660 if (!tctx || !tctx->io_wq)
9661 continue;
9662 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
9663 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9664 }
9665 mutex_unlock(&ctx->uring_lock);
9666
9667 return ret;
9668}
9669
9936c7c2
PB
9670static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
9671 struct task_struct *task,
3dd0c97a 9672 bool cancel_all)
9936c7c2 9673{
3dd0c97a 9674 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
1b00764f 9675 struct io_uring_task *tctx = task ? task->io_uring : NULL;
9936c7c2
PB
9676
9677 while (1) {
9678 enum io_wq_cancel cret;
9679 bool ret = false;
9680
1b00764f
PB
9681 if (!task) {
9682 ret |= io_uring_try_cancel_iowq(ctx);
9683 } else if (tctx && tctx->io_wq) {
9684 /*
9685 * Cancels requests of all rings, not only @ctx, but
9686 * it's fine as the task is in exit/exec.
9687 */
5aa75ed5 9688 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
9936c7c2
PB
9689 &cancel, true);
9690 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9691 }
9692
9693 /* SQPOLL thread does its own polling */
3dd0c97a 9694 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
d052d1d6 9695 (ctx->sq_data && ctx->sq_data->thread == current)) {
9936c7c2
PB
9696 while (!list_empty_careful(&ctx->iopoll_list)) {
9697 io_iopoll_try_reap_events(ctx);
9698 ret = true;
9699 }
9700 }
9701
3dd0c97a
PB
9702 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9703 ret |= io_poll_remove_all(ctx, task, cancel_all);
9704 ret |= io_kill_timeouts(ctx, task, cancel_all);
e5dc480d
PB
9705 if (task)
9706 ret |= io_run_task_work();
9936c7c2
PB
9707 if (!ret)
9708 break;
9709 cond_resched();
9710 }
9711}
9712
eef51daa 9713static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
0f212204 9714{
236434c3 9715 struct io_uring_task *tctx = current->io_uring;
13bf43f5 9716 struct io_tctx_node *node;
a528b04e 9717 int ret;
236434c3
MWO
9718
9719 if (unlikely(!tctx)) {
5aa75ed5 9720 ret = io_uring_alloc_task_context(current, ctx);
0f212204
JA
9721 if (unlikely(ret))
9722 return ret;
e139a1ec 9723
236434c3 9724 tctx = current->io_uring;
e139a1ec
PB
9725 if (ctx->iowq_limits_set) {
9726 unsigned int limits[2] = { ctx->iowq_limits[0],
9727 ctx->iowq_limits[1], };
9728
9729 ret = io_wq_max_workers(tctx->io_wq, limits);
9730 if (ret)
9731 return ret;
9732 }
0f212204 9733 }
cf27f3b1
PB
9734 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9735 node = kmalloc(sizeof(*node), GFP_KERNEL);
9736 if (!node)
9737 return -ENOMEM;
9738 node->ctx = ctx;
9739 node->task = current;
13bf43f5 9740
cf27f3b1
PB
9741 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9742 node, GFP_KERNEL));
9743 if (ret) {
9744 kfree(node);
9745 return ret;
0f212204 9746 }
cf27f3b1
PB
9747
9748 mutex_lock(&ctx->uring_lock);
9749 list_add(&node->ctx_node, &ctx->tctx_list);
9750 mutex_unlock(&ctx->uring_lock);
0f212204 9751 }
cf27f3b1 9752 tctx->last = ctx;
0f212204
JA
9753 return 0;
9754}
9755
cf27f3b1
PB
9756/*
9757 * Note that this task has used io_uring. We use it for cancelation purposes.
9758 */
eef51daa 9759static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
cf27f3b1
PB
9760{
9761 struct io_uring_task *tctx = current->io_uring;
9762
9763 if (likely(tctx && tctx->last == ctx))
9764 return 0;
eef51daa 9765 return __io_uring_add_tctx_node(ctx);
cf27f3b1
PB
9766}
9767
0f212204
JA
9768/*
9769 * Remove this io_uring_file -> task mapping.
9770 */
eef51daa 9771static void io_uring_del_tctx_node(unsigned long index)
0f212204
JA
9772{
9773 struct io_uring_task *tctx = current->io_uring;
13bf43f5 9774 struct io_tctx_node *node;
2941267b 9775
eebd2e37
PB
9776 if (!tctx)
9777 return;
13bf43f5
PB
9778 node = xa_erase(&tctx->xa, index);
9779 if (!node)
2941267b 9780 return;
0f212204 9781
13bf43f5
PB
9782 WARN_ON_ONCE(current != node->task);
9783 WARN_ON_ONCE(list_empty(&node->ctx_node));
9784
9785 mutex_lock(&node->ctx->uring_lock);
9786 list_del(&node->ctx_node);
9787 mutex_unlock(&node->ctx->uring_lock);
9788
baf186c4 9789 if (tctx->last == node->ctx)
0f212204 9790 tctx->last = NULL;
13bf43f5 9791 kfree(node);
0f212204
JA
9792}
9793
8452d4a6 9794static void io_uring_clean_tctx(struct io_uring_task *tctx)
de7f1d9e 9795{
ba5ef6dc 9796 struct io_wq *wq = tctx->io_wq;
13bf43f5 9797 struct io_tctx_node *node;
de7f1d9e
PB
9798 unsigned long index;
9799
8bab4c09 9800 xa_for_each(&tctx->xa, index, node) {
eef51daa 9801 io_uring_del_tctx_node(index);
8bab4c09
JA
9802 cond_resched();
9803 }
b16ef427
ME
9804 if (wq) {
9805 /*
9806 * Must be after io_uring_del_task_file() (removes nodes under
9807 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
9808 */
ba5ef6dc 9809 io_wq_put_and_exit(wq);
dadebc35 9810 tctx->io_wq = NULL;
b16ef427 9811 }
de7f1d9e
PB
9812}
9813
3f48cf18 9814static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
521d6a73 9815{
3f48cf18
PB
9816 if (tracked)
9817 return atomic_read(&tctx->inflight_tracked);
521d6a73
PB
9818 return percpu_counter_sum(&tctx->inflight);
9819}
9820
78cc687b
PB
9821/*
9822 * Find any io_uring ctx that this task has registered or done IO on, and cancel
b16590fd 9823 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
78cc687b
PB
9824 */
9825static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
0e9ddb39 9826{
521d6a73 9827 struct io_uring_task *tctx = current->io_uring;
734551df 9828 struct io_ring_ctx *ctx;
0e9ddb39
PB
9829 s64 inflight;
9830 DEFINE_WAIT(wait);
fdaf083c 9831
78cc687b
PB
9832 WARN_ON_ONCE(sqd && sqd->thread != current);
9833
6d042ffb
PO
9834 if (!current->io_uring)
9835 return;
17a91051
PB
9836 if (tctx->io_wq)
9837 io_wq_exit_start(tctx->io_wq);
9838
0e9ddb39
PB
9839 atomic_inc(&tctx->in_idle);
9840 do {
e9dbe221 9841 io_uring_drop_tctx_refs(current);
0e9ddb39 9842 /* read completions before cancelations */
78cc687b 9843 inflight = tctx_inflight(tctx, !cancel_all);
0e9ddb39
PB
9844 if (!inflight)
9845 break;
fdaf083c 9846
78cc687b
PB
9847 if (!sqd) {
9848 struct io_tctx_node *node;
9849 unsigned long index;
0f212204 9850
78cc687b
PB
9851 xa_for_each(&tctx->xa, index, node) {
9852 /* sqpoll task will cancel all its requests */
9853 if (node->ctx->sq_data)
9854 continue;
9855 io_uring_try_cancel_requests(node->ctx, current,
9856 cancel_all);
9857 }
9858 } else {
9859 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9860 io_uring_try_cancel_requests(ctx, current,
9861 cancel_all);
9862 }
17a91051 9863
b16590fd
JA
9864 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
9865 io_run_task_work();
e9dbe221 9866 io_uring_drop_tctx_refs(current);
b16590fd 9867
0f212204 9868 /*
a1bb3cd5
PB
9869 * If we've seen completions, retry without waiting. This
9870 * avoids a race where a completion comes in before we did
9871 * prepare_to_wait().
0f212204 9872 */
3dd0c97a 9873 if (inflight == tctx_inflight(tctx, !cancel_all))
a1bb3cd5 9874 schedule();
f57555ed 9875 finish_wait(&tctx->wait, &wait);
d8a6df10 9876 } while (1);
de7f1d9e 9877
8452d4a6 9878 io_uring_clean_tctx(tctx);
3dd0c97a 9879 if (cancel_all) {
abdebba9
PB
9880 /*
9881 * We shouldn't run task_works after cancel, so just leave
9882 * ->in_idle set for normal exit.
9883 */
9884 atomic_dec(&tctx->in_idle);
3f48cf18
PB
9885 /* for exec all current's requests should be gone, kill tctx */
9886 __io_uring_free(current);
9887 }
44e728b8
PB
9888}
9889
f552a27a 9890void __io_uring_cancel(bool cancel_all)
78cc687b 9891{
f552a27a 9892 io_uring_cancel_generic(cancel_all, NULL);
78cc687b
PB
9893}
9894
6c5c240e
RP
9895static void *io_uring_validate_mmap_request(struct file *file,
9896 loff_t pgoff, size_t sz)
2b188cc1 9897{
2b188cc1 9898 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 9899 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
9900 struct page *page;
9901 void *ptr;
9902
9903 switch (offset) {
9904 case IORING_OFF_SQ_RING:
75b28aff
HV
9905 case IORING_OFF_CQ_RING:
9906 ptr = ctx->rings;
2b188cc1
JA
9907 break;
9908 case IORING_OFF_SQES:
9909 ptr = ctx->sq_sqes;
9910 break;
2b188cc1 9911 default:
6c5c240e 9912 return ERR_PTR(-EINVAL);
2b188cc1
JA
9913 }
9914
9915 page = virt_to_head_page(ptr);
a50b854e 9916 if (sz > page_size(page))
6c5c240e
RP
9917 return ERR_PTR(-EINVAL);
9918
9919 return ptr;
9920}
9921
9922#ifdef CONFIG_MMU
9923
9924static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9925{
9926 size_t sz = vma->vm_end - vma->vm_start;
9927 unsigned long pfn;
9928 void *ptr;
9929
9930 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9931 if (IS_ERR(ptr))
9932 return PTR_ERR(ptr);
2b188cc1
JA
9933
9934 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9935 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9936}
9937
6c5c240e
RP
9938#else /* !CONFIG_MMU */
9939
9940static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9941{
9942 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9943}
9944
9945static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9946{
9947 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9948}
9949
9950static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9951 unsigned long addr, unsigned long len,
9952 unsigned long pgoff, unsigned long flags)
9953{
9954 void *ptr;
9955
9956 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9957 if (IS_ERR(ptr))
9958 return PTR_ERR(ptr);
9959
9960 return (unsigned long) ptr;
9961}
9962
9963#endif /* !CONFIG_MMU */
9964
d9d05217 9965static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
90554200
JA
9966{
9967 DEFINE_WAIT(wait);
9968
9969 do {
9970 if (!io_sqring_full(ctx))
9971 break;
90554200
JA
9972 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9973
9974 if (!io_sqring_full(ctx))
9975 break;
90554200
JA
9976 schedule();
9977 } while (!signal_pending(current));
9978
9979 finish_wait(&ctx->sqo_sq_wait, &wait);
5199328a 9980 return 0;
90554200
JA
9981}
9982
c73ebb68
HX
9983static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9984 struct __kernel_timespec __user **ts,
9985 const sigset_t __user **sig)
9986{
9987 struct io_uring_getevents_arg arg;
9988
9989 /*
9990 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9991 * is just a pointer to the sigset_t.
9992 */
9993 if (!(flags & IORING_ENTER_EXT_ARG)) {
9994 *sig = (const sigset_t __user *) argp;
9995 *ts = NULL;
9996 return 0;
9997 }
9998
9999 /*
10000 * EXT_ARG is set - ensure we agree on the size of it and copy in our
10001 * timespec and sigset_t pointers if good.
10002 */
10003 if (*argsz != sizeof(arg))
10004 return -EINVAL;
10005 if (copy_from_user(&arg, argp, sizeof(arg)))
10006 return -EFAULT;
02a84aaf
DY
10007 if (arg.pad)
10008 return -EINVAL;
c73ebb68
HX
10009 *sig = u64_to_user_ptr(arg.sigmask);
10010 *argsz = arg.sigmask_sz;
10011 *ts = u64_to_user_ptr(arg.ts);
10012 return 0;
10013}
10014
2b188cc1 10015SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
c73ebb68
HX
10016 u32, min_complete, u32, flags, const void __user *, argp,
10017 size_t, argsz)
2b188cc1
JA
10018{
10019 struct io_ring_ctx *ctx;
2b188cc1
JA
10020 int submitted = 0;
10021 struct fd f;
33f993da 10022 long ret;
2b188cc1 10023
4c6e277c 10024 io_run_task_work();
b41e9852 10025
33f993da
PB
10026 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
10027 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
2b188cc1
JA
10028 return -EINVAL;
10029
10030 f = fdget(fd);
33f993da 10031 if (unlikely(!f.file))
2b188cc1
JA
10032 return -EBADF;
10033
10034 ret = -EOPNOTSUPP;
33f993da 10035 if (unlikely(f.file->f_op != &io_uring_fops))
2b188cc1
JA
10036 goto out_fput;
10037
10038 ret = -ENXIO;
10039 ctx = f.file->private_data;
33f993da 10040 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
2b188cc1
JA
10041 goto out_fput;
10042
7e84e1c7 10043 ret = -EBADFD;
33f993da 10044 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
7e84e1c7
SG
10045 goto out;
10046
6c271ce2
JA
10047 /*
10048 * For SQ polling, the thread will do all submissions and completions.
10049 * Just return the requested submit count, and wake the thread if
10050 * we were asked to.
10051 */
b2a9eada 10052 ret = 0;
6c271ce2 10053 if (ctx->flags & IORING_SETUP_SQPOLL) {
90f67366 10054 io_cqring_overflow_flush(ctx);
89448c47 10055
21f96522
JA
10056 if (unlikely(ctx->sq_data->thread == NULL)) {
10057 ret = -EOWNERDEAD;
04147488 10058 goto out;
21f96522 10059 }
6c271ce2 10060 if (flags & IORING_ENTER_SQ_WAKEUP)
534ca6d6 10061 wake_up(&ctx->sq_data->wait);
d9d05217
PB
10062 if (flags & IORING_ENTER_SQ_WAIT) {
10063 ret = io_sqpoll_wait_sq(ctx);
10064 if (ret)
10065 goto out;
10066 }
6c271ce2 10067 submitted = to_submit;
b2a9eada 10068 } else if (to_submit) {
eef51daa 10069 ret = io_uring_add_tctx_node(ctx);
0f212204
JA
10070 if (unlikely(ret))
10071 goto out;
2b188cc1 10072 mutex_lock(&ctx->uring_lock);
0f212204 10073 submitted = io_submit_sqes(ctx, to_submit);
2b188cc1 10074 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
10075
10076 if (submitted != to_submit)
10077 goto out;
2b188cc1
JA
10078 }
10079 if (flags & IORING_ENTER_GETEVENTS) {
c73ebb68
HX
10080 const sigset_t __user *sig;
10081 struct __kernel_timespec __user *ts;
10082
10083 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
10084 if (unlikely(ret))
10085 goto out;
10086
2b188cc1
JA
10087 min_complete = min(min_complete, ctx->cq_entries);
10088
32b2244a
XW
10089 /*
10090 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
10091 * space applications don't need to do io completion events
10092 * polling again, they can rely on io_sq_thread to do polling
10093 * work, which can reduce cpu usage and uring_lock contention.
10094 */
10095 if (ctx->flags & IORING_SETUP_IOPOLL &&
10096 !(ctx->flags & IORING_SETUP_SQPOLL)) {
7668b92a 10097 ret = io_iopoll_check(ctx, min_complete);
def596e9 10098 } else {
c73ebb68 10099 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
def596e9 10100 }
2b188cc1
JA
10101 }
10102
7c504e65 10103out:
6805b32e 10104 percpu_ref_put(&ctx->refs);
2b188cc1
JA
10105out_fput:
10106 fdput(f);
10107 return submitted ? submitted : ret;
10108}
10109
bebdb65e 10110#ifdef CONFIG_PROC_FS
61cf9370
MWO
10111static int io_uring_show_cred(struct seq_file *m, unsigned int id,
10112 const struct cred *cred)
87ce955b 10113{
87ce955b
JA
10114 struct user_namespace *uns = seq_user_ns(m);
10115 struct group_info *gi;
10116 kernel_cap_t cap;
10117 unsigned __capi;
10118 int g;
10119
10120 seq_printf(m, "%5d\n", id);
10121 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
10122 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
10123 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
10124 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
10125 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
10126 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
10127 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
10128 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
10129 seq_puts(m, "\n\tGroups:\t");
10130 gi = cred->group_info;
10131 for (g = 0; g < gi->ngroups; g++) {
10132 seq_put_decimal_ull(m, g ? " " : "",
10133 from_kgid_munged(uns, gi->gid[g]));
10134 }
10135 seq_puts(m, "\n\tCapEff:\t");
10136 cap = cred->cap_effective;
10137 CAP_FOR_EACH_U32(__capi)
10138 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
10139 seq_putc(m, '\n');
10140 return 0;
10141}
10142
10143static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
10144{
dbbe9c64 10145 struct io_sq_data *sq = NULL;
fad8e0de 10146 bool has_lock;
87ce955b
JA
10147 int i;
10148
fad8e0de
JA
10149 /*
10150 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
10151 * since fdinfo case grabs it in the opposite direction of normal use
10152 * cases. If we fail to get the lock, we just don't iterate any
10153 * structures that could be going away outside the io_uring mutex.
10154 */
10155 has_lock = mutex_trylock(&ctx->uring_lock);
10156
5f3f26f9 10157 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
dbbe9c64 10158 sq = ctx->sq_data;
5f3f26f9
JA
10159 if (!sq->thread)
10160 sq = NULL;
10161 }
dbbe9c64
JQ
10162
10163 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
10164 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
87ce955b 10165 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
fad8e0de 10166 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
7b29f92d 10167 struct file *f = io_file_from_index(ctx, i);
87ce955b 10168
87ce955b
JA
10169 if (f)
10170 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
10171 else
10172 seq_printf(m, "%5u: <none>\n", i);
10173 }
10174 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
fad8e0de 10175 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
41edf1a5 10176 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
4751f53d 10177 unsigned int len = buf->ubuf_end - buf->ubuf;
87ce955b 10178
4751f53d 10179 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
87ce955b 10180 }
61cf9370
MWO
10181 if (has_lock && !xa_empty(&ctx->personalities)) {
10182 unsigned long index;
10183 const struct cred *cred;
10184
87ce955b 10185 seq_printf(m, "Personalities:\n");
61cf9370
MWO
10186 xa_for_each(&ctx->personalities, index, cred)
10187 io_uring_show_cred(m, index, cred);
87ce955b 10188 }
d7718a9d 10189 seq_printf(m, "PollList:\n");
79ebeaee 10190 spin_lock(&ctx->completion_lock);
d7718a9d
JA
10191 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
10192 struct hlist_head *list = &ctx->cancel_hash[i];
10193 struct io_kiocb *req;
10194
10195 hlist_for_each_entry(req, list, hash_node)
10196 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
10197 req->task->task_works != NULL);
10198 }
79ebeaee 10199 spin_unlock(&ctx->completion_lock);
fad8e0de
JA
10200 if (has_lock)
10201 mutex_unlock(&ctx->uring_lock);
87ce955b
JA
10202}
10203
10204static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
10205{
10206 struct io_ring_ctx *ctx = f->private_data;
10207
10208 if (percpu_ref_tryget(&ctx->refs)) {
10209 __io_uring_show_fdinfo(ctx, m);
10210 percpu_ref_put(&ctx->refs);
10211 }
10212}
bebdb65e 10213#endif
87ce955b 10214
2b188cc1
JA
10215static const struct file_operations io_uring_fops = {
10216 .release = io_uring_release,
10217 .mmap = io_uring_mmap,
6c5c240e
RP
10218#ifndef CONFIG_MMU
10219 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
10220 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
10221#endif
2b188cc1 10222 .poll = io_uring_poll,
bebdb65e 10223#ifdef CONFIG_PROC_FS
87ce955b 10224 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 10225#endif
2b188cc1
JA
10226};
10227
10228static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
10229 struct io_uring_params *p)
10230{
75b28aff
HV
10231 struct io_rings *rings;
10232 size_t size, sq_array_offset;
2b188cc1 10233
bd740481
JA
10234 /* make sure these are sane, as we already accounted them */
10235 ctx->sq_entries = p->sq_entries;
10236 ctx->cq_entries = p->cq_entries;
10237
75b28aff
HV
10238 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
10239 if (size == SIZE_MAX)
10240 return -EOVERFLOW;
10241
10242 rings = io_mem_alloc(size);
10243 if (!rings)
2b188cc1
JA
10244 return -ENOMEM;
10245
75b28aff
HV
10246 ctx->rings = rings;
10247 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
10248 rings->sq_ring_mask = p->sq_entries - 1;
10249 rings->cq_ring_mask = p->cq_entries - 1;
10250 rings->sq_ring_entries = p->sq_entries;
10251 rings->cq_ring_entries = p->cq_entries;
2b188cc1
JA
10252
10253 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
10254 if (size == SIZE_MAX) {
10255 io_mem_free(ctx->rings);
10256 ctx->rings = NULL;
2b188cc1 10257 return -EOVERFLOW;
eb065d30 10258 }
2b188cc1
JA
10259
10260 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
10261 if (!ctx->sq_sqes) {
10262 io_mem_free(ctx->rings);
10263 ctx->rings = NULL;
2b188cc1 10264 return -ENOMEM;
eb065d30 10265 }
2b188cc1 10266
2b188cc1
JA
10267 return 0;
10268}
10269
9faadcc8
PB
10270static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
10271{
10272 int ret, fd;
10273
10274 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
10275 if (fd < 0)
10276 return fd;
10277
eef51daa 10278 ret = io_uring_add_tctx_node(ctx);
9faadcc8
PB
10279 if (ret) {
10280 put_unused_fd(fd);
10281 return ret;
10282 }
10283 fd_install(fd, file);
10284 return fd;
10285}
10286
2b188cc1
JA
10287/*
10288 * Allocate an anonymous fd, this is what constitutes the application
10289 * visible backing of an io_uring instance. The application mmaps this
10290 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
10291 * we have to tie this fd to a socket for file garbage collection purposes.
10292 */
9faadcc8 10293static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
2b188cc1
JA
10294{
10295 struct file *file;
9faadcc8 10296#if defined(CONFIG_UNIX)
2b188cc1
JA
10297 int ret;
10298
2b188cc1
JA
10299 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
10300 &ctx->ring_sock);
10301 if (ret)
9faadcc8 10302 return ERR_PTR(ret);
2b188cc1
JA
10303#endif
10304
2b188cc1
JA
10305 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
10306 O_RDWR | O_CLOEXEC);
2b188cc1 10307#if defined(CONFIG_UNIX)
9faadcc8
PB
10308 if (IS_ERR(file)) {
10309 sock_release(ctx->ring_sock);
10310 ctx->ring_sock = NULL;
10311 } else {
10312 ctx->ring_sock->file = file;
0f212204 10313 }
2b188cc1 10314#endif
9faadcc8 10315 return file;
2b188cc1
JA
10316}
10317
7f13657d
XW
10318static int io_uring_create(unsigned entries, struct io_uring_params *p,
10319 struct io_uring_params __user *params)
2b188cc1 10320{
2b188cc1 10321 struct io_ring_ctx *ctx;
9faadcc8 10322 struct file *file;
2b188cc1
JA
10323 int ret;
10324
8110c1a6 10325 if (!entries)
2b188cc1 10326 return -EINVAL;
8110c1a6
JA
10327 if (entries > IORING_MAX_ENTRIES) {
10328 if (!(p->flags & IORING_SETUP_CLAMP))
10329 return -EINVAL;
10330 entries = IORING_MAX_ENTRIES;
10331 }
2b188cc1
JA
10332
10333 /*
10334 * Use twice as many entries for the CQ ring. It's possible for the
10335 * application to drive a higher depth than the size of the SQ ring,
10336 * since the sqes are only used at submission time. This allows for
33a107f0
JA
10337 * some flexibility in overcommitting a bit. If the application has
10338 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
10339 * of CQ ring entries manually.
2b188cc1
JA
10340 */
10341 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
10342 if (p->flags & IORING_SETUP_CQSIZE) {
10343 /*
10344 * If IORING_SETUP_CQSIZE is set, we do the same roundup
10345 * to a power-of-two, if it isn't already. We do NOT impose
10346 * any cq vs sq ring sizing.
10347 */
eb2667b3 10348 if (!p->cq_entries)
33a107f0 10349 return -EINVAL;
8110c1a6
JA
10350 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
10351 if (!(p->flags & IORING_SETUP_CLAMP))
10352 return -EINVAL;
10353 p->cq_entries = IORING_MAX_CQ_ENTRIES;
10354 }
eb2667b3
JQ
10355 p->cq_entries = roundup_pow_of_two(p->cq_entries);
10356 if (p->cq_entries < p->sq_entries)
10357 return -EINVAL;
33a107f0
JA
10358 } else {
10359 p->cq_entries = 2 * p->sq_entries;
10360 }
2b188cc1 10361
2b188cc1 10362 ctx = io_ring_ctx_alloc(p);
62e398be 10363 if (!ctx)
2b188cc1 10364 return -ENOMEM;
2b188cc1 10365 ctx->compat = in_compat_syscall();
62e398be
JA
10366 if (!capable(CAP_IPC_LOCK))
10367 ctx->user = get_uid(current_user());
2aede0e4
JA
10368
10369 /*
10370 * This is just grabbed for accounting purposes. When a process exits,
10371 * the mm is exited and dropped before the files, hence we need to hang
10372 * on to this mm purely for the purposes of being able to unaccount
10373 * memory (locked/pinned vm). It's not used for anything else.
10374 */
6b7898eb 10375 mmgrab(current->mm);
2aede0e4 10376 ctx->mm_account = current->mm;
6b7898eb 10377
2b188cc1
JA
10378 ret = io_allocate_scq_urings(ctx, p);
10379 if (ret)
10380 goto err;
10381
7e84e1c7 10382 ret = io_sq_offload_create(ctx, p);
2b188cc1
JA
10383 if (ret)
10384 goto err;
eae071c9 10385 /* always set a rsrc node */
47b228ce
PB
10386 ret = io_rsrc_node_switch_start(ctx);
10387 if (ret)
10388 goto err;
eae071c9 10389 io_rsrc_node_switch(ctx, NULL);
2b188cc1 10390
2b188cc1 10391 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
10392 p->sq_off.head = offsetof(struct io_rings, sq.head);
10393 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
10394 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
10395 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
10396 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
10397 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
10398 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
10399
10400 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
10401 p->cq_off.head = offsetof(struct io_rings, cq.head);
10402 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
10403 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
10404 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
10405 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
10406 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 10407 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 10408
7f13657d
XW
10409 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
10410 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351 10411 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
c73ebb68 10412 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9690557e
PB
10413 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
10414 IORING_FEAT_RSRC_TAGS;
7f13657d
XW
10415
10416 if (copy_to_user(params, p, sizeof(*p))) {
10417 ret = -EFAULT;
10418 goto err;
10419 }
d1719f70 10420
9faadcc8
PB
10421 file = io_uring_get_file(ctx);
10422 if (IS_ERR(file)) {
10423 ret = PTR_ERR(file);
10424 goto err;
10425 }
10426
044c1ab3
JA
10427 /*
10428 * Install ring fd as the very last thing, so we don't risk someone
10429 * having closed it before we finish setup
10430 */
9faadcc8
PB
10431 ret = io_uring_install_fd(ctx, file);
10432 if (ret < 0) {
10433 /* fput will clean it up */
10434 fput(file);
10435 return ret;
10436 }
044c1ab3 10437
c826bd7a 10438 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
10439 return ret;
10440err:
10441 io_ring_ctx_wait_and_kill(ctx);
10442 return ret;
10443}
10444
10445/*
10446 * Sets up an aio uring context, and returns the fd. Applications asks for a
10447 * ring size, we return the actual sq/cq ring sizes (among other things) in the
10448 * params structure passed in.
10449 */
10450static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
10451{
10452 struct io_uring_params p;
2b188cc1
JA
10453 int i;
10454
10455 if (copy_from_user(&p, params, sizeof(p)))
10456 return -EFAULT;
10457 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
10458 if (p.resv[i])
10459 return -EINVAL;
10460 }
10461
6c271ce2 10462 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 10463 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
7e84e1c7
SG
10464 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
10465 IORING_SETUP_R_DISABLED))
2b188cc1
JA
10466 return -EINVAL;
10467
7f13657d 10468 return io_uring_create(entries, &p, params);
2b188cc1
JA
10469}
10470
10471SYSCALL_DEFINE2(io_uring_setup, u32, entries,
10472 struct io_uring_params __user *, params)
10473{
10474 return io_uring_setup(entries, params);
10475}
10476
66f4af93
JA
10477static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
10478{
10479 struct io_uring_probe *p;
10480 size_t size;
10481 int i, ret;
10482
10483 size = struct_size(p, ops, nr_args);
10484 if (size == SIZE_MAX)
10485 return -EOVERFLOW;
10486 p = kzalloc(size, GFP_KERNEL);
10487 if (!p)
10488 return -ENOMEM;
10489
10490 ret = -EFAULT;
10491 if (copy_from_user(p, arg, size))
10492 goto out;
10493 ret = -EINVAL;
10494 if (memchr_inv(p, 0, size))
10495 goto out;
10496
10497 p->last_op = IORING_OP_LAST - 1;
10498 if (nr_args > IORING_OP_LAST)
10499 nr_args = IORING_OP_LAST;
10500
10501 for (i = 0; i < nr_args; i++) {
10502 p->ops[i].op = i;
10503 if (!io_op_defs[i].not_supported)
10504 p->ops[i].flags = IO_URING_OP_SUPPORTED;
10505 }
10506 p->ops_len = i;
10507
10508 ret = 0;
10509 if (copy_to_user(arg, p, size))
10510 ret = -EFAULT;
10511out:
10512 kfree(p);
10513 return ret;
10514}
10515
071698e1
JA
10516static int io_register_personality(struct io_ring_ctx *ctx)
10517{
4379bf8b 10518 const struct cred *creds;
61cf9370 10519 u32 id;
1e6fa521 10520 int ret;
071698e1 10521
4379bf8b 10522 creds = get_current_cred();
1e6fa521 10523
61cf9370
MWO
10524 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
10525 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
a30f895a
JA
10526 if (ret < 0) {
10527 put_cred(creds);
10528 return ret;
10529 }
10530 return id;
071698e1
JA
10531}
10532
21b55dbc
SG
10533static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
10534 unsigned int nr_args)
10535{
10536 struct io_uring_restriction *res;
10537 size_t size;
10538 int i, ret;
10539
7e84e1c7
SG
10540 /* Restrictions allowed only if rings started disabled */
10541 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
10542 return -EBADFD;
10543
21b55dbc 10544 /* We allow only a single restrictions registration */
7e84e1c7 10545 if (ctx->restrictions.registered)
21b55dbc
SG
10546 return -EBUSY;
10547
10548 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
10549 return -EINVAL;
10550
10551 size = array_size(nr_args, sizeof(*res));
10552 if (size == SIZE_MAX)
10553 return -EOVERFLOW;
10554
10555 res = memdup_user(arg, size);
10556 if (IS_ERR(res))
10557 return PTR_ERR(res);
10558
10559 ret = 0;
10560
10561 for (i = 0; i < nr_args; i++) {
10562 switch (res[i].opcode) {
10563 case IORING_RESTRICTION_REGISTER_OP:
10564 if (res[i].register_op >= IORING_REGISTER_LAST) {
10565 ret = -EINVAL;
10566 goto out;
10567 }
10568
10569 __set_bit(res[i].register_op,
10570 ctx->restrictions.register_op);
10571 break;
10572 case IORING_RESTRICTION_SQE_OP:
10573 if (res[i].sqe_op >= IORING_OP_LAST) {
10574 ret = -EINVAL;
10575 goto out;
10576 }
10577
10578 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
10579 break;
10580 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
10581 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
10582 break;
10583 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
10584 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
10585 break;
10586 default:
10587 ret = -EINVAL;
10588 goto out;
10589 }
10590 }
10591
10592out:
10593 /* Reset all restrictions if an error happened */
10594 if (ret != 0)
10595 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
10596 else
7e84e1c7 10597 ctx->restrictions.registered = true;
21b55dbc
SG
10598
10599 kfree(res);
10600 return ret;
10601}
10602
7e84e1c7
SG
10603static int io_register_enable_rings(struct io_ring_ctx *ctx)
10604{
10605 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
10606 return -EBADFD;
10607
10608 if (ctx->restrictions.registered)
10609 ctx->restricted = 1;
10610
0298ef96
PB
10611 ctx->flags &= ~IORING_SETUP_R_DISABLED;
10612 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
10613 wake_up(&ctx->sq_data->wait);
7e84e1c7
SG
10614 return 0;
10615}
10616
fdecb662 10617static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
c3bdad02 10618 struct io_uring_rsrc_update2 *up,
98f0b3b4
PB
10619 unsigned nr_args)
10620{
10621 __u32 tmp;
10622 int err;
10623
10624 if (check_add_overflow(up->offset, nr_args, &tmp))
10625 return -EOVERFLOW;
10626 err = io_rsrc_node_switch_start(ctx);
10627 if (err)
10628 return err;
10629
fdecb662
PB
10630 switch (type) {
10631 case IORING_RSRC_FILE:
98f0b3b4 10632 return __io_sqe_files_update(ctx, up, nr_args);
634d00df
PB
10633 case IORING_RSRC_BUFFER:
10634 return __io_sqe_buffers_update(ctx, up, nr_args);
98f0b3b4
PB
10635 }
10636 return -EINVAL;
10637}
10638
c3bdad02
PB
10639static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
10640 unsigned nr_args)
98f0b3b4 10641{
c3bdad02 10642 struct io_uring_rsrc_update2 up;
98f0b3b4
PB
10643
10644 if (!nr_args)
10645 return -EINVAL;
c3bdad02
PB
10646 memset(&up, 0, sizeof(up));
10647 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
10648 return -EFAULT;
c9747fa5 10649 if (up.resv || up.resv2)
4dd8f4c2 10650 return -EINVAL;
c3bdad02
PB
10651 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
10652}
10653
10654static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
992da01a 10655 unsigned size, unsigned type)
c3bdad02
PB
10656{
10657 struct io_uring_rsrc_update2 up;
10658
10659 if (size != sizeof(up))
10660 return -EINVAL;
98f0b3b4
PB
10661 if (copy_from_user(&up, arg, sizeof(up)))
10662 return -EFAULT;
c9747fa5 10663 if (!up.nr || up.resv || up.resv2)
98f0b3b4 10664 return -EINVAL;
992da01a 10665 return __io_register_rsrc_update(ctx, type, &up, up.nr);
98f0b3b4
PB
10666}
10667
792e3582 10668static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
992da01a 10669 unsigned int size, unsigned int type)
792e3582
PB
10670{
10671 struct io_uring_rsrc_register rr;
10672
10673 /* keep it extendible */
10674 if (size != sizeof(rr))
10675 return -EINVAL;
10676
10677 memset(&rr, 0, sizeof(rr));
10678 if (copy_from_user(&rr, arg, size))
10679 return -EFAULT;
992da01a 10680 if (!rr.nr || rr.resv || rr.resv2)
792e3582
PB
10681 return -EINVAL;
10682
992da01a 10683 switch (type) {
792e3582
PB
10684 case IORING_RSRC_FILE:
10685 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
10686 rr.nr, u64_to_user_ptr(rr.tags));
634d00df
PB
10687 case IORING_RSRC_BUFFER:
10688 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
10689 rr.nr, u64_to_user_ptr(rr.tags));
792e3582
PB
10690 }
10691 return -EINVAL;
10692}
10693
fe76421d
JA
10694static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
10695 unsigned len)
10696{
10697 struct io_uring_task *tctx = current->io_uring;
10698 cpumask_var_t new_mask;
10699 int ret;
10700
10701 if (!tctx || !tctx->io_wq)
10702 return -EINVAL;
10703
10704 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
10705 return -ENOMEM;
10706
10707 cpumask_clear(new_mask);
10708 if (len > cpumask_size())
10709 len = cpumask_size();
10710
fd213661
ES
10711 if (in_compat_syscall()) {
10712 ret = compat_get_bitmap(cpumask_bits(new_mask),
10713 (const compat_ulong_t __user *)arg,
10714 len * 8 /* CHAR_BIT */);
10715 } else {
10716 ret = copy_from_user(new_mask, arg, len);
10717 }
10718
10719 if (ret) {
fe76421d
JA
10720 free_cpumask_var(new_mask);
10721 return -EFAULT;
10722 }
10723
10724 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
10725 free_cpumask_var(new_mask);
10726 return ret;
10727}
10728
10729static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
10730{
10731 struct io_uring_task *tctx = current->io_uring;
10732
10733 if (!tctx || !tctx->io_wq)
10734 return -EINVAL;
10735
10736 return io_wq_cpu_affinity(tctx->io_wq, NULL);
10737}
10738
2e480058
JA
10739static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
10740 void __user *arg)
b22fa62a 10741 __must_hold(&ctx->uring_lock)
2e480058 10742{
b22fa62a 10743 struct io_tctx_node *node;
fa84693b
JA
10744 struct io_uring_task *tctx = NULL;
10745 struct io_sq_data *sqd = NULL;
2e480058
JA
10746 __u32 new_count[2];
10747 int i, ret;
10748
2e480058
JA
10749 if (copy_from_user(new_count, arg, sizeof(new_count)))
10750 return -EFAULT;
10751 for (i = 0; i < ARRAY_SIZE(new_count); i++)
10752 if (new_count[i] > INT_MAX)
10753 return -EINVAL;
10754
fa84693b
JA
10755 if (ctx->flags & IORING_SETUP_SQPOLL) {
10756 sqd = ctx->sq_data;
10757 if (sqd) {
009ad9f0
JA
10758 /*
10759 * Observe the correct sqd->lock -> ctx->uring_lock
10760 * ordering. Fine to drop uring_lock here, we hold
10761 * a ref to the ctx.
10762 */
41d3a6bd 10763 refcount_inc(&sqd->refs);
009ad9f0 10764 mutex_unlock(&ctx->uring_lock);
fa84693b 10765 mutex_lock(&sqd->lock);
009ad9f0 10766 mutex_lock(&ctx->uring_lock);
41d3a6bd
JA
10767 if (sqd->thread)
10768 tctx = sqd->thread->io_uring;
fa84693b
JA
10769 }
10770 } else {
10771 tctx = current->io_uring;
10772 }
10773
e139a1ec 10774 BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
fa84693b 10775
c29d963b
PB
10776 for (i = 0; i < ARRAY_SIZE(new_count); i++)
10777 if (new_count[i])
10778 ctx->iowq_limits[i] = new_count[i];
e139a1ec
PB
10779 ctx->iowq_limits_set = true;
10780
10781 ret = -EINVAL;
10782 if (tctx && tctx->io_wq) {
10783 ret = io_wq_max_workers(tctx->io_wq, new_count);
10784 if (ret)
10785 goto err;
10786 } else {
10787 memset(new_count, 0, sizeof(new_count));
10788 }
fa84693b 10789
41d3a6bd 10790 if (sqd) {
fa84693b 10791 mutex_unlock(&sqd->lock);
41d3a6bd
JA
10792 io_put_sq_data(sqd);
10793 }
2e480058
JA
10794
10795 if (copy_to_user(arg, new_count, sizeof(new_count)))
10796 return -EFAULT;
10797
b22fa62a
PB
10798 /* that's it for SQPOLL, only the SQPOLL task creates requests */
10799 if (sqd)
10800 return 0;
10801
10802 /* now propagate the restriction to all registered users */
10803 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
10804 struct io_uring_task *tctx = node->task->io_uring;
10805
10806 if (WARN_ON_ONCE(!tctx->io_wq))
10807 continue;
10808
10809 for (i = 0; i < ARRAY_SIZE(new_count); i++)
10810 new_count[i] = ctx->iowq_limits[i];
10811 /* ignore errors, it always returns zero anyway */
10812 (void)io_wq_max_workers(tctx->io_wq, new_count);
10813 }
2e480058 10814 return 0;
fa84693b 10815err:
41d3a6bd 10816 if (sqd) {
fa84693b 10817 mutex_unlock(&sqd->lock);
41d3a6bd
JA
10818 io_put_sq_data(sqd);
10819 }
fa84693b 10820 return ret;
2e480058
JA
10821}
10822
071698e1
JA
10823static bool io_register_op_must_quiesce(int op)
10824{
10825 switch (op) {
bd54b6fe
BM
10826 case IORING_REGISTER_BUFFERS:
10827 case IORING_UNREGISTER_BUFFERS:
f4f7d21c 10828 case IORING_REGISTER_FILES:
071698e1
JA
10829 case IORING_UNREGISTER_FILES:
10830 case IORING_REGISTER_FILES_UPDATE:
10831 case IORING_REGISTER_PROBE:
10832 case IORING_REGISTER_PERSONALITY:
10833 case IORING_UNREGISTER_PERSONALITY:
992da01a
PB
10834 case IORING_REGISTER_FILES2:
10835 case IORING_REGISTER_FILES_UPDATE2:
10836 case IORING_REGISTER_BUFFERS2:
10837 case IORING_REGISTER_BUFFERS_UPDATE:
fe76421d
JA
10838 case IORING_REGISTER_IOWQ_AFF:
10839 case IORING_UNREGISTER_IOWQ_AFF:
2e480058 10840 case IORING_REGISTER_IOWQ_MAX_WORKERS:
071698e1
JA
10841 return false;
10842 default:
10843 return true;
10844 }
10845}
10846
e73c5c7c
PB
10847static int io_ctx_quiesce(struct io_ring_ctx *ctx)
10848{
10849 long ret;
10850
10851 percpu_ref_kill(&ctx->refs);
10852
10853 /*
10854 * Drop uring mutex before waiting for references to exit. If another
10855 * thread is currently inside io_uring_enter() it might need to grab the
10856 * uring_lock to make progress. If we hold it here across the drain
10857 * wait, then we can deadlock. It's safe to drop the mutex here, since
10858 * no new references will come in after we've killed the percpu ref.
10859 */
10860 mutex_unlock(&ctx->uring_lock);
10861 do {
10862 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10863 if (!ret)
10864 break;
10865 ret = io_run_task_work_sig();
10866 } while (ret >= 0);
10867 mutex_lock(&ctx->uring_lock);
10868
10869 if (ret)
10870 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10871 return ret;
10872}
10873
edafccee
JA
10874static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10875 void __user *arg, unsigned nr_args)
b19062a5
JA
10876 __releases(ctx->uring_lock)
10877 __acquires(ctx->uring_lock)
edafccee
JA
10878{
10879 int ret;
10880
35fa71a0
JA
10881 /*
10882 * We're inside the ring mutex, if the ref is already dying, then
10883 * someone else killed the ctx or is already going through
10884 * io_uring_register().
10885 */
10886 if (percpu_ref_is_dying(&ctx->refs))
10887 return -ENXIO;
10888
75c4021a
PB
10889 if (ctx->restricted) {
10890 if (opcode >= IORING_REGISTER_LAST)
10891 return -EINVAL;
10892 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
10893 if (!test_bit(opcode, ctx->restrictions.register_op))
10894 return -EACCES;
10895 }
10896
071698e1 10897 if (io_register_op_must_quiesce(opcode)) {
e73c5c7c
PB
10898 ret = io_ctx_quiesce(ctx);
10899 if (ret)
f70865db 10900 return ret;
05f3fb3c 10901 }
edafccee
JA
10902
10903 switch (opcode) {
10904 case IORING_REGISTER_BUFFERS:
634d00df 10905 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
edafccee
JA
10906 break;
10907 case IORING_UNREGISTER_BUFFERS:
10908 ret = -EINVAL;
10909 if (arg || nr_args)
10910 break;
0a96bbe4 10911 ret = io_sqe_buffers_unregister(ctx);
edafccee 10912 break;
6b06314c 10913 case IORING_REGISTER_FILES:
792e3582 10914 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
6b06314c
JA
10915 break;
10916 case IORING_UNREGISTER_FILES:
10917 ret = -EINVAL;
10918 if (arg || nr_args)
10919 break;
10920 ret = io_sqe_files_unregister(ctx);
10921 break;
c3a31e60 10922 case IORING_REGISTER_FILES_UPDATE:
c3bdad02 10923 ret = io_register_files_update(ctx, arg, nr_args);
c3a31e60 10924 break;
9b402849 10925 case IORING_REGISTER_EVENTFD:
f2842ab5 10926 case IORING_REGISTER_EVENTFD_ASYNC:
9b402849
JA
10927 ret = -EINVAL;
10928 if (nr_args != 1)
10929 break;
10930 ret = io_eventfd_register(ctx, arg);
f2842ab5
JA
10931 if (ret)
10932 break;
10933 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10934 ctx->eventfd_async = 1;
10935 else
10936 ctx->eventfd_async = 0;
9b402849
JA
10937 break;
10938 case IORING_UNREGISTER_EVENTFD:
10939 ret = -EINVAL;
10940 if (arg || nr_args)
10941 break;
10942 ret = io_eventfd_unregister(ctx);
10943 break;
66f4af93
JA
10944 case IORING_REGISTER_PROBE:
10945 ret = -EINVAL;
10946 if (!arg || nr_args > 256)
10947 break;
10948 ret = io_probe(ctx, arg, nr_args);
10949 break;
071698e1
JA
10950 case IORING_REGISTER_PERSONALITY:
10951 ret = -EINVAL;
10952 if (arg || nr_args)
10953 break;
10954 ret = io_register_personality(ctx);
10955 break;
10956 case IORING_UNREGISTER_PERSONALITY:
10957 ret = -EINVAL;
10958 if (arg)
10959 break;
10960 ret = io_unregister_personality(ctx, nr_args);
10961 break;
7e84e1c7
SG
10962 case IORING_REGISTER_ENABLE_RINGS:
10963 ret = -EINVAL;
10964 if (arg || nr_args)
10965 break;
10966 ret = io_register_enable_rings(ctx);
10967 break;
21b55dbc
SG
10968 case IORING_REGISTER_RESTRICTIONS:
10969 ret = io_register_restrictions(ctx, arg, nr_args);
10970 break;
992da01a
PB
10971 case IORING_REGISTER_FILES2:
10972 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
10973 break;
10974 case IORING_REGISTER_FILES_UPDATE2:
10975 ret = io_register_rsrc_update(ctx, arg, nr_args,
10976 IORING_RSRC_FILE);
10977 break;
10978 case IORING_REGISTER_BUFFERS2:
10979 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
792e3582 10980 break;
992da01a
PB
10981 case IORING_REGISTER_BUFFERS_UPDATE:
10982 ret = io_register_rsrc_update(ctx, arg, nr_args,
10983 IORING_RSRC_BUFFER);
c3bdad02 10984 break;
fe76421d
JA
10985 case IORING_REGISTER_IOWQ_AFF:
10986 ret = -EINVAL;
10987 if (!arg || !nr_args)
10988 break;
10989 ret = io_register_iowq_aff(ctx, arg, nr_args);
10990 break;
10991 case IORING_UNREGISTER_IOWQ_AFF:
10992 ret = -EINVAL;
10993 if (arg || nr_args)
10994 break;
10995 ret = io_unregister_iowq_aff(ctx);
10996 break;
2e480058
JA
10997 case IORING_REGISTER_IOWQ_MAX_WORKERS:
10998 ret = -EINVAL;
10999 if (!arg || nr_args != 2)
11000 break;
11001 ret = io_register_iowq_max_workers(ctx, arg);
11002 break;
edafccee
JA
11003 default:
11004 ret = -EINVAL;
11005 break;
11006 }
11007
071698e1 11008 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 11009 /* bring the ctx back to life */
05f3fb3c 11010 percpu_ref_reinit(&ctx->refs);
0f158b4c 11011 reinit_completion(&ctx->ref_comp);
05f3fb3c 11012 }
edafccee
JA
11013 return ret;
11014}
11015
11016SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
11017 void __user *, arg, unsigned int, nr_args)
11018{
11019 struct io_ring_ctx *ctx;
11020 long ret = -EBADF;
11021 struct fd f;
11022
11023 f = fdget(fd);
11024 if (!f.file)
11025 return -EBADF;
11026
11027 ret = -EOPNOTSUPP;
11028 if (f.file->f_op != &io_uring_fops)
11029 goto out_fput;
11030
11031 ctx = f.file->private_data;
11032
b6c23dd5
PB
11033 io_run_task_work();
11034
edafccee
JA
11035 mutex_lock(&ctx->uring_lock);
11036 ret = __io_uring_register(ctx, opcode, arg, nr_args);
11037 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
11038 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
11039 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
11040out_fput:
11041 fdput(f);
11042 return ret;
11043}
11044
2b188cc1
JA
11045static int __init io_uring_init(void)
11046{
d7f62e82
SM
11047#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
11048 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
11049 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
11050} while (0)
11051
11052#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
11053 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
11054 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
11055 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
11056 BUILD_BUG_SQE_ELEM(1, __u8, flags);
11057 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
11058 BUILD_BUG_SQE_ELEM(4, __s32, fd);
11059 BUILD_BUG_SQE_ELEM(8, __u64, off);
11060 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
11061 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 11062 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
11063 BUILD_BUG_SQE_ELEM(24, __u32, len);
11064 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
11065 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
11066 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
11067 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
5769a351
JX
11068 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
11069 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
d7f62e82
SM
11070 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
11071 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
11072 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
11073 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
11074 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
11075 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
11076 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
11077 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 11078 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
11079 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
11080 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
16340eab 11081 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
d7f62e82 11082 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 11083 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
b9445598 11084 BUILD_BUG_SQE_ELEM(44, __u32, file_index);
d7f62e82 11085
b0d658ec
PB
11086 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
11087 sizeof(struct io_uring_rsrc_update));
11088 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
11089 sizeof(struct io_uring_rsrc_update2));
90499ad0
PB
11090
11091 /* ->buf_index is u16 */
11092 BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
11093
b0d658ec
PB
11094 /* should fit into one byte */
11095 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
11096
d3656344 11097 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
32c2d33e 11098 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
16340eab 11099
91f245d5
JA
11100 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
11101 SLAB_ACCOUNT);
2b188cc1
JA
11102 return 0;
11103};
11104__initcall(io_uring_init);