// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *      git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
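/*
 * A minimal userspace-side sketch of the SQ submission ordering described
 * above (illustrative only, not kernel code; names like sq_tail, sq_mask,
 * sq_flags, sqes and ring_fd are assumed to come from mmap()ing the rings
 * liburing-style, and io_uring_enter() stands for the raw syscall):
 *
 *      unsigned tail = *sq_tail;               // only the submitter writes this
 *      sqes[tail & *sq_mask] = prepared_sqe;   // fill the SQE first...
 *      smp_store_release(sq_tail, tail + 1);   // ...then publish the new tail
 *      smp_mb();                               // full barrier before the flags check (SQPOLL)
 *      if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *              io_uring_enter(ring_fd, 1, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 *
 * The smp_store_release() supplies the smp_wmb() pairing mentioned above;
 * the kernel reads the tail with smp_load_acquire() on its side.
 */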
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES      32768
#define IORING_MAX_CQ_ENTRIES   (2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT 9
#define IORING_MAX_FILES_TABLE  (1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK  (IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES  (64 * IORING_MAX_FILES_TABLE)
#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
                                 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
                                IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
                                IOSQE_BUFFER_SELECT)
struct io_uring {
        u32 head ____cacheline_aligned_in_smp;
        u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
        /*
         * Head and tail offsets into the ring; the offsets need to be
         * masked to get valid indices.
         *
         * The kernel controls head of the sq ring and the tail of the cq ring,
         * and the application controls tail of the sq ring and the head of the
         * cq ring.
         */
        struct io_uring         sq, cq;
        /*
         * Bitmasks to apply to head and tail offsets (constant, equals
         * ring_entries - 1)
         */
        u32                     sq_ring_mask, cq_ring_mask;
        /* Ring sizes (constant, power of 2) */
        u32                     sq_ring_entries, cq_ring_entries;
        /*
         * Number of invalid entries dropped by the kernel due to
         * invalid index stored in array
         *
         * Written by the kernel, shouldn't be modified by the
         * application (i.e. get number of "new events" by comparing to
         * cached value).
         *
         * After a new SQ head value was read by the application this
         * counter includes all submissions that were dropped reaching
         * the new SQ head (and possibly more).
         */
        u32                     sq_dropped;
        /*
         * Runtime SQ flags
         *
         * Written by the kernel, shouldn't be modified by the
         * application.
         *
         * The application needs a full memory barrier before checking
         * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
         */
        u32                     sq_flags;
        /*
         * Runtime CQ flags
         *
         * Written by the application, shouldn't be modified by the
         * kernel.
         */
        u32                     cq_flags;
        /*
         * Number of completion events lost because the queue was full;
         * this should be avoided by the application by making sure
         * there are not more requests pending than there is space in
         * the completion queue.
         *
         * Written by the kernel, shouldn't be modified by the
         * application (i.e. get number of "new events" by comparing to
         * cached value).
         *
         * As completion events come in out of order this counter is not
         * ordered with any other data.
         */
        u32                     cq_overflow;
        /*
         * Ring buffer of completion events.
         *
         * The kernel writes completion events fresh every time they are
         * produced, so the application is allowed to modify pending
         * entries.
         */
        struct io_uring_cqe     cqes[] ____cacheline_aligned_in_smp;
};
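/*
 * Illustrative sketch (not part of this file's code): how an application
 * typically drains CQEs from the shared io_rings area, using the published
 * mask instead of a modulo. cq_head, cq_tail and cq_ring_mask are assumed
 * to point into the mmap()ed CQ ring:
 *
 *      unsigned head = *cq_head;
 *      unsigned tail = smp_load_acquire(cq_tail);
 *      while (head != tail) {
 *              struct io_uring_cqe *cqe = &cqes[head & *cq_ring_mask];
 *              handle_cqe(cqe);        // hypothetical application callback
 *              head++;
 *      }
 *      smp_store_release(cq_head, head);
 *
 * Because cq_ring_entries is a power of 2, "index & (entries - 1)" is
 * equivalent to "index % entries"; both sides rely on that.
 */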
enum io_uring_cmd_flags {
        IO_URING_F_NONBLOCK             = 1,
        IO_URING_F_COMPLETE_DEFER       = 2,
};

edafccee
JA
198struct io_mapped_ubuf {
199 u64 ubuf;
200 size_t len;
201 struct bio_vec *bvec;
202 unsigned int nr_bvecs;
de293938 203 unsigned long acct_pages;
edafccee
JA
204};
205
50238531
BM
206struct io_ring_ctx;
207
269bbe5f
BM
208struct io_rsrc_put {
209 struct list_head list;
50238531
BM
210 union {
211 void *rsrc;
212 struct file *file;
213 };
269bbe5f
BM
214};
215
216struct fixed_rsrc_table {
65e19f54 217 struct file **files;
31b51510
JA
218};
219
269bbe5f 220struct fixed_rsrc_ref_node {
05589553
XW
221 struct percpu_ref refs;
222 struct list_head node;
269bbe5f
BM
223 struct list_head rsrc_list;
224 struct fixed_rsrc_data *rsrc_data;
50238531
BM
225 void (*rsrc_put)(struct io_ring_ctx *ctx,
226 struct io_rsrc_put *prsrc);
4a38aed2 227 struct llist_node llist;
e297822b 228 bool done;
05589553
XW
229};
230
269bbe5f
BM
231struct fixed_rsrc_data {
232 struct fixed_rsrc_table *table;
05f3fb3c
JA
233 struct io_ring_ctx *ctx;
234
269bbe5f 235 struct fixed_rsrc_ref_node *node;
05f3fb3c 236 struct percpu_ref refs;
05f3fb3c 237 struct completion done;
8bad28d8 238 bool quiesce;
05f3fb3c
JA
239};
240
5a2e745d
JA
241struct io_buffer {
242 struct list_head list;
243 __u64 addr;
244 __s32 len;
245 __u16 bid;
246};
247
21b55dbc
SG
248struct io_restriction {
249 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
250 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
251 u8 sqe_flags_allowed;
252 u8 sqe_flags_required;
7e84e1c7 253 bool registered;
21b55dbc
SG
254};
255
37d1e2e3
JA
256enum {
257 IO_SQ_THREAD_SHOULD_STOP = 0,
258 IO_SQ_THREAD_SHOULD_PARK,
259};
260
534ca6d6
JA
261struct io_sq_data {
262 refcount_t refs;
69fb2131
JA
263 struct mutex lock;
264
265 /* ctx's that are using this sqd */
266 struct list_head ctx_list;
267 struct list_head ctx_new_list;
268 struct mutex ctx_lock;
269
534ca6d6
JA
270 struct task_struct *thread;
271 struct wait_queue_head wait;
08369246
XW
272
273 unsigned sq_thread_idle;
37d1e2e3
JA
274 int sq_cpu;
275 pid_t task_pid;
276
277 unsigned long state;
278 struct completion startup;
279 struct completion completion;
280 struct completion exited;
534ca6d6
JA
281};
282
258b29a9 283#define IO_IOPOLL_BATCH 8
6dd0be1e 284#define IO_COMPL_BATCH 32
6ff119a6 285#define IO_REQ_CACHE_SIZE 32
bf019da7 286#define IO_REQ_ALLOC_BATCH 8
258b29a9
PB
287
288struct io_comp_state {
6dd0be1e 289 struct io_kiocb *reqs[IO_COMPL_BATCH];
1b4c351f 290 unsigned int nr;
c7dae4ba
JA
291 unsigned int locked_free_nr;
292 /* inline/task_work completion list, under ->uring_lock */
1b4c351f 293 struct list_head free_list;
c7dae4ba
JA
294 /* IRQ completion list, under ->completion_lock */
295 struct list_head locked_free_list;
258b29a9
PB
296};
297
a1ab7b35
PB
298struct io_submit_link {
299 struct io_kiocb *head;
300 struct io_kiocb *last;
301};
302
258b29a9
PB
303struct io_submit_state {
304 struct blk_plug plug;
a1ab7b35 305 struct io_submit_link link;
258b29a9
PB
306
307 /*
308 * io_kiocb alloc cache
309 */
bf019da7 310 void *reqs[IO_REQ_CACHE_SIZE];
258b29a9
PB
311 unsigned int free_reqs;
312
313 bool plug_started;
314
315 /*
316 * Batch completion logic
317 */
318 struct io_comp_state comp;
319
320 /*
321 * File reference cache
322 */
323 struct file *file;
324 unsigned int fd;
325 unsigned int file_refs;
326 unsigned int ios_left;
327};
328
2b188cc1
JA
329struct io_ring_ctx {
330 struct {
331 struct percpu_ref refs;
332 } ____cacheline_aligned_in_smp;
333
334 struct {
335 unsigned int flags;
e1d85334 336 unsigned int compat: 1;
e1d85334
RD
337 unsigned int cq_overflow_flushed: 1;
338 unsigned int drain_next: 1;
339 unsigned int eventfd_async: 1;
21b55dbc 340 unsigned int restricted: 1;
d9d05217 341 unsigned int sqo_dead: 1;
5f3f26f9 342 unsigned int sqo_exec: 1;
2b188cc1 343
75b28aff
HV
344 /*
345 * Ring buffer of indices into array of io_uring_sqe, which is
346 * mmapped by the application using the IORING_OFF_SQES offset.
347 *
348 * This indirection could e.g. be used to assign fixed
349 * io_uring_sqe entries to operations and only submit them to
350 * the queue when needed.
351 *
352 * The kernel modifies neither the indices array nor the entries
353 * array.
354 */
355 u32 *sq_array;
2b188cc1
JA
356 unsigned cached_sq_head;
357 unsigned sq_entries;
358 unsigned sq_mask;
6c271ce2 359 unsigned sq_thread_idle;
498ccd9e 360 unsigned cached_sq_dropped;
2c3bac6d 361 unsigned cached_cq_overflow;
ad3eb2c8 362 unsigned long sq_check_overflow;
de0617e4 363
e941894e
JA
364 /* hashed buffered write serialization */
365 struct io_wq_hash *hash_map;
366
de0617e4 367 struct list_head defer_list;
5262f567 368 struct list_head timeout_list;
1d7bb1d5 369 struct list_head cq_overflow_list;
fcb323cc 370
ad3eb2c8 371 struct io_uring_sqe *sq_sqes;
2b188cc1
JA
372 } ____cacheline_aligned_in_smp;
373
3c1a2ead
JA
374 struct {
375 struct mutex uring_lock;
376 wait_queue_head_t wait;
377 } ____cacheline_aligned_in_smp;
378
379 struct io_submit_state submit_state;
380
206aefde
JA
381 struct io_rings *rings;
382
2aede0e4 383 /*
37d1e2e3 384 * For SQPOLL usage
2aede0e4
JA
385 */
386 struct task_struct *sqo_task;
387
388 /* Only used for accounting purposes */
389 struct mm_struct *mm_account;
390
534ca6d6
JA
391 struct io_sq_data *sq_data; /* if using sq thread polling */
392
90554200 393 struct wait_queue_head sqo_sq_wait;
69fb2131 394 struct list_head sqd_list;
75b28aff 395
6b06314c
JA
396 /*
397 * If used, fixed file set. Writers must ensure that ->refs is dead,
398 * readers must ensure that ->refs is alive as long as the file* is
399 * used. Only updated through io_uring_register(2).
400 */
269bbe5f 401 struct fixed_rsrc_data *file_data;
6b06314c
JA
402 unsigned nr_user_files;
403
edafccee
JA
404 /* if used, fixed mapped user buffers */
405 unsigned nr_user_bufs;
406 struct io_mapped_ubuf *user_bufs;
407
2b188cc1
JA
408 struct user_struct *user;
409
0f158b4c
JA
410 struct completion ref_comp;
411 struct completion sq_thread_comp;
206aefde
JA
412
413#if defined(CONFIG_UNIX)
414 struct socket *ring_sock;
415#endif
416
5a2e745d
JA
417 struct idr io_buffer_idr;
418
071698e1
JA
419 struct idr personality_idr;
420
206aefde
JA
421 struct {
422 unsigned cached_cq_tail;
423 unsigned cq_entries;
424 unsigned cq_mask;
425 atomic_t cq_timeouts;
f010505b 426 unsigned cq_last_tm_flush;
ad3eb2c8 427 unsigned long cq_check_overflow;
206aefde
JA
428 struct wait_queue_head cq_wait;
429 struct fasync_struct *cq_fasync;
430 struct eventfd_ctx *cq_ev_fd;
431 } ____cacheline_aligned_in_smp;
2b188cc1 432
2b188cc1
JA
433 struct {
434 spinlock_t completion_lock;
e94f141b 435
def596e9 436 /*
540e32a0 437 * ->iopoll_list is protected by the ctx->uring_lock for
def596e9
JA
438 * io_uring instances that don't use IORING_SETUP_SQPOLL.
439 * For SQPOLL, only the single threaded io_sq_thread() will
440 * manipulate the list, hence no extra locking is needed there.
441 */
540e32a0 442 struct list_head iopoll_list;
78076bb6
JA
443 struct hlist_head *cancel_hash;
444 unsigned cancel_hash_bits;
e94f141b 445 bool poll_multi_file;
31b51510 446
fcb323cc
JA
447 spinlock_t inflight_lock;
448 struct list_head inflight_list;
2b188cc1 449 } ____cacheline_aligned_in_smp;
85faa7b8 450
269bbe5f
BM
451 struct delayed_work rsrc_put_work;
452 struct llist_head rsrc_put_llist;
d67d2263
BM
453 struct list_head rsrc_ref_list;
454 spinlock_t rsrc_ref_lock;
4a38aed2 455
21b55dbc 456 struct io_restriction restrictions;
3c1a2ead 457
7c25c0d1
JA
458 /* exit task_work */
459 struct callback_head *exit_task_work;
460
e941894e
JA
461 struct wait_queue_head hash_wait;
462
3c1a2ead
JA
463 /* Keep this last, we don't need it for the fast path */
464 struct work_struct exit_work;
2b188cc1
JA
465};
466
09bb8394
JA
467/*
468 * First field must be the file pointer in all the
469 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
470 */
221c5eb2
JA
471struct io_poll_iocb {
472 struct file *file;
018043be 473 struct wait_queue_head *head;
221c5eb2 474 __poll_t events;
8c838788 475 bool done;
221c5eb2 476 bool canceled;
392edb45 477 struct wait_queue_entry wait;
221c5eb2
JA
478};
479
018043be
PB
480struct io_poll_remove {
481 struct file *file;
482 u64 addr;
483};
484
b5dba59e
JA
485struct io_close {
486 struct file *file;
b5dba59e
JA
487 int fd;
488};
489
ad8a48ac
JA
490struct io_timeout_data {
491 struct io_kiocb *req;
492 struct hrtimer timer;
493 struct timespec64 ts;
494 enum hrtimer_mode mode;
495};
496
8ed8d3c3
JA
497struct io_accept {
498 struct file *file;
499 struct sockaddr __user *addr;
500 int __user *addr_len;
501 int flags;
09952e3e 502 unsigned long nofile;
8ed8d3c3
JA
503};
504
505struct io_sync {
506 struct file *file;
507 loff_t len;
508 loff_t off;
509 int flags;
d63d1b5e 510 int mode;
8ed8d3c3
JA
511};
512
fbf23849
JA
513struct io_cancel {
514 struct file *file;
515 u64 addr;
516};
517
b29472ee
JA
518struct io_timeout {
519 struct file *file;
bfe68a22
PB
520 u32 off;
521 u32 target_seq;
135fcde8 522 struct list_head list;
90cd7e42
PB
523 /* head of the link, used by linked timeouts only */
524 struct io_kiocb *head;
b29472ee
JA
525};
526
0bdf7a2d
PB
527struct io_timeout_rem {
528 struct file *file;
529 u64 addr;
9c8e11b3
PB
530
531 /* timeout update */
532 struct timespec64 ts;
533 u32 flags;
0bdf7a2d
PB
534};
535
9adbd45d
JA
536struct io_rw {
537 /* NOTE: kiocb has the file as the first member, so don't do it here */
538 struct kiocb kiocb;
539 u64 addr;
540 u64 len;
541};
542
3fbb51c1
JA
543struct io_connect {
544 struct file *file;
545 struct sockaddr __user *addr;
546 int addr_len;
547};
548
e47293fd
JA
549struct io_sr_msg {
550 struct file *file;
fddaface 551 union {
270a5940 552 struct user_msghdr __user *umsg;
fddaface
JA
553 void __user *buf;
554 };
e47293fd 555 int msg_flags;
bcda7baa 556 int bgid;
fddaface 557 size_t len;
bcda7baa 558 struct io_buffer *kbuf;
e47293fd
JA
559};
560
15b71abe
JA
561struct io_open {
562 struct file *file;
563 int dfd;
15b71abe 564 struct filename *filename;
c12cedf2 565 struct open_how how;
4022e7af 566 unsigned long nofile;
15b71abe
JA
567};
568
269bbe5f 569struct io_rsrc_update {
05f3fb3c
JA
570 struct file *file;
571 u64 arg;
572 u32 nr_args;
573 u32 offset;
574};
575
4840e418
JA
576struct io_fadvise {
577 struct file *file;
578 u64 offset;
579 u32 len;
580 u32 advice;
581};
582
c1ca757b
JA
583struct io_madvise {
584 struct file *file;
585 u64 addr;
586 u32 len;
587 u32 advice;
588};
589
3e4827b0
JA
590struct io_epoll {
591 struct file *file;
592 int epfd;
593 int op;
594 int fd;
595 struct epoll_event event;
e47293fd
JA
596};
597
7d67af2c
PB
598struct io_splice {
599 struct file *file_out;
600 struct file *file_in;
601 loff_t off_out;
602 loff_t off_in;
603 u64 len;
604 unsigned int flags;
605};
606
ddf0322d
JA
607struct io_provide_buf {
608 struct file *file;
609 __u64 addr;
610 __s32 len;
611 __u32 bgid;
612 __u16 nbufs;
613 __u16 bid;
614};
615
1d9e1288
BM
616struct io_statx {
617 struct file *file;
618 int dfd;
619 unsigned int mask;
620 unsigned int flags;
e62753e4 621 const char __user *filename;
1d9e1288
BM
622 struct statx __user *buffer;
623};
624
36f4fa68
JA
625struct io_shutdown {
626 struct file *file;
627 int how;
628};
629
80a261fd
JA
630struct io_rename {
631 struct file *file;
632 int old_dfd;
633 int new_dfd;
634 struct filename *oldpath;
635 struct filename *newpath;
636 int flags;
637};
638
14a1143b
JA
639struct io_unlink {
640 struct file *file;
641 int dfd;
642 int flags;
643 struct filename *filename;
644};
645
3ca405eb
PB
646struct io_completion {
647 struct file *file;
648 struct list_head list;
0f7e466b 649 int cflags;
3ca405eb
PB
650};
651
f499a021
JA
652struct io_async_connect {
653 struct sockaddr_storage address;
654};
655
03b1230c
JA
656struct io_async_msghdr {
657 struct iovec fast_iov[UIO_FASTIOV];
257e84a5
PB
658 /* points to an allocated iov, if NULL we use fast_iov instead */
659 struct iovec *free_iov;
03b1230c
JA
660 struct sockaddr __user *uaddr;
661 struct msghdr msg;
b537916c 662 struct sockaddr_storage addr;
03b1230c
JA
663};
664
f67676d1
JA
665struct io_async_rw {
666 struct iovec fast_iov[UIO_FASTIOV];
ff6165b2
JA
667 const struct iovec *free_iovec;
668 struct iov_iter iter;
227c0c96 669 size_t bytes_done;
bcf5a063 670 struct wait_page_queue wpq;
f67676d1
JA
671};
672
6b47ee6e
PB
673enum {
674 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
675 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
676 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
677 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
678 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
bcda7baa 679 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
6b47ee6e 680
6b47ee6e
PB
681 REQ_F_FAIL_LINK_BIT,
682 REQ_F_INFLIGHT_BIT,
683 REQ_F_CUR_POS_BIT,
684 REQ_F_NOWAIT_BIT,
6b47ee6e 685 REQ_F_LINK_TIMEOUT_BIT,
6b47ee6e 686 REQ_F_ISREG_BIT,
99bc4c38 687 REQ_F_NEED_CLEANUP_BIT,
d7718a9d 688 REQ_F_POLLED_BIT,
bcda7baa 689 REQ_F_BUFFER_SELECTED_BIT,
5b0bbee4 690 REQ_F_NO_FILE_TABLE_BIT,
7cdaf587 691 REQ_F_WORK_INITIALIZED_BIT,
900fad45 692 REQ_F_LTIMEOUT_ACTIVE_BIT,
e342c807 693 REQ_F_COMPLETE_INLINE_BIT,
84557871
JA
694
695 /* not a real bit, just to check we're not overflowing the space */
696 __REQ_F_LAST_BIT,
6b47ee6e
PB
697};
698
699enum {
700 /* ctx owns file */
701 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
702 /* drain existing IO first */
703 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
704 /* linked sqes */
705 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
706 /* doesn't sever on completion < 0 */
707 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
708 /* IOSQE_ASYNC */
709 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
bcda7baa
JA
710 /* IOSQE_BUFFER_SELECT */
711 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
6b47ee6e 712
6b47ee6e
PB
713 /* fail rest of links */
714 REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
715 /* on inflight list */
716 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
717 /* read/write uses file position */
718 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
719 /* must not punt to workers */
720 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
900fad45 721 /* has or had linked timeout */
6b47ee6e 722 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
6b47ee6e
PB
723 /* regular file */
724 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
99bc4c38
PB
725 /* needs cleanup */
726 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
d7718a9d
JA
727 /* already went through poll handler */
728 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
bcda7baa
JA
729 /* buffer already selected */
730 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
5b0bbee4
JA
731 /* doesn't need file table for this request */
732 REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
7cdaf587
XW
733 /* io_wq_work is initialized */
734 REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
900fad45
PB
735 /* linked timeout is active, i.e. prepared by link's head */
736 REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
e342c807
PB
737 /* completion is deferred through io_comp_state */
738 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
d7718a9d
JA
739};
740
741struct async_poll {
742 struct io_poll_iocb poll;
807abcb0 743 struct io_poll_iocb *double_poll;
6b47ee6e
PB
744};
745
7cbf1722
JA
746struct io_task_work {
747 struct io_wq_work_node node;
748 task_work_func_t func;
749};
750
/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'file' in this struct.
 */
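/*
 * For example (illustrative, not a new invariant): once a request's file has
 * been assigned, the same pointer is reachable as req->file, req->poll.file,
 * or req->rw.kiocb.ki_filp for a read/write request, precisely because every
 * union member begins with the struct file pointer.
 */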
2b188cc1 757struct io_kiocb {
221c5eb2 758 union {
09bb8394 759 struct file *file;
9adbd45d 760 struct io_rw rw;
221c5eb2 761 struct io_poll_iocb poll;
018043be 762 struct io_poll_remove poll_remove;
8ed8d3c3
JA
763 struct io_accept accept;
764 struct io_sync sync;
fbf23849 765 struct io_cancel cancel;
b29472ee 766 struct io_timeout timeout;
0bdf7a2d 767 struct io_timeout_rem timeout_rem;
3fbb51c1 768 struct io_connect connect;
e47293fd 769 struct io_sr_msg sr_msg;
15b71abe 770 struct io_open open;
b5dba59e 771 struct io_close close;
269bbe5f 772 struct io_rsrc_update rsrc_update;
4840e418 773 struct io_fadvise fadvise;
c1ca757b 774 struct io_madvise madvise;
3e4827b0 775 struct io_epoll epoll;
7d67af2c 776 struct io_splice splice;
ddf0322d 777 struct io_provide_buf pbuf;
1d9e1288 778 struct io_statx statx;
36f4fa68 779 struct io_shutdown shutdown;
80a261fd 780 struct io_rename rename;
14a1143b 781 struct io_unlink unlink;
3ca405eb
PB
782 /* use only after cleaning per-op data, see io_clean_op() */
783 struct io_completion compl;
221c5eb2 784 };
2b188cc1 785
e8c2bc1f
JA
786 /* opcode allocated if it needs to store data for async defer */
787 void *async_data;
d625c6ee 788 u8 opcode;
65a6543d
XW
789 /* polled IO has completed */
790 u8 iopoll_completed;
2b188cc1 791
4f4eeba8 792 u16 buf_index;
9cf7c104 793 u32 result;
4f4eeba8 794
010e8e6b
PB
795 struct io_ring_ctx *ctx;
796 unsigned int flags;
797 refcount_t refs;
798 struct task_struct *task;
799 u64 user_data;
d7718a9d 800
f2f87370 801 struct io_kiocb *link;
269bbe5f 802 struct percpu_ref *fixed_rsrc_refs;
fcb323cc 803
d21ffe7e
PB
804 /*
805 * 1. used with ctx->iopoll_list with reads/writes
806 * 2. to track reqs with ->files (see io_op_def::file_table)
807 */
010e8e6b 808 struct list_head inflight_entry;
7cbf1722
JA
809 union {
810 struct io_task_work io_task_work;
811 struct callback_head task_work;
812 };
010e8e6b
PB
813 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
814 struct hlist_node hash_node;
815 struct async_poll *apoll;
816 struct io_wq_work work;
2b188cc1 817};
05589553 818
27dc8338
PB
819struct io_defer_entry {
820 struct list_head list;
821 struct io_kiocb *req;
9cf7c104 822 u32 seq;
2b188cc1
JA
823};
824
d3656344 825struct io_op_def {
d3656344
JA
826 /* needs req->file assigned */
827 unsigned needs_file : 1;
d3656344
JA
828 /* hash wq insertion if file is a regular file */
829 unsigned hash_reg_file : 1;
830 /* unbound wq insertion if file is a non-regular file */
831 unsigned unbound_nonreg_file : 1;
66f4af93
JA
832 /* opcode is not supported by this kernel */
833 unsigned not_supported : 1;
8a72758c
JA
834 /* set if opcode supports polled "wait" */
835 unsigned pollin : 1;
836 unsigned pollout : 1;
bcda7baa
JA
837 /* op supports buffer selection */
838 unsigned buffer_select : 1;
e8c2bc1f
JA
839 /* must always have async data allocated */
840 unsigned needs_async_data : 1;
27926b68
JA
841 /* should block plug */
842 unsigned plug : 1;
e8c2bc1f
JA
843 /* size of async data needed, if any */
844 unsigned short async_size;
d3656344
JA
845};
846
0918682b 847static const struct io_op_def io_op_defs[] = {
0463b6c5
PB
848 [IORING_OP_NOP] = {},
849 [IORING_OP_READV] = {
d3656344
JA
850 .needs_file = 1,
851 .unbound_nonreg_file = 1,
8a72758c 852 .pollin = 1,
4d954c25 853 .buffer_select = 1,
e8c2bc1f 854 .needs_async_data = 1,
27926b68 855 .plug = 1,
e8c2bc1f 856 .async_size = sizeof(struct io_async_rw),
d3656344 857 },
0463b6c5 858 [IORING_OP_WRITEV] = {
d3656344
JA
859 .needs_file = 1,
860 .hash_reg_file = 1,
861 .unbound_nonreg_file = 1,
8a72758c 862 .pollout = 1,
e8c2bc1f 863 .needs_async_data = 1,
27926b68 864 .plug = 1,
e8c2bc1f 865 .async_size = sizeof(struct io_async_rw),
d3656344 866 },
0463b6c5 867 [IORING_OP_FSYNC] = {
d3656344
JA
868 .needs_file = 1,
869 },
0463b6c5 870 [IORING_OP_READ_FIXED] = {
d3656344
JA
871 .needs_file = 1,
872 .unbound_nonreg_file = 1,
8a72758c 873 .pollin = 1,
27926b68 874 .plug = 1,
e8c2bc1f 875 .async_size = sizeof(struct io_async_rw),
d3656344 876 },
0463b6c5 877 [IORING_OP_WRITE_FIXED] = {
d3656344
JA
878 .needs_file = 1,
879 .hash_reg_file = 1,
880 .unbound_nonreg_file = 1,
8a72758c 881 .pollout = 1,
27926b68 882 .plug = 1,
e8c2bc1f 883 .async_size = sizeof(struct io_async_rw),
d3656344 884 },
0463b6c5 885 [IORING_OP_POLL_ADD] = {
d3656344
JA
886 .needs_file = 1,
887 .unbound_nonreg_file = 1,
888 },
0463b6c5
PB
889 [IORING_OP_POLL_REMOVE] = {},
890 [IORING_OP_SYNC_FILE_RANGE] = {
d3656344
JA
891 .needs_file = 1,
892 },
0463b6c5 893 [IORING_OP_SENDMSG] = {
d3656344
JA
894 .needs_file = 1,
895 .unbound_nonreg_file = 1,
8a72758c 896 .pollout = 1,
e8c2bc1f
JA
897 .needs_async_data = 1,
898 .async_size = sizeof(struct io_async_msghdr),
d3656344 899 },
0463b6c5 900 [IORING_OP_RECVMSG] = {
d3656344
JA
901 .needs_file = 1,
902 .unbound_nonreg_file = 1,
8a72758c 903 .pollin = 1,
52de1fe1 904 .buffer_select = 1,
e8c2bc1f
JA
905 .needs_async_data = 1,
906 .async_size = sizeof(struct io_async_msghdr),
d3656344 907 },
0463b6c5 908 [IORING_OP_TIMEOUT] = {
e8c2bc1f
JA
909 .needs_async_data = 1,
910 .async_size = sizeof(struct io_timeout_data),
d3656344 911 },
9c8e11b3
PB
912 [IORING_OP_TIMEOUT_REMOVE] = {
913 /* used by timeout updates' prep() */
9c8e11b3 914 },
0463b6c5 915 [IORING_OP_ACCEPT] = {
d3656344
JA
916 .needs_file = 1,
917 .unbound_nonreg_file = 1,
8a72758c 918 .pollin = 1,
d3656344 919 },
0463b6c5
PB
920 [IORING_OP_ASYNC_CANCEL] = {},
921 [IORING_OP_LINK_TIMEOUT] = {
e8c2bc1f
JA
922 .needs_async_data = 1,
923 .async_size = sizeof(struct io_timeout_data),
d3656344 924 },
0463b6c5 925 [IORING_OP_CONNECT] = {
d3656344
JA
926 .needs_file = 1,
927 .unbound_nonreg_file = 1,
8a72758c 928 .pollout = 1,
e8c2bc1f
JA
929 .needs_async_data = 1,
930 .async_size = sizeof(struct io_async_connect),
d3656344 931 },
0463b6c5 932 [IORING_OP_FALLOCATE] = {
d3656344 933 .needs_file = 1,
d3656344 934 },
44526bed
JA
935 [IORING_OP_OPENAT] = {},
936 [IORING_OP_CLOSE] = {},
937 [IORING_OP_FILES_UPDATE] = {},
938 [IORING_OP_STATX] = {},
0463b6c5 939 [IORING_OP_READ] = {
3a6820f2
JA
940 .needs_file = 1,
941 .unbound_nonreg_file = 1,
8a72758c 942 .pollin = 1,
bcda7baa 943 .buffer_select = 1,
27926b68 944 .plug = 1,
e8c2bc1f 945 .async_size = sizeof(struct io_async_rw),
3a6820f2 946 },
0463b6c5 947 [IORING_OP_WRITE] = {
3a6820f2
JA
948 .needs_file = 1,
949 .unbound_nonreg_file = 1,
8a72758c 950 .pollout = 1,
27926b68 951 .plug = 1,
e8c2bc1f 952 .async_size = sizeof(struct io_async_rw),
3a6820f2 953 },
0463b6c5 954 [IORING_OP_FADVISE] = {
4840e418 955 .needs_file = 1,
c1ca757b 956 },
44526bed 957 [IORING_OP_MADVISE] = {},
0463b6c5 958 [IORING_OP_SEND] = {
fddaface
JA
959 .needs_file = 1,
960 .unbound_nonreg_file = 1,
8a72758c 961 .pollout = 1,
fddaface 962 },
0463b6c5 963 [IORING_OP_RECV] = {
fddaface
JA
964 .needs_file = 1,
965 .unbound_nonreg_file = 1,
8a72758c 966 .pollin = 1,
bcda7baa 967 .buffer_select = 1,
fddaface 968 },
0463b6c5 969 [IORING_OP_OPENAT2] = {
cebdb986 970 },
3e4827b0
JA
971 [IORING_OP_EPOLL_CTL] = {
972 .unbound_nonreg_file = 1,
3e4827b0 973 },
7d67af2c
PB
974 [IORING_OP_SPLICE] = {
975 .needs_file = 1,
976 .hash_reg_file = 1,
977 .unbound_nonreg_file = 1,
ddf0322d
JA
978 },
979 [IORING_OP_PROVIDE_BUFFERS] = {},
067524e9 980 [IORING_OP_REMOVE_BUFFERS] = {},
f2a8d5c7
PB
981 [IORING_OP_TEE] = {
982 .needs_file = 1,
983 .hash_reg_file = 1,
984 .unbound_nonreg_file = 1,
985 },
36f4fa68
JA
986 [IORING_OP_SHUTDOWN] = {
987 .needs_file = 1,
988 },
44526bed
JA
989 [IORING_OP_RENAMEAT] = {},
990 [IORING_OP_UNLINKAT] = {},
d3656344
JA
991};
992
9936c7c2
PB
993static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
994 struct task_struct *task,
995 struct files_struct *files);
37d1e2e3 996static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
269bbe5f 997static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
bc9744cd 998static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
1ffc5422 999 struct io_ring_ctx *ctx);
f2303b1f 1000static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
1ffc5422 1001
23faba36 1002static bool io_rw_reissue(struct io_kiocb *req);
78e19bbe 1003static void io_cqring_fill_event(struct io_kiocb *req, long res);
ec9c02ad 1004static void io_put_req(struct io_kiocb *req);
216578e5 1005static void io_put_req_deferred(struct io_kiocb *req, int nr);
c40f6379 1006static void io_double_put_req(struct io_kiocb *req);
c7dae4ba
JA
1007static void io_dismantle_req(struct io_kiocb *req);
1008static void io_put_task(struct task_struct *task, int nr);
1009static void io_queue_next(struct io_kiocb *req);
94ae5e77 1010static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
7271ef3a 1011static void __io_queue_linked_timeout(struct io_kiocb *req);
94ae5e77 1012static void io_queue_linked_timeout(struct io_kiocb *req);
05f3fb3c 1013static int __io_sqe_files_update(struct io_ring_ctx *ctx,
269bbe5f 1014 struct io_uring_rsrc_update *ip,
05f3fb3c 1015 unsigned nr_args);
3ca405eb 1016static void __io_clean_op(struct io_kiocb *req);
8371adf5
PB
1017static struct file *io_file_get(struct io_submit_state *state,
1018 struct io_kiocb *req, int fd, bool fixed);
c5eef2b9 1019static void __io_queue_sqe(struct io_kiocb *req);
269bbe5f 1020static void io_rsrc_put_work(struct work_struct *work);
de0617e4 1021
847595de
PB
1022static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
1023 struct iov_iter *iter, bool needs_lock);
ff6165b2
JA
1024static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
1025 const struct iovec *fast_iov,
227c0c96 1026 struct iov_iter *iter, bool force);
907d1df3 1027static void io_req_task_queue(struct io_kiocb *req);
65453d1e
JA
1028static void io_submit_flush_completions(struct io_comp_state *cs,
1029 struct io_ring_ctx *ctx);
de0617e4 1030
2b188cc1
JA
1031static struct kmem_cache *req_cachep;
1032
0918682b 1033static const struct file_operations io_uring_fops;
2b188cc1
JA
1034
1035struct sock *io_uring_get_socket(struct file *file)
1036{
1037#if defined(CONFIG_UNIX)
1038 if (file->f_op == &io_uring_fops) {
1039 struct io_ring_ctx *ctx = file->private_data;
1040
1041 return ctx->ring_sock->sk;
1042 }
1043#endif
1044 return NULL;
1045}
1046EXPORT_SYMBOL(io_uring_get_socket);
1047
f2f87370
PB
1048#define io_for_each_link(pos, head) \
1049 for (pos = (head); pos; pos = pos->link)
1050
3ca405eb
PB
1051static inline void io_clean_op(struct io_kiocb *req)
1052{
9d5c8190 1053 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
3ca405eb
PB
1054 __io_clean_op(req);
1055}
1056
36f72fe2
PB
1057static inline void io_set_resource_node(struct io_kiocb *req)
1058{
1059 struct io_ring_ctx *ctx = req->ctx;
1060
269bbe5f
BM
1061 if (!req->fixed_rsrc_refs) {
1062 req->fixed_rsrc_refs = &ctx->file_data->node->refs;
1063 percpu_ref_get(req->fixed_rsrc_refs);
36f72fe2
PB
1064 }
1065}
1066
08d23634
PB
1067static bool io_match_task(struct io_kiocb *head,
1068 struct task_struct *task,
1069 struct files_struct *files)
1070{
1071 struct io_kiocb *req;
1072
84965ff8
JA
1073 if (task && head->task != task) {
1074 /* in terms of cancelation, always match if req task is dead */
1075 if (head->task->flags & PF_EXITING)
1076 return true;
08d23634 1077 return false;
84965ff8 1078 }
08d23634
PB
1079 if (!files)
1080 return true;
1081
1082 io_for_each_link(req, head) {
02a13674
JA
1083 if (req->file && req->file->f_op == &io_uring_fops)
1084 return true;
4379bf8b 1085 if (req->task->files == files)
08d23634
PB
1086 return true;
1087 }
1088 return false;
1089}
1090
c40f6379
JA
1091static inline void req_set_fail_links(struct io_kiocb *req)
1092{
1093 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1094 req->flags |= REQ_F_FAIL_LINK;
1095}
4a38aed2 1096
static inline void __io_req_init_async(struct io_kiocb *req)
{
        memset(&req->work, 0, sizeof(req->work));
        req->flags |= REQ_F_WORK_INITIALIZED;
}

/*
 * Note: must call io_req_init_async() before the first time you
 * touch any members of io_wq_work.
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
        if (req->flags & REQ_F_WORK_INITIALIZED)
                return;

        __io_req_init_async(req);
}

2b188cc1
JA
1115static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1116{
1117 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1118
0f158b4c 1119 complete(&ctx->ref_comp);
2b188cc1
JA
1120}
1121
8eb7e2d0
PB
1122static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1123{
1124 return !req->timeout.off;
1125}
1126
2b188cc1
JA
1127static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1128{
1129 struct io_ring_ctx *ctx;
78076bb6 1130 int hash_bits;
2b188cc1
JA
1131
1132 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1133 if (!ctx)
1134 return NULL;
1135
78076bb6
JA
1136 /*
1137 * Use 5 bits less than the max cq entries, that should give us around
1138 * 32 entries per hash list if totally full and uniformly spread.
1139 */
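        /*
         * A worked instance of the sizing above (illustrative numbers): with
         * p->cq_entries == 4096, ilog2(4096) == 12 so hash_bits becomes 7,
         * i.e. 1 << 7 == 128 buckets; a completely full, uniformly hashed CQ
         * then averages 4096 / 128 == 32 entries per hash list.
         */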
1140 hash_bits = ilog2(p->cq_entries);
1141 hash_bits -= 5;
1142 if (hash_bits <= 0)
1143 hash_bits = 1;
1144 ctx->cancel_hash_bits = hash_bits;
1145 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1146 GFP_KERNEL);
1147 if (!ctx->cancel_hash)
1148 goto err;
1149 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1150
21482896 1151 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
206aefde
JA
1152 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1153 goto err;
2b188cc1
JA
1154
1155 ctx->flags = p->flags;
90554200 1156 init_waitqueue_head(&ctx->sqo_sq_wait);
69fb2131 1157 INIT_LIST_HEAD(&ctx->sqd_list);
2b188cc1 1158 init_waitqueue_head(&ctx->cq_wait);
1d7bb1d5 1159 INIT_LIST_HEAD(&ctx->cq_overflow_list);
0f158b4c
JA
1160 init_completion(&ctx->ref_comp);
1161 init_completion(&ctx->sq_thread_comp);
5a2e745d 1162 idr_init(&ctx->io_buffer_idr);
071698e1 1163 idr_init(&ctx->personality_idr);
2b188cc1
JA
1164 mutex_init(&ctx->uring_lock);
1165 init_waitqueue_head(&ctx->wait);
1166 spin_lock_init(&ctx->completion_lock);
540e32a0 1167 INIT_LIST_HEAD(&ctx->iopoll_list);
de0617e4 1168 INIT_LIST_HEAD(&ctx->defer_list);
5262f567 1169 INIT_LIST_HEAD(&ctx->timeout_list);
fcb323cc
JA
1170 spin_lock_init(&ctx->inflight_lock);
1171 INIT_LIST_HEAD(&ctx->inflight_list);
d67d2263
BM
1172 spin_lock_init(&ctx->rsrc_ref_lock);
1173 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
269bbe5f
BM
1174 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1175 init_llist_head(&ctx->rsrc_put_llist);
1b4c351f 1176 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
c7dae4ba 1177 INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
2b188cc1 1178 return ctx;
206aefde 1179err:
78076bb6 1180 kfree(ctx->cancel_hash);
206aefde
JA
1181 kfree(ctx);
1182 return NULL;
2b188cc1
JA
1183}
1184
9cf7c104 1185static bool req_need_defer(struct io_kiocb *req, u32 seq)
7adf4eaf 1186{
2bc9930e
JA
1187 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1188 struct io_ring_ctx *ctx = req->ctx;
a197f664 1189
9cf7c104 1190 return seq != ctx->cached_cq_tail
2c3bac6d 1191 + READ_ONCE(ctx->cached_cq_overflow);
2bc9930e 1192 }
de0617e4 1193
9d858b21 1194 return false;
de0617e4
JA
1195}
1196
4edf20f9 1197static void io_req_clean_work(struct io_kiocb *req)
18d9be1a 1198{
7cdaf587 1199 if (!(req->flags & REQ_F_WORK_INITIALIZED))
4edf20f9 1200 return;
51a4cc11 1201
4379bf8b
JA
1202 if (req->work.creds) {
1203 put_cred(req->work.creds);
1204 req->work.creds = NULL;
34e08fed
PB
1205 }
1206 if (req->flags & REQ_F_INFLIGHT) {
1207 struct io_ring_ctx *ctx = req->ctx;
1208 struct io_uring_task *tctx = req->task->io_uring;
1209 unsigned long flags;
1210
1211 spin_lock_irqsave(&ctx->inflight_lock, flags);
1212 list_del(&req->inflight_entry);
1213 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1214 req->flags &= ~REQ_F_INFLIGHT;
1215 if (atomic_read(&tctx->in_idle))
1216 wake_up(&tctx->wait);
1217 }
51a4cc11 1218
e86d0047 1219 req->flags &= ~REQ_F_WORK_INITIALIZED;
1e6fa521
JA
1220}
1221
ce3d5aae
PB
1222static void io_req_track_inflight(struct io_kiocb *req)
1223{
1224 struct io_ring_ctx *ctx = req->ctx;
1225
1226 if (!(req->flags & REQ_F_INFLIGHT)) {
1227 io_req_init_async(req);
1228 req->flags |= REQ_F_INFLIGHT;
1229
1230 spin_lock_irq(&ctx->inflight_lock);
1231 list_add(&req->inflight_entry, &ctx->inflight_list);
1232 spin_unlock_irq(&ctx->inflight_lock);
1233 }
1234}
1235
1e6fa521
JA
1236static void io_prep_async_work(struct io_kiocb *req)
1237{
1238 const struct io_op_def *def = &io_op_defs[req->opcode];
1e6fa521
JA
1239 struct io_ring_ctx *ctx = req->ctx;
1240
1241 io_req_init_async(req);
1242
feaadc4f
PB
1243 if (req->flags & REQ_F_FORCE_ASYNC)
1244 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1245
1e6fa521
JA
1246 if (req->flags & REQ_F_ISREG) {
1247 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1248 io_wq_hash_work(&req->work, file_inode(req->file));
1249 } else {
1250 if (def->unbound_nonreg_file)
1251 req->work.flags |= IO_WQ_WORK_UNBOUND;
1252 }
4379bf8b
JA
1253 if (!req->work.creds)
1254 req->work.creds = get_current_cred();
561fb04a 1255}
cccf0ee8 1256
cbdcb435 1257static void io_prep_async_link(struct io_kiocb *req)
561fb04a 1258{
cbdcb435 1259 struct io_kiocb *cur;
54a91f3b 1260
f2f87370
PB
1261 io_for_each_link(cur, req)
1262 io_prep_async_work(cur);
561fb04a
JA
1263}
1264
7271ef3a 1265static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
561fb04a 1266{
a197f664 1267 struct io_ring_ctx *ctx = req->ctx;
cbdcb435 1268 struct io_kiocb *link = io_prep_linked_timeout(req);
5aa75ed5 1269 struct io_uring_task *tctx = req->task->io_uring;
561fb04a 1270
3bfe6106
JA
1271 BUG_ON(!tctx);
1272 BUG_ON(!tctx->io_wq);
561fb04a 1273
8766dd51
PB
1274 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1275 &req->work, req->flags);
5aa75ed5 1276 io_wq_enqueue(tctx->io_wq, &req->work);
7271ef3a 1277 return link;
18d9be1a
JA
1278}
1279
cbdcb435
PB
1280static void io_queue_async_work(struct io_kiocb *req)
1281{
7271ef3a
JA
1282 struct io_kiocb *link;
1283
cbdcb435
PB
1284 /* init ->work of the whole link before punting */
1285 io_prep_async_link(req);
7271ef3a
JA
1286 link = __io_queue_async_work(req);
1287
1288 if (link)
1289 io_queue_linked_timeout(link);
cbdcb435
PB
1290}
1291
5262f567
JA
1292static void io_kill_timeout(struct io_kiocb *req)
1293{
e8c2bc1f 1294 struct io_timeout_data *io = req->async_data;
5262f567
JA
1295 int ret;
1296
e8c2bc1f 1297 ret = hrtimer_try_to_cancel(&io->timer);
5262f567 1298 if (ret != -1) {
01cec8c1
PB
1299 atomic_set(&req->ctx->cq_timeouts,
1300 atomic_read(&req->ctx->cq_timeouts) + 1);
135fcde8 1301 list_del_init(&req->timeout.list);
78e19bbe 1302 io_cqring_fill_event(req, 0);
216578e5 1303 io_put_req_deferred(req, 1);
5262f567
JA
1304 }
1305}
1306
76e1b642
JA
1307/*
1308 * Returns true if we found and killed one or more timeouts
1309 */
6b81928d
PB
1310static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
1311 struct files_struct *files)
5262f567
JA
1312{
1313 struct io_kiocb *req, *tmp;
76e1b642 1314 int canceled = 0;
5262f567
JA
1315
1316 spin_lock_irq(&ctx->completion_lock);
f3606e3a 1317 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
6b81928d 1318 if (io_match_task(req, tsk, files)) {
f3606e3a 1319 io_kill_timeout(req);
76e1b642
JA
1320 canceled++;
1321 }
f3606e3a 1322 }
5262f567 1323 spin_unlock_irq(&ctx->completion_lock);
76e1b642 1324 return canceled != 0;
5262f567
JA
1325}
1326
04518945 1327static void __io_queue_deferred(struct io_ring_ctx *ctx)
de0617e4 1328{
04518945 1329 do {
27dc8338
PB
1330 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1331 struct io_defer_entry, list);
de0617e4 1332
9cf7c104 1333 if (req_need_defer(de->req, de->seq))
04518945 1334 break;
27dc8338 1335 list_del_init(&de->list);
907d1df3 1336 io_req_task_queue(de->req);
27dc8338 1337 kfree(de);
04518945
PB
1338 } while (!list_empty(&ctx->defer_list));
1339}
1340
360428f8 1341static void io_flush_timeouts(struct io_ring_ctx *ctx)
de0617e4 1342{
f010505b
MDG
1343 u32 seq;
1344
1345 if (list_empty(&ctx->timeout_list))
1346 return;
1347
1348 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1349
1350 do {
1351 u32 events_needed, events_got;
360428f8 1352 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
135fcde8 1353 struct io_kiocb, timeout.list);
de0617e4 1354
8eb7e2d0 1355 if (io_is_timeout_noseq(req))
360428f8 1356 break;
f010505b
MDG
1357
1358 /*
1359 * Since seq can easily wrap around over time, subtract
1360 * the last seq at which timeouts were flushed before comparing.
1361 * Assuming not more than 2^31-1 events have happened since,
1362 * these subtractions won't have wrapped, so we can check if
1363 * target is in [last_seq, current_seq] by comparing the two.
1364 */
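                /*
                 * Worked instance with illustrative values: if
                 * cq_last_tm_flush == 0xfffffff0, seq == 0xfffffff8 and a
                 * timeout's target_seq == 0x00000004 (just past the u32 wrap),
                 * a naive "target_seq <= seq" test would fire it too early.
                 * With the subtractions below, events_needed == 0x14 while
                 * events_got is only 0x08, so the loop correctly stops here.
                 */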
1365 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1366 events_got = seq - ctx->cq_last_tm_flush;
1367 if (events_got < events_needed)
360428f8 1368 break;
bfe68a22 1369
135fcde8 1370 list_del_init(&req->timeout.list);
5262f567 1371 io_kill_timeout(req);
f010505b
MDG
1372 } while (!list_empty(&ctx->timeout_list));
1373
1374 ctx->cq_last_tm_flush = seq;
360428f8 1375}
5262f567 1376
360428f8
PB
1377static void io_commit_cqring(struct io_ring_ctx *ctx)
1378{
1379 io_flush_timeouts(ctx);
ec30e04b
PB
1380
1381 /* order cqe stores with ring update */
1382 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
de0617e4 1383
04518945
PB
1384 if (unlikely(!list_empty(&ctx->defer_list)))
1385 __io_queue_deferred(ctx);
de0617e4
JA
1386}
1387
90554200
JA
1388static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1389{
1390 struct io_rings *r = ctx->rings;
1391
1392 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1393}
1394
888aae2e
PB
1395static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1396{
1397 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1398}
1399
2b188cc1
JA
1400static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1401{
75b28aff 1402 struct io_rings *rings = ctx->rings;
2b188cc1
JA
1403 unsigned tail;
1404
115e12e5
SB
1405 /*
1406 * writes to the cq entry need to come after reading head; the
1407 * control dependency is enough as we're using WRITE_ONCE to
1408 * fill the cq entry
1409 */
888aae2e 1410 if (__io_cqring_events(ctx) == rings->cq_ring_entries)
2b188cc1
JA
1411 return NULL;
1412
888aae2e 1413 tail = ctx->cached_cq_tail++;
75b28aff 1414 return &rings->cqes[tail & ctx->cq_mask];
2b188cc1
JA
1415}
1416
f2842ab5
JA
1417static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1418{
f0b493e6
JA
1419 if (!ctx->cq_ev_fd)
1420 return false;
7e55a19c
SG
1421 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1422 return false;
f2842ab5
JA
1423 if (!ctx->eventfd_async)
1424 return true;
b41e9852 1425 return io_wq_current_is_worker();
f2842ab5
JA
1426}
1427
b41e9852 1428static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1d7bb1d5 1429{
b1445e59
PB
1430 /* see waitqueue_active() comment */
1431 smp_mb();
1432
1d7bb1d5
JA
1433 if (waitqueue_active(&ctx->wait))
1434 wake_up(&ctx->wait);
534ca6d6
JA
1435 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1436 wake_up(&ctx->sq_data->wait);
b41e9852 1437 if (io_should_trigger_evfd(ctx))
1d7bb1d5 1438 eventfd_signal(ctx->cq_ev_fd, 1);
b1445e59 1439 if (waitqueue_active(&ctx->cq_wait)) {
4aa84f2f
PB
1440 wake_up_interruptible(&ctx->cq_wait);
1441 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1442 }
1d7bb1d5
JA
1443}
1444
80c18e4a
PB
1445static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1446{
b1445e59
PB
1447 /* see waitqueue_active() comment */
1448 smp_mb();
1449
80c18e4a
PB
1450 if (ctx->flags & IORING_SETUP_SQPOLL) {
1451 if (waitqueue_active(&ctx->wait))
1452 wake_up(&ctx->wait);
1453 }
1454 if (io_should_trigger_evfd(ctx))
1455 eventfd_signal(ctx->cq_ev_fd, 1);
b1445e59 1456 if (waitqueue_active(&ctx->cq_wait)) {
4aa84f2f
PB
1457 wake_up_interruptible(&ctx->cq_wait);
1458 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1459 }
80c18e4a
PB
1460}
1461
c4a2ed72 1462/* Returns true if there are no backlogged entries after the flush */
6c503150
PB
1463static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1464 struct task_struct *tsk,
1465 struct files_struct *files)
1d7bb1d5
JA
1466{
1467 struct io_rings *rings = ctx->rings;
e6c8aa9a 1468 struct io_kiocb *req, *tmp;
1d7bb1d5 1469 struct io_uring_cqe *cqe;
1d7bb1d5 1470 unsigned long flags;
b18032bb 1471 bool all_flushed, posted;
1d7bb1d5
JA
1472 LIST_HEAD(list);
1473
e23de15f
PB
1474 if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
1475 return false;
1d7bb1d5 1476
b18032bb 1477 posted = false;
1d7bb1d5 1478 spin_lock_irqsave(&ctx->completion_lock, flags);
e6c8aa9a 1479 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
08d23634 1480 if (!io_match_task(req, tsk, files))
e6c8aa9a
JA
1481 continue;
1482
1d7bb1d5
JA
1483 cqe = io_get_cqring(ctx);
1484 if (!cqe && !force)
1485 break;
1486
40d8ddd4 1487 list_move(&req->compl.list, &list);
1d7bb1d5
JA
1488 if (cqe) {
1489 WRITE_ONCE(cqe->user_data, req->user_data);
1490 WRITE_ONCE(cqe->res, req->result);
0f7e466b 1491 WRITE_ONCE(cqe->flags, req->compl.cflags);
1d7bb1d5 1492 } else {
2c3bac6d 1493 ctx->cached_cq_overflow++;
1d7bb1d5 1494 WRITE_ONCE(ctx->rings->cq_overflow,
2c3bac6d 1495 ctx->cached_cq_overflow);
1d7bb1d5 1496 }
b18032bb 1497 posted = true;
1d7bb1d5
JA
1498 }
1499
09e88404
PB
1500 all_flushed = list_empty(&ctx->cq_overflow_list);
1501 if (all_flushed) {
1502 clear_bit(0, &ctx->sq_check_overflow);
1503 clear_bit(0, &ctx->cq_check_overflow);
1504 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1505 }
46930143 1506
b18032bb
JA
1507 if (posted)
1508 io_commit_cqring(ctx);
1d7bb1d5 1509 spin_unlock_irqrestore(&ctx->completion_lock, flags);
b18032bb
JA
1510 if (posted)
1511 io_cqring_ev_posted(ctx);
1d7bb1d5
JA
1512
1513 while (!list_empty(&list)) {
40d8ddd4
PB
1514 req = list_first_entry(&list, struct io_kiocb, compl.list);
1515 list_del(&req->compl.list);
ec9c02ad 1516 io_put_req(req);
1d7bb1d5 1517 }
c4a2ed72 1518
09e88404 1519 return all_flushed;
1d7bb1d5
JA
1520}
1521
6c503150
PB
1522static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1523 struct task_struct *tsk,
1524 struct files_struct *files)
1525{
1526 if (test_bit(0, &ctx->cq_check_overflow)) {
1527 /* iopoll syncs against uring_lock, not completion_lock */
1528 if (ctx->flags & IORING_SETUP_IOPOLL)
1529 mutex_lock(&ctx->uring_lock);
1530 __io_cqring_overflow_flush(ctx, force, tsk, files);
1531 if (ctx->flags & IORING_SETUP_IOPOLL)
1532 mutex_unlock(&ctx->uring_lock);
1533 }
1534}
1535
bcda7baa 1536static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
2b188cc1 1537{
78e19bbe 1538 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1539 struct io_uring_cqe *cqe;
1540
78e19bbe 1541 trace_io_uring_complete(ctx, req->user_data, res);
51c3ff62 1542
2b188cc1
JA
1543 /*
1544 * If we can't get a cq entry, userspace overflowed the
1545 * submission (by quite a lot). Increment the overflow count in
1546 * the ring.
1547 */
1548 cqe = io_get_cqring(ctx);
1d7bb1d5 1549 if (likely(cqe)) {
78e19bbe 1550 WRITE_ONCE(cqe->user_data, req->user_data);
2b188cc1 1551 WRITE_ONCE(cqe->res, res);
bcda7baa 1552 WRITE_ONCE(cqe->flags, cflags);
fdaf083c
JA
1553 } else if (ctx->cq_overflow_flushed ||
1554 atomic_read(&req->task->io_uring->in_idle)) {
0f212204
JA
1555 /*
1556 * If we're in ring overflow flush mode, or in task cancel mode,
1557 * then we cannot store the request for later flushing, we need
1558 * to drop it on the floor.
1559 */
2c3bac6d
PB
1560 ctx->cached_cq_overflow++;
1561 WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
1d7bb1d5 1562 } else {
ad3eb2c8
JA
1563 if (list_empty(&ctx->cq_overflow_list)) {
1564 set_bit(0, &ctx->sq_check_overflow);
1565 set_bit(0, &ctx->cq_check_overflow);
6d5f9049 1566 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
ad3eb2c8 1567 }
40d8ddd4 1568 io_clean_op(req);
1d7bb1d5 1569 req->result = res;
0f7e466b 1570 req->compl.cflags = cflags;
40d8ddd4
PB
1571 refcount_inc(&req->refs);
1572 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
2b188cc1
JA
1573 }
1574}
1575
bcda7baa
JA
1576static void io_cqring_fill_event(struct io_kiocb *req, long res)
1577{
1578 __io_cqring_fill_event(req, res, 0);
1579}
1580
c7dae4ba
JA
1581static inline void io_req_complete_post(struct io_kiocb *req, long res,
1582 unsigned int cflags)
2b188cc1 1583{
78e19bbe 1584 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1585 unsigned long flags;
1586
1587 spin_lock_irqsave(&ctx->completion_lock, flags);
bcda7baa 1588 __io_cqring_fill_event(req, res, cflags);
2b188cc1 1589 io_commit_cqring(ctx);
c7dae4ba
JA
1590 /*
1591 * If we're the last reference to this request, add to our locked
1592 * free_list cache.
1593 */
1594 if (refcount_dec_and_test(&req->refs)) {
1595 struct io_comp_state *cs = &ctx->submit_state.comp;
1596
1597 io_dismantle_req(req);
1598 io_put_task(req->task, 1);
1599 list_add(&req->compl.list, &cs->locked_free_list);
1600 cs->locked_free_nr++;
1601 } else
1602 req = NULL;
2b188cc1
JA
1603 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1604
8c838788 1605 io_cqring_ev_posted(ctx);
c7dae4ba
JA
1606 if (req) {
1607 io_queue_next(req);
1608 percpu_ref_put(&ctx->refs);
229a7b63 1609 }
229a7b63
JA
1610}
1611
a38d68db 1612static void io_req_complete_state(struct io_kiocb *req, long res,
889fca73 1613 unsigned int cflags)
229a7b63 1614{
a38d68db
PB
1615 io_clean_op(req);
1616 req->result = res;
1617 req->compl.cflags = cflags;
e342c807 1618 req->flags |= REQ_F_COMPLETE_INLINE;
e1e16097
JA
1619}
1620
889fca73
PB
1621static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1622 long res, unsigned cflags)
bcda7baa 1623{
889fca73
PB
1624 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1625 io_req_complete_state(req, res, cflags);
a38d68db 1626 else
c7dae4ba 1627 io_req_complete_post(req, res, cflags);
bcda7baa
JA
1628}
1629
a38d68db 1630static inline void io_req_complete(struct io_kiocb *req, long res)
0ddf92e8 1631{
889fca73 1632 __io_req_complete(req, 0, res, 0);
0ddf92e8
JA
1633}
1634
c7dae4ba 1635static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
0ddf92e8 1636{
c7dae4ba
JA
1637 struct io_submit_state *state = &ctx->submit_state;
1638 struct io_comp_state *cs = &state->comp;
e5d1bc0a 1639 struct io_kiocb *req = NULL;
0ddf92e8 1640
c7dae4ba
JA
1641 /*
1642 * If we have more than a batch's worth of requests in our IRQ side
1643 * locked cache, grab the lock and move them over to our submission
1644 * side cache.
1645 */
1646 if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
1647 spin_lock_irq(&ctx->completion_lock);
1648 list_splice_init(&cs->locked_free_list, &cs->free_list);
1649 cs->locked_free_nr = 0;
1650 spin_unlock_irq(&ctx->completion_lock);
1651 }
0ddf92e8 1652
c7dae4ba
JA
1653 while (!list_empty(&cs->free_list)) {
1654 req = list_first_entry(&cs->free_list, struct io_kiocb,
1b4c351f
JA
1655 compl.list);
1656 list_del(&req->compl.list);
e5d1bc0a
PB
1657 state->reqs[state->free_reqs++] = req;
1658 if (state->free_reqs == ARRAY_SIZE(state->reqs))
1659 break;
1b4c351f
JA
1660 }
1661
e5d1bc0a 1662 return req != NULL;
0ddf92e8
JA
1663}
1664
e5d1bc0a 1665static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2b188cc1 1666{
e5d1bc0a
PB
1667 struct io_submit_state *state = &ctx->submit_state;
1668
1669 BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
1670
f6b6c7d6 1671 if (!state->free_reqs) {
291b2821 1672 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
2579f913
JA
1673 int ret;
1674
c7dae4ba 1675 if (io_flush_cached_reqs(ctx))
e5d1bc0a
PB
1676 goto got_req;
1677
bf019da7
PB
1678 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1679 state->reqs);
fd6fab2c
JA
1680
1681 /*
1682 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1683 * retry single alloc to be on the safe side.
1684 */
1685 if (unlikely(ret <= 0)) {
1686 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1687 if (!state->reqs[0])
3893f39f 1688 return NULL;
fd6fab2c
JA
1689 ret = 1;
1690 }
291b2821 1691 state->free_reqs = ret;
2b188cc1 1692 }
e5d1bc0a 1693got_req:
291b2821
PB
1694 state->free_reqs--;
1695 return state->reqs[state->free_reqs];
2b188cc1
JA
1696}
1697
8da11c19
PB
1698static inline void io_put_file(struct io_kiocb *req, struct file *file,
1699 bool fixed)
1700{
36f72fe2 1701 if (!fixed)
8da11c19
PB
1702 fput(file);
1703}
1704
4edf20f9 1705static void io_dismantle_req(struct io_kiocb *req)
2b188cc1 1706{
3ca405eb 1707 io_clean_op(req);
929a3af9 1708
e8c2bc1f
JA
1709 if (req->async_data)
1710 kfree(req->async_data);
8da11c19
PB
1711 if (req->file)
1712 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
269bbe5f
BM
1713 if (req->fixed_rsrc_refs)
1714 percpu_ref_put(req->fixed_rsrc_refs);
4edf20f9 1715 io_req_clean_work(req);
e65ef56d
JA
1716}
1717
7c660731
PB
1718static inline void io_put_task(struct task_struct *task, int nr)
1719{
1720 struct io_uring_task *tctx = task->io_uring;
1721
1722 percpu_counter_sub(&tctx->inflight, nr);
1723 if (unlikely(atomic_read(&tctx->in_idle)))
1724 wake_up(&tctx->wait);
1725 put_task_struct_many(task, nr);
1726}
1727
216578e5 1728static void __io_free_req(struct io_kiocb *req)
c6ca97b3 1729{
51a4cc11 1730 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 1731
216578e5 1732 io_dismantle_req(req);
7c660731 1733 io_put_task(req->task, 1);
c6ca97b3 1734
3893f39f 1735 kmem_cache_free(req_cachep, req);
ecfc5177 1736 percpu_ref_put(&ctx->refs);
e65ef56d
JA
1737}
1738
f2f87370
PB
1739static inline void io_remove_next_linked(struct io_kiocb *req)
1740{
1741 struct io_kiocb *nxt = req->link;
1742
1743 req->link = nxt->link;
1744 nxt->link = NULL;
1745}
1746
c9abd7ad 1747static void io_kill_linked_timeout(struct io_kiocb *req)
2665abfd 1748{
a197f664 1749 struct io_ring_ctx *ctx = req->ctx;
7c86ffee 1750 struct io_kiocb *link;
c9abd7ad
PB
1751 bool cancelled = false;
1752 unsigned long flags;
7c86ffee 1753
c9abd7ad 1754 spin_lock_irqsave(&ctx->completion_lock, flags);
f2f87370
PB
1755 link = req->link;
1756
900fad45
PB
1757 /*
 1758 * Can happen if a linked timeout fired and the link had been like
1759 * req -> link t-out -> link t-out [-> ...]
1760 */
c9abd7ad
PB
1761 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1762 struct io_timeout_data *io = link->async_data;
1763 int ret;
7c86ffee 1764
f2f87370 1765 io_remove_next_linked(req);
90cd7e42 1766 link->timeout.head = NULL;
c9abd7ad
PB
1767 ret = hrtimer_try_to_cancel(&io->timer);
1768 if (ret != -1) {
1769 io_cqring_fill_event(link, -ECANCELED);
1770 io_commit_cqring(ctx);
1771 cancelled = true;
1772 }
1773 }
7c86ffee 1774 req->flags &= ~REQ_F_LINK_TIMEOUT;
216578e5 1775 spin_unlock_irqrestore(&ctx->completion_lock, flags);
ab0b6451 1776
c9abd7ad 1777 if (cancelled) {
7c86ffee 1778 io_cqring_ev_posted(ctx);
c9abd7ad
PB
1779 io_put_req(link);
1780 }
7c86ffee
PB
1781}
1782
9e645e11 1783
d148ca4b 1784static void io_fail_links(struct io_kiocb *req)
9e645e11 1785{
f2f87370 1786 struct io_kiocb *link, *nxt;
2665abfd 1787 struct io_ring_ctx *ctx = req->ctx;
d148ca4b 1788 unsigned long flags;
9e645e11 1789
d148ca4b 1790 spin_lock_irqsave(&ctx->completion_lock, flags);
f2f87370
PB
1791 link = req->link;
1792 req->link = NULL;
9e645e11 1793
f2f87370
PB
1794 while (link) {
1795 nxt = link->link;
1796 link->link = NULL;
2665abfd 1797
f2f87370 1798 trace_io_uring_fail_link(req, link);
7c86ffee 1799 io_cqring_fill_event(link, -ECANCELED);
216578e5 1800
1575f21a 1801 io_put_req_deferred(link, 2);
f2f87370 1802 link = nxt;
9e645e11 1803 }
2665abfd 1804 io_commit_cqring(ctx);
216578e5 1805 spin_unlock_irqrestore(&ctx->completion_lock, flags);
9e645e11 1806
2665abfd 1807 io_cqring_ev_posted(ctx);
9e645e11
JA
1808}
1809
3fa5e0f3 1810static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
c69f8dbe 1811{
7c86ffee
PB
1812 if (req->flags & REQ_F_LINK_TIMEOUT)
1813 io_kill_linked_timeout(req);
944e58bf 1814
9e645e11
JA
1815 /*
1816 * If LINK is set, we have dependent requests in this chain. If we
1817 * didn't fail this request, queue the first one up, moving any other
1818 * dependencies to the next request. In case of failure, fail the rest
1819 * of the chain.
1820 */
f2f87370
PB
1821 if (likely(!(req->flags & REQ_F_FAIL_LINK))) {
1822 struct io_kiocb *nxt = req->link;
1823
1824 req->link = NULL;
1825 return nxt;
1826 }
9b5f7bd9
PB
1827 io_fail_links(req);
1828 return NULL;
4d7dd462 1829}
9e645e11 1830
f2f87370 1831static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
3fa5e0f3 1832{
cdbff982 1833 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
3fa5e0f3
PB
1834 return NULL;
1835 return __io_req_find_next(req);
1836}
1837
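/*
 * Illustrative sketch, NOT part of fs/io_uring.c: the link-chain handling
 * described in the comment above — on success only the head is detached and
 * the next request is returned for submission; on failure the whole remaining
 * chain is walked and cancelled.  struct lreq and cancel_one() are invented
 * for this sketch (the kernel posts -ECANCELED completions instead).
 */
#include <stdbool.h>
#include <stddef.h>

struct lreq {
	struct lreq *link;	/* next request in the chain, or NULL */
	bool failed;
};

static void cancel_one(struct lreq *r) { (void)r; }	/* sketch stub */

static struct lreq *find_next(struct lreq *req)
{
	struct lreq *nxt = req->link;

	req->link = NULL;
	if (!req->failed) {
		/* hand the dependent request over, detached from us */
		return nxt;
	}
	/* failure: cancel every remaining request in the chain */
	while (nxt) {
		struct lreq *tmp = nxt->link;

		nxt->link = NULL;
		cancel_one(nxt);
		nxt = tmp;
	}
	return NULL;
}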
7cbf1722 1838static bool __tctx_task_work(struct io_uring_task *tctx)
c2c4c83c 1839{
65453d1e 1840 struct io_ring_ctx *ctx = NULL;
7cbf1722
JA
1841 struct io_wq_work_list list;
1842 struct io_wq_work_node *node;
c2c4c83c 1843
7cbf1722
JA
1844 if (wq_list_empty(&tctx->task_list))
1845 return false;
6200b0ae 1846
0b81e80c 1847 spin_lock_irq(&tctx->task_lock);
7cbf1722
JA
1848 list = tctx->task_list;
1849 INIT_WQ_LIST(&tctx->task_list);
0b81e80c 1850 spin_unlock_irq(&tctx->task_lock);
c2c4c83c 1851
7cbf1722
JA
1852 node = list.first;
1853 while (node) {
1854 struct io_wq_work_node *next = node->next;
65453d1e 1855 struct io_ring_ctx *this_ctx;
7cbf1722 1856 struct io_kiocb *req;
0ba9c9ed 1857
7cbf1722 1858 req = container_of(node, struct io_kiocb, io_task_work.node);
65453d1e 1859 this_ctx = req->ctx;
7cbf1722
JA
1860 req->task_work.func(&req->task_work);
1861 node = next;
65453d1e
JA
1862
1863 if (!ctx) {
1864 ctx = this_ctx;
1865 } else if (ctx != this_ctx) {
1866 mutex_lock(&ctx->uring_lock);
1867 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
1868 mutex_unlock(&ctx->uring_lock);
1869 ctx = this_ctx;
1870 }
1871 }
1872
1873 if (ctx && ctx->submit_state.comp.nr) {
1874 mutex_lock(&ctx->uring_lock);
1875 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
1876 mutex_unlock(&ctx->uring_lock);
7cbf1722
JA
1877 }
1878
1879 return list.first != NULL;
c2c4c83c
JA
1880}
1881
7cbf1722 1882static void tctx_task_work(struct callback_head *cb)
c40f6379 1883{
7cbf1722 1884 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
c40f6379 1885
1d5f360d
JA
1886 clear_bit(0, &tctx->task_state);
1887
7cbf1722
JA
1888 while (__tctx_task_work(tctx))
1889 cond_resched();
7cbf1722
JA
1890}
1891
1892static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
1893 enum task_work_notify_mode notify)
1894{
1895 struct io_uring_task *tctx = tsk->io_uring;
1896 struct io_wq_work_node *node, *prev;
0b81e80c 1897 unsigned long flags;
7cbf1722
JA
1898 int ret;
1899
1900 WARN_ON_ONCE(!tctx);
1901
0b81e80c 1902 spin_lock_irqsave(&tctx->task_lock, flags);
7cbf1722 1903 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
0b81e80c 1904 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
1905
1906 /* task_work already pending, we're done */
1907 if (test_bit(0, &tctx->task_state) ||
1908 test_and_set_bit(0, &tctx->task_state))
1909 return 0;
1910
1911 if (!task_work_add(tsk, &tctx->task_work, notify))
1912 return 0;
1913
1914 /*
 1915 * Slow path - we failed, find and delete work. If the work is not
1916 * in the list, it got run and we're fine.
1917 */
1918 ret = 0;
0b81e80c 1919 spin_lock_irqsave(&tctx->task_lock, flags);
7cbf1722
JA
1920 wq_list_for_each(node, prev, &tctx->task_list) {
1921 if (&req->io_task_work.node == node) {
1922 wq_list_del(&tctx->task_list, node, prev);
1923 ret = 1;
1924 break;
1925 }
1926 }
0b81e80c 1927 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
1928 clear_bit(0, &tctx->task_state);
1929 return ret;
1930}
1931
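/*
 * Illustrative sketch, NOT part of fs/io_uring.c: the queueing scheme used by
 * io_task_work_add()/tctx_task_work() — work is appended to a locked list, and
 * a single "notification pending" bit ensures only the first producer after a
 * drain actually signals the consumer; the consumer clears the bit before
 * splicing the list off.  All names (work_queue, work_node, notify_consumer)
 * are invented for this sketch; the kernel uses tctx->task_list, bit 0 of
 * tctx->task_state and task_work_add().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct work_node {
	struct work_node *next;
};

struct work_queue {
	pthread_mutex_t lock;		/* ~ tctx->task_lock */
	struct work_node *head, **tail;	/* ~ tctx->task_list */
	atomic_bool pending;		/* ~ bit 0 of tctx->task_state */
};

static void work_queue_init(struct work_queue *q)
{
	pthread_mutex_init(&q->lock, NULL);
	q->head = NULL;
	q->tail = &q->head;
	atomic_store(&q->pending, false);
}

/* stand-in for task_work_add() + wake_up_process(); left empty in the sketch */
static void notify_consumer(struct work_queue *q) { (void)q; }

static void work_queue_add(struct work_queue *q, struct work_node *node)
{
	node->next = NULL;
	pthread_mutex_lock(&q->lock);
	*q->tail = node;
	q->tail = &node->next;
	pthread_mutex_unlock(&q->lock);

	/* a notification is already pending and not yet drained: done */
	if (atomic_exchange(&q->pending, true))
		return;
	notify_consumer(q);
}

/* consumer: clear the pending bit first, then splice off whatever is queued */
static struct work_node *work_queue_drain(struct work_queue *q)
{
	struct work_node *list;

	atomic_store(&q->pending, false);
	pthread_mutex_lock(&q->lock);
	list = q->head;
	q->head = NULL;
	q->tail = &q->head;
	pthread_mutex_unlock(&q->lock);
	return list;
}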
355fb9e2 1932static int io_req_task_work_add(struct io_kiocb *req)
c2c4c83c
JA
1933{
1934 struct task_struct *tsk = req->task;
1935 struct io_ring_ctx *ctx = req->ctx;
91989c70
JA
1936 enum task_work_notify_mode notify;
1937 int ret;
c2c4c83c 1938
6200b0ae
JA
1939 if (tsk->flags & PF_EXITING)
1940 return -ESRCH;
1941
c2c4c83c 1942 /*
0ba9c9ed
JA
1943 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
1944 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
1945 * processing task_work. There's no reliable way to tell if TWA_RESUME
1946 * will do the job.
c2c4c83c 1947 */
91989c70 1948 notify = TWA_NONE;
355fb9e2 1949 if (!(ctx->flags & IORING_SETUP_SQPOLL))
c2c4c83c
JA
1950 notify = TWA_SIGNAL;
1951
7cbf1722 1952 ret = io_task_work_add(tsk, req, notify);
c2c4c83c
JA
1953 if (!ret)
1954 wake_up_process(tsk);
0ba9c9ed 1955
c2c4c83c
JA
1956 return ret;
1957}
1958
eab30c4d 1959static void io_req_task_work_add_fallback(struct io_kiocb *req,
7cbf1722 1960 task_work_func_t cb)
eab30c4d 1961{
7c25c0d1
JA
1962 struct io_ring_ctx *ctx = req->ctx;
1963 struct callback_head *head;
eab30c4d
PB
1964
1965 init_task_work(&req->task_work, cb);
7c25c0d1
JA
1966 do {
1967 head = READ_ONCE(ctx->exit_task_work);
1968 req->task_work.next = head;
1969 } while (cmpxchg(&ctx->exit_task_work, head, &req->task_work) != head);
eab30c4d
PB
1970}
1971
c40f6379
JA
1972static void __io_req_task_cancel(struct io_kiocb *req, int error)
1973{
1974 struct io_ring_ctx *ctx = req->ctx;
1975
1976 spin_lock_irq(&ctx->completion_lock);
1977 io_cqring_fill_event(req, error);
1978 io_commit_cqring(ctx);
1979 spin_unlock_irq(&ctx->completion_lock);
1980
1981 io_cqring_ev_posted(ctx);
1982 req_set_fail_links(req);
1983 io_double_put_req(req);
1984}
1985
1986static void io_req_task_cancel(struct callback_head *cb)
1987{
1988 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
87ceb6a6 1989 struct io_ring_ctx *ctx = req->ctx;
c40f6379 1990
792bb6eb 1991 mutex_lock(&ctx->uring_lock);
a3df7698 1992 __io_req_task_cancel(req, req->result);
792bb6eb 1993 mutex_unlock(&ctx->uring_lock);
87ceb6a6 1994 percpu_ref_put(&ctx->refs);
c40f6379
JA
1995}
1996
1997static void __io_req_task_submit(struct io_kiocb *req)
1998{
1999 struct io_ring_ctx *ctx = req->ctx;
2000
04fc6c80 2001 /* ctx stays valid until unlock, even if we drop all of our ctx->refs */
81b6d05c 2002 mutex_lock(&ctx->uring_lock);
4fb6ac32 2003 if (!ctx->sqo_dead && !(current->flags & PF_EXITING) && !current->in_execve)
c5eef2b9 2004 __io_queue_sqe(req);
81b6d05c 2005 else
c40f6379 2006 __io_req_task_cancel(req, -EFAULT);
81b6d05c 2007 mutex_unlock(&ctx->uring_lock);
c40f6379
JA
2008}
2009
2010static void io_req_task_submit(struct callback_head *cb)
2011{
2012 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2013
2014 __io_req_task_submit(req);
2015}
2016
2017static void io_req_task_queue(struct io_kiocb *req)
2018{
c40f6379
JA
2019 int ret;
2020
7cbf1722 2021 req->task_work.func = io_req_task_submit;
355fb9e2 2022 ret = io_req_task_work_add(req);
c40f6379 2023 if (unlikely(ret)) {
a3df7698 2024 req->result = -ECANCELED;
04fc6c80 2025 percpu_ref_get(&req->ctx->refs);
eab30c4d 2026 io_req_task_work_add_fallback(req, io_req_task_cancel);
c40f6379 2027 }
c40f6379
JA
2028}
2029
a3df7698
PB
2030static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2031{
2032 percpu_ref_get(&req->ctx->refs);
2033 req->result = ret;
2034 req->task_work.func = io_req_task_cancel;
2035
2036 if (unlikely(io_req_task_work_add(req)))
2037 io_req_task_work_add_fallback(req, io_req_task_cancel);
2038}
2039
f2f87370 2040static inline void io_queue_next(struct io_kiocb *req)
c69f8dbe 2041{
9b5f7bd9 2042 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf
PB
2043
2044 if (nxt)
906a8c3f 2045 io_req_task_queue(nxt);
c69f8dbe
JL
2046}
2047
c3524383 2048static void io_free_req(struct io_kiocb *req)
7a743e22 2049{
c3524383
PB
2050 io_queue_next(req);
2051 __io_free_req(req);
2052}
8766dd51 2053
2d6500d4 2054struct req_batch {
5af1d13e
PB
2055 struct task_struct *task;
2056 int task_refs;
1b4c351f 2057 int ctx_refs;
2d6500d4
PB
2058};
2059
5af1d13e
PB
2060static inline void io_init_req_batch(struct req_batch *rb)
2061{
5af1d13e 2062 rb->task_refs = 0;
9ae72463 2063 rb->ctx_refs = 0;
5af1d13e
PB
2064 rb->task = NULL;
2065}
2066
2d6500d4
PB
2067static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2068 struct req_batch *rb)
2069{
6e833d53 2070 if (rb->task)
7c660731 2071 io_put_task(rb->task, rb->task_refs);
9ae72463
PB
2072 if (rb->ctx_refs)
2073 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
2d6500d4
PB
2074}
2075
6ff119a6
PB
2076static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2077 struct io_submit_state *state)
2d6500d4 2078{
f2f87370 2079 io_queue_next(req);
2d6500d4 2080
e3bc8e9d 2081 if (req->task != rb->task) {
7c660731
PB
2082 if (rb->task)
2083 io_put_task(rb->task, rb->task_refs);
e3bc8e9d
JA
2084 rb->task = req->task;
2085 rb->task_refs = 0;
5af1d13e 2086 }
e3bc8e9d 2087 rb->task_refs++;
9ae72463 2088 rb->ctx_refs++;
5af1d13e 2089
4edf20f9 2090 io_dismantle_req(req);
bd759045 2091 if (state->free_reqs != ARRAY_SIZE(state->reqs))
6ff119a6 2092 state->reqs[state->free_reqs++] = req;
bd759045
PB
2093 else
2094 list_add(&req->compl.list, &state->comp.free_list);
7a743e22
PB
2095}
2096
905c172f
PB
2097static void io_submit_flush_completions(struct io_comp_state *cs,
2098 struct io_ring_ctx *ctx)
2099{
2100 int i, nr = cs->nr;
2101 struct io_kiocb *req;
2102 struct req_batch rb;
2103
2104 io_init_req_batch(&rb);
2105 spin_lock_irq(&ctx->completion_lock);
2106 for (i = 0; i < nr; i++) {
2107 req = cs->reqs[i];
2108 __io_cqring_fill_event(req, req->result, req->compl.cflags);
2109 }
2110 io_commit_cqring(ctx);
2111 spin_unlock_irq(&ctx->completion_lock);
2112
2113 io_cqring_ev_posted(ctx);
2114 for (i = 0; i < nr; i++) {
2115 req = cs->reqs[i];
2116
2117 /* submission and completion refs */
2118 if (refcount_sub_and_test(2, &req->refs))
6ff119a6 2119 io_req_free_batch(&rb, req, &ctx->submit_state);
905c172f
PB
2120 }
2121
2122 io_req_free_batch_finish(ctx, &rb);
2123 cs->nr = 0;
7a743e22
PB
2124}
2125
ba816ad6
JA
2126/*
2127 * Drop reference to request, return next in chain (if there is one) if this
2128 * was the last reference to this request.
2129 */
9b5f7bd9 2130static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 2131{
9b5f7bd9
PB
2132 struct io_kiocb *nxt = NULL;
2133
2a44f467 2134 if (refcount_dec_and_test(&req->refs)) {
9b5f7bd9 2135 nxt = io_req_find_next(req);
4d7dd462 2136 __io_free_req(req);
2a44f467 2137 }
9b5f7bd9 2138 return nxt;
2b188cc1
JA
2139}
2140
e65ef56d
JA
2141static void io_put_req(struct io_kiocb *req)
2142{
2143 if (refcount_dec_and_test(&req->refs))
2144 io_free_req(req);
2b188cc1
JA
2145}
2146
216578e5
PB
2147static void io_put_req_deferred_cb(struct callback_head *cb)
2148{
2149 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2150
2151 io_free_req(req);
2152}
2153
2154static void io_free_req_deferred(struct io_kiocb *req)
2155{
2156 int ret;
2157
7cbf1722 2158 req->task_work.func = io_put_req_deferred_cb;
355fb9e2 2159 ret = io_req_task_work_add(req);
eab30c4d
PB
2160 if (unlikely(ret))
2161 io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
216578e5
PB
2162}
2163
2164static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2165{
2166 if (refcount_sub_and_test(refs, &req->refs))
2167 io_free_req_deferred(req);
2168}
2169
978db57e
JA
2170static void io_double_put_req(struct io_kiocb *req)
2171{
2172 /* drop both submit and complete references */
2173 if (refcount_sub_and_test(2, &req->refs))
2174 io_free_req(req);
2175}
2176
6c503150 2177static unsigned io_cqring_events(struct io_ring_ctx *ctx)
a3a0e43f
JA
2178{
2179 /* See comment at the top of this file */
2180 smp_rmb();
e23de15f 2181 return __io_cqring_events(ctx);
a3a0e43f
JA
2182}
2183
fb5ccc98
PB
2184static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2185{
2186 struct io_rings *rings = ctx->rings;
2187
2188 /* make sure SQ entry isn't read before tail */
2189 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2190}
2191
8ff069bf 2192static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
e94f141b 2193{
8ff069bf 2194 unsigned int cflags;
e94f141b 2195
bcda7baa
JA
2196 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2197 cflags |= IORING_CQE_F_BUFFER;
0e1b6fe3 2198 req->flags &= ~REQ_F_BUFFER_SELECTED;
bcda7baa
JA
2199 kfree(kbuf);
2200 return cflags;
e94f141b
JA
2201}
2202
8ff069bf 2203static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
bcda7baa 2204{
4d954c25 2205 struct io_buffer *kbuf;
bcda7baa 2206
4d954c25 2207 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
8ff069bf
PB
2208 return io_put_kbuf(req, kbuf);
2209}
2210
4c6e277c
JA
2211static inline bool io_run_task_work(void)
2212{
6200b0ae
JA
2213 /*
2214 * Not safe to run on exiting task, and the task_work handling will
2215 * not add work to such a task.
2216 */
2217 if (unlikely(current->flags & PF_EXITING))
2218 return false;
4c6e277c
JA
2219 if (current->task_works) {
2220 __set_current_state(TASK_RUNNING);
2221 task_work_run();
2222 return true;
2223 }
2224
2225 return false;
bcda7baa
JA
2226}
2227
def596e9
JA
2228/*
2229 * Find and free completed poll iocbs
2230 */
2231static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2232 struct list_head *done)
2233{
8237e045 2234 struct req_batch rb;
def596e9 2235 struct io_kiocb *req;
bbde017a
XW
2236
2237 /* order with ->result store in io_complete_rw_iopoll() */
2238 smp_rmb();
def596e9 2239
5af1d13e 2240 io_init_req_batch(&rb);
def596e9 2241 while (!list_empty(done)) {
bcda7baa
JA
2242 int cflags = 0;
2243
d21ffe7e 2244 req = list_first_entry(done, struct io_kiocb, inflight_entry);
f161340d
PB
2245 list_del(&req->inflight_entry);
2246
bbde017a
XW
2247 if (READ_ONCE(req->result) == -EAGAIN) {
2248 req->iopoll_completed = 0;
23faba36 2249 if (io_rw_reissue(req))
f161340d 2250 continue;
bbde017a 2251 }
def596e9 2252
bcda7baa 2253 if (req->flags & REQ_F_BUFFER_SELECTED)
8ff069bf 2254 cflags = io_put_rw_kbuf(req);
bcda7baa
JA
2255
2256 __io_cqring_fill_event(req, req->result, cflags);
def596e9
JA
2257 (*nr_events)++;
2258
c3524383 2259 if (refcount_dec_and_test(&req->refs))
6ff119a6 2260 io_req_free_batch(&rb, req, &ctx->submit_state);
def596e9 2261 }
def596e9 2262
09bb8394 2263 io_commit_cqring(ctx);
80c18e4a 2264 io_cqring_ev_posted_iopoll(ctx);
2d6500d4 2265 io_req_free_batch_finish(ctx, &rb);
581f9810
BM
2266}
2267
def596e9
JA
2268static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2269 long min)
2270{
2271 struct io_kiocb *req, *tmp;
2272 LIST_HEAD(done);
2273 bool spin;
2274 int ret;
2275
2276 /*
2277 * Only spin for completions if we don't have multiple devices hanging
2278 * off our complete list, and we're under the requested amount.
2279 */
2280 spin = !ctx->poll_multi_file && *nr_events < min;
2281
2282 ret = 0;
d21ffe7e 2283 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
9adbd45d 2284 struct kiocb *kiocb = &req->rw.kiocb;
def596e9
JA
2285
2286 /*
581f9810
BM
2287 * Move completed and retryable entries to our local lists.
2288 * If we find a request that requires polling, break out
2289 * and complete those lists first, if we have entries there.
def596e9 2290 */
65a6543d 2291 if (READ_ONCE(req->iopoll_completed)) {
d21ffe7e 2292 list_move_tail(&req->inflight_entry, &done);
def596e9
JA
2293 continue;
2294 }
2295 if (!list_empty(&done))
2296 break;
2297
2298 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2299 if (ret < 0)
2300 break;
2301
3aadc23e
PB
2302 /* iopoll may have completed current req */
2303 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2304 list_move_tail(&req->inflight_entry, &done);
3aadc23e 2305
def596e9
JA
2306 if (ret && spin)
2307 spin = false;
2308 ret = 0;
2309 }
2310
2311 if (!list_empty(&done))
2312 io_iopoll_complete(ctx, nr_events, &done);
2313
2314 return ret;
2315}
2316
2317/*
d195a66e 2318 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
2319 * non-spinning poll check - we'll still enter the driver poll loop, but only
2320 * as a non-spinning completion check.
2321 */
2322static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2323 long min)
2324{
540e32a0 2325 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
def596e9
JA
2326 int ret;
2327
2328 ret = io_do_iopoll(ctx, nr_events, min);
2329 if (ret < 0)
2330 return ret;
eba0a4dd 2331 if (*nr_events >= min)
def596e9
JA
2332 return 0;
2333 }
2334
2335 return 1;
2336}
2337
2338/*
2339 * We can't just wait for polled events to come to us, we have to actively
2340 * find and complete them.
2341 */
b2edc0a7 2342static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
2343{
2344 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2345 return;
2346
2347 mutex_lock(&ctx->uring_lock);
540e32a0 2348 while (!list_empty(&ctx->iopoll_list)) {
def596e9
JA
2349 unsigned int nr_events = 0;
2350
b2edc0a7 2351 io_do_iopoll(ctx, &nr_events, 0);
08f5439f 2352
b2edc0a7
PB
2353 /* let it sleep and repeat later if can't complete a request */
2354 if (nr_events == 0)
2355 break;
08f5439f
JA
2356 /*
 2357 * Ensure we allow local-to-the-cpu processing to take place;
 2358 * in this case we need to ensure that we reap all events.
3fcee5a6 2359 * Also let task_work, etc. progress by releasing the mutex
08f5439f 2360 */
3fcee5a6
PB
2361 if (need_resched()) {
2362 mutex_unlock(&ctx->uring_lock);
2363 cond_resched();
2364 mutex_lock(&ctx->uring_lock);
2365 }
def596e9
JA
2366 }
2367 mutex_unlock(&ctx->uring_lock);
2368}
2369
7668b92a 2370static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 2371{
7668b92a 2372 unsigned int nr_events = 0;
2b2ed975 2373 int iters = 0, ret = 0;
500f9fba 2374
c7849be9
XW
2375 /*
2376 * We disallow the app entering submit/complete with polling, but we
2377 * still need to lock the ring to prevent racing with polled issue
2378 * that got punted to a workqueue.
2379 */
2380 mutex_lock(&ctx->uring_lock);
def596e9 2381 do {
a3a0e43f
JA
2382 /*
2383 * Don't enter poll loop if we already have events pending.
2384 * If we do, we can potentially be spinning for commands that
2385 * already triggered a CQE (eg in error).
2386 */
6c503150
PB
2387 if (test_bit(0, &ctx->cq_check_overflow))
2388 __io_cqring_overflow_flush(ctx, false, NULL, NULL);
2389 if (io_cqring_events(ctx))
a3a0e43f
JA
2390 break;
2391
500f9fba
JA
2392 /*
2393 * If a submit got punted to a workqueue, we can have the
2394 * application entering polling for a command before it gets
2395 * issued. That app will hold the uring_lock for the duration
2396 * of the poll right here, so we need to take a breather every
2397 * now and then to ensure that the issue has a chance to add
2398 * the poll to the issued list. Otherwise we can spin here
2399 * forever, while the workqueue is stuck trying to acquire the
2400 * very same mutex.
2401 */
2402 if (!(++iters & 7)) {
2403 mutex_unlock(&ctx->uring_lock);
4c6e277c 2404 io_run_task_work();
500f9fba
JA
2405 mutex_lock(&ctx->uring_lock);
2406 }
2407
7668b92a 2408 ret = io_iopoll_getevents(ctx, &nr_events, min);
def596e9
JA
2409 if (ret <= 0)
2410 break;
2411 ret = 0;
7668b92a 2412 } while (min && !nr_events && !need_resched());
def596e9 2413
500f9fba 2414 mutex_unlock(&ctx->uring_lock);
def596e9
JA
2415 return ret;
2416}
2417
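/*
 * Illustrative sketch, NOT part of fs/io_uring.c: the "lock breather" pattern
 * described above io_iopoll_check() — a polling loop that owns a mutex drops
 * and re-acquires it every few iterations so that another thread stuck on the
 * same mutex (here: a punted submission that still needs to queue its poll
 * entry) can make progress.  work_remains()/poll_once() are stubs invented
 * for this sketch.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static int sketch_passes;
static bool work_remains(void) { return sketch_passes < 1000; }	/* stub */
static void poll_once(void) { sketch_passes++; }			/* stub */

static void poll_with_breather(pthread_mutex_t *lock)
{
	unsigned long iters = 0;

	pthread_mutex_lock(lock);
	while (work_remains()) {
		/* every 8th pass, let mutex waiters grab the lock briefly */
		if (!(++iters & 7)) {
			pthread_mutex_unlock(lock);
			sched_yield();
			pthread_mutex_lock(lock);
		}
		poll_once();
	}
	pthread_mutex_unlock(lock);
}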
491381ce 2418static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 2419{
491381ce
JA
2420 /*
2421 * Tell lockdep we inherited freeze protection from submission
2422 * thread.
2423 */
2424 if (req->flags & REQ_F_ISREG) {
2425 struct inode *inode = file_inode(req->file);
2b188cc1 2426
491381ce 2427 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 2428 }
491381ce 2429 file_end_write(req->file);
2b188cc1
JA
2430}
2431
b63534c4 2432#ifdef CONFIG_BLOCK
dc2a6e9a 2433static bool io_resubmit_prep(struct io_kiocb *req)
b63534c4
JA
2434{
2435 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
4a245479 2436 int rw, ret;
b63534c4 2437 struct iov_iter iter;
b63534c4 2438
dc2a6e9a
PB
2439 /* already prepared */
2440 if (req->async_data)
2441 return true;
b63534c4
JA
2442
2443 switch (req->opcode) {
2444 case IORING_OP_READV:
2445 case IORING_OP_READ_FIXED:
2446 case IORING_OP_READ:
2447 rw = READ;
2448 break;
2449 case IORING_OP_WRITEV:
2450 case IORING_OP_WRITE_FIXED:
2451 case IORING_OP_WRITE:
2452 rw = WRITE;
2453 break;
2454 default:
2455 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2456 req->opcode);
dc2a6e9a 2457 return false;
b63534c4
JA
2458 }
2459
dc2a6e9a
PB
2460 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2461 if (ret < 0)
2462 return false;
6bf985dc 2463 return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
b63534c4 2464}
b63534c4
JA
2465#endif
2466
23faba36 2467static bool io_rw_reissue(struct io_kiocb *req)
b63534c4
JA
2468{
2469#ifdef CONFIG_BLOCK
355afaeb 2470 umode_t mode = file_inode(req->file)->i_mode;
b63534c4 2471
355afaeb
JA
2472 if (!S_ISBLK(mode) && !S_ISREG(mode))
2473 return false;
75c668cd 2474 if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
b63534c4 2475 return false;
7c977a58
JA
2476 /*
2477 * If ref is dying, we might be running poll reap from the exit work.
2478 * Don't attempt to reissue from that path, just let it fail with
2479 * -EAGAIN.
2480 */
2481 if (percpu_ref_is_dying(&req->ctx->refs))
2482 return false;
b63534c4 2483
55e6ac1e
PB
2484 lockdep_assert_held(&req->ctx->uring_lock);
2485
37d1e2e3 2486 if (io_resubmit_prep(req)) {
fdee946d
JA
2487 refcount_inc(&req->refs);
2488 io_queue_async_work(req);
b63534c4 2489 return true;
fdee946d 2490 }
dc2a6e9a 2491 req_set_fail_links(req);
b63534c4
JA
2492#endif
2493 return false;
2494}
2495
a1d7c393 2496static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
889fca73 2497 unsigned int issue_flags)
a1d7c393 2498{
2f8e45f1
PB
2499 int cflags = 0;
2500
23faba36
PB
2501 if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
2502 return;
2f8e45f1
PB
2503 if (res != req->result)
2504 req_set_fail_links(req);
23faba36 2505
2f8e45f1
PB
2506 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2507 kiocb_end_write(req);
2508 if (req->flags & REQ_F_BUFFER_SELECTED)
2509 cflags = io_put_rw_kbuf(req);
2510 __io_req_complete(req, issue_flags, res, cflags);
ba816ad6
JA
2511}
2512
2513static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2514{
9adbd45d 2515 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 2516
889fca73 2517 __io_complete_rw(req, res, res2, 0);
2b188cc1
JA
2518}
2519
def596e9
JA
2520static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2521{
9adbd45d 2522 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 2523
491381ce
JA
2524 if (kiocb->ki_flags & IOCB_WRITE)
2525 kiocb_end_write(req);
def596e9 2526
2d7d6792 2527 if (res != -EAGAIN && res != req->result)
4e88d6e7 2528 req_set_fail_links(req);
bbde017a
XW
2529
2530 WRITE_ONCE(req->result, res);
2531 /* order with io_poll_complete() checking ->result */
cd664b0e
PB
2532 smp_wmb();
2533 WRITE_ONCE(req->iopoll_completed, 1);
def596e9
JA
2534}
2535
2536/*
2537 * After the iocb has been issued, it's safe to be found on the poll list.
2538 * Adding the kiocb to the list AFTER submission ensures that we don't
 2539 * find it from an io_iopoll_getevents() thread before the issuer is done
2540 * accessing the kiocb cookie.
2541 */
2e9dbe90 2542static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
def596e9
JA
2543{
2544 struct io_ring_ctx *ctx = req->ctx;
2545
2546 /*
2547 * Track whether we have multiple files in our lists. This will impact
2548 * how we do polling eventually, not spinning if we're on potentially
2549 * different devices.
2550 */
540e32a0 2551 if (list_empty(&ctx->iopoll_list)) {
def596e9
JA
2552 ctx->poll_multi_file = false;
2553 } else if (!ctx->poll_multi_file) {
2554 struct io_kiocb *list_req;
2555
540e32a0 2556 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
d21ffe7e 2557 inflight_entry);
9adbd45d 2558 if (list_req->file != req->file)
def596e9
JA
2559 ctx->poll_multi_file = true;
2560 }
2561
2562 /*
2563 * For fast devices, IO may have already completed. If it has, add
2564 * it to the front so we find it first.
2565 */
65a6543d 2566 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2567 list_add(&req->inflight_entry, &ctx->iopoll_list);
def596e9 2568 else
d21ffe7e 2569 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
bdcd3eab 2570
2e9dbe90
XW
2571 /*
2572 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
2573 * task context or in io worker task context. If current task context is
 2574 * sq thread, we don't need to check whether we should wake up the sq thread.
2575 */
2576 if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
534ca6d6
JA
2577 wq_has_sleeper(&ctx->sq_data->wait))
2578 wake_up(&ctx->sq_data->wait);
def596e9
JA
2579}
2580
9f13c35b
PB
2581static inline void io_state_file_put(struct io_submit_state *state)
2582{
02b23a9a
PB
2583 if (state->file_refs) {
2584 fput_many(state->file, state->file_refs);
2585 state->file_refs = 0;
2586 }
9a56a232
JA
2587}
2588
2589/*
2590 * Get as many references to a file as we have IOs left in this submission,
2591 * assuming most submissions are for one file, or at least that each file
2592 * has more than one submission.
2593 */
8da11c19 2594static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
2595{
2596 if (!state)
2597 return fget(fd);
2598
6e1271e6 2599 if (state->file_refs) {
9a56a232 2600 if (state->fd == fd) {
6e1271e6 2601 state->file_refs--;
9a56a232
JA
2602 return state->file;
2603 }
02b23a9a 2604 io_state_file_put(state);
9a56a232
JA
2605 }
2606 state->file = fget_many(fd, state->ios_left);
6e1271e6 2607 if (unlikely(!state->file))
9a56a232
JA
2608 return NULL;
2609
2610 state->fd = fd;
6e1271e6 2611 state->file_refs = state->ios_left - 1;
9a56a232
JA
2612 return state->file;
2613}
2614
4503b767
JA
2615static bool io_bdev_nowait(struct block_device *bdev)
2616{
9ba0d0c8 2617 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
4503b767
JA
2618}
2619
2b188cc1
JA
2620/*
2621 * If we tracked the file through the SCM inflight mechanism, we could support
2622 * any file. For now, just ensure that anything potentially problematic is done
2623 * inline.
2624 */
af197f50 2625static bool io_file_supports_async(struct file *file, int rw)
2b188cc1
JA
2626{
2627 umode_t mode = file_inode(file)->i_mode;
2628
4503b767 2629 if (S_ISBLK(mode)) {
4e7b5671
CH
2630 if (IS_ENABLED(CONFIG_BLOCK) &&
2631 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
4503b767
JA
2632 return true;
2633 return false;
2634 }
2635 if (S_ISCHR(mode) || S_ISSOCK(mode))
2b188cc1 2636 return true;
4503b767 2637 if (S_ISREG(mode)) {
4e7b5671
CH
2638 if (IS_ENABLED(CONFIG_BLOCK) &&
2639 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
4503b767
JA
2640 file->f_op != &io_uring_fops)
2641 return true;
2642 return false;
2643 }
2b188cc1 2644
c5b85625
JA
2645 /* any ->read/write should understand O_NONBLOCK */
2646 if (file->f_flags & O_NONBLOCK)
2647 return true;
2648
af197f50
JA
2649 if (!(file->f_mode & FMODE_NOWAIT))
2650 return false;
2651
2652 if (rw == READ)
2653 return file->f_op->read_iter != NULL;
2654
2655 return file->f_op->write_iter != NULL;
2b188cc1
JA
2656}
2657
a88fc400 2658static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 2659{
def596e9 2660 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2661 struct kiocb *kiocb = &req->rw.kiocb;
75c668cd 2662 struct file *file = req->file;
09bb8394
JA
2663 unsigned ioprio;
2664 int ret;
2b188cc1 2665
75c668cd 2666 if (S_ISREG(file_inode(file)->i_mode))
491381ce
JA
2667 req->flags |= REQ_F_ISREG;
2668
2b188cc1 2669 kiocb->ki_pos = READ_ONCE(sqe->off);
75c668cd 2670 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
ba04291e 2671 req->flags |= REQ_F_CUR_POS;
75c668cd 2672 kiocb->ki_pos = file->f_pos;
ba04291e 2673 }
2b188cc1 2674 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2675 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2676 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2677 if (unlikely(ret))
2678 return ret;
2b188cc1 2679
75c668cd
PB
2680 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2681 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2682 req->flags |= REQ_F_NOWAIT;
2683
2b188cc1
JA
2684 ioprio = READ_ONCE(sqe->ioprio);
2685 if (ioprio) {
2686 ret = ioprio_check_cap(ioprio);
2687 if (ret)
09bb8394 2688 return ret;
2b188cc1
JA
2689
2690 kiocb->ki_ioprio = ioprio;
2691 } else
2692 kiocb->ki_ioprio = get_current_ioprio();
2693
def596e9 2694 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2695 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2696 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2697 return -EOPNOTSUPP;
2b188cc1 2698
def596e9
JA
2699 kiocb->ki_flags |= IOCB_HIPRI;
2700 kiocb->ki_complete = io_complete_rw_iopoll;
65a6543d 2701 req->iopoll_completed = 0;
def596e9 2702 } else {
09bb8394
JA
2703 if (kiocb->ki_flags & IOCB_HIPRI)
2704 return -EINVAL;
def596e9
JA
2705 kiocb->ki_complete = io_complete_rw;
2706 }
9adbd45d 2707
3529d8c2
JA
2708 req->rw.addr = READ_ONCE(sqe->addr);
2709 req->rw.len = READ_ONCE(sqe->len);
4f4eeba8 2710 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 2711 return 0;
2b188cc1
JA
2712}
2713
2714static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2715{
2716 switch (ret) {
2717 case -EIOCBQUEUED:
2718 break;
2719 case -ERESTARTSYS:
2720 case -ERESTARTNOINTR:
2721 case -ERESTARTNOHAND:
2722 case -ERESTART_RESTARTBLOCK:
2723 /*
2724 * We can't just restart the syscall, since previously
2725 * submitted sqes may already be in progress. Just fail this
2726 * IO with EINTR.
2727 */
2728 ret = -EINTR;
df561f66 2729 fallthrough;
2b188cc1
JA
2730 default:
2731 kiocb->ki_complete(kiocb, ret, 0);
2732 }
2733}
2734
a1d7c393 2735static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
889fca73 2736 unsigned int issue_flags)
ba816ad6 2737{
ba04291e 2738 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
e8c2bc1f 2739 struct io_async_rw *io = req->async_data;
ba04291e 2740
227c0c96 2741 /* add previously done IO, if any */
e8c2bc1f 2742 if (io && io->bytes_done > 0) {
227c0c96 2743 if (ret < 0)
e8c2bc1f 2744 ret = io->bytes_done;
227c0c96 2745 else
e8c2bc1f 2746 ret += io->bytes_done;
227c0c96
JA
2747 }
2748
ba04291e
JA
2749 if (req->flags & REQ_F_CUR_POS)
2750 req->file->f_pos = kiocb->ki_pos;
bcaec089 2751 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
889fca73 2752 __io_complete_rw(req, ret, 0, issue_flags);
ba816ad6
JA
2753 else
2754 io_rw_done(kiocb, ret);
2755}
2756
847595de 2757static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
edafccee 2758{
9adbd45d
JA
2759 struct io_ring_ctx *ctx = req->ctx;
2760 size_t len = req->rw.len;
edafccee 2761 struct io_mapped_ubuf *imu;
4be1c615 2762 u16 index, buf_index = req->buf_index;
edafccee
JA
2763 size_t offset;
2764 u64 buf_addr;
2765
edafccee
JA
2766 if (unlikely(buf_index >= ctx->nr_user_bufs))
2767 return -EFAULT;
edafccee
JA
2768 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2769 imu = &ctx->user_bufs[index];
9adbd45d 2770 buf_addr = req->rw.addr;
edafccee
JA
2771
2772 /* overflow */
2773 if (buf_addr + len < buf_addr)
2774 return -EFAULT;
2775 /* not inside the mapped region */
2776 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2777 return -EFAULT;
2778
2779 /*
2780 * May not be a start of buffer, set size appropriately
2781 * and advance us to the beginning.
2782 */
2783 offset = buf_addr - imu->ubuf;
2784 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
2785
2786 if (offset) {
2787 /*
2788 * Don't use iov_iter_advance() here, as it's really slow for
2789 * using the latter parts of a big fixed buffer - it iterates
2790 * over each segment manually. We can cheat a bit here, because
2791 * we know that:
2792 *
2793 * 1) it's a BVEC iter, we set it up
2794 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2795 * first and last bvec
2796 *
2797 * So just find our index, and adjust the iterator afterwards.
2798 * If the offset is within the first bvec (or the whole first
 2799 * bvec), just use iov_iter_advance(). This makes it easier
2800 * since we can just skip the first segment, which may not
2801 * be PAGE_SIZE aligned.
2802 */
2803 const struct bio_vec *bvec = imu->bvec;
2804
2805 if (offset <= bvec->bv_len) {
2806 iov_iter_advance(iter, offset);
2807 } else {
2808 unsigned long seg_skip;
2809
2810 /* skip first vec */
2811 offset -= bvec->bv_len;
2812 seg_skip = 1 + (offset >> PAGE_SHIFT);
2813
2814 iter->bvec = bvec + seg_skip;
2815 iter->nr_segs -= seg_skip;
99c79f66 2816 iter->count -= bvec->bv_len + offset;
bd11b3a3 2817 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
2818 }
2819 }
2820
847595de 2821 return 0;
edafccee
JA
2822}
2823
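/*
 * Illustrative sketch, NOT part of fs/io_uring.c: the segment-skip arithmetic
 * described in the long comment inside io_import_fixed().  When every bvec of
 * a registered buffer is PAGE_SIZE except possibly the first and last, a byte
 * offset can be converted directly into (starting segment, offset within it)
 * instead of walking segments one by one.  PAGE_SHIFT_SK/PAGE_SIZE_SK and
 * struct seg_pos are stand-ins invented for this sketch.
 */
#include <stddef.h>

#define PAGE_SHIFT_SK	12
#define PAGE_SIZE_SK	(1UL << PAGE_SHIFT_SK)

struct seg_pos {
	size_t seg;	/* index of the segment to start in */
	size_t off;	/* byte offset inside that segment */
};

static struct seg_pos skip_offset(size_t first_seg_len, size_t offset)
{
	struct seg_pos pos;

	if (offset <= first_seg_len) {
		/* still inside the (possibly short) first segment */
		pos.seg = 0;
		pos.off = offset;
		return pos;
	}
	/* skip the first segment, then whole PAGE_SIZE segments */
	offset -= first_seg_len;
	pos.seg = 1 + (offset >> PAGE_SHIFT_SK);
	pos.off = offset & (PAGE_SIZE_SK - 1);
	return pos;
}

/*
 * Worked example: first segment is 1024 bytes, offset = 1024 + 2*4096 + 100 =
 * 9316.  skip_offset(1024, 9316) yields seg = 3, off = 100: start 100 bytes
 * into the fourth segment without ever touching segments 0..2.
 */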
bcda7baa
JA
2824static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2825{
2826 if (needs_lock)
2827 mutex_unlock(&ctx->uring_lock);
2828}
2829
2830static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2831{
2832 /*
2833 * "Normal" inline submissions always hold the uring_lock, since we
2834 * grab it from the system call. Same is true for the SQPOLL offload.
2835 * The only exception is when we've detached the request and issue it
 2836 * from an async worker thread; grab the lock for that case.
2837 */
2838 if (needs_lock)
2839 mutex_lock(&ctx->uring_lock);
2840}
2841
2842static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2843 int bgid, struct io_buffer *kbuf,
2844 bool needs_lock)
2845{
2846 struct io_buffer *head;
2847
2848 if (req->flags & REQ_F_BUFFER_SELECTED)
2849 return kbuf;
2850
2851 io_ring_submit_lock(req->ctx, needs_lock);
2852
2853 lockdep_assert_held(&req->ctx->uring_lock);
2854
2855 head = idr_find(&req->ctx->io_buffer_idr, bgid);
2856 if (head) {
2857 if (!list_empty(&head->list)) {
2858 kbuf = list_last_entry(&head->list, struct io_buffer,
2859 list);
2860 list_del(&kbuf->list);
2861 } else {
2862 kbuf = head;
2863 idr_remove(&req->ctx->io_buffer_idr, bgid);
2864 }
2865 if (*len > kbuf->len)
2866 *len = kbuf->len;
2867 } else {
2868 kbuf = ERR_PTR(-ENOBUFS);
2869 }
2870
2871 io_ring_submit_unlock(req->ctx, needs_lock);
2872
2873 return kbuf;
2874}
2875
4d954c25
JA
2876static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2877 bool needs_lock)
2878{
2879 struct io_buffer *kbuf;
4f4eeba8 2880 u16 bgid;
4d954c25
JA
2881
2882 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
4f4eeba8 2883 bgid = req->buf_index;
4d954c25
JA
2884 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2885 if (IS_ERR(kbuf))
2886 return kbuf;
2887 req->rw.addr = (u64) (unsigned long) kbuf;
2888 req->flags |= REQ_F_BUFFER_SELECTED;
2889 return u64_to_user_ptr(kbuf->addr);
2890}
2891
2892#ifdef CONFIG_COMPAT
2893static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2894 bool needs_lock)
2895{
2896 struct compat_iovec __user *uiov;
2897 compat_ssize_t clen;
2898 void __user *buf;
2899 ssize_t len;
2900
2901 uiov = u64_to_user_ptr(req->rw.addr);
2902 if (!access_ok(uiov, sizeof(*uiov)))
2903 return -EFAULT;
2904 if (__get_user(clen, &uiov->iov_len))
2905 return -EFAULT;
2906 if (clen < 0)
2907 return -EINVAL;
2908
2909 len = clen;
2910 buf = io_rw_buffer_select(req, &len, needs_lock);
2911 if (IS_ERR(buf))
2912 return PTR_ERR(buf);
2913 iov[0].iov_base = buf;
2914 iov[0].iov_len = (compat_size_t) len;
2915 return 0;
2916}
2917#endif
2918
2919static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2920 bool needs_lock)
2921{
2922 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2923 void __user *buf;
2924 ssize_t len;
2925
2926 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2927 return -EFAULT;
2928
2929 len = iov[0].iov_len;
2930 if (len < 0)
2931 return -EINVAL;
2932 buf = io_rw_buffer_select(req, &len, needs_lock);
2933 if (IS_ERR(buf))
2934 return PTR_ERR(buf);
2935 iov[0].iov_base = buf;
2936 iov[0].iov_len = len;
2937 return 0;
2938}
2939
2940static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2941 bool needs_lock)
2942{
dddb3e26
JA
2943 if (req->flags & REQ_F_BUFFER_SELECTED) {
2944 struct io_buffer *kbuf;
2945
2946 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2947 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2948 iov[0].iov_len = kbuf->len;
4d954c25 2949 return 0;
dddb3e26 2950 }
dd201662 2951 if (req->rw.len != 1)
4d954c25
JA
2952 return -EINVAL;
2953
2954#ifdef CONFIG_COMPAT
2955 if (req->ctx->compat)
2956 return io_compat_import(req, iov, needs_lock);
2957#endif
2958
2959 return __io_iov_buffer_select(req, iov, needs_lock);
2960}
2961
847595de
PB
2962static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
2963 struct iov_iter *iter, bool needs_lock)
2b188cc1 2964{
9adbd45d
JA
2965 void __user *buf = u64_to_user_ptr(req->rw.addr);
2966 size_t sqe_len = req->rw.len;
847595de 2967 u8 opcode = req->opcode;
4d954c25 2968 ssize_t ret;
edafccee 2969
7d009165 2970 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 2971 *iovec = NULL;
9adbd45d 2972 return io_import_fixed(req, rw, iter);
edafccee 2973 }
2b188cc1 2974
bcda7baa 2975 /* buffer index only valid with fixed read/write, or buffer select */
4f4eeba8 2976 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
2977 return -EINVAL;
2978
3a6820f2 2979 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 2980 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25 2981 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
867a23ea 2982 if (IS_ERR(buf))
4d954c25 2983 return PTR_ERR(buf);
3f9d6441 2984 req->rw.len = sqe_len;
bcda7baa
JA
2985 }
2986
3a6820f2
JA
2987 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2988 *iovec = NULL;
10fc72e4 2989 return ret;
3a6820f2
JA
2990 }
2991
4d954c25
JA
2992 if (req->flags & REQ_F_BUFFER_SELECT) {
2993 ret = io_iov_buffer_select(req, *iovec, needs_lock);
847595de
PB
2994 if (!ret)
2995 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
4d954c25
JA
2996 *iovec = NULL;
2997 return ret;
2998 }
2999
89cd35c5
CH
3000 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3001 req->ctx->compat);
2b188cc1
JA
3002}
3003
0fef9483
JA
3004static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3005{
5b09e37e 3006 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
0fef9483
JA
3007}
3008
31b51510 3009/*
32960613
JA
3010 * For files that don't have ->read_iter() and ->write_iter(), handle them
3011 * by looping over ->read() or ->write() manually.
31b51510 3012 */
4017eb91 3013static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
32960613 3014{
4017eb91
JA
3015 struct kiocb *kiocb = &req->rw.kiocb;
3016 struct file *file = req->file;
32960613
JA
3017 ssize_t ret = 0;
3018
3019 /*
3020 * Don't support polled IO through this interface, and we can't
3021 * support non-blocking either. For the latter, this just causes
3022 * the kiocb to be handled from an async context.
3023 */
3024 if (kiocb->ki_flags & IOCB_HIPRI)
3025 return -EOPNOTSUPP;
3026 if (kiocb->ki_flags & IOCB_NOWAIT)
3027 return -EAGAIN;
3028
3029 while (iov_iter_count(iter)) {
311ae9e1 3030 struct iovec iovec;
32960613
JA
3031 ssize_t nr;
3032
311ae9e1
PB
3033 if (!iov_iter_is_bvec(iter)) {
3034 iovec = iov_iter_iovec(iter);
3035 } else {
4017eb91
JA
3036 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3037 iovec.iov_len = req->rw.len;
311ae9e1
PB
3038 }
3039
32960613
JA
3040 if (rw == READ) {
3041 nr = file->f_op->read(file, iovec.iov_base,
0fef9483 3042 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3043 } else {
3044 nr = file->f_op->write(file, iovec.iov_base,
0fef9483 3045 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3046 }
3047
3048 if (nr < 0) {
3049 if (!ret)
3050 ret = nr;
3051 break;
3052 }
3053 ret += nr;
3054 if (nr != iovec.iov_len)
3055 break;
4017eb91
JA
3056 req->rw.len -= nr;
3057 req->rw.addr += nr;
32960613
JA
3058 iov_iter_advance(iter, nr);
3059 }
3060
3061 return ret;
3062}
3063
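/*
 * Illustrative sketch, NOT part of fs/io_uring.c: the idea behind
 * loop_rw_iter() — emulating a vectored read for files that only provide a
 * plain read()-style interface, by looping over the segments and stopping on
 * a short or failed transfer.  This userspace version loops over a struct
 * iovec array with read(2); the kernel loops over an iov_iter with
 * file->f_op->read()/->write().
 */
#include <sys/uio.h>
#include <unistd.h>

/* returns total bytes read, or -1 if nothing was read and an error occurred */
static ssize_t readv_by_loop(int fd, const struct iovec *iov, int iovcnt)
{
	ssize_t total = 0;

	for (int i = 0; i < iovcnt; i++) {
		ssize_t nr = read(fd, iov[i].iov_base, iov[i].iov_len);

		if (nr < 0)
			return total ? total : -1;
		total += nr;
		/* short read: don't try the remaining segments */
		if ((size_t)nr != iov[i].iov_len)
			break;
	}
	return total;
}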
ff6165b2
JA
3064static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3065 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 3066{
e8c2bc1f 3067 struct io_async_rw *rw = req->async_data;
b64e3444 3068
ff6165b2 3069 memcpy(&rw->iter, iter, sizeof(*iter));
afb87658 3070 rw->free_iovec = iovec;
227c0c96 3071 rw->bytes_done = 0;
ff6165b2 3072 /* can only be fixed buffers, no need to do anything */
9c3a205c 3073 if (iov_iter_is_bvec(iter))
ff6165b2 3074 return;
b64e3444 3075 if (!iovec) {
ff6165b2
JA
3076 unsigned iov_off = 0;
3077
3078 rw->iter.iov = rw->fast_iov;
3079 if (iter->iov != fast_iov) {
3080 iov_off = iter->iov - fast_iov;
3081 rw->iter.iov += iov_off;
3082 }
3083 if (rw->fast_iov != fast_iov)
3084 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
45097dae 3085 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
3086 } else {
3087 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
3088 }
3089}
3090
e8c2bc1f 3091static inline int __io_alloc_async_data(struct io_kiocb *req)
3d9932a8 3092{
e8c2bc1f
JA
3093 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3094 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3095 return req->async_data == NULL;
3d9932a8
XW
3096}
3097
e8c2bc1f 3098static int io_alloc_async_data(struct io_kiocb *req)
f67676d1 3099{
e8c2bc1f 3100 if (!io_op_defs[req->opcode].needs_async_data)
d3656344 3101 return 0;
3d9932a8 3102
e8c2bc1f 3103 return __io_alloc_async_data(req);
b7bb4f7d
JA
3104}
3105
ff6165b2
JA
3106static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3107 const struct iovec *fast_iov,
227c0c96 3108 struct iov_iter *iter, bool force)
b7bb4f7d 3109{
e8c2bc1f 3110 if (!force && !io_op_defs[req->opcode].needs_async_data)
74566df3 3111 return 0;
e8c2bc1f 3112 if (!req->async_data) {
6bf985dc
PB
3113 if (__io_alloc_async_data(req)) {
3114 kfree(iovec);
5d204bcf 3115 return -ENOMEM;
6bf985dc 3116 }
b7bb4f7d 3117
ff6165b2 3118 io_req_map_rw(req, iovec, fast_iov, iter);
5d204bcf 3119 }
b7bb4f7d 3120 return 0;
f67676d1
JA
3121}
3122
73debe68 3123static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
c3e330a4 3124{
e8c2bc1f 3125 struct io_async_rw *iorw = req->async_data;
f4bff104 3126 struct iovec *iov = iorw->fast_iov;
847595de 3127 int ret;
c3e330a4 3128
2846c481 3129 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
c3e330a4
PB
3130 if (unlikely(ret < 0))
3131 return ret;
3132
ab0b196c
PB
3133 iorw->bytes_done = 0;
3134 iorw->free_iovec = iov;
3135 if (iov)
3136 req->flags |= REQ_F_NEED_CLEANUP;
c3e330a4
PB
3137 return 0;
3138}
3139
73debe68 3140static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 3141{
3529d8c2
JA
3142 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3143 return -EBADF;
93642ef8 3144 return io_prep_rw(req, sqe);
f67676d1
JA
3145}
3146
c1dd91d1
JA
3147/*
3148 * This is our waitqueue callback handler, registered through lock_page_async()
 3149 * when we initially tried to do the IO and the iocb armed our waitqueue.
3150 * This gets called when the page is unlocked, and we generally expect that to
3151 * happen when the page IO is completed and the page is now uptodate. This will
3152 * queue a task_work based retry of the operation, attempting to copy the data
3153 * again. If the latter fails because the page was NOT uptodate, then we will
3154 * do a thread based blocking retry of the operation. That's the unexpected
3155 * slow path.
3156 */
bcf5a063
JA
3157static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3158 int sync, void *arg)
3159{
3160 struct wait_page_queue *wpq;
3161 struct io_kiocb *req = wait->private;
bcf5a063 3162 struct wait_page_key *key = arg;
bcf5a063
JA
3163
3164 wpq = container_of(wait, struct wait_page_queue, wait);
3165
cdc8fcb4
LT
3166 if (!wake_page_match(wpq, key))
3167 return 0;
3168
c8d317aa 3169 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063
JA
3170 list_del_init(&wait->entry);
3171
bcf5a063
JA
3172 /* submit ref gets dropped, acquire a new one */
3173 refcount_inc(&req->refs);
921b9054 3174 io_req_task_queue(req);
bcf5a063
JA
3175 return 1;
3176}
3177
c1dd91d1
JA
3178/*
3179 * This controls whether a given IO request should be armed for async page
3180 * based retry. If we return false here, the request is handed to the async
3181 * worker threads for retry. If we're doing buffered reads on a regular file,
3182 * we prepare a private wait_page_queue entry and retry the operation. This
3183 * will either succeed because the page is now uptodate and unlocked, or it
3184 * will register a callback when the page is unlocked at IO completion. Through
3185 * that callback, io_uring uses task_work to setup a retry of the operation.
3186 * That retry will attempt the buffered read again. The retry will generally
3187 * succeed, or in rare cases where it fails, we then fall back to using the
3188 * async worker threads for a blocking retry.
3189 */
227c0c96 3190static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 3191{
e8c2bc1f
JA
3192 struct io_async_rw *rw = req->async_data;
3193 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 3194 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 3195
bcf5a063
JA
3196 /* never retry for NOWAIT, we just complete with -EAGAIN */
3197 if (req->flags & REQ_F_NOWAIT)
3198 return false;
f67676d1 3199
227c0c96 3200 /* Only for buffered IO */
3b2a4439 3201 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 3202 return false;
3b2a4439 3203
bcf5a063
JA
3204 /*
3205 * just use poll if we can, and don't attempt if the fs doesn't
3206 * support callback based unlocks
3207 */
3208 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3209 return false;
f67676d1 3210
3b2a4439
JA
3211 wait->wait.func = io_async_buf_func;
3212 wait->wait.private = req;
3213 wait->wait.flags = 0;
3214 INIT_LIST_HEAD(&wait->wait.entry);
3215 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 3216 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 3217 kiocb->ki_waitq = wait;
3b2a4439 3218 return true;
bcf5a063
JA
3219}
3220
3221static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3222{
3223 if (req->file->f_op->read_iter)
3224 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d 3225 else if (req->file->f_op->read)
4017eb91 3226 return loop_rw_iter(READ, req, iter);
2dd2111d
GH
3227 else
3228 return -EINVAL;
f67676d1
JA
3229}
3230
889fca73 3231static int io_read(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3232{
3233 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3234 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3235 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3236 struct io_async_rw *rw = req->async_data;
227c0c96 3237 ssize_t io_size, ret, ret2;
45d189c6 3238 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ff6165b2 3239
2846c481 3240 if (rw) {
e8c2bc1f 3241 iter = &rw->iter;
2846c481
PB
3242 iovec = NULL;
3243 } else {
3244 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3245 if (ret < 0)
3246 return ret;
3247 }
632546c4 3248 io_size = iov_iter_count(iter);
fa15bafb 3249 req->result = io_size;
2b188cc1 3250
fd6c2e4c
JA
3251 /* Ensure we clear previously set non-block flag */
3252 if (!force_nonblock)
29de5f6a 3253 kiocb->ki_flags &= ~IOCB_NOWAIT;
a88fc400
PB
3254 else
3255 kiocb->ki_flags |= IOCB_NOWAIT;
3256
24c74678 3257 /* If the file doesn't support async, just async punt */
6713e7a6
PB
3258 if (force_nonblock && !io_file_supports_async(req->file, READ)) {
3259 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
6bf985dc 3260 return ret ?: -EAGAIN;
6713e7a6 3261 }
9e645e11 3262
632546c4 3263 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
5ea5dd45
PB
3264 if (unlikely(ret)) {
3265 kfree(iovec);
3266 return ret;
3267 }
2b188cc1 3268
227c0c96 3269 ret = io_iter_do_read(req, iter);
32960613 3270
57cd657b 3271 if (ret == -EIOCBQUEUED) {
fe1cdd55 3272 goto out_free;
227c0c96 3273 } else if (ret == -EAGAIN) {
eefdf30f
JA
3274 /* IOPOLL retry should happen for io-wq threads */
3275 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
f91daf56 3276 goto done;
75c668cd
PB
3277 /* no retry on NONBLOCK nor RWF_NOWAIT */
3278 if (req->flags & REQ_F_NOWAIT)
355afaeb 3279 goto done;
84216315 3280 /* some cases will consume bytes even on error returns */
632546c4 3281 iov_iter_revert(iter, io_size - iov_iter_count(iter));
f38c7e3a 3282 ret = 0;
7335e3bf 3283 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
75c668cd 3284 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
7335e3bf 3285 /* read all, failed, already did sync or don't want to retry */
00d23d51 3286 goto done;
227c0c96
JA
3287 }
3288
227c0c96 3289 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
6bf985dc
PB
3290 if (ret2)
3291 return ret2;
3292
fe1cdd55 3293 iovec = NULL;
e8c2bc1f 3294 rw = req->async_data;
227c0c96 3295 /* now use our persistent iterator, if we aren't already */
e8c2bc1f 3296 iter = &rw->iter;
227c0c96 3297
b23df91b
PB
3298 do {
3299 io_size -= ret;
3300 rw->bytes_done += ret;
3301 /* if we can retry, do so with the callbacks armed */
3302 if (!io_rw_should_retry(req)) {
3303 kiocb->ki_flags &= ~IOCB_WAITQ;
3304 return -EAGAIN;
3305 }
3306
3307 /*
3308 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3309 * we get -EIOCBQUEUED, then we'll get a notification when the
3310 * desired page gets unlocked. We can also get a partial read
3311 * here, and if we do, then just retry at the new offset.
3312 */
3313 ret = io_iter_do_read(req, iter);
3314 if (ret == -EIOCBQUEUED)
3315 return 0;
227c0c96 3316 /* we got some bytes, but not all. retry. */
b23df91b 3317 } while (ret > 0 && ret < io_size);
227c0c96 3318done:
889fca73 3319 kiocb_done(kiocb, ret, issue_flags);
fe1cdd55
PB
3320out_free:
 3321 /* it's faster to check here than to delegate to kfree */
3322 if (iovec)
3323 kfree(iovec);
5ea5dd45 3324 return 0;
2b188cc1
JA
3325}
3326
73debe68 3327static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 3328{
3529d8c2
JA
3329 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3330 return -EBADF;
93642ef8 3331 return io_prep_rw(req, sqe);
f67676d1
JA
3332}
3333
889fca73 3334static int io_write(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3335{
3336 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3337 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3338 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3339 struct io_async_rw *rw = req->async_data;
fa15bafb 3340 ssize_t ret, ret2, io_size;
45d189c6 3341 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
2b188cc1 3342
2846c481 3343 if (rw) {
e8c2bc1f 3344 iter = &rw->iter;
2846c481
PB
3345 iovec = NULL;
3346 } else {
3347 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3348 if (ret < 0)
3349 return ret;
3350 }
632546c4 3351 io_size = iov_iter_count(iter);
fa15bafb 3352 req->result = io_size;
2b188cc1 3353
fd6c2e4c
JA
3354 /* Ensure we clear previously set non-block flag */
3355 if (!force_nonblock)
a88fc400
PB
3356 kiocb->ki_flags &= ~IOCB_NOWAIT;
3357 else
3358 kiocb->ki_flags |= IOCB_NOWAIT;
fd6c2e4c 3359
24c74678 3360 /* If the file doesn't support async, just async punt */
af197f50 3361 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
f67676d1 3362 goto copy_iov;
31b51510 3363
10d59345
JA
3364 /* file path doesn't support NOWAIT for non-direct_IO */
3365 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3366 (req->flags & REQ_F_ISREG))
f67676d1 3367 goto copy_iov;
31b51510 3368
632546c4 3369 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
fa15bafb
PB
3370 if (unlikely(ret))
3371 goto out_free;
4ed734b0 3372
fa15bafb
PB
3373 /*
3374 * Open-code file_start_write here to grab freeze protection,
3375 * which will be released by another thread in
3376 * io_complete_rw(). Fool lockdep by telling it the lock got
3377 * released so that it doesn't complain about the held lock when
3378 * we return to userspace.
3379 */
3380 if (req->flags & REQ_F_ISREG) {
8a3c84b6 3381 sb_start_write(file_inode(req->file)->i_sb);
fa15bafb
PB
3382 __sb_writers_release(file_inode(req->file)->i_sb,
3383 SB_FREEZE_WRITE);
3384 }
3385 kiocb->ki_flags |= IOCB_WRITE;
4ed734b0 3386
fa15bafb 3387 if (req->file->f_op->write_iter)
ff6165b2 3388 ret2 = call_write_iter(req->file, kiocb, iter);
2dd2111d 3389 else if (req->file->f_op->write)
4017eb91 3390 ret2 = loop_rw_iter(WRITE, req, iter);
2dd2111d
GH
3391 else
3392 ret2 = -EINVAL;
4ed734b0 3393
fa15bafb
PB
3394 /*
3395 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3396 * retry them without IOCB_NOWAIT.
3397 */
3398 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3399 ret2 = -EAGAIN;
75c668cd
PB
3400 /* no retry on NONBLOCK nor RWF_NOWAIT */
3401 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
355afaeb 3402 goto done;
fa15bafb 3403 if (!force_nonblock || ret2 != -EAGAIN) {
eefdf30f
JA
3404 /* IOPOLL retry should happen for io-wq threads */
3405 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3406 goto copy_iov;
355afaeb 3407done:
889fca73 3408 kiocb_done(kiocb, ret2, issue_flags);
fa15bafb 3409 } else {
f67676d1 3410copy_iov:
84216315 3411 /* some cases will consume bytes even on error returns */
632546c4 3412 iov_iter_revert(iter, io_size - iov_iter_count(iter));
227c0c96 3413 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
6bf985dc 3414 return ret ?: -EAGAIN;
2b188cc1 3415 }
31b51510 3416out_free:
f261c168 3417 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 3418 if (iovec)
6f2cc166 3419 kfree(iovec);
2b188cc1
JA
3420 return ret;
3421}
3422
80a261fd
JA
3423static int io_renameat_prep(struct io_kiocb *req,
3424 const struct io_uring_sqe *sqe)
3425{
3426 struct io_rename *ren = &req->rename;
3427 const char __user *oldf, *newf;
3428
3429 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3430 return -EBADF;
3431
3432 ren->old_dfd = READ_ONCE(sqe->fd);
3433 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3434 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3435 ren->new_dfd = READ_ONCE(sqe->len);
3436 ren->flags = READ_ONCE(sqe->rename_flags);
3437
3438 ren->oldpath = getname(oldf);
3439 if (IS_ERR(ren->oldpath))
3440 return PTR_ERR(ren->oldpath);
3441
3442 ren->newpath = getname(newf);
3443 if (IS_ERR(ren->newpath)) {
3444 putname(ren->oldpath);
3445 return PTR_ERR(ren->newpath);
3446 }
3447
3448 req->flags |= REQ_F_NEED_CLEANUP;
3449 return 0;
3450}
3451
45d189c6 3452static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
80a261fd
JA
3453{
3454 struct io_rename *ren = &req->rename;
3455 int ret;
3456
45d189c6 3457 if (issue_flags & IO_URING_F_NONBLOCK)
80a261fd
JA
3458 return -EAGAIN;
3459
3460 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3461 ren->newpath, ren->flags);
3462
3463 req->flags &= ~REQ_F_NEED_CLEANUP;
3464 if (ret < 0)
3465 req_set_fail_links(req);
3466 io_req_complete(req, ret);
3467 return 0;
3468}
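/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): how an IORING_OP_RENAMEAT SQE is laid out, mirroring the
 * READ_ONCE() calls in io_renameat_prep() above. Assumes a 64-bit build for
 * the pointer casts.
 */
#include <string.h>
#include <linux/io_uring.h>

static void sqe_prep_renameat(struct io_uring_sqe *sqe,
			      int old_dfd, const char *oldpath,
			      int new_dfd, const char *newpath,
			      unsigned int flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_RENAMEAT;
	sqe->fd = old_dfd;			/* ren->old_dfd */
	sqe->addr = (unsigned long) oldpath;	/* oldf */
	sqe->len = new_dfd;			/* ren->new_dfd */
	sqe->addr2 = (unsigned long) newpath;	/* newf */
	sqe->rename_flags = flags;		/* ren->flags */
}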
3469
14a1143b
JA
3470static int io_unlinkat_prep(struct io_kiocb *req,
3471 const struct io_uring_sqe *sqe)
3472{
3473 struct io_unlink *un = &req->unlink;
3474 const char __user *fname;
3475
3476 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3477 return -EBADF;
3478
3479 un->dfd = READ_ONCE(sqe->fd);
3480
3481 un->flags = READ_ONCE(sqe->unlink_flags);
3482 if (un->flags & ~AT_REMOVEDIR)
3483 return -EINVAL;
3484
3485 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3486 un->filename = getname(fname);
3487 if (IS_ERR(un->filename))
3488 return PTR_ERR(un->filename);
3489
3490 req->flags |= REQ_F_NEED_CLEANUP;
3491 return 0;
3492}
3493
45d189c6 3494static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
14a1143b
JA
3495{
3496 struct io_unlink *un = &req->unlink;
3497 int ret;
3498
45d189c6 3499 if (issue_flags & IO_URING_F_NONBLOCK)
14a1143b
JA
3500 return -EAGAIN;
3501
3502 if (un->flags & AT_REMOVEDIR)
3503 ret = do_rmdir(un->dfd, un->filename);
3504 else
3505 ret = do_unlinkat(un->dfd, un->filename);
3506
3507 req->flags &= ~REQ_F_NEED_CLEANUP;
3508 if (ret < 0)
3509 req_set_fail_links(req);
3510 io_req_complete(req, ret);
3511 return 0;
3512}
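/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): IORING_OP_UNLINKAT layout as consumed by io_unlinkat_prep()
 * above; the only flag the prep handler accepts is AT_REMOVEDIR. Assumes a
 * 64-bit build for the pointer cast.
 */
#include <string.h>
#include <fcntl.h>
#include <linux/io_uring.h>

static void sqe_prep_unlinkat(struct io_uring_sqe *sqe, int dfd,
			      const char *path, int rmdir)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_UNLINKAT;
	sqe->fd = dfd;					/* un->dfd */
	sqe->addr = (unsigned long) path;		/* un->filename */
	sqe->unlink_flags = rmdir ? AT_REMOVEDIR : 0;	/* un->flags */
}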
3513
36f4fa68
JA
3514static int io_shutdown_prep(struct io_kiocb *req,
3515 const struct io_uring_sqe *sqe)
3516{
3517#if defined(CONFIG_NET)
3518 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3519 return -EINVAL;
3520 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3521 sqe->buf_index)
3522 return -EINVAL;
3523
3524 req->shutdown.how = READ_ONCE(sqe->len);
3525 return 0;
3526#else
3527 return -EOPNOTSUPP;
3528#endif
3529}
3530
45d189c6 3531static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
36f4fa68
JA
3532{
3533#if defined(CONFIG_NET)
3534 struct socket *sock;
3535 int ret;
3536
45d189c6 3537 if (issue_flags & IO_URING_F_NONBLOCK)
36f4fa68
JA
3538 return -EAGAIN;
3539
48aba79b 3540 sock = sock_from_file(req->file);
36f4fa68 3541 if (unlikely(!sock))
48aba79b 3542 return -ENOTSOCK;
36f4fa68
JA
3543
3544 ret = __sys_shutdown_sock(sock, req->shutdown.how);
a146468d
JA
3545 if (ret < 0)
3546 req_set_fail_links(req);
36f4fa68
JA
3547 io_req_complete(req, ret);
3548 return 0;
3549#else
3550 return -EOPNOTSUPP;
3551#endif
3552}
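/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): IORING_OP_SHUTDOWN carries only the socket fd and the 'how'
 * argument, which io_shutdown_prep() above reads from sqe->len; every other
 * field must be zero or the prep handler returns -EINVAL.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/io_uring.h>

static void sqe_prep_shutdown(struct io_uring_sqe *sqe, int sockfd, int how)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_SHUTDOWN;
	sqe->fd = sockfd;
	sqe->len = how;		/* e.g. SHUT_RDWR; read as req->shutdown.how */
}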
3553
f2a8d5c7
PB
3554static int __io_splice_prep(struct io_kiocb *req,
3555 const struct io_uring_sqe *sqe)
7d67af2c
PB
3556{
3557 struct io_splice* sp = &req->splice;
3558 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
7d67af2c 3559
3232dd02
PB
3560 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3561 return -EINVAL;
7d67af2c
PB
3562
3563 sp->file_in = NULL;
7d67af2c
PB
3564 sp->len = READ_ONCE(sqe->len);
3565 sp->flags = READ_ONCE(sqe->splice_flags);
3566
3567 if (unlikely(sp->flags & ~valid_flags))
3568 return -EINVAL;
3569
8371adf5
PB
3570 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3571 (sp->flags & SPLICE_F_FD_IN_FIXED));
3572 if (!sp->file_in)
3573 return -EBADF;
7d67af2c
PB
3574 req->flags |= REQ_F_NEED_CLEANUP;
3575
7cdaf587
XW
3576 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3577 /*
 3578	 * The splice operation will be punted async, and we need to
 3579	 * modify io_wq_work.flags here, so initialize io_wq_work first.
3580 */
3581 io_req_init_async(req);
7d67af2c 3582 req->work.flags |= IO_WQ_WORK_UNBOUND;
7cdaf587 3583 }
7d67af2c
PB
3584
3585 return 0;
3586}
3587
f2a8d5c7
PB
3588static int io_tee_prep(struct io_kiocb *req,
3589 const struct io_uring_sqe *sqe)
3590{
3591 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3592 return -EINVAL;
3593 return __io_splice_prep(req, sqe);
3594}
3595
45d189c6 3596static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
f2a8d5c7
PB
3597{
3598 struct io_splice *sp = &req->splice;
3599 struct file *in = sp->file_in;
3600 struct file *out = sp->file_out;
3601 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3602 long ret = 0;
3603
45d189c6 3604 if (issue_flags & IO_URING_F_NONBLOCK)
f2a8d5c7
PB
3605 return -EAGAIN;
3606 if (sp->len)
3607 ret = do_tee(in, out, sp->len, flags);
3608
3609 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3610 req->flags &= ~REQ_F_NEED_CLEANUP;
3611
f2a8d5c7
PB
3612 if (ret != sp->len)
3613 req_set_fail_links(req);
e1e16097 3614 io_req_complete(req, ret);
f2a8d5c7
PB
3615 return 0;
3616}
3617
3618static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3619{
3620 struct io_splice* sp = &req->splice;
3621
3622 sp->off_in = READ_ONCE(sqe->splice_off_in);
3623 sp->off_out = READ_ONCE(sqe->off);
3624 return __io_splice_prep(req, sqe);
3625}
3626
45d189c6 3627static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
7d67af2c
PB
3628{
3629 struct io_splice *sp = &req->splice;
3630 struct file *in = sp->file_in;
3631 struct file *out = sp->file_out;
3632 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3633 loff_t *poff_in, *poff_out;
c9687426 3634 long ret = 0;
7d67af2c 3635
45d189c6 3636 if (issue_flags & IO_URING_F_NONBLOCK)
2fb3e822 3637 return -EAGAIN;
7d67af2c
PB
3638
3639 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3640 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 3641
948a7749 3642 if (sp->len)
c9687426 3643 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c
PB
3644
3645 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3646 req->flags &= ~REQ_F_NEED_CLEANUP;
3647
7d67af2c
PB
3648 if (ret != sp->len)
3649 req_set_fail_links(req);
e1e16097 3650 io_req_complete(req, ret);
7d67af2c
PB
3651 return 0;
3652}
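/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): IORING_OP_SPLICE layout matching __io_splice_prep() and
 * io_splice_prep() above. The input fd travels in splice_fd_in (optionally a
 * fixed-file index when SPLICE_F_FD_IN_FIXED is set), the output file is the
 * request's regular sqe->fd, and an offset of -1 means "use the current file
 * position".
 */
#include <string.h>
#include <linux/io_uring.h>

static void sqe_prep_splice(struct io_uring_sqe *sqe,
			    int fd_in, long long off_in,
			    int fd_out, long long off_out,
			    unsigned int nbytes, unsigned int splice_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_SPLICE;
	sqe->splice_fd_in = fd_in;		/* sp->file_in via io_file_get() */
	sqe->splice_off_in = off_in;		/* sp->off_in */
	sqe->fd = fd_out;			/* the request's file (output side) */
	sqe->off = off_out;			/* sp->off_out */
	sqe->len = nbytes;			/* sp->len */
	sqe->splice_flags = splice_flags;	/* sp->flags */
}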
3653
2b188cc1
JA
3654/*
3655 * IORING_OP_NOP just posts a completion event, nothing else.
3656 */
889fca73 3657static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3658{
3659 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 3660
def596e9
JA
3661 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3662 return -EINVAL;
3663
889fca73 3664 __io_req_complete(req, issue_flags, 0, 0);
2b188cc1
JA
3665 return 0;
3666}
3667
1155c76a 3668static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 3669{
6b06314c 3670 struct io_ring_ctx *ctx = req->ctx;
c992fe29 3671
09bb8394
JA
3672 if (!req->file)
3673 return -EBADF;
c992fe29 3674
6b06314c 3675 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 3676 return -EINVAL;
edafccee 3677 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
3678 return -EINVAL;
3679
8ed8d3c3
JA
3680 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3681 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3682 return -EINVAL;
3683
3684 req->sync.off = READ_ONCE(sqe->off);
3685 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
3686 return 0;
3687}
3688
45d189c6 3689static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 3690{
8ed8d3c3 3691 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
3692 int ret;
3693
ac45abc0 3694 /* fsync always requires a blocking context */
45d189c6 3695 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
3696 return -EAGAIN;
3697
9adbd45d 3698 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
3699 end > 0 ? end : LLONG_MAX,
3700 req->sync.flags & IORING_FSYNC_DATASYNC);
3701 if (ret < 0)
3702 req_set_fail_links(req);
e1e16097 3703 io_req_complete(req, ret);
c992fe29
CH
3704 return 0;
3705}
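/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): IORING_OP_FSYNC takes an optional byte range and the
 * IORING_FSYNC_DATASYNC flag, matching io_fsync_prep()/io_fsync() above; the
 * handler calls vfs_fsync_range(off, end, datasync) with end = off + len,
 * falling back to LLONG_MAX (whole file) when off and len are both zero.
 */
#include <string.h>
#include <linux/io_uring.h>

static void sqe_prep_fsync(struct io_uring_sqe *sqe, int fd,
			   unsigned long long off, unsigned int len,
			   int datasync)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->off = off;						/* req->sync.off */
	sqe->len = len;						/* req->sync.len */
	sqe->fsync_flags = datasync ? IORING_FSYNC_DATASYNC : 0;
}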
3706
d63d1b5e
JA
3707static int io_fallocate_prep(struct io_kiocb *req,
3708 const struct io_uring_sqe *sqe)
3709{
3710 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3711 return -EINVAL;
3232dd02
PB
3712 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3713 return -EINVAL;
d63d1b5e
JA
3714
3715 req->sync.off = READ_ONCE(sqe->off);
3716 req->sync.len = READ_ONCE(sqe->addr);
3717 req->sync.mode = READ_ONCE(sqe->len);
3718 return 0;
3719}
3720
45d189c6 3721static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
5d17b4a4 3722{
ac45abc0
PB
3723 int ret;
3724
d63d1b5e 3725	/* fallocate always requires a blocking context */
45d189c6 3726 if (issue_flags & IO_URING_F_NONBLOCK)
5d17b4a4 3727 return -EAGAIN;
ac45abc0
PB
3728 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3729 req->sync.len);
ac45abc0
PB
3730 if (ret < 0)
3731 req_set_fail_links(req);
e1e16097 3732 io_req_complete(req, ret);
5d17b4a4
JA
3733 return 0;
3734}
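/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): note the slightly surprising field placement that
 * io_fallocate_prep() above expects for IORING_OP_FALLOCATE: the length is
 * carried in sqe->addr and the fallocate mode in sqe->len.
 */
#include <string.h>
#include <linux/io_uring.h>

static void sqe_prep_fallocate(struct io_uring_sqe *sqe, int fd, int mode,
			       unsigned long long off, unsigned long long len)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FALLOCATE;
	sqe->fd = fd;
	sqe->off = off;		/* req->sync.off */
	sqe->addr = len;	/* req->sync.len comes from sqe->addr */
	sqe->len = mode;	/* req->sync.mode comes from sqe->len */
}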
3735
ec65fea5 3736static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 3737{
f8748881 3738 const char __user *fname;
15b71abe 3739 int ret;
b7bb4f7d 3740
ec65fea5 3741 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 3742 return -EINVAL;
ec65fea5 3743 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 3744 return -EBADF;
03b1230c 3745
ec65fea5
PB
 3746	/* open.how should already be initialised */
3747 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 3748 req->open.how.flags |= O_LARGEFILE;
3529d8c2 3749
25e72d10
PB
3750 req->open.dfd = READ_ONCE(sqe->fd);
3751 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 3752 req->open.filename = getname(fname);
15b71abe
JA
3753 if (IS_ERR(req->open.filename)) {
3754 ret = PTR_ERR(req->open.filename);
3755 req->open.filename = NULL;
3756 return ret;
3757 }
4022e7af 3758 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 3759 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 3760 return 0;
03b1230c
JA
3761}
3762
ec65fea5
PB
3763static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3764{
3765 u64 flags, mode;
3766
14587a46 3767 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4eb8dded 3768 return -EINVAL;
ec65fea5
PB
3769 mode = READ_ONCE(sqe->len);
3770 flags = READ_ONCE(sqe->open_flags);
3771 req->open.how = build_open_how(flags, mode);
3772 return __io_openat_prep(req, sqe);
3773}
3774
cebdb986 3775static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 3776{
cebdb986 3777 struct open_how __user *how;
cebdb986 3778 size_t len;
0fa03c62
JA
3779 int ret;
3780
14587a46 3781 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4eb8dded 3782 return -EINVAL;
cebdb986
JA
3783 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3784 len = READ_ONCE(sqe->len);
cebdb986
JA
3785 if (len < OPEN_HOW_SIZE_VER0)
3786 return -EINVAL;
3529d8c2 3787
cebdb986
JA
3788 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3789 len);
3790 if (ret)
3791 return ret;
3529d8c2 3792
ec65fea5 3793 return __io_openat_prep(req, sqe);
cebdb986
JA
3794}
3795
45d189c6 3796static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
15b71abe
JA
3797{
3798 struct open_flags op;
15b71abe 3799 struct file *file;
3a81fd02
JA
3800 bool nonblock_set;
3801 bool resolve_nonblock;
15b71abe
JA
3802 int ret;
3803
cebdb986 3804 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
3805 if (ret)
3806 goto err;
3a81fd02
JA
3807 nonblock_set = op.open_flag & O_NONBLOCK;
3808 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
45d189c6 3809 if (issue_flags & IO_URING_F_NONBLOCK) {
3a81fd02
JA
3810 /*
3811 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
 3812	 * it will always return -EAGAIN
3813 */
3814 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3815 return -EAGAIN;
3816 op.lookup_flags |= LOOKUP_CACHED;
3817 op.open_flag |= O_NONBLOCK;
3818 }
15b71abe 3819
4022e7af 3820 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
15b71abe
JA
3821 if (ret < 0)
3822 goto err;
3823
3824 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3a81fd02 3825 /* only retry if RESOLVE_CACHED wasn't already set by application */
45d189c6
PB
3826 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
3827 file == ERR_PTR(-EAGAIN)) {
944d1444 3828 /*
3a81fd02
JA
3829 * We could hang on to this 'fd', but seems like marginal
3830 * gain for something that is now known to be a slower path.
3831 * So just put it, and we'll get a new one when we retry.
944d1444 3832 */
3a81fd02
JA
3833 put_unused_fd(ret);
3834 return -EAGAIN;
3835 }
3836
15b71abe
JA
3837 if (IS_ERR(file)) {
3838 put_unused_fd(ret);
3839 ret = PTR_ERR(file);
3840 } else {
45d189c6 3841 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
3a81fd02 3842 file->f_flags &= ~O_NONBLOCK;
15b71abe
JA
3843 fsnotify_open(file);
3844 fd_install(ret, file);
3845 }
3846err:
3847 putname(req->open.filename);
8fef80bf 3848 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe
JA
3849 if (ret < 0)
3850 req_set_fail_links(req);
e1e16097 3851 io_req_complete(req, ret);
15b71abe
JA
3852 return 0;
3853}
3854
45d189c6 3855static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
cebdb986 3856{
45d189c6 3857 return io_openat2(req, issue_flags & IO_URING_F_NONBLOCK);
cebdb986
JA
3858}
3859
067524e9
JA
3860static int io_remove_buffers_prep(struct io_kiocb *req,
3861 const struct io_uring_sqe *sqe)
3862{
3863 struct io_provide_buf *p = &req->pbuf;
3864 u64 tmp;
3865
3866 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3867 return -EINVAL;
3868
3869 tmp = READ_ONCE(sqe->fd);
3870 if (!tmp || tmp > USHRT_MAX)
3871 return -EINVAL;
3872
3873 memset(p, 0, sizeof(*p));
3874 p->nbufs = tmp;
3875 p->bgid = READ_ONCE(sqe->buf_group);
3876 return 0;
3877}
3878
3879static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3880 int bgid, unsigned nbufs)
3881{
3882 unsigned i = 0;
3883
3884 /* shouldn't happen */
3885 if (!nbufs)
3886 return 0;
3887
3888 /* the head kbuf is the list itself */
3889 while (!list_empty(&buf->list)) {
3890 struct io_buffer *nxt;
3891
3892 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3893 list_del(&nxt->list);
3894 kfree(nxt);
3895 if (++i == nbufs)
3896 return i;
3897 }
3898 i++;
3899 kfree(buf);
3900 idr_remove(&ctx->io_buffer_idr, bgid);
3901
3902 return i;
3903}
3904
889fca73 3905static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
067524e9
JA
3906{
3907 struct io_provide_buf *p = &req->pbuf;
3908 struct io_ring_ctx *ctx = req->ctx;
3909 struct io_buffer *head;
3910 int ret = 0;
45d189c6 3911 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
067524e9
JA
3912
3913 io_ring_submit_lock(ctx, !force_nonblock);
3914
3915 lockdep_assert_held(&ctx->uring_lock);
3916
3917 ret = -ENOENT;
3918 head = idr_find(&ctx->io_buffer_idr, p->bgid);
3919 if (head)
3920 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
067524e9
JA
3921 if (ret < 0)
3922 req_set_fail_links(req);
067524e9 3923
31bff9a5
PB
3924 /* need to hold the lock to complete IOPOLL requests */
3925 if (ctx->flags & IORING_SETUP_IOPOLL) {
889fca73 3926 __io_req_complete(req, issue_flags, ret, 0);
31bff9a5
PB
3927 io_ring_submit_unlock(ctx, !force_nonblock);
3928 } else {
3929 io_ring_submit_unlock(ctx, !force_nonblock);
889fca73 3930 __io_req_complete(req, issue_flags, ret, 0);
31bff9a5 3931 }
067524e9
JA
3932 return 0;
3933}
3934
ddf0322d
JA
3935static int io_provide_buffers_prep(struct io_kiocb *req,
3936 const struct io_uring_sqe *sqe)
3937{
3938 struct io_provide_buf *p = &req->pbuf;
3939 u64 tmp;
3940
3941 if (sqe->ioprio || sqe->rw_flags)
3942 return -EINVAL;
3943
3944 tmp = READ_ONCE(sqe->fd);
3945 if (!tmp || tmp > USHRT_MAX)
3946 return -E2BIG;
3947 p->nbufs = tmp;
3948 p->addr = READ_ONCE(sqe->addr);
3949 p->len = READ_ONCE(sqe->len);
3950
efe68c1c 3951 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
ddf0322d
JA
3952 return -EFAULT;
3953
3954 p->bgid = READ_ONCE(sqe->buf_group);
3955 tmp = READ_ONCE(sqe->off);
3956 if (tmp > USHRT_MAX)
3957 return -E2BIG;
3958 p->bid = tmp;
3959 return 0;
3960}
3961
3962static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3963{
3964 struct io_buffer *buf;
3965 u64 addr = pbuf->addr;
3966 int i, bid = pbuf->bid;
3967
3968 for (i = 0; i < pbuf->nbufs; i++) {
3969 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3970 if (!buf)
3971 break;
3972
3973 buf->addr = addr;
3974 buf->len = pbuf->len;
3975 buf->bid = bid;
3976 addr += pbuf->len;
3977 bid++;
3978 if (!*head) {
3979 INIT_LIST_HEAD(&buf->list);
3980 *head = buf;
3981 } else {
3982 list_add_tail(&buf->list, &(*head)->list);
3983 }
3984 }
3985
3986 return i ? i : -ENOMEM;
3987}
3988
889fca73 3989static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
ddf0322d
JA
3990{
3991 struct io_provide_buf *p = &req->pbuf;
3992 struct io_ring_ctx *ctx = req->ctx;
3993 struct io_buffer *head, *list;
3994 int ret = 0;
45d189c6 3995 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ddf0322d
JA
3996
3997 io_ring_submit_lock(ctx, !force_nonblock);
3998
3999 lockdep_assert_held(&ctx->uring_lock);
4000
4001 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
4002
4003 ret = io_add_buffers(p, &head);
4004 if (ret < 0)
4005 goto out;
4006
4007 if (!list) {
4008 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
4009 GFP_KERNEL);
4010 if (ret < 0) {
067524e9 4011 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d
JA
4012 goto out;
4013 }
4014 }
4015out:
ddf0322d
JA
4016 if (ret < 0)
4017 req_set_fail_links(req);
31bff9a5
PB
4018
4019 /* need to hold the lock to complete IOPOLL requests */
4020 if (ctx->flags & IORING_SETUP_IOPOLL) {
889fca73 4021 __io_req_complete(req, issue_flags, ret, 0);
31bff9a5
PB
4022 io_ring_submit_unlock(ctx, !force_nonblock);
4023 } else {
4024 io_ring_submit_unlock(ctx, !force_nonblock);
889fca73 4025 __io_req_complete(req, issue_flags, ret, 0);
31bff9a5 4026 }
ddf0322d 4027 return 0;
cebdb986
JA
4028}
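/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): IORING_OP_PROVIDE_BUFFERS as parsed by
 * io_provide_buffers_prep() above: the buffer count rides in sqe->fd, the
 * base address in sqe->addr, the per-buffer length in sqe->len, the buffer
 * group ID in sqe->buf_group and the starting buffer ID in sqe->off. Assumes
 * a 64-bit build for the pointer cast.
 */
#include <string.h>
#include <linux/io_uring.h>

static void sqe_prep_provide_buffers(struct io_uring_sqe *sqe, void *base,
				     unsigned int buf_len, unsigned int nbufs,
				     unsigned short bgid, unsigned short start_bid)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_PROVIDE_BUFFERS;
	sqe->fd = nbufs;			/* p->nbufs, 1..USHRT_MAX */
	sqe->addr = (unsigned long) base;	/* p->addr */
	sqe->len = buf_len;			/* p->len, size of each buffer */
	sqe->buf_group = bgid;			/* p->bgid */
	sqe->off = start_bid;			/* p->bid, first buffer ID */
}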
4029
3e4827b0
JA
4030static int io_epoll_ctl_prep(struct io_kiocb *req,
4031 const struct io_uring_sqe *sqe)
4032{
4033#if defined(CONFIG_EPOLL)
4034 if (sqe->ioprio || sqe->buf_index)
4035 return -EINVAL;
6ca56f84 4036 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
3232dd02 4037 return -EINVAL;
3e4827b0
JA
4038
4039 req->epoll.epfd = READ_ONCE(sqe->fd);
4040 req->epoll.op = READ_ONCE(sqe->len);
4041 req->epoll.fd = READ_ONCE(sqe->off);
4042
4043 if (ep_op_has_event(req->epoll.op)) {
4044 struct epoll_event __user *ev;
4045
4046 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4047 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4048 return -EFAULT;
4049 }
4050
4051 return 0;
4052#else
4053 return -EOPNOTSUPP;
4054#endif
4055}
4056
889fca73 4057static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
3e4827b0
JA
4058{
4059#if defined(CONFIG_EPOLL)
4060 struct io_epoll *ie = &req->epoll;
4061 int ret;
45d189c6 4062 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3e4827b0
JA
4063
4064 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4065 if (force_nonblock && ret == -EAGAIN)
4066 return -EAGAIN;
4067
4068 if (ret < 0)
4069 req_set_fail_links(req);
889fca73 4070 __io_req_complete(req, issue_flags, ret, 0);
3e4827b0
JA
4071 return 0;
4072#else
4073 return -EOPNOTSUPP;
4074#endif
4075}
4076
c1ca757b
JA
4077static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4078{
4079#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4080 if (sqe->ioprio || sqe->buf_index || sqe->off)
4081 return -EINVAL;
3232dd02
PB
4082 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4083 return -EINVAL;
c1ca757b
JA
4084
4085 req->madvise.addr = READ_ONCE(sqe->addr);
4086 req->madvise.len = READ_ONCE(sqe->len);
4087 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4088 return 0;
4089#else
4090 return -EOPNOTSUPP;
4091#endif
4092}
4093
45d189c6 4094static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
c1ca757b
JA
4095{
4096#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4097 struct io_madvise *ma = &req->madvise;
4098 int ret;
4099
45d189c6 4100 if (issue_flags & IO_URING_F_NONBLOCK)
c1ca757b
JA
4101 return -EAGAIN;
4102
0726b01e 4103 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
c1ca757b
JA
4104 if (ret < 0)
4105 req_set_fail_links(req);
e1e16097 4106 io_req_complete(req, ret);
c1ca757b
JA
4107 return 0;
4108#else
4109 return -EOPNOTSUPP;
4110#endif
4111}
4112
4840e418
JA
4113static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4114{
4115 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4116 return -EINVAL;
3232dd02
PB
4117 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4118 return -EINVAL;
4840e418
JA
4119
4120 req->fadvise.offset = READ_ONCE(sqe->off);
4121 req->fadvise.len = READ_ONCE(sqe->len);
4122 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4123 return 0;
4124}
4125
45d189c6 4126static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4840e418
JA
4127{
4128 struct io_fadvise *fa = &req->fadvise;
4129 int ret;
4130
45d189c6 4131 if (issue_flags & IO_URING_F_NONBLOCK) {
3e69426d
JA
4132 switch (fa->advice) {
4133 case POSIX_FADV_NORMAL:
4134 case POSIX_FADV_RANDOM:
4135 case POSIX_FADV_SEQUENTIAL:
4136 break;
4137 default:
4138 return -EAGAIN;
4139 }
4140 }
4840e418
JA
4141
4142 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4143 if (ret < 0)
4144 req_set_fail_links(req);
e1e16097 4145 io_req_complete(req, ret);
4840e418
JA
4146 return 0;
4147}
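/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): IORING_OP_FADVISE maps onto posix_fadvise(); per
 * io_fadvise_prep() above the offset comes from sqe->off, the length from
 * sqe->len and the advice from sqe->fadvise_advice. Only the
 * NORMAL/RANDOM/SEQUENTIAL hints are handled inline on a nonblocking issue;
 * anything else is punted to a blocking context.
 */
#include <string.h>
#include <fcntl.h>
#include <linux/io_uring.h>

static void sqe_prep_fadvise(struct io_uring_sqe *sqe, int fd,
			     unsigned long long off, unsigned int len,
			     int advice)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FADVISE;
	sqe->fd = fd;
	sqe->off = off;			/* req->fadvise.offset */
	sqe->len = len;			/* req->fadvise.len */
	sqe->fadvise_advice = advice;	/* e.g. POSIX_FADV_DONTNEED */
}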
4148
eddc7ef5
JA
4149static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4150{
6ca56f84 4151 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
3232dd02 4152 return -EINVAL;
eddc7ef5
JA
4153 if (sqe->ioprio || sqe->buf_index)
4154 return -EINVAL;
9c280f90 4155 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4156 return -EBADF;
eddc7ef5 4157
1d9e1288
BM
4158 req->statx.dfd = READ_ONCE(sqe->fd);
4159 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 4160 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
4161 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4162 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5
JA
4163
4164 return 0;
4165}
4166
45d189c6 4167static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
eddc7ef5 4168{
1d9e1288 4169 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
4170 int ret;
4171
45d189c6 4172 if (issue_flags & IO_URING_F_NONBLOCK) {
5b0bbee4
JA
4173 /* only need file table for an actual valid fd */
4174 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4175 req->flags |= REQ_F_NO_FILE_TABLE;
eddc7ef5 4176 return -EAGAIN;
5b0bbee4 4177 }
eddc7ef5 4178
e62753e4
BM
4179 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4180 ctx->buffer);
eddc7ef5 4181
eddc7ef5
JA
4182 if (ret < 0)
4183 req_set_fail_links(req);
e1e16097 4184 io_req_complete(req, ret);
eddc7ef5
JA
4185 return 0;
4186}
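/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): IORING_OP_STATX mirrors statx(2); io_statx_prep() above
 * reads the dfd from sqe->fd, the mask from sqe->len, the pathname from
 * sqe->addr, the result buffer from sqe->addr2 and the AT_* flags from
 * sqe->statx_flags. Assumes a 64-bit build for the pointer casts.
 */
#include <string.h>
#include <linux/stat.h>
#include <linux/io_uring.h>

static void sqe_prep_statx(struct io_uring_sqe *sqe, int dfd, const char *path,
			   int flags, unsigned int mask, struct statx *stx)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_STATX;
	sqe->fd = dfd;				/* req->statx.dfd */
	sqe->addr = (unsigned long) path;	/* req->statx.filename */
	sqe->len = mask;			/* req->statx.mask, e.g. STATX_BASIC_STATS */
	sqe->addr2 = (unsigned long) stx;	/* req->statx.buffer */
	sqe->statx_flags = flags;		/* req->statx.flags, e.g. AT_EMPTY_PATH */
}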
4187
b5dba59e
JA
4188static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4189{
14587a46 4190 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4191 return -EINVAL;
b5dba59e
JA
4192 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4193 sqe->rw_flags || sqe->buf_index)
4194 return -EINVAL;
9c280f90 4195 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4196 return -EBADF;
b5dba59e
JA
4197
4198 req->close.fd = READ_ONCE(sqe->fd);
b5dba59e 4199 return 0;
b5dba59e
JA
4200}
4201
889fca73 4202static int io_close(struct io_kiocb *req, unsigned int issue_flags)
b5dba59e 4203{
9eac1904 4204 struct files_struct *files = current->files;
3af73b28 4205 struct io_close *close = &req->close;
9eac1904
JA
4206 struct fdtable *fdt;
4207 struct file *file;
b5dba59e
JA
4208 int ret;
4209
9eac1904
JA
4210 file = NULL;
4211 ret = -EBADF;
4212 spin_lock(&files->file_lock);
4213 fdt = files_fdtable(files);
4214 if (close->fd >= fdt->max_fds) {
4215 spin_unlock(&files->file_lock);
4216 goto err;
4217 }
4218 file = fdt->fd[close->fd];
4219 if (!file) {
4220 spin_unlock(&files->file_lock);
4221 goto err;
4222 }
4223
4224 if (file->f_op == &io_uring_fops) {
4225 spin_unlock(&files->file_lock);
4226 file = NULL;
4227 goto err;
3af73b28 4228 }
b5dba59e
JA
4229
4230 /* if the file has a flush method, be safe and punt to async */
45d189c6 4231 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
9eac1904 4232 spin_unlock(&files->file_lock);
0bf0eefd 4233 return -EAGAIN;
a2100672 4234 }
b5dba59e 4235
9eac1904
JA
4236 ret = __close_fd_get_file(close->fd, &file);
4237 spin_unlock(&files->file_lock);
4238 if (ret < 0) {
4239 if (ret == -ENOENT)
4240 ret = -EBADF;
4241 goto err;
4242 }
4243
3af73b28 4244 /* No ->flush() or already async, safely close from here */
9eac1904
JA
4245 ret = filp_close(file, current->files);
4246err:
3af73b28
PB
4247 if (ret < 0)
4248 req_set_fail_links(req);
9eac1904
JA
4249 if (file)
4250 fput(file);
889fca73 4251 __io_req_complete(req, issue_flags, ret, 0);
1a417f4e 4252 return 0;
b5dba59e
JA
4253}
4254
1155c76a 4255static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
4256{
4257 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4 4258
5d17b4a4
JA
4259 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4260 return -EINVAL;
4261 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4262 return -EINVAL;
4263
8ed8d3c3
JA
4264 req->sync.off = READ_ONCE(sqe->off);
4265 req->sync.len = READ_ONCE(sqe->len);
4266 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
4267 return 0;
4268}
4269
45d189c6 4270static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 4271{
8ed8d3c3
JA
4272 int ret;
4273
ac45abc0 4274 /* sync_file_range always requires a blocking context */
45d189c6 4275 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
4276 return -EAGAIN;
4277
9adbd45d 4278 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
4279 req->sync.flags);
4280 if (ret < 0)
4281 req_set_fail_links(req);
e1e16097 4282 io_req_complete(req, ret);
5d17b4a4
JA
4283 return 0;
4284}
4285
469956e8 4286#if defined(CONFIG_NET)
02d27d89
PB
4287static int io_setup_async_msg(struct io_kiocb *req,
4288 struct io_async_msghdr *kmsg)
4289{
e8c2bc1f
JA
4290 struct io_async_msghdr *async_msg = req->async_data;
4291
4292 if (async_msg)
02d27d89 4293 return -EAGAIN;
e8c2bc1f 4294 if (io_alloc_async_data(req)) {
257e84a5 4295 kfree(kmsg->free_iov);
02d27d89
PB
4296 return -ENOMEM;
4297 }
e8c2bc1f 4298 async_msg = req->async_data;
02d27d89 4299 req->flags |= REQ_F_NEED_CLEANUP;
e8c2bc1f 4300 memcpy(async_msg, kmsg, sizeof(*kmsg));
2a780802 4301 async_msg->msg.msg_name = &async_msg->addr;
257e84a5
PB
 4302	/* if we were using fast_iov, set it to the new one */
4303 if (!async_msg->free_iov)
4304 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4305
02d27d89
PB
4306 return -EAGAIN;
4307}
4308
2ae523ed
PB
4309static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4310 struct io_async_msghdr *iomsg)
4311{
2ae523ed 4312 iomsg->msg.msg_name = &iomsg->addr;
257e84a5 4313 iomsg->free_iov = iomsg->fast_iov;
2ae523ed 4314 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
257e84a5 4315 req->sr_msg.msg_flags, &iomsg->free_iov);
2ae523ed
PB
4316}
4317
93642ef8
PB
4318static int io_sendmsg_prep_async(struct io_kiocb *req)
4319{
4320 int ret;
4321
4322 if (!io_op_defs[req->opcode].needs_async_data)
4323 return 0;
4324 ret = io_sendmsg_copy_hdr(req, req->async_data);
4325 if (!ret)
4326 req->flags |= REQ_F_NEED_CLEANUP;
4327 return ret;
4328}
4329
3529d8c2 4330static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 4331{
e47293fd 4332 struct io_sr_msg *sr = &req->sr_msg;
03b1230c 4333
d2b6f48b
PB
4334 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4335 return -EINVAL;
4336
e47293fd 4337 sr->msg_flags = READ_ONCE(sqe->msg_flags);
270a5940 4338 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 4339 sr->len = READ_ONCE(sqe->len);
3529d8c2 4340
d8768362
JA
4341#ifdef CONFIG_COMPAT
4342 if (req->ctx->compat)
4343 sr->msg_flags |= MSG_CMSG_COMPAT;
4344#endif
93642ef8 4345 return 0;
03b1230c
JA
4346}
4347
889fca73 4348static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 4349{
6b754c8b 4350 struct io_async_msghdr iomsg, *kmsg;
0fa03c62 4351 struct socket *sock;
7a7cacba 4352 unsigned flags;
0fa03c62
JA
4353 int ret;
4354
dba4a925 4355 sock = sock_from_file(req->file);
7a7cacba 4356 if (unlikely(!sock))
dba4a925 4357 return -ENOTSOCK;
3529d8c2 4358
257e84a5
PB
4359 kmsg = req->async_data;
4360 if (!kmsg) {
7a7cacba
PB
4361 ret = io_sendmsg_copy_hdr(req, &iomsg);
4362 if (ret)
4363 return ret;
4364 kmsg = &iomsg;
0fa03c62 4365 }
0fa03c62 4366
7a7cacba
PB
4367 flags = req->sr_msg.msg_flags;
4368 if (flags & MSG_DONTWAIT)
4369 req->flags |= REQ_F_NOWAIT;
45d189c6 4370 else if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 4371 flags |= MSG_DONTWAIT;
e47293fd 4372
7a7cacba 4373 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
45d189c6 4374 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
7a7cacba
PB
4375 return io_setup_async_msg(req, kmsg);
4376 if (ret == -ERESTARTSYS)
4377 ret = -EINTR;
0fa03c62 4378
257e84a5
PB
4379 /* fast path, check for non-NULL to avoid function call */
4380 if (kmsg->free_iov)
4381 kfree(kmsg->free_iov);
99bc4c38 4382 req->flags &= ~REQ_F_NEED_CLEANUP;
4e88d6e7
JA
4383 if (ret < 0)
4384 req_set_fail_links(req);
889fca73 4385 __io_req_complete(req, issue_flags, ret, 0);
5d17b4a4 4386 return 0;
03b1230c 4387}
aa1fa28f 4388
889fca73 4389static int io_send(struct io_kiocb *req, unsigned int issue_flags)
fddaface 4390{
7a7cacba
PB
4391 struct io_sr_msg *sr = &req->sr_msg;
4392 struct msghdr msg;
4393 struct iovec iov;
fddaface 4394 struct socket *sock;
7a7cacba 4395 unsigned flags;
fddaface
JA
4396 int ret;
4397
dba4a925 4398 sock = sock_from_file(req->file);
7a7cacba 4399 if (unlikely(!sock))
dba4a925 4400 return -ENOTSOCK;
fddaface 4401
7a7cacba
PB
4402 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4403 if (unlikely(ret))
14db8411 4404 return ret;
fddaface 4405
7a7cacba
PB
4406 msg.msg_name = NULL;
4407 msg.msg_control = NULL;
4408 msg.msg_controllen = 0;
4409 msg.msg_namelen = 0;
fddaface 4410
7a7cacba
PB
4411 flags = req->sr_msg.msg_flags;
4412 if (flags & MSG_DONTWAIT)
4413 req->flags |= REQ_F_NOWAIT;
45d189c6 4414 else if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 4415 flags |= MSG_DONTWAIT;
fddaface 4416
7a7cacba
PB
4417 msg.msg_flags = flags;
4418 ret = sock_sendmsg(sock, &msg);
45d189c6 4419 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
7a7cacba
PB
4420 return -EAGAIN;
4421 if (ret == -ERESTARTSYS)
4422 ret = -EINTR;
fddaface 4423
fddaface
JA
4424 if (ret < 0)
4425 req_set_fail_links(req);
889fca73 4426 __io_req_complete(req, issue_flags, ret, 0);
fddaface 4427 return 0;
fddaface
JA
4428}
4429
1400e697
PB
4430static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4431 struct io_async_msghdr *iomsg)
52de1fe1
JA
4432{
4433 struct io_sr_msg *sr = &req->sr_msg;
4434 struct iovec __user *uiov;
4435 size_t iov_len;
4436 int ret;
4437
1400e697
PB
4438 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4439 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
4440 if (ret)
4441 return ret;
4442
4443 if (req->flags & REQ_F_BUFFER_SELECT) {
4444 if (iov_len > 1)
4445 return -EINVAL;
5476dfed 4446 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
52de1fe1 4447 return -EFAULT;
5476dfed 4448 sr->len = iomsg->fast_iov[0].iov_len;
257e84a5 4449 iomsg->free_iov = NULL;
52de1fe1 4450 } else {
257e84a5 4451 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 4452 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
257e84a5 4453 &iomsg->free_iov, &iomsg->msg.msg_iter,
89cd35c5 4454 false);
52de1fe1
JA
4455 if (ret > 0)
4456 ret = 0;
4457 }
4458
4459 return ret;
4460}
4461
4462#ifdef CONFIG_COMPAT
4463static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 4464 struct io_async_msghdr *iomsg)
52de1fe1
JA
4465{
4466 struct compat_msghdr __user *msg_compat;
4467 struct io_sr_msg *sr = &req->sr_msg;
4468 struct compat_iovec __user *uiov;
4469 compat_uptr_t ptr;
4470 compat_size_t len;
4471 int ret;
4472
270a5940 4473 msg_compat = (struct compat_msghdr __user *) sr->umsg;
1400e697 4474 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
52de1fe1
JA
4475 &ptr, &len);
4476 if (ret)
4477 return ret;
4478
4479 uiov = compat_ptr(ptr);
4480 if (req->flags & REQ_F_BUFFER_SELECT) {
4481 compat_ssize_t clen;
4482
4483 if (len > 1)
4484 return -EINVAL;
4485 if (!access_ok(uiov, sizeof(*uiov)))
4486 return -EFAULT;
4487 if (__get_user(clen, &uiov->iov_len))
4488 return -EFAULT;
4489 if (clen < 0)
4490 return -EINVAL;
2d280bc8 4491 sr->len = clen;
257e84a5 4492 iomsg->free_iov = NULL;
52de1fe1 4493 } else {
257e84a5 4494 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 4495 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
257e84a5 4496 UIO_FASTIOV, &iomsg->free_iov,
89cd35c5 4497 &iomsg->msg.msg_iter, true);
52de1fe1
JA
4498 if (ret < 0)
4499 return ret;
4500 }
4501
4502 return 0;
4503}
4504#endif
4505
1400e697
PB
4506static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4507 struct io_async_msghdr *iomsg)
52de1fe1 4508{
1400e697 4509 iomsg->msg.msg_name = &iomsg->addr;
52de1fe1
JA
4510
4511#ifdef CONFIG_COMPAT
4512 if (req->ctx->compat)
1400e697 4513 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 4514#endif
52de1fe1 4515
1400e697 4516 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
4517}
4518
bcda7baa 4519static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
7fbb1b54 4520 bool needs_lock)
bcda7baa
JA
4521{
4522 struct io_sr_msg *sr = &req->sr_msg;
4523 struct io_buffer *kbuf;
4524
bcda7baa
JA
4525 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4526 if (IS_ERR(kbuf))
4527 return kbuf;
4528
4529 sr->kbuf = kbuf;
4530 req->flags |= REQ_F_BUFFER_SELECTED;
bcda7baa 4531 return kbuf;
fddaface
JA
4532}
4533
7fbb1b54
PB
4534static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4535{
4536 return io_put_kbuf(req, req->sr_msg.kbuf);
4537}
4538
93642ef8 4539static int io_recvmsg_prep_async(struct io_kiocb *req)
aa1fa28f 4540{
99bc4c38 4541 int ret;
3529d8c2 4542
93642ef8
PB
4543 if (!io_op_defs[req->opcode].needs_async_data)
4544 return 0;
4545 ret = io_recvmsg_copy_hdr(req, req->async_data);
4546 if (!ret)
4547 req->flags |= REQ_F_NEED_CLEANUP;
4548 return ret;
4549}
4550
4551static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4552{
4553 struct io_sr_msg *sr = &req->sr_msg;
4554
d2b6f48b
PB
4555 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4556 return -EINVAL;
4557
3529d8c2 4558 sr->msg_flags = READ_ONCE(sqe->msg_flags);
270a5940 4559 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 4560 sr->len = READ_ONCE(sqe->len);
bcda7baa 4561 sr->bgid = READ_ONCE(sqe->buf_group);
06b76d44 4562
d8768362
JA
4563#ifdef CONFIG_COMPAT
4564 if (req->ctx->compat)
4565 sr->msg_flags |= MSG_CMSG_COMPAT;
4566#endif
93642ef8 4567 return 0;
aa1fa28f
JA
4568}
4569
889fca73 4570static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 4571{
6b754c8b 4572 struct io_async_msghdr iomsg, *kmsg;
03b1230c 4573 struct socket *sock;
7fbb1b54 4574 struct io_buffer *kbuf;
7a7cacba 4575 unsigned flags;
52de1fe1 4576 int ret, cflags = 0;
45d189c6 4577 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
03b1230c 4578
dba4a925 4579 sock = sock_from_file(req->file);
7a7cacba 4580 if (unlikely(!sock))
dba4a925 4581 return -ENOTSOCK;
3529d8c2 4582
257e84a5
PB
4583 kmsg = req->async_data;
4584 if (!kmsg) {
7a7cacba
PB
4585 ret = io_recvmsg_copy_hdr(req, &iomsg);
4586 if (ret)
681fda8d 4587 return ret;
7a7cacba
PB
4588 kmsg = &iomsg;
4589 }
03b1230c 4590
bc02ef33 4591 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 4592 kbuf = io_recv_buffer_select(req, !force_nonblock);
bc02ef33 4593 if (IS_ERR(kbuf))
52de1fe1 4594 return PTR_ERR(kbuf);
7a7cacba 4595 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
5476dfed
PB
4596 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4597 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
7a7cacba
PB
4598 1, req->sr_msg.len);
4599 }
52de1fe1 4600
7a7cacba
PB
4601 flags = req->sr_msg.msg_flags;
4602 if (flags & MSG_DONTWAIT)
4603 req->flags |= REQ_F_NOWAIT;
4604 else if (force_nonblock)
4605 flags |= MSG_DONTWAIT;
e47293fd 4606
7a7cacba
PB
4607 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4608 kmsg->uaddr, flags);
0e1b6fe3
PB
4609 if (force_nonblock && ret == -EAGAIN)
4610 return io_setup_async_msg(req, kmsg);
7a7cacba
PB
4611 if (ret == -ERESTARTSYS)
4612 ret = -EINTR;
03b1230c 4613
7fbb1b54
PB
4614 if (req->flags & REQ_F_BUFFER_SELECTED)
4615 cflags = io_put_recv_kbuf(req);
257e84a5
PB
4616 /* fast path, check for non-NULL to avoid function call */
4617 if (kmsg->free_iov)
4618 kfree(kmsg->free_iov);
99bc4c38 4619 req->flags &= ~REQ_F_NEED_CLEANUP;
4e88d6e7
JA
4620 if (ret < 0)
4621 req_set_fail_links(req);
889fca73 4622 __io_req_complete(req, issue_flags, ret, cflags);
03b1230c 4623 return 0;
0fa03c62 4624}
5d17b4a4 4625
889fca73 4626static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
fddaface 4627{
6b754c8b 4628 struct io_buffer *kbuf;
7a7cacba
PB
4629 struct io_sr_msg *sr = &req->sr_msg;
4630 struct msghdr msg;
4631 void __user *buf = sr->buf;
fddaface 4632 struct socket *sock;
7a7cacba
PB
4633 struct iovec iov;
4634 unsigned flags;
bcda7baa 4635 int ret, cflags = 0;
45d189c6 4636 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
fddaface 4637
dba4a925 4638 sock = sock_from_file(req->file);
7a7cacba 4639 if (unlikely(!sock))
dba4a925 4640 return -ENOTSOCK;
fddaface 4641
bc02ef33 4642 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 4643 kbuf = io_recv_buffer_select(req, !force_nonblock);
bcda7baa
JA
4644 if (IS_ERR(kbuf))
4645 return PTR_ERR(kbuf);
7a7cacba 4646 buf = u64_to_user_ptr(kbuf->addr);
bc02ef33 4647 }
bcda7baa 4648
7a7cacba 4649 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
4650 if (unlikely(ret))
4651 goto out_free;
fddaface 4652
7a7cacba
PB
4653 msg.msg_name = NULL;
4654 msg.msg_control = NULL;
4655 msg.msg_controllen = 0;
4656 msg.msg_namelen = 0;
4657 msg.msg_iocb = NULL;
4658 msg.msg_flags = 0;
fddaface 4659
7a7cacba
PB
4660 flags = req->sr_msg.msg_flags;
4661 if (flags & MSG_DONTWAIT)
4662 req->flags |= REQ_F_NOWAIT;
4663 else if (force_nonblock)
4664 flags |= MSG_DONTWAIT;
4665
4666 ret = sock_recvmsg(sock, &msg, flags);
4667 if (force_nonblock && ret == -EAGAIN)
4668 return -EAGAIN;
4669 if (ret == -ERESTARTSYS)
4670 ret = -EINTR;
14c32eee 4671out_free:
7fbb1b54
PB
4672 if (req->flags & REQ_F_BUFFER_SELECTED)
4673 cflags = io_put_recv_kbuf(req);
fddaface
JA
4674 if (ret < 0)
4675 req_set_fail_links(req);
889fca73 4676 __io_req_complete(req, issue_flags, ret, cflags);
fddaface 4677 return 0;
fddaface
JA
4678}
4679
3529d8c2 4680static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 4681{
8ed8d3c3
JA
4682 struct io_accept *accept = &req->accept;
4683
14587a46 4684 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
17f2fe35 4685 return -EINVAL;
8042d6ce 4686 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
4687 return -EINVAL;
4688
d55e5f5b
JA
4689 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4690 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 4691 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 4692 accept->nofile = rlimit(RLIMIT_NOFILE);
8ed8d3c3 4693 return 0;
8ed8d3c3 4694}
17f2fe35 4695
889fca73 4696static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3
JA
4697{
4698 struct io_accept *accept = &req->accept;
45d189c6 4699 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ac45abc0 4700 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
8ed8d3c3
JA
4701 int ret;
4702
e697deed
JX
4703 if (req->file->f_flags & O_NONBLOCK)
4704 req->flags |= REQ_F_NOWAIT;
4705
8ed8d3c3 4706 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
09952e3e
JA
4707 accept->addr_len, accept->flags,
4708 accept->nofile);
8ed8d3c3 4709 if (ret == -EAGAIN && force_nonblock)
17f2fe35 4710 return -EAGAIN;
ac45abc0
PB
4711 if (ret < 0) {
4712 if (ret == -ERESTARTSYS)
4713 ret = -EINTR;
4e88d6e7 4714 req_set_fail_links(req);
ac45abc0 4715 }
889fca73 4716 __io_req_complete(req, issue_flags, ret, 0);
17f2fe35 4717 return 0;
8ed8d3c3
JA
4718}
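/*
 * Illustrative userspace sketch (not part of io_uring.c; the helper name is
 * hypothetical): IORING_OP_ACCEPT as parsed by io_accept_prep() above:
 * sqe->addr points at the sockaddr buffer, sqe->addr2 at the socklen_t, and
 * accept4()-style flags go in sqe->accept_flags. Assumes a 64-bit build for
 * the pointer casts.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/io_uring.h>

static void sqe_prep_accept(struct io_uring_sqe *sqe, int sockfd,
			    struct sockaddr *addr, socklen_t *addrlen,
			    int flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ACCEPT;
	sqe->fd = sockfd;
	sqe->addr = (unsigned long) addr;	/* accept->addr */
	sqe->addr2 = (unsigned long) addrlen;	/* accept->addr_len */
	sqe->accept_flags = flags;		/* e.g. SOCK_CLOEXEC */
}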
4719
93642ef8
PB
4720static int io_connect_prep_async(struct io_kiocb *req)
4721{
4722 struct io_async_connect *io = req->async_data;
4723 struct io_connect *conn = &req->connect;
4724
4725 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4726}
4727
3529d8c2 4728static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 4729{
3529d8c2 4730 struct io_connect *conn = &req->connect;
f499a021 4731
14587a46 4732 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3fbb51c1
JA
4733 return -EINVAL;
4734 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4735 return -EINVAL;
4736
3529d8c2
JA
4737 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4738 conn->addr_len = READ_ONCE(sqe->addr2);
93642ef8 4739 return 0;
f499a021
JA
4740}
4741
889fca73 4742static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
f8e85cf2 4743{
e8c2bc1f 4744 struct io_async_connect __io, *io;
f8e85cf2 4745 unsigned file_flags;
3fbb51c1 4746 int ret;
45d189c6 4747 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
f8e85cf2 4748
e8c2bc1f
JA
4749 if (req->async_data) {
4750 io = req->async_data;
f499a021 4751 } else {
3529d8c2
JA
4752 ret = move_addr_to_kernel(req->connect.addr,
4753 req->connect.addr_len,
e8c2bc1f 4754 &__io.address);
f499a021
JA
4755 if (ret)
4756 goto out;
4757 io = &__io;
4758 }
4759
3fbb51c1
JA
4760 file_flags = force_nonblock ? O_NONBLOCK : 0;
4761
e8c2bc1f 4762 ret = __sys_connect_file(req->file, &io->address,
3fbb51c1 4763 req->connect.addr_len, file_flags);
87f80d62 4764 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
e8c2bc1f 4765 if (req->async_data)
b7bb4f7d 4766 return -EAGAIN;
e8c2bc1f 4767 if (io_alloc_async_data(req)) {
f499a021
JA
4768 ret = -ENOMEM;
4769 goto out;
4770 }
e8c2bc1f
JA
4771 io = req->async_data;
4772 memcpy(req->async_data, &__io, sizeof(__io));
f8e85cf2 4773 return -EAGAIN;
f499a021 4774 }
f8e85cf2
JA
4775 if (ret == -ERESTARTSYS)
4776 ret = -EINTR;
f499a021 4777out:
4e88d6e7
JA
4778 if (ret < 0)
4779 req_set_fail_links(req);
889fca73 4780 __io_req_complete(req, issue_flags, ret, 0);
f8e85cf2 4781 return 0;
469956e8
Y
4782}
4783#else /* !CONFIG_NET */
99a10081
JA
4784#define IO_NETOP_FN(op) \
4785static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4786{ \
4787 return -EOPNOTSUPP; \
4788}
4789
4790#define IO_NETOP_PREP(op) \
4791IO_NETOP_FN(op) \
4792static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4793{ \
4794 return -EOPNOTSUPP; \
4795} \
4796
4797#define IO_NETOP_PREP_ASYNC(op) \
4798IO_NETOP_PREP(op) \
4799static int io_##op##_prep_async(struct io_kiocb *req) \
4800{ \
4801 return -EOPNOTSUPP; \
4802}
4803
4804IO_NETOP_PREP_ASYNC(sendmsg);
4805IO_NETOP_PREP_ASYNC(recvmsg);
4806IO_NETOP_PREP_ASYNC(connect);
4807IO_NETOP_PREP(accept);
4808IO_NETOP_FN(send);
4809IO_NETOP_FN(recv);
469956e8 4810#endif /* CONFIG_NET */
f8e85cf2 4811
d7718a9d
JA
4812struct io_poll_table {
4813 struct poll_table_struct pt;
4814 struct io_kiocb *req;
4815 int error;
4816};
ce593a6c 4817
d7718a9d
JA
4818static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4819 __poll_t mask, task_work_func_t func)
4820{
aa96bf8a 4821 int ret;
d7718a9d
JA
4822
 4823	/* for instances that support it, check for an event match first: */
4824 if (mask && !(mask & poll->events))
4825 return 0;
4826
4827 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4828
4829 list_del_init(&poll->wait.entry);
4830
d7718a9d 4831 req->result = mask;
7cbf1722 4832 req->task_work.func = func;
6d816e08
JA
4833 percpu_ref_get(&req->ctx->refs);
4834
d7718a9d 4835 /*
e3aabf95
JA
4836 * If this fails, then the task is exiting. When a task exits, the
4837 * work gets canceled, so just cancel this request as well instead
4838 * of executing it. We can't safely execute it anyway, as we may not
 4839	 * have the state needed for it.
d7718a9d 4840 */
355fb9e2 4841 ret = io_req_task_work_add(req);
aa96bf8a 4842 if (unlikely(ret)) {
e3aabf95 4843 WRITE_ONCE(poll->canceled, true);
eab30c4d 4844 io_req_task_work_add_fallback(req, func);
aa96bf8a 4845 }
d7718a9d
JA
4846 return 1;
4847}
4848
74ce6ce4
JA
4849static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4850 __acquires(&req->ctx->completion_lock)
4851{
4852 struct io_ring_ctx *ctx = req->ctx;
4853
4854 if (!req->result && !READ_ONCE(poll->canceled)) {
4855 struct poll_table_struct pt = { ._key = poll->events };
4856
4857 req->result = vfs_poll(req->file, &pt) & poll->events;
4858 }
4859
4860 spin_lock_irq(&ctx->completion_lock);
4861 if (!req->result && !READ_ONCE(poll->canceled)) {
4862 add_wait_queue(poll->head, &poll->wait);
4863 return true;
4864 }
4865
4866 return false;
4867}
4868
d4e7cd36 4869static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
18bceab1 4870{
e8c2bc1f 4871 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
d4e7cd36 4872 if (req->opcode == IORING_OP_POLL_ADD)
e8c2bc1f 4873 return req->async_data;
d4e7cd36
JA
4874 return req->apoll->double_poll;
4875}
4876
4877static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4878{
4879 if (req->opcode == IORING_OP_POLL_ADD)
4880 return &req->poll;
4881 return &req->apoll->poll;
4882}
4883
4884static void io_poll_remove_double(struct io_kiocb *req)
4885{
4886 struct io_poll_iocb *poll = io_poll_get_double(req);
18bceab1
JA
4887
4888 lockdep_assert_held(&req->ctx->completion_lock);
4889
4890 if (poll && poll->head) {
4891 struct wait_queue_head *head = poll->head;
4892
4893 spin_lock(&head->lock);
4894 list_del_init(&poll->wait.entry);
4895 if (poll->wait.private)
4896 refcount_dec(&req->refs);
4897 poll->head = NULL;
4898 spin_unlock(&head->lock);
4899 }
4900}
4901
4902static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4903{
4904 struct io_ring_ctx *ctx = req->ctx;
4905
d4e7cd36 4906 io_poll_remove_double(req);
18bceab1
JA
4907 req->poll.done = true;
4908 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4909 io_commit_cqring(ctx);
4910}
4911
dd221f46 4912static void io_poll_task_func(struct callback_head *cb)
18bceab1 4913{
dd221f46 4914 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
18bceab1 4915 struct io_ring_ctx *ctx = req->ctx;
dd221f46 4916 struct io_kiocb *nxt;
18bceab1
JA
4917
4918 if (io_poll_rewait(req, &req->poll)) {
4919 spin_unlock_irq(&ctx->completion_lock);
dd221f46
PB
4920 } else {
4921 hash_del(&req->hash_node);
4922 io_poll_complete(req, req->result, 0);
4923 spin_unlock_irq(&ctx->completion_lock);
18bceab1 4924
dd221f46
PB
4925 nxt = io_put_req_find_next(req);
4926 io_cqring_ev_posted(ctx);
4927 if (nxt)
4928 __io_req_task_submit(nxt);
4929 }
18bceab1 4930
6d816e08 4931 percpu_ref_put(&ctx->refs);
18bceab1
JA
4932}
4933
4934static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4935 int sync, void *key)
4936{
4937 struct io_kiocb *req = wait->private;
d4e7cd36 4938 struct io_poll_iocb *poll = io_poll_get_single(req);
18bceab1
JA
4939 __poll_t mask = key_to_poll(key);
4940
 4941	/* for instances that support it, check for an event match first: */
4942 if (mask && !(mask & poll->events))
4943 return 0;
4944
8706e04e
JA
4945 list_del_init(&wait->entry);
4946
807abcb0 4947 if (poll && poll->head) {
18bceab1
JA
4948 bool done;
4949
807abcb0
JA
4950 spin_lock(&poll->head->lock);
4951 done = list_empty(&poll->wait.entry);
18bceab1 4952 if (!done)
807abcb0 4953 list_del_init(&poll->wait.entry);
d4e7cd36
JA
4954 /* make sure double remove sees this as being gone */
4955 wait->private = NULL;
807abcb0 4956 spin_unlock(&poll->head->lock);
c8b5e260
JA
4957 if (!done) {
 4958	/* use the wait func handler, so it matches the request type */
4959 poll->wait.func(&poll->wait, mode, sync, key);
4960 }
18bceab1
JA
4961 }
4962 refcount_dec(&req->refs);
4963 return 1;
4964}
4965
4966static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4967 wait_queue_func_t wake_func)
4968{
4969 poll->head = NULL;
4970 poll->done = false;
4971 poll->canceled = false;
4972 poll->events = events;
4973 INIT_LIST_HEAD(&poll->wait.entry);
4974 init_waitqueue_func_entry(&poll->wait, wake_func);
4975}
4976
4977static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
4978 struct wait_queue_head *head,
4979 struct io_poll_iocb **poll_ptr)
18bceab1
JA
4980{
4981 struct io_kiocb *req = pt->req;
4982
4983 /*
4984 * If poll->head is already set, it's because the file being polled
 4985	 * uses multiple waitqueues for poll handling (e.g. one for read, one
 4986	 * for write). Set up a separate io_poll_iocb if this happens.
4987 */
4988 if (unlikely(poll->head)) {
58852d4d
PB
4989 struct io_poll_iocb *poll_one = poll;
4990
18bceab1 4991 /* already have a 2nd entry, fail a third attempt */
807abcb0 4992 if (*poll_ptr) {
18bceab1
JA
4993 pt->error = -EINVAL;
4994 return;
4995 }
4996 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4997 if (!poll) {
4998 pt->error = -ENOMEM;
4999 return;
5000 }
58852d4d 5001 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
18bceab1
JA
5002 refcount_inc(&req->refs);
5003 poll->wait.private = req;
807abcb0 5004 *poll_ptr = poll;
18bceab1
JA
5005 }
5006
5007 pt->error = 0;
5008 poll->head = head;
a31eb4a2
JX
5009
5010 if (poll->events & EPOLLEXCLUSIVE)
5011 add_wait_queue_exclusive(head, &poll->wait);
5012 else
5013 add_wait_queue(head, &poll->wait);
18bceab1
JA
5014}
5015
5016static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5017 struct poll_table_struct *p)
5018{
5019 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
807abcb0 5020 struct async_poll *apoll = pt->req->apoll;
18bceab1 5021
807abcb0 5022 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
18bceab1
JA
5023}
5024
d7718a9d
JA
5025static void io_async_task_func(struct callback_head *cb)
5026{
5027 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5028 struct async_poll *apoll = req->apoll;
5029 struct io_ring_ctx *ctx = req->ctx;
5030
5031 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5032
74ce6ce4 5033 if (io_poll_rewait(req, &apoll->poll)) {
d7718a9d 5034 spin_unlock_irq(&ctx->completion_lock);
6d816e08 5035 percpu_ref_put(&ctx->refs);
74ce6ce4 5036 return;
d7718a9d
JA
5037 }
5038
31067255 5039 /* If req is still hashed, it cannot have been canceled. Don't check. */
0be0b0e3 5040 if (hash_hashed(&req->hash_node))
74ce6ce4 5041 hash_del(&req->hash_node);
2bae047e 5042
d4e7cd36 5043 io_poll_remove_double(req);
74ce6ce4
JA
5044 spin_unlock_irq(&ctx->completion_lock);
5045
0be0b0e3
PB
5046 if (!READ_ONCE(apoll->poll.canceled))
5047 __io_req_task_submit(req);
5048 else
5049 __io_req_task_cancel(req, -ECANCELED);
aa340845 5050
6d816e08 5051 percpu_ref_put(&ctx->refs);
807abcb0 5052 kfree(apoll->double_poll);
31067255 5053 kfree(apoll);
d7718a9d
JA
5054}
5055
5056static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5057 void *key)
5058{
5059 struct io_kiocb *req = wait->private;
5060 struct io_poll_iocb *poll = &req->apoll->poll;
5061
5062 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5063 key_to_poll(key));
5064
5065 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5066}
5067
5068static void io_poll_req_insert(struct io_kiocb *req)
5069{
5070 struct io_ring_ctx *ctx = req->ctx;
5071 struct hlist_head *list;
5072
5073 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5074 hlist_add_head(&req->hash_node, list);
5075}
5076
5077static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5078 struct io_poll_iocb *poll,
5079 struct io_poll_table *ipt, __poll_t mask,
5080 wait_queue_func_t wake_func)
5081 __acquires(&ctx->completion_lock)
5082{
5083 struct io_ring_ctx *ctx = req->ctx;
5084 bool cancel = false;
5085
4d52f338 5086 INIT_HLIST_NODE(&req->hash_node);
18bceab1 5087 io_init_poll_iocb(poll, mask, wake_func);
b90cd197 5088 poll->file = req->file;
18bceab1 5089 poll->wait.private = req;
d7718a9d
JA
5090
5091 ipt->pt._key = mask;
5092 ipt->req = req;
5093 ipt->error = -EINVAL;
5094
d7718a9d
JA
5095 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5096
5097 spin_lock_irq(&ctx->completion_lock);
5098 if (likely(poll->head)) {
5099 spin_lock(&poll->head->lock);
5100 if (unlikely(list_empty(&poll->wait.entry))) {
5101 if (ipt->error)
5102 cancel = true;
5103 ipt->error = 0;
5104 mask = 0;
5105 }
5106 if (mask || ipt->error)
5107 list_del_init(&poll->wait.entry);
5108 else if (cancel)
5109 WRITE_ONCE(poll->canceled, true);
5110 else if (!poll->done) /* actually waiting for an event */
5111 io_poll_req_insert(req);
5112 spin_unlock(&poll->head->lock);
5113 }
5114
5115 return mask;
5116}
5117
5118static bool io_arm_poll_handler(struct io_kiocb *req)
5119{
5120 const struct io_op_def *def = &io_op_defs[req->opcode];
5121 struct io_ring_ctx *ctx = req->ctx;
5122 struct async_poll *apoll;
5123 struct io_poll_table ipt;
5124 __poll_t mask, ret;
9dab14b8 5125 int rw;
d7718a9d
JA
5126
5127 if (!req->file || !file_can_poll(req->file))
5128 return false;
24c74678 5129 if (req->flags & REQ_F_POLLED)
d7718a9d 5130 return false;
9dab14b8
JA
5131 if (def->pollin)
5132 rw = READ;
5133 else if (def->pollout)
5134 rw = WRITE;
5135 else
5136 return false;
5137 /* if we can't nonblock try, then no point in arming a poll handler */
5138 if (!io_file_supports_async(req->file, rw))
d7718a9d
JA
5139 return false;
5140
5141 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5142 if (unlikely(!apoll))
5143 return false;
807abcb0 5144 apoll->double_poll = NULL;
d7718a9d
JA
5145
5146 req->flags |= REQ_F_POLLED;
d7718a9d 5147 req->apoll = apoll;
d7718a9d 5148
8755d97a 5149 mask = 0;
d7718a9d 5150 if (def->pollin)
8755d97a 5151 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
5152 if (def->pollout)
5153 mask |= POLLOUT | POLLWRNORM;
901341bb
LH
5154
5155 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5156 if ((req->opcode == IORING_OP_RECVMSG) &&
5157 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5158 mask &= ~POLLIN;
5159
d7718a9d
JA
5160 mask |= POLLERR | POLLPRI;
5161
5162 ipt.pt._qproc = io_async_queue_proc;
5163
5164 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5165 io_async_wake);
a36da65c 5166 if (ret || ipt.error) {
d4e7cd36 5167 io_poll_remove_double(req);
d7718a9d 5168 spin_unlock_irq(&ctx->completion_lock);
807abcb0 5169 kfree(apoll->double_poll);
d7718a9d
JA
5170 kfree(apoll);
5171 return false;
5172 }
5173 spin_unlock_irq(&ctx->completion_lock);
5174 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5175 apoll->poll.events);
5176 return true;
5177}
5178
5179static bool __io_poll_remove_one(struct io_kiocb *req,
5180 struct io_poll_iocb *poll)
221c5eb2 5181{
b41e9852 5182 bool do_complete = false;
221c5eb2
JA
5183
5184 spin_lock(&poll->head->lock);
5185 WRITE_ONCE(poll->canceled, true);
392edb45
JA
5186 if (!list_empty(&poll->wait.entry)) {
5187 list_del_init(&poll->wait.entry);
b41e9852 5188 do_complete = true;
221c5eb2
JA
5189 }
5190 spin_unlock(&poll->head->lock);
3bfa5bcb 5191 hash_del(&req->hash_node);
d7718a9d
JA
5192 return do_complete;
5193}
5194
5195static bool io_poll_remove_one(struct io_kiocb *req)
5196{
5197 bool do_complete;
5198
d4e7cd36
JA
5199 io_poll_remove_double(req);
5200
d7718a9d
JA
5201 if (req->opcode == IORING_OP_POLL_ADD) {
5202 do_complete = __io_poll_remove_one(req, &req->poll);
5203 } else {
3bfa5bcb
JA
5204 struct async_poll *apoll = req->apoll;
5205
d7718a9d 5206 /* non-poll requests have submit ref still */
3bfa5bcb
JA
5207 do_complete = __io_poll_remove_one(req, &apoll->poll);
5208 if (do_complete) {
d7718a9d 5209 io_put_req(req);
807abcb0 5210 kfree(apoll->double_poll);
3bfa5bcb
JA
5211 kfree(apoll);
5212 }
b1f573bd
XW
5213 }
5214
b41e9852
JA
5215 if (do_complete) {
5216 io_cqring_fill_event(req, -ECANCELED);
5217 io_commit_cqring(req->ctx);
f254ac04 5218 req_set_fail_links(req);
216578e5 5219 io_put_req_deferred(req, 1);
b41e9852
JA
5220 }
5221
5222 return do_complete;
221c5eb2
JA
5223}
5224
76e1b642
JA
5225/*
5226 * Returns true if we found and killed one or more poll requests
5227 */
6b81928d
PB
5228static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5229 struct files_struct *files)
221c5eb2 5230{
78076bb6 5231 struct hlist_node *tmp;
221c5eb2 5232 struct io_kiocb *req;
8e2e1faf 5233 int posted = 0, i;
221c5eb2
JA
5234
5235 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
5236 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5237 struct hlist_head *list;
5238
5239 list = &ctx->cancel_hash[i];
f3606e3a 5240 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
6b81928d 5241 if (io_match_task(req, tsk, files))
f3606e3a
JA
5242 posted += io_poll_remove_one(req);
5243 }
221c5eb2
JA
5244 }
5245 spin_unlock_irq(&ctx->completion_lock);
b41e9852 5246
8e2e1faf
JA
5247 if (posted)
5248 io_cqring_ev_posted(ctx);
76e1b642
JA
5249
5250 return posted != 0;
221c5eb2
JA
5251}
5252
47f46768
JA
5253static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5254{
78076bb6 5255 struct hlist_head *list;
47f46768
JA
5256 struct io_kiocb *req;
5257
78076bb6
JA
5258 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5259 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
5260 if (sqe_addr != req->user_data)
5261 continue;
5262 if (io_poll_remove_one(req))
eac406c6 5263 return 0;
b41e9852 5264 return -EALREADY;
47f46768
JA
5265 }
5266
5267 return -ENOENT;
5268}
5269
3529d8c2
JA
5270static int io_poll_remove_prep(struct io_kiocb *req,
5271 const struct io_uring_sqe *sqe)
0969e783 5272{
0969e783
JA
5273 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5274 return -EINVAL;
5275 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5276 sqe->poll_events)
5277 return -EINVAL;
5278
018043be 5279 req->poll_remove.addr = READ_ONCE(sqe->addr);
0969e783
JA
5280 return 0;
5281}
5282
221c5eb2
JA
5283/*
5284 * Find a running poll command that matches one specified in sqe->addr,
5285 * and remove it if found.
5286 */
61e98203 5287static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
221c5eb2
JA
5288{
5289 struct io_ring_ctx *ctx = req->ctx;
47f46768 5290 int ret;
221c5eb2 5291
221c5eb2 5292 spin_lock_irq(&ctx->completion_lock);
018043be 5293 ret = io_poll_cancel(ctx, req->poll_remove.addr);
221c5eb2
JA
5294 spin_unlock_irq(&ctx->completion_lock);
5295
4e88d6e7
JA
5296 if (ret < 0)
5297 req_set_fail_links(req);
e1e16097 5298 io_req_complete(req, ret);
221c5eb2
JA
5299 return 0;
5300}
5301
221c5eb2
JA
5302static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5303 void *key)
5304{
c2f2eb7d
JA
5305 struct io_kiocb *req = wait->private;
5306 struct io_poll_iocb *poll = &req->poll;
221c5eb2 5307
d7718a9d 5308 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
5309}
5310
221c5eb2
JA
5311static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5312 struct poll_table_struct *p)
5313{
5314 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5315
e8c2bc1f 5316 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
eac406c6
JA
5317}
5318
3529d8c2 5319static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
5320{
5321 struct io_poll_iocb *poll = &req->poll;
5769a351 5322 u32 events;
221c5eb2
JA
5323
5324 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5325 return -EINVAL;
5326 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5327 return -EINVAL;
5328
5769a351
JX
5329 events = READ_ONCE(sqe->poll32_events);
5330#ifdef __BIG_ENDIAN
5331 events = swahw32(events);
5332#endif
a31eb4a2
JX
5333 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5334 (events & EPOLLEXCLUSIVE);
0969e783
JA
5335 return 0;
5336}
5337
61e98203 5338static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
0969e783
JA
5339{
5340 struct io_poll_iocb *poll = &req->poll;
5341 struct io_ring_ctx *ctx = req->ctx;
5342 struct io_poll_table ipt;
0969e783 5343 __poll_t mask;
0969e783 5344
d7718a9d 5345 ipt.pt._qproc = io_poll_queue_proc;
36703247 5346
d7718a9d
JA
5347 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5348 io_poll_wake);
221c5eb2 5349
8c838788 5350 if (mask) { /* no async, we'd stolen it */
221c5eb2 5351 ipt.error = 0;
b0dd8a41 5352 io_poll_complete(req, mask, 0);
221c5eb2 5353 }
221c5eb2
JA
5354 spin_unlock_irq(&ctx->completion_lock);
5355
8c838788
JA
5356 if (mask) {
5357 io_cqring_ev_posted(ctx);
014db007 5358 io_put_req(req);
221c5eb2 5359 }
8c838788 5360 return ipt.error;
221c5eb2
JA
5361}
5362
5262f567
JA
5363static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5364{
ad8a48ac
JA
5365 struct io_timeout_data *data = container_of(timer,
5366 struct io_timeout_data, timer);
5367 struct io_kiocb *req = data->req;
5368 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
5369 unsigned long flags;
5370
5262f567 5371 spin_lock_irqsave(&ctx->completion_lock, flags);
a71976f3 5372 list_del_init(&req->timeout.list);
01cec8c1
PB
5373 atomic_set(&req->ctx->cq_timeouts,
5374 atomic_read(&req->ctx->cq_timeouts) + 1);
5375
78e19bbe 5376 io_cqring_fill_event(req, -ETIME);
5262f567
JA
5377 io_commit_cqring(ctx);
5378 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5379
5380 io_cqring_ev_posted(ctx);
4e88d6e7 5381 req_set_fail_links(req);
5262f567
JA
5382 io_put_req(req);
5383 return HRTIMER_NORESTART;
5384}
5385
fbd15848
PB
5386static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5387 __u64 user_data)
f254ac04 5388{
fbd15848 5389 struct io_timeout_data *io;
47f46768
JA
5390 struct io_kiocb *req;
5391 int ret = -ENOENT;
f254ac04 5392
135fcde8 5393 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
47f46768 5394 if (user_data == req->user_data) {
47f46768
JA
5395 ret = 0;
5396 break;
5397 }
5398 }
5399
5400 if (ret == -ENOENT)
fbd15848
PB
5401 return ERR_PTR(ret);
5402
5403 io = req->async_data;
e8c2bc1f 5404 ret = hrtimer_try_to_cancel(&io->timer);
f254ac04 5405 if (ret == -1)
fbd15848 5406 return ERR_PTR(-EALREADY);
a71976f3 5407 list_del_init(&req->timeout.list);
fbd15848
PB
5408 return req;
5409}
47f46768 5410
fbd15848
PB
5411static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5412{
5413 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5414
5415 if (IS_ERR(req))
5416 return PTR_ERR(req);
f254ac04
JA
5417
5418 req_set_fail_links(req);
f254ac04 5419 io_cqring_fill_event(req, -ECANCELED);
216578e5 5420 io_put_req_deferred(req, 1);
f254ac04
JA
5421 return 0;
5422}
5423
9c8e11b3
PB
5424static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5425 struct timespec64 *ts, enum hrtimer_mode mode)
47f46768 5426{
9c8e11b3
PB
5427 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5428 struct io_timeout_data *data;
47f46768 5429
9c8e11b3
PB
5430 if (IS_ERR(req))
5431 return PTR_ERR(req);
47f46768 5432
9c8e11b3
PB
5433 req->timeout.off = 0; /* noseq */
5434 data = req->async_data;
5435 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5436 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5437 data->timer.function = io_timeout_fn;
5438 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5439 return 0;
47f46768
JA
5440}
5441
3529d8c2
JA
5442static int io_timeout_remove_prep(struct io_kiocb *req,
5443 const struct io_uring_sqe *sqe)
b29472ee 5444{
9c8e11b3
PB
5445 struct io_timeout_rem *tr = &req->timeout_rem;
5446
b29472ee
JA
5447 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5448 return -EINVAL;
61710e43
DA
5449 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5450 return -EINVAL;
9c8e11b3 5451 if (sqe->ioprio || sqe->buf_index || sqe->len)
b29472ee
JA
5452 return -EINVAL;
5453
9c8e11b3
PB
5454 tr->addr = READ_ONCE(sqe->addr);
5455 tr->flags = READ_ONCE(sqe->timeout_flags);
5456 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5457 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5458 return -EINVAL;
5459 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5460 return -EFAULT;
5461 } else if (tr->flags) {
5462 /* timeout removal doesn't support flags */
b29472ee 5463 return -EINVAL;
9c8e11b3 5464 }
b29472ee 5465
b29472ee
JA
5466 return 0;
5467}
5468
8662daec
PB
5469static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5470{
5471 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5472 : HRTIMER_MODE_REL;
5473}
5474
11365043
JA
5475/*
5476 * Remove or update an existing timeout command
5477 */
61e98203 5478static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
11365043 5479{
9c8e11b3 5480 struct io_timeout_rem *tr = &req->timeout_rem;
11365043 5481 struct io_ring_ctx *ctx = req->ctx;
47f46768 5482 int ret;
11365043 5483
11365043 5484 spin_lock_irq(&ctx->completion_lock);
8662daec 5485 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
9c8e11b3 5486 ret = io_timeout_cancel(ctx, tr->addr);
8662daec
PB
5487 else
5488 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5489 io_translate_timeout_mode(tr->flags));
11365043 5490
47f46768 5491 io_cqring_fill_event(req, ret);
11365043
JA
5492 io_commit_cqring(ctx);
5493 spin_unlock_irq(&ctx->completion_lock);
5262f567 5494 io_cqring_ev_posted(ctx);
4e88d6e7
JA
5495 if (ret < 0)
5496 req_set_fail_links(req);
ec9c02ad 5497 io_put_req(req);
11365043 5498 return 0;
5262f567
JA
5499}
5500
3529d8c2 5501static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 5502 bool is_timeout_link)
5262f567 5503{
ad8a48ac 5504 struct io_timeout_data *data;
a41525ab 5505 unsigned flags;
56080b02 5506 u32 off = READ_ONCE(sqe->off);
5262f567 5507
ad8a48ac 5508 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 5509 return -EINVAL;
ad8a48ac 5510 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 5511 return -EINVAL;
56080b02 5512 if (off && is_timeout_link)
2d28390a 5513 return -EINVAL;
a41525ab
JA
5514 flags = READ_ONCE(sqe->timeout_flags);
5515 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 5516 return -EINVAL;
bdf20073 5517
bfe68a22 5518 req->timeout.off = off;
26a61679 5519
e8c2bc1f 5520 if (!req->async_data && io_alloc_async_data(req))
26a61679
JA
5521 return -ENOMEM;
5522
e8c2bc1f 5523 data = req->async_data;
ad8a48ac 5524 data->req = req;
ad8a48ac
JA
5525
5526 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
5527 return -EFAULT;
5528
8662daec 5529 data->mode = io_translate_timeout_mode(flags);
ad8a48ac
JA
5530 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5531 return 0;
5532}
5533
61e98203 5534static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
ad8a48ac 5535{
ad8a48ac 5536 struct io_ring_ctx *ctx = req->ctx;
e8c2bc1f 5537 struct io_timeout_data *data = req->async_data;
ad8a48ac 5538 struct list_head *entry;
bfe68a22 5539 u32 tail, off = req->timeout.off;
ad8a48ac 5540
733f5c95 5541 spin_lock_irq(&ctx->completion_lock);
93bd25bb 5542
5262f567
JA
5543 /*
 5544	 * sqe->off holds how many events need to occur for this
93bd25bb
JA
5545 * timeout event to be satisfied. If it isn't set, then this is
5546 * a pure timeout request, sequence isn't used.
5262f567 5547 */
8eb7e2d0 5548 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
5549 entry = ctx->timeout_list.prev;
5550 goto add;
5551 }
5262f567 5552
bfe68a22
PB
5553 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5554 req->timeout.target_seq = tail + off;
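	/*
	 * Illustrative arithmetic, not part of the original source (values
	 * assumed): if 100 CQEs have been posted (cached_cq_tail == 100), 10
	 * of them were timeout completions (cq_timeouts == 10) and sqe->off
	 * is 5, then tail == 90 and target_seq == 95, i.e. this timeout is
	 * satisfied once 95 non-timeout completions have been seen.
	 */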
5262f567 5555
f010505b
MDG
5556 /* Update the last seq here in case io_flush_timeouts() hasn't.
5557 * This is safe because ->completion_lock is held, and submissions
5558 * and completions are never mixed in the same ->completion_lock section.
5559 */
5560 ctx->cq_last_tm_flush = tail;
5561
5262f567
JA
5562 /*
5563 * Insertion sort, ensuring the first entry in the list is always
5564 * the one we need first.
5565 */
5262f567 5566 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
5567 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5568 timeout.list);
5262f567 5569
8eb7e2d0 5570 if (io_is_timeout_noseq(nxt))
93bd25bb 5571 continue;
bfe68a22
PB
5572 /* nxt.seq is behind @tail, otherwise would've been completed */
5573 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
5574 break;
5575 }
93bd25bb 5576add:
135fcde8 5577 list_add(&req->timeout.list, entry);
ad8a48ac
JA
5578 data->timer.function = io_timeout_fn;
5579 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 5580 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
5581 return 0;
5582}
5262f567 5583
62755e35
JA
5584static bool io_cancel_cb(struct io_wq_work *work, void *data)
5585{
5586 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5587
5588 return req->user_data == (unsigned long) data;
5589}
5590
5aa75ed5 5591static int io_async_cancel_one(struct io_uring_task *tctx, void *sqe_addr)
62755e35 5592{
62755e35 5593 enum io_wq_cancel cancel_ret;
62755e35
JA
5594 int ret = 0;
5595
5aa75ed5
JA
5596 if (!tctx->io_wq)
5597 return -ENOENT;
5598
5599 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, sqe_addr, false);
62755e35
JA
5600 switch (cancel_ret) {
5601 case IO_WQ_CANCEL_OK:
5602 ret = 0;
5603 break;
5604 case IO_WQ_CANCEL_RUNNING:
5605 ret = -EALREADY;
5606 break;
5607 case IO_WQ_CANCEL_NOTFOUND:
5608 ret = -ENOENT;
5609 break;
5610 }
5611
e977d6d3
JA
5612 return ret;
5613}
5614
47f46768
JA
5615static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5616 struct io_kiocb *req, __u64 sqe_addr,
014db007 5617 int success_ret)
47f46768
JA
5618{
5619 unsigned long flags;
5620 int ret;
5621
5aa75ed5
JA
5622 ret = io_async_cancel_one(req->task->io_uring,
5623 (void *) (unsigned long) sqe_addr);
47f46768
JA
5624 if (ret != -ENOENT) {
5625 spin_lock_irqsave(&ctx->completion_lock, flags);
5626 goto done;
5627 }
5628
5629 spin_lock_irqsave(&ctx->completion_lock, flags);
5630 ret = io_timeout_cancel(ctx, sqe_addr);
5631 if (ret != -ENOENT)
5632 goto done;
5633 ret = io_poll_cancel(ctx, sqe_addr);
5634done:
b0dd8a41
JA
5635 if (!ret)
5636 ret = success_ret;
47f46768
JA
5637 io_cqring_fill_event(req, ret);
5638 io_commit_cqring(ctx);
5639 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5640 io_cqring_ev_posted(ctx);
5641
4e88d6e7
JA
5642 if (ret < 0)
5643 req_set_fail_links(req);
014db007 5644 io_put_req(req);
47f46768
JA
5645}
5646
3529d8c2
JA
5647static int io_async_cancel_prep(struct io_kiocb *req,
5648 const struct io_uring_sqe *sqe)
e977d6d3 5649{
fbf23849 5650 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3 5651 return -EINVAL;
61710e43
DA
5652 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5653 return -EINVAL;
5654 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
e977d6d3
JA
5655 return -EINVAL;
5656
fbf23849
JA
5657 req->cancel.addr = READ_ONCE(sqe->addr);
5658 return 0;
5659}
5660
61e98203 5661static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
5662{
5663 struct io_ring_ctx *ctx = req->ctx;
fbf23849 5664
014db007 5665 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
5666 return 0;
5667}
5668
269bbe5f 5669static int io_rsrc_update_prep(struct io_kiocb *req,
05f3fb3c
JA
5670 const struct io_uring_sqe *sqe)
5671{
6ca56f84
JA
5672 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5673 return -EINVAL;
61710e43
DA
5674 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5675 return -EINVAL;
5676 if (sqe->ioprio || sqe->rw_flags)
05f3fb3c
JA
5677 return -EINVAL;
5678
269bbe5f
BM
5679 req->rsrc_update.offset = READ_ONCE(sqe->off);
5680 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5681 if (!req->rsrc_update.nr_args)
05f3fb3c 5682 return -EINVAL;
269bbe5f 5683 req->rsrc_update.arg = READ_ONCE(sqe->addr);
05f3fb3c
JA
5684 return 0;
5685}
5686
889fca73 5687static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
5688{
5689 struct io_ring_ctx *ctx = req->ctx;
269bbe5f 5690 struct io_uring_rsrc_update up;
05f3fb3c 5691 int ret;
fbf23849 5692
45d189c6 5693 if (issue_flags & IO_URING_F_NONBLOCK)
05f3fb3c 5694 return -EAGAIN;
05f3fb3c 5695
269bbe5f
BM
5696 up.offset = req->rsrc_update.offset;
5697 up.data = req->rsrc_update.arg;
05f3fb3c
JA
5698
5699 mutex_lock(&ctx->uring_lock);
269bbe5f 5700 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
05f3fb3c
JA
5701 mutex_unlock(&ctx->uring_lock);
5702
5703 if (ret < 0)
5704 req_set_fail_links(req);
889fca73 5705 __io_req_complete(req, issue_flags, ret, 0);
5262f567
JA
5706 return 0;
5707}
5708
bfe76559 5709static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 5710{
d625c6ee 5711 switch (req->opcode) {
e781573e 5712 case IORING_OP_NOP:
bfe76559 5713 return 0;
f67676d1
JA
5714 case IORING_OP_READV:
5715 case IORING_OP_READ_FIXED:
3a6820f2 5716 case IORING_OP_READ:
bfe76559 5717 return io_read_prep(req, sqe);
f67676d1
JA
5718 case IORING_OP_WRITEV:
5719 case IORING_OP_WRITE_FIXED:
3a6820f2 5720 case IORING_OP_WRITE:
bfe76559 5721 return io_write_prep(req, sqe);
0969e783 5722 case IORING_OP_POLL_ADD:
bfe76559 5723 return io_poll_add_prep(req, sqe);
0969e783 5724 case IORING_OP_POLL_REMOVE:
bfe76559 5725 return io_poll_remove_prep(req, sqe);
8ed8d3c3 5726 case IORING_OP_FSYNC:
1155c76a 5727 return io_fsync_prep(req, sqe);
8ed8d3c3 5728 case IORING_OP_SYNC_FILE_RANGE:
1155c76a 5729 return io_sfr_prep(req, sqe);
03b1230c 5730 case IORING_OP_SENDMSG:
fddaface 5731 case IORING_OP_SEND:
bfe76559 5732 return io_sendmsg_prep(req, sqe);
03b1230c 5733 case IORING_OP_RECVMSG:
fddaface 5734 case IORING_OP_RECV:
bfe76559 5735 return io_recvmsg_prep(req, sqe);
f499a021 5736 case IORING_OP_CONNECT:
bfe76559 5737 return io_connect_prep(req, sqe);
2d28390a 5738 case IORING_OP_TIMEOUT:
bfe76559 5739 return io_timeout_prep(req, sqe, false);
b29472ee 5740 case IORING_OP_TIMEOUT_REMOVE:
bfe76559 5741 return io_timeout_remove_prep(req, sqe);
fbf23849 5742 case IORING_OP_ASYNC_CANCEL:
bfe76559 5743 return io_async_cancel_prep(req, sqe);
2d28390a 5744 case IORING_OP_LINK_TIMEOUT:
bfe76559 5745 return io_timeout_prep(req, sqe, true);
8ed8d3c3 5746 case IORING_OP_ACCEPT:
bfe76559 5747 return io_accept_prep(req, sqe);
d63d1b5e 5748 case IORING_OP_FALLOCATE:
bfe76559 5749 return io_fallocate_prep(req, sqe);
15b71abe 5750 case IORING_OP_OPENAT:
bfe76559 5751 return io_openat_prep(req, sqe);
b5dba59e 5752 case IORING_OP_CLOSE:
bfe76559 5753 return io_close_prep(req, sqe);
05f3fb3c 5754 case IORING_OP_FILES_UPDATE:
269bbe5f 5755 return io_rsrc_update_prep(req, sqe);
eddc7ef5 5756 case IORING_OP_STATX:
bfe76559 5757 return io_statx_prep(req, sqe);
4840e418 5758 case IORING_OP_FADVISE:
bfe76559 5759 return io_fadvise_prep(req, sqe);
c1ca757b 5760 case IORING_OP_MADVISE:
bfe76559 5761 return io_madvise_prep(req, sqe);
cebdb986 5762 case IORING_OP_OPENAT2:
bfe76559 5763 return io_openat2_prep(req, sqe);
3e4827b0 5764 case IORING_OP_EPOLL_CTL:
bfe76559 5765 return io_epoll_ctl_prep(req, sqe);
7d67af2c 5766 case IORING_OP_SPLICE:
bfe76559 5767 return io_splice_prep(req, sqe);
ddf0322d 5768 case IORING_OP_PROVIDE_BUFFERS:
bfe76559 5769 return io_provide_buffers_prep(req, sqe);
067524e9 5770 case IORING_OP_REMOVE_BUFFERS:
bfe76559 5771 return io_remove_buffers_prep(req, sqe);
f2a8d5c7 5772 case IORING_OP_TEE:
bfe76559 5773 return io_tee_prep(req, sqe);
36f4fa68
JA
5774 case IORING_OP_SHUTDOWN:
5775 return io_shutdown_prep(req, sqe);
80a261fd
JA
5776 case IORING_OP_RENAMEAT:
5777 return io_renameat_prep(req, sqe);
14a1143b
JA
5778 case IORING_OP_UNLINKAT:
5779 return io_unlinkat_prep(req, sqe);
f67676d1
JA
5780 }
5781
bfe76559
PB
5782 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5783 req->opcode);
 5784	return -EINVAL;
5785}
5786
93642ef8 5787static int io_req_prep_async(struct io_kiocb *req)
bfe76559 5788{
93642ef8
PB
5789 switch (req->opcode) {
5790 case IORING_OP_READV:
5791 case IORING_OP_READ_FIXED:
5792 case IORING_OP_READ:
5793 return io_rw_prep_async(req, READ);
5794 case IORING_OP_WRITEV:
5795 case IORING_OP_WRITE_FIXED:
5796 case IORING_OP_WRITE:
5797 return io_rw_prep_async(req, WRITE);
5798 case IORING_OP_SENDMSG:
5799 case IORING_OP_SEND:
5800 return io_sendmsg_prep_async(req);
5801 case IORING_OP_RECVMSG:
5802 case IORING_OP_RECV:
5803 return io_recvmsg_prep_async(req);
5804 case IORING_OP_CONNECT:
5805 return io_connect_prep_async(req);
5806 }
5807 return 0;
5808}
5809
be7053b7 5810static int io_req_defer_prep(struct io_kiocb *req)
bfe76559 5811{
be7053b7 5812 if (!io_op_defs[req->opcode].needs_async_data)
bfe76559 5813 return 0;
be7053b7 5814	/* some opcodes init it during the initial prep */
93642ef8 5815 if (req->async_data)
be7053b7
PB
5816 return 0;
5817 if (__io_alloc_async_data(req))
bfe76559 5818 return -EAGAIN;
be7053b7 5819 return io_req_prep_async(req);
f67676d1
JA
5820}
5821
9cf7c104
PB
5822static u32 io_get_sequence(struct io_kiocb *req)
5823{
5824 struct io_kiocb *pos;
5825 struct io_ring_ctx *ctx = req->ctx;
f2f87370 5826 u32 total_submitted, nr_reqs = 0;
9cf7c104 5827
f2f87370
PB
5828 io_for_each_link(pos, req)
5829 nr_reqs++;
9cf7c104
PB
5830
5831 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5832 return total_submitted - nr_reqs;
5833}
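
/*
 * Illustrative example for io_get_sequence() above, not part of the original
 * source (counts assumed): if 12 SQEs have been consumed so far
 * (cached_sq_head - cached_sq_dropped == 12) and @req heads a three-request
 * link (itself plus two linked requests), io_for_each_link() counts
 * nr_reqs == 3 and the returned sequence is 9 -- the zero-based submission
 * index of @req.
 */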
5834
be7053b7 5835static int io_req_defer(struct io_kiocb *req)
de0617e4 5836{
a197f664 5837 struct io_ring_ctx *ctx = req->ctx;
27dc8338 5838 struct io_defer_entry *de;
f67676d1 5839 int ret;
9cf7c104 5840 u32 seq;
de0617e4 5841
9d858b21 5842 /* Still need defer if there is pending req in defer list. */
9cf7c104
PB
5843 if (likely(list_empty_careful(&ctx->defer_list) &&
5844 !(req->flags & REQ_F_IO_DRAIN)))
5845 return 0;
5846
5847 seq = io_get_sequence(req);
5848 /* Still a chance to pass the sequence check */
5849 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
de0617e4
JA
5850 return 0;
5851
be7053b7
PB
5852 ret = io_req_defer_prep(req);
5853 if (ret)
5854 return ret;
cbdcb435 5855 io_prep_async_link(req);
27dc8338
PB
5856 de = kmalloc(sizeof(*de), GFP_KERNEL);
5857 if (!de)
5858 return -ENOMEM;
2d28390a 5859
de0617e4 5860 spin_lock_irq(&ctx->completion_lock);
9cf7c104 5861 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
de0617e4 5862 spin_unlock_irq(&ctx->completion_lock);
27dc8338 5863 kfree(de);
ae34817b
PB
5864 io_queue_async_work(req);
5865 return -EIOCBQUEUED;
de0617e4
JA
5866 }
5867
915967f6 5868 trace_io_uring_defer(ctx, req, req->user_data);
27dc8338 5869 de->req = req;
9cf7c104 5870 de->seq = seq;
27dc8338 5871 list_add_tail(&de->list, &ctx->defer_list);
de0617e4
JA
5872 spin_unlock_irq(&ctx->completion_lock);
5873 return -EIOCBQUEUED;
5874}
5875
3ca405eb 5876static void __io_clean_op(struct io_kiocb *req)
99bc4c38 5877{
0e1b6fe3
PB
5878 if (req->flags & REQ_F_BUFFER_SELECTED) {
5879 switch (req->opcode) {
5880 case IORING_OP_READV:
5881 case IORING_OP_READ_FIXED:
5882 case IORING_OP_READ:
bcda7baa 5883 kfree((void *)(unsigned long)req->rw.addr);
0e1b6fe3
PB
5884 break;
5885 case IORING_OP_RECVMSG:
5886 case IORING_OP_RECV:
bcda7baa 5887 kfree(req->sr_msg.kbuf);
0e1b6fe3
PB
5888 break;
5889 }
5890 req->flags &= ~REQ_F_BUFFER_SELECTED;
99bc4c38
PB
5891 }
5892
0e1b6fe3
PB
5893 if (req->flags & REQ_F_NEED_CLEANUP) {
5894 switch (req->opcode) {
5895 case IORING_OP_READV:
5896 case IORING_OP_READ_FIXED:
5897 case IORING_OP_READ:
5898 case IORING_OP_WRITEV:
5899 case IORING_OP_WRITE_FIXED:
e8c2bc1f
JA
5900 case IORING_OP_WRITE: {
5901 struct io_async_rw *io = req->async_data;
5902 if (io->free_iovec)
5903 kfree(io->free_iovec);
0e1b6fe3 5904 break;
e8c2bc1f 5905 }
0e1b6fe3 5906 case IORING_OP_RECVMSG:
e8c2bc1f
JA
5907 case IORING_OP_SENDMSG: {
5908 struct io_async_msghdr *io = req->async_data;
257e84a5
PB
5909
5910 kfree(io->free_iov);
0e1b6fe3 5911 break;
e8c2bc1f 5912 }
0e1b6fe3
PB
5913 case IORING_OP_SPLICE:
5914 case IORING_OP_TEE:
5915 io_put_file(req, req->splice.file_in,
5916 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5917 break;
f3cd4850
JA
5918 case IORING_OP_OPENAT:
5919 case IORING_OP_OPENAT2:
5920 if (req->open.filename)
5921 putname(req->open.filename);
5922 break;
80a261fd
JA
5923 case IORING_OP_RENAMEAT:
5924 putname(req->rename.oldpath);
5925 putname(req->rename.newpath);
5926 break;
14a1143b
JA
5927 case IORING_OP_UNLINKAT:
5928 putname(req->unlink.filename);
5929 break;
0e1b6fe3
PB
5930 }
5931 req->flags &= ~REQ_F_NEED_CLEANUP;
99bc4c38 5932 }
99bc4c38
PB
5933}
5934
889fca73 5935static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 5936{
a197f664 5937 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 5938 int ret;
2b188cc1 5939
d625c6ee 5940 switch (req->opcode) {
2b188cc1 5941 case IORING_OP_NOP:
889fca73 5942 ret = io_nop(req, issue_flags);
2b188cc1
JA
5943 break;
5944 case IORING_OP_READV:
edafccee 5945 case IORING_OP_READ_FIXED:
3a6820f2 5946 case IORING_OP_READ:
889fca73 5947 ret = io_read(req, issue_flags);
edafccee 5948 break;
3529d8c2 5949 case IORING_OP_WRITEV:
edafccee 5950 case IORING_OP_WRITE_FIXED:
3a6820f2 5951 case IORING_OP_WRITE:
889fca73 5952 ret = io_write(req, issue_flags);
2b188cc1 5953 break;
c992fe29 5954 case IORING_OP_FSYNC:
45d189c6 5955 ret = io_fsync(req, issue_flags);
c992fe29 5956 break;
221c5eb2 5957 case IORING_OP_POLL_ADD:
61e98203 5958 ret = io_poll_add(req, issue_flags);
221c5eb2
JA
5959 break;
5960 case IORING_OP_POLL_REMOVE:
61e98203 5961 ret = io_poll_remove(req, issue_flags);
221c5eb2 5962 break;
5d17b4a4 5963 case IORING_OP_SYNC_FILE_RANGE:
45d189c6 5964 ret = io_sync_file_range(req, issue_flags);
5d17b4a4 5965 break;
0fa03c62 5966 case IORING_OP_SENDMSG:
889fca73 5967 ret = io_sendmsg(req, issue_flags);
062d04d7 5968 break;
fddaface 5969 case IORING_OP_SEND:
889fca73 5970 ret = io_send(req, issue_flags);
0fa03c62 5971 break;
aa1fa28f 5972 case IORING_OP_RECVMSG:
889fca73 5973 ret = io_recvmsg(req, issue_flags);
062d04d7 5974 break;
fddaface 5975 case IORING_OP_RECV:
889fca73 5976 ret = io_recv(req, issue_flags);
aa1fa28f 5977 break;
5262f567 5978 case IORING_OP_TIMEOUT:
61e98203 5979 ret = io_timeout(req, issue_flags);
5262f567 5980 break;
11365043 5981 case IORING_OP_TIMEOUT_REMOVE:
61e98203 5982 ret = io_timeout_remove(req, issue_flags);
11365043 5983 break;
17f2fe35 5984 case IORING_OP_ACCEPT:
889fca73 5985 ret = io_accept(req, issue_flags);
17f2fe35 5986 break;
f8e85cf2 5987 case IORING_OP_CONNECT:
889fca73 5988 ret = io_connect(req, issue_flags);
f8e85cf2 5989 break;
62755e35 5990 case IORING_OP_ASYNC_CANCEL:
61e98203 5991 ret = io_async_cancel(req, issue_flags);
62755e35 5992 break;
d63d1b5e 5993 case IORING_OP_FALLOCATE:
45d189c6 5994 ret = io_fallocate(req, issue_flags);
d63d1b5e 5995 break;
15b71abe 5996 case IORING_OP_OPENAT:
45d189c6 5997 ret = io_openat(req, issue_flags);
15b71abe 5998 break;
b5dba59e 5999 case IORING_OP_CLOSE:
889fca73 6000 ret = io_close(req, issue_flags);
b5dba59e 6001 break;
05f3fb3c 6002 case IORING_OP_FILES_UPDATE:
889fca73 6003 ret = io_files_update(req, issue_flags);
05f3fb3c 6004 break;
eddc7ef5 6005 case IORING_OP_STATX:
45d189c6 6006 ret = io_statx(req, issue_flags);
eddc7ef5 6007 break;
4840e418 6008 case IORING_OP_FADVISE:
45d189c6 6009 ret = io_fadvise(req, issue_flags);
4840e418 6010 break;
c1ca757b 6011 case IORING_OP_MADVISE:
45d189c6 6012 ret = io_madvise(req, issue_flags);
c1ca757b 6013 break;
cebdb986 6014 case IORING_OP_OPENAT2:
45d189c6 6015 ret = io_openat2(req, issue_flags);
cebdb986 6016 break;
3e4827b0 6017 case IORING_OP_EPOLL_CTL:
889fca73 6018 ret = io_epoll_ctl(req, issue_flags);
3e4827b0 6019 break;
7d67af2c 6020 case IORING_OP_SPLICE:
45d189c6 6021 ret = io_splice(req, issue_flags);
7d67af2c 6022 break;
ddf0322d 6023 case IORING_OP_PROVIDE_BUFFERS:
889fca73 6024 ret = io_provide_buffers(req, issue_flags);
ddf0322d 6025 break;
067524e9 6026 case IORING_OP_REMOVE_BUFFERS:
889fca73 6027 ret = io_remove_buffers(req, issue_flags);
3e4827b0 6028 break;
f2a8d5c7 6029 case IORING_OP_TEE:
45d189c6 6030 ret = io_tee(req, issue_flags);
f2a8d5c7 6031 break;
36f4fa68 6032 case IORING_OP_SHUTDOWN:
45d189c6 6033 ret = io_shutdown(req, issue_flags);
36f4fa68 6034 break;
80a261fd 6035 case IORING_OP_RENAMEAT:
45d189c6 6036 ret = io_renameat(req, issue_flags);
80a261fd 6037 break;
14a1143b 6038 case IORING_OP_UNLINKAT:
45d189c6 6039 ret = io_unlinkat(req, issue_flags);
14a1143b 6040 break;
2b188cc1
JA
6041 default:
6042 ret = -EINVAL;
6043 break;
6044 }
6045
def596e9
JA
6046 if (ret)
6047 return ret;
6048
b532576e
JA
6049 /* If the op doesn't have a file, we're not polling for it */
6050 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
11ba820b
JA
6051 const bool in_async = io_wq_current_is_worker();
6052
11ba820b
JA
6053 /* workqueue context doesn't hold uring_lock, grab it now */
6054 if (in_async)
6055 mutex_lock(&ctx->uring_lock);
6056
2e9dbe90 6057 io_iopoll_req_issued(req, in_async);
11ba820b
JA
6058
6059 if (in_async)
6060 mutex_unlock(&ctx->uring_lock);
def596e9
JA
6061 }
6062
6063 return 0;
2b188cc1
JA
6064}
6065
5280f7e5 6066static void io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
6067{
6068 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6df1db6b 6069 struct io_kiocb *timeout;
561fb04a 6070 int ret = 0;
2b188cc1 6071
6df1db6b
PB
6072 timeout = io_prep_linked_timeout(req);
6073 if (timeout)
6074 io_queue_linked_timeout(timeout);
d4c81f38 6075
4014d943 6076 if (work->flags & IO_WQ_WORK_CANCEL)
561fb04a 6077 ret = -ECANCELED;
31b51510 6078
561fb04a 6079 if (!ret) {
561fb04a 6080 do {
889fca73 6081 ret = io_issue_sqe(req, 0);
561fb04a
JA
6082 /*
6083 * We can get EAGAIN for polled IO even though we're
6084 * forcing a sync submission from here, since we can't
6085 * wait for request slots on the block side.
6086 */
6087 if (ret != -EAGAIN)
6088 break;
6089 cond_resched();
6090 } while (1);
6091 }
31b51510 6092
a3df7698 6093 /* avoid locking problems by failing it from a clean context */
561fb04a 6094 if (ret) {
a3df7698
PB
6095 /* io-wq is going to take one down */
6096 refcount_inc(&req->refs);
6097 io_req_task_queue_fail(req, ret);
edafccee 6098 }
2b188cc1
JA
6099}
6100
65e19f54
JA
6101static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6102 int index)
6103{
269bbe5f 6104 struct fixed_rsrc_table *table;
65e19f54 6105
05f3fb3c 6106 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
84695089 6107 return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
6108}
6109
8371adf5
PB
6110static struct file *io_file_get(struct io_submit_state *state,
6111 struct io_kiocb *req, int fd, bool fixed)
09bb8394 6112{
a197f664 6113 struct io_ring_ctx *ctx = req->ctx;
8da11c19 6114 struct file *file;
09bb8394 6115
8da11c19 6116 if (fixed) {
479f517b 6117 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
8371adf5 6118 return NULL;
b7620121 6119 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19 6120 file = io_file_from_index(ctx, fd);
36f72fe2 6121 io_set_resource_node(req);
09bb8394 6122 } else {
c826bd7a 6123 trace_io_uring_file_get(ctx, fd);
8da11c19 6124 file = __io_file_get(state, fd);
09bb8394
JA
6125 }
6126
ce3d5aae
PB
6127 if (file && unlikely(file->f_op == &io_uring_fops))
6128 io_req_track_inflight(req);
8371adf5 6129 return file;
09bb8394
JA
6130}
6131
2665abfd 6132static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 6133{
ad8a48ac
JA
6134 struct io_timeout_data *data = container_of(timer,
6135 struct io_timeout_data, timer);
90cd7e42 6136 struct io_kiocb *prev, *req = data->req;
2665abfd 6137 struct io_ring_ctx *ctx = req->ctx;
2665abfd 6138 unsigned long flags;
2665abfd
JA
6139
6140 spin_lock_irqsave(&ctx->completion_lock, flags);
90cd7e42
PB
6141 prev = req->timeout.head;
6142 req->timeout.head = NULL;
2665abfd
JA
6143
6144 /*
6145 * We don't expect the list to be empty, that will only happen if we
6146 * race with the completion of the linked work.
6147 */
90cd7e42 6148 if (prev && refcount_inc_not_zero(&prev->refs))
f2f87370 6149 io_remove_next_linked(prev);
90cd7e42
PB
6150 else
6151 prev = NULL;
2665abfd
JA
6152 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6153
6154 if (prev) {
4e88d6e7 6155 req_set_fail_links(prev);
014db007 6156 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
9ae1f8dd 6157 io_put_req_deferred(prev, 1);
47f46768 6158 } else {
9ae1f8dd
PB
6159 io_req_complete_post(req, -ETIME, 0);
6160 io_put_req_deferred(req, 1);
2665abfd 6161 }
2665abfd
JA
6162 return HRTIMER_NORESTART;
6163}
6164
7271ef3a 6165static void __io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 6166{
76a46e06 6167 /*
f2f87370
PB
6168 * If the back reference is NULL, then our linked request finished
 6169	 * before we got a chance to set up the timer
76a46e06 6170 */
90cd7e42 6171 if (req->timeout.head) {
e8c2bc1f 6172 struct io_timeout_data *data = req->async_data;
94ae5e77 6173
ad8a48ac
JA
6174 data->timer.function = io_link_timeout_fn;
6175 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6176 data->mode);
2665abfd 6177 }
7271ef3a
JA
6178}
6179
6180static void io_queue_linked_timeout(struct io_kiocb *req)
6181{
6182 struct io_ring_ctx *ctx = req->ctx;
6183
6184 spin_lock_irq(&ctx->completion_lock);
6185 __io_queue_linked_timeout(req);
76a46e06 6186 spin_unlock_irq(&ctx->completion_lock);
2665abfd 6187
2665abfd 6188 /* drop submission reference */
76a46e06
JA
6189 io_put_req(req);
6190}
2665abfd 6191
ad8a48ac 6192static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd 6193{
f2f87370 6194 struct io_kiocb *nxt = req->link;
2665abfd 6195
f2f87370
PB
6196 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6197 nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 6198 return NULL;
2665abfd 6199
90cd7e42 6200 nxt->timeout.head = req;
900fad45 6201 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
76a46e06 6202 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 6203 return nxt;
2665abfd
JA
6204}
6205
c5eef2b9 6206static void __io_queue_sqe(struct io_kiocb *req)
2b188cc1 6207{
d3d7298d 6208 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
193155c8 6209 const struct cred *old_creds = NULL;
e0c5c576 6210 int ret;
2b188cc1 6211
4379bf8b
JA
6212 if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
6213 req->work.creds != current_cred())
6214 old_creds = override_creds(req->work.creds);
193155c8 6215
c5eef2b9 6216 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
193155c8 6217
d3d7298d
PB
6218 if (old_creds)
6219 revert_creds(old_creds);
491381ce
JA
6220
6221 /*
6222 * We async punt it if the file wasn't marked NOWAIT, or if the file
6223 * doesn't support non-blocking read/write attempts
6224 */
24c74678 6225 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
f063c547 6226 if (!io_arm_poll_handler(req)) {
f063c547
PB
6227 /*
6228 * Queued up for async execution, worker will release
6229 * submit reference when the iocb is actually submitted.
6230 */
6231 io_queue_async_work(req);
2b188cc1 6232 }
0d63c148
PB
6233 } else if (likely(!ret)) {
6234 /* drop submission reference */
e342c807 6235 if (req->flags & REQ_F_COMPLETE_INLINE) {
c5eef2b9
PB
6236 struct io_ring_ctx *ctx = req->ctx;
6237 struct io_comp_state *cs = &ctx->submit_state.comp;
e65ef56d 6238
6dd0be1e 6239 cs->reqs[cs->nr++] = req;
d3d7298d 6240 if (cs->nr == ARRAY_SIZE(cs->reqs))
c5eef2b9 6241 io_submit_flush_completions(cs, ctx);
9affd664 6242 } else {
d3d7298d 6243 io_put_req(req);
0d63c148
PB
6244 }
6245 } else {
4e88d6e7 6246 req_set_fail_links(req);
e65ef56d 6247 io_put_req(req);
e1e16097 6248 io_req_complete(req, ret);
9e645e11 6249 }
d3d7298d
PB
6250 if (linked_timeout)
6251 io_queue_linked_timeout(linked_timeout);
2b188cc1
JA
6252}
6253
be7053b7 6254static void io_queue_sqe(struct io_kiocb *req)
4fe2c963
JL
6255{
6256 int ret;
6257
be7053b7 6258 ret = io_req_defer(req);
4fe2c963
JL
6259 if (ret) {
6260 if (ret != -EIOCBQUEUED) {
1118591a 6261fail_req:
4e88d6e7 6262 req_set_fail_links(req);
e1e16097
JA
6263 io_put_req(req);
6264 io_req_complete(req, ret);
4fe2c963 6265 }
2550878f 6266 } else if (req->flags & REQ_F_FORCE_ASYNC) {
be7053b7
PB
6267 ret = io_req_defer_prep(req);
6268 if (unlikely(ret))
6269 goto fail_req;
ce35a47a
JA
6270 io_queue_async_work(req);
6271 } else {
c5eef2b9 6272 __io_queue_sqe(req);
ce35a47a 6273 }
4fe2c963
JL
6274}
6275
b16fed66
PB
6276/*
6277 * Check SQE restrictions (opcode and flags).
6278 *
6279 * Returns 'true' if SQE is allowed, 'false' otherwise.
6280 */
6281static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6282 struct io_kiocb *req,
6283 unsigned int sqe_flags)
4fe2c963 6284{
b16fed66
PB
6285 if (!ctx->restricted)
6286 return true;
6287
6288 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6289 return false;
6290
6291 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6292 ctx->restrictions.sqe_flags_required)
6293 return false;
6294
6295 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6296 ctx->restrictions.sqe_flags_required))
6297 return false;
6298
6299 return true;
4fe2c963
JL
6300}
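
/*
 * Illustrative check for io_check_restriction() above, not part of the
 * original source (restriction values assumed): with
 * sqe_flags_required == IOSQE_FIXED_FILE and
 * sqe_flags_allowed == IOSQE_IO_LINK, an SQE carrying
 * IOSQE_FIXED_FILE | IOSQE_IO_LINK passes, one missing IOSQE_FIXED_FILE
 * fails the "required" test, and one adding IOSQE_ASYNC fails the
 * "allowed" test.
 */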
6301
b16fed66
PB
6302static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6303 const struct io_uring_sqe *sqe)
6304{
6305 struct io_submit_state *state;
6306 unsigned int sqe_flags;
6307 int id, ret = 0;
6308
6309 req->opcode = READ_ONCE(sqe->opcode);
 6310	/* same numerical values as the corresponding REQ_F_*, safe to copy */
6311 req->flags = sqe_flags = READ_ONCE(sqe->flags);
6312 req->user_data = READ_ONCE(sqe->user_data);
6313 req->async_data = NULL;
6314 req->file = NULL;
6315 req->ctx = ctx;
6316 req->link = NULL;
6317 req->fixed_rsrc_refs = NULL;
6318 /* one is dropped after submission, the other at completion */
6319 refcount_set(&req->refs, 2);
6320 req->task = current;
6321 req->result = 0;
6322
6323 /* enforce forwards compatibility on users */
ebf4a5db
PB
6324 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
6325 req->flags = 0;
b16fed66 6326 return -EINVAL;
ebf4a5db 6327 }
b16fed66
PB
6328
6329 if (unlikely(req->opcode >= IORING_OP_LAST))
6330 return -EINVAL;
6331
b16fed66
PB
6332 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6333 return -EACCES;
6334
6335 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6336 !io_op_defs[req->opcode].buffer_select)
6337 return -EOPNOTSUPP;
863e0560 6338
b16fed66
PB
6339 id = READ_ONCE(sqe->personality);
6340 if (id) {
b16fed66 6341 __io_req_init_async(req);
4379bf8b
JA
6342 req->work.creds = idr_find(&ctx->personality_idr, id);
6343 if (unlikely(!req->work.creds))
6344 return -EINVAL;
6345 get_cred(req->work.creds);
b16fed66
PB
6346 }
6347
6348 state = &ctx->submit_state;
6349
6350 /*
6351 * Plug now if we have more than 1 IO left after this, and the target
6352 * is potentially a read/write to block based storage.
6353 */
6354 if (!state->plug_started && state->ios_left > 1 &&
6355 io_op_defs[req->opcode].plug) {
6356 blk_start_plug(&state->plug);
6357 state->plug_started = true;
6358 }
6359
6360 if (io_op_defs[req->opcode].needs_file) {
6361 bool fixed = req->flags & REQ_F_FIXED_FILE;
6362
6363 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
6364 if (unlikely(!req->file))
6365 ret = -EBADF;
6366 }
6367
6368 state->ios_left--;
6369 return ret;
6370}
6371
a6b8cadc 6372static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
a1ab7b35 6373 const struct io_uring_sqe *sqe)
9e645e11 6374{
a1ab7b35 6375 struct io_submit_link *link = &ctx->submit_state.link;
ef4ff581 6376 int ret;
9e645e11 6377
a6b8cadc
PB
6378 ret = io_init_req(ctx, req, sqe);
6379 if (unlikely(ret)) {
6380fail_req:
6381 io_put_req(req);
6382 io_req_complete(req, ret);
de59bc10
PB
6383 if (link->head) {
6384 /* fail even hard links since we don't submit */
cf109604 6385 link->head->flags |= REQ_F_FAIL_LINK;
de59bc10
PB
6386 io_put_req(link->head);
6387 io_req_complete(link->head, -ECANCELED);
6388 link->head = NULL;
6389 }
a6b8cadc
PB
6390 return ret;
6391 }
be7053b7
PB
6392 ret = io_req_prep(req, sqe);
6393 if (unlikely(ret))
6394 goto fail_req;
a6b8cadc 6395
be7053b7 6396 /* don't need @sqe from now on */
a6b8cadc
PB
6397 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6398 true, ctx->flags & IORING_SETUP_SQPOLL);
6399
9e645e11
JA
6400 /*
6401 * If we already have a head request, queue this one for async
6402 * submittal once the head completes. If we don't have a head but
6403 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6404 * submitted sync once the chain is complete. If none of those
6405 * conditions are true (normal request), then just queue it.
6406 */
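	/*
	 * Illustrative (hypothetical) submission, not part of the original
	 * source: three SQEs where the first two set IOSQE_IO_LINK form one
	 * chain -- the first becomes link->head, the next is appended via
	 * link->last->link, and the whole chain is queued when the third SQE
	 * (without the link flag) is handled below.
	 */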
863e0560
PB
6407 if (link->head) {
6408 struct io_kiocb *head = link->head;
4e88d6e7 6409
8cdf2193
PB
6410 /*
6411 * Taking sequential execution of a link, draining both sides
 6412	 * of the link also fulfills IOSQE_IO_DRAIN semantics for all
6413 * requests in the link. So, it drains the head and the
6414 * next after the link request. The last one is done via
6415 * drain_next flag to persist the effect across calls.
6416 */
ef4ff581 6417 if (req->flags & REQ_F_IO_DRAIN) {
711be031
PB
6418 head->flags |= REQ_F_IO_DRAIN;
6419 ctx->drain_next = 1;
6420 }
be7053b7 6421 ret = io_req_defer_prep(req);
cf109604 6422 if (unlikely(ret))
a6b8cadc 6423 goto fail_req;
9d76377f 6424 trace_io_uring_link(ctx, req, head);
f2f87370 6425 link->last->link = req;
863e0560 6426 link->last = req;
32fe525b
PB
6427
6428 /* last request of a link, enqueue the link */
ef4ff581 6429 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
de59bc10 6430 io_queue_sqe(head);
863e0560 6431 link->head = NULL;
32fe525b 6432 }
9e645e11 6433 } else {
711be031
PB
6434 if (unlikely(ctx->drain_next)) {
6435 req->flags |= REQ_F_IO_DRAIN;
ef4ff581 6436 ctx->drain_next = 0;
711be031 6437 }
ef4ff581 6438 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
863e0560
PB
6439 link->head = req;
6440 link->last = req;
711be031 6441 } else {
be7053b7 6442 io_queue_sqe(req);
711be031 6443 }
9e645e11 6444 }
2e6e1fde 6445
1d4240cc 6446 return 0;
9e645e11
JA
6447}
6448
9a56a232
JA
6449/*
6450 * Batched submission is done, ensure local IO is flushed out.
6451 */
ba88ff11
PB
6452static void io_submit_state_end(struct io_submit_state *state,
6453 struct io_ring_ctx *ctx)
9a56a232 6454{
a1ab7b35 6455 if (state->link.head)
de59bc10 6456 io_queue_sqe(state->link.head);
6dd0be1e 6457 if (state->comp.nr)
ba88ff11 6458 io_submit_flush_completions(&state->comp, ctx);
27926b68
JA
6459 if (state->plug_started)
6460 blk_finish_plug(&state->plug);
9f13c35b 6461 io_state_file_put(state);
9a56a232
JA
6462}
6463
6464/*
6465 * Start submission side cache.
6466 */
6467static void io_submit_state_start(struct io_submit_state *state,
ba88ff11 6468 unsigned int max_ios)
9a56a232 6469{
27926b68 6470 state->plug_started = false;
9a56a232 6471 state->ios_left = max_ios;
a1ab7b35
PB
6472 /* set only head, no need to init link_last in advance */
6473 state->link.head = NULL;
9a56a232
JA
6474}
6475
2b188cc1
JA
6476static void io_commit_sqring(struct io_ring_ctx *ctx)
6477{
75b28aff 6478 struct io_rings *rings = ctx->rings;
2b188cc1 6479
caf582c6
PB
6480 /*
6481 * Ensure any loads from the SQEs are done at this point,
6482 * since once we write the new head, the application could
6483 * write new data to them.
6484 */
6485 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
6486}
6487
2b188cc1 6488/*
3529d8c2 6489 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
2b188cc1
JA
6490 * that is mapped by userspace. This means that care needs to be taken to
6491 * ensure that reads are stable, as we cannot rely on userspace always
6492 * being a good citizen. If members of the sqe are validated and then later
6493 * used, it's important that those reads are done through READ_ONCE() to
6494 * prevent a re-load down the line.
6495 */
709b302f 6496static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 6497{
75b28aff 6498 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
6499 unsigned head;
6500
6501 /*
6502 * The cached sq head (or cq tail) serves two purposes:
6503 *
6504 * 1) allows us to batch the cost of updating the user visible
 6505	 * head.
6506 * 2) allows the kernel side to track the head on its own, even
6507 * though the application is the one updating it.
6508 */
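	/*
	 * Illustrative wrap-around, not part of the original source (sizes
	 * assumed): with sq_entries == 8 and sq_mask == 7, a cached_sq_head
	 * of 9 reads sq_array[9 & 7] == sq_array[1]; the 32-bit counter is
	 * free-running and only masked on access.
	 */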
4fccfcbb 6509 head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
709b302f
PB
6510 if (likely(head < ctx->sq_entries))
6511 return &ctx->sq_sqes[head];
2b188cc1
JA
6512
6513 /* drop invalid entries */
498ccd9e 6514 ctx->cached_sq_dropped++;
ee7d46d9 6515 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
709b302f
PB
6516 return NULL;
6517}
6518
0f212204 6519static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
6c271ce2 6520{
46c4e16a 6521 int submitted = 0;
6c271ce2 6522
c4a2ed72 6523 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8 6524 if (test_bit(0, &ctx->sq_check_overflow)) {
6c503150 6525 if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
ad3eb2c8
JA
6526 return -EBUSY;
6527 }
6c271ce2 6528
ee7d46d9
PB
6529 /* make sure SQ entry isn't read before tail */
6530 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 6531
2b85edfc
PB
6532 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6533 return -EAGAIN;
6c271ce2 6534
d8a6df10 6535 percpu_counter_add(&current->io_uring->inflight, nr);
faf7b51c 6536 refcount_add(nr, &current->usage);
ba88ff11 6537 io_submit_state_start(&ctx->submit_state, nr);
b14cca0c 6538
46c4e16a 6539 while (submitted < nr) {
3529d8c2 6540 const struct io_uring_sqe *sqe;
196be95c 6541 struct io_kiocb *req;
fb5ccc98 6542
258b29a9 6543 req = io_alloc_req(ctx);
196be95c
PB
6544 if (unlikely(!req)) {
6545 if (!submitted)
6546 submitted = -EAGAIN;
fb5ccc98 6547 break;
196be95c 6548 }
4fccfcbb
PB
6549 sqe = io_get_sqe(ctx);
6550 if (unlikely(!sqe)) {
6551 kmem_cache_free(req_cachep, req);
6552 break;
6553 }
d3656344
JA
6554 /* will complete beyond this point, count as submitted */
6555 submitted++;
a1ab7b35 6556 if (io_submit_sqe(ctx, req, sqe))
196be95c 6557 break;
6c271ce2
JA
6558 }
6559
9466f437
PB
6560 if (unlikely(submitted != nr)) {
6561 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
d8a6df10
JA
6562 struct io_uring_task *tctx = current->io_uring;
6563 int unused = nr - ref_used;
9466f437 6564
d8a6df10
JA
6565 percpu_ref_put_many(&ctx->refs, unused);
6566 percpu_counter_sub(&tctx->inflight, unused);
6567 put_task_struct_many(current, unused);
9466f437 6568 }
6c271ce2 6569
a1ab7b35 6570 io_submit_state_end(&ctx->submit_state, ctx);
ae9428ca
PB
6571 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6572 io_commit_sqring(ctx);
6573
6c271ce2
JA
6574 return submitted;
6575}
6576
23b3628e
XW
6577static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6578{
6579 /* Tell userspace we may need a wakeup call */
6580 spin_lock_irq(&ctx->completion_lock);
6581 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6582 spin_unlock_irq(&ctx->completion_lock);
6583}
6584
6585static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6586{
6587 spin_lock_irq(&ctx->completion_lock);
6588 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6589 spin_unlock_irq(&ctx->completion_lock);
6590}
6591
08369246 6592static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6c271ce2 6593{
c8d1ba58 6594 unsigned int to_submit;
bdcd3eab 6595 int ret = 0;
6c271ce2 6596
c8d1ba58 6597 to_submit = io_sqring_entries(ctx);
e95eee2d
JA
6598 /* if we're handling multiple rings, cap submit size for fairness */
6599 if (cap_entries && to_submit > 8)
6600 to_submit = 8;
6601
906a3c6f 6602 if (!list_empty(&ctx->iopoll_list) || to_submit) {
c8d1ba58 6603 unsigned nr_events = 0;
a4c0b3de 6604
c8d1ba58 6605 mutex_lock(&ctx->uring_lock);
906a3c6f 6606 if (!list_empty(&ctx->iopoll_list))
c8d1ba58 6607 io_do_iopoll(ctx, &nr_events, 0);
906a3c6f 6608
d9d05217
PB
6609 if (to_submit && !ctx->sqo_dead &&
6610 likely(!percpu_ref_is_dying(&ctx->refs)))
08369246 6611 ret = io_submit_sqes(ctx, to_submit);
c8d1ba58
JA
6612 mutex_unlock(&ctx->uring_lock);
6613 }
6c271ce2 6614
90554200
JA
6615 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6616 wake_up(&ctx->sqo_sq_wait);
6c271ce2 6617
08369246
XW
6618 return ret;
6619}
6c271ce2 6620
08369246
XW
6621static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6622{
6623 struct io_ring_ctx *ctx;
6624 unsigned sq_thread_idle = 0;
6c271ce2 6625
08369246
XW
6626 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6627 if (sq_thread_idle < ctx->sq_thread_idle)
6628 sq_thread_idle = ctx->sq_thread_idle;
c8d1ba58 6629 }
c1edbf5f 6630
08369246 6631 sqd->sq_thread_idle = sq_thread_idle;
c8d1ba58 6632}
6c271ce2 6633
69fb2131
JA
6634static void io_sqd_init_new(struct io_sq_data *sqd)
6635{
6636 struct io_ring_ctx *ctx;
6637
6638 while (!list_empty(&sqd->ctx_new_list)) {
6639 ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
69fb2131
JA
6640 list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
6641 complete(&ctx->sq_thread_comp);
6642 }
08369246
XW
6643
6644 io_sqd_update_thread_idle(sqd);
69fb2131
JA
6645}
6646
37d1e2e3
JA
6647static bool io_sq_thread_should_stop(struct io_sq_data *sqd)
6648{
6649 return test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
6650}
6651
6652static bool io_sq_thread_should_park(struct io_sq_data *sqd)
6653{
6654 return test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
6655}
6656
6657static void io_sq_thread_parkme(struct io_sq_data *sqd)
6658{
6659 for (;;) {
6660 /*
6661 * TASK_PARKED is a special state; we must serialize against
6662 * possible pending wakeups to avoid store-store collisions on
6663 * task->state.
6664 *
6665 * Such a collision might possibly result in the task state
6666 * changin from TASK_PARKED and us failing the
6667 * wait_task_inactive() in kthread_park().
6668 */
6669 set_special_state(TASK_PARKED);
6670 if (!test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state))
6671 break;
6672
6673 /*
6674 * Thread is going to call schedule(), do not preempt it,
6675 * or the caller of kthread_park() may spend more time in
6676 * wait_task_inactive().
6677 */
6678 preempt_disable();
6679 complete(&sqd->completion);
6680 schedule_preempt_disabled();
6681 preempt_enable();
6682 }
6683 __set_current_state(TASK_RUNNING);
6684}
6685
c8d1ba58
JA
6686static int io_sq_thread(void *data)
6687{
69fb2131
JA
6688 struct io_sq_data *sqd = data;
6689 struct io_ring_ctx *ctx;
a0d9205f 6690 unsigned long timeout = 0;
37d1e2e3 6691 char buf[TASK_COMM_LEN];
08369246 6692 DEFINE_WAIT(wait);
6c271ce2 6693
37d1e2e3
JA
6694 sprintf(buf, "iou-sqp-%d", sqd->task_pid);
6695 set_task_comm(current, buf);
6696 sqd->thread = current;
6697 current->pf_io_worker = NULL;
6698
6699 if (sqd->sq_cpu != -1)
6700 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6701 else
6702 set_cpus_allowed_ptr(current, cpu_online_mask);
6703 current->flags |= PF_NO_SETAFFINITY;
6704
6705 complete(&sqd->completion);
6c271ce2 6706
37d1e2e3
JA
6707 wait_for_completion(&sqd->startup);
6708
6709 while (!io_sq_thread_should_stop(sqd)) {
08369246
XW
6710 int ret;
6711 bool cap_entries, sqt_spin, needs_sched;
c1edbf5f
JA
6712
6713 /*
69fb2131 6714 * Any changes to the sqd lists are synchronized through the
37d1e2e3 6715	 * thread parking. This synchronizes the thread against users;
69fb2131 6716	 * the users themselves are synchronized on the sqd->ctx_lock.
c1edbf5f 6717 */
37d1e2e3
JA
6718 if (io_sq_thread_should_park(sqd)) {
6719 io_sq_thread_parkme(sqd);
6720 continue;
65b2b213 6721 }
08369246 6722 if (unlikely(!list_empty(&sqd->ctx_new_list))) {
69fb2131 6723 io_sqd_init_new(sqd);
08369246
XW
6724 timeout = jiffies + sqd->sq_thread_idle;
6725 }
37d1e2e3
JA
6726 if (fatal_signal_pending(current))
6727 break;
08369246 6728 sqt_spin = false;
e95eee2d 6729 cap_entries = !list_is_singular(&sqd->ctx_list);
69fb2131 6730 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
08369246
XW
6731 ret = __io_sq_thread(ctx, cap_entries);
6732 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6733 sqt_spin = true;
69fb2131 6734 }
6c271ce2 6735
08369246 6736 if (sqt_spin || !time_after(jiffies, timeout)) {
c8d1ba58
JA
6737 io_run_task_work();
6738 cond_resched();
08369246
XW
6739 if (sqt_spin)
6740 timeout = jiffies + sqd->sq_thread_idle;
6741 continue;
6742 }
6743
08369246
XW
6744 needs_sched = true;
6745 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
6746 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6747 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6748 !list_empty_careful(&ctx->iopoll_list)) {
6749 needs_sched = false;
6750 break;
6751 }
6752 if (io_sqring_entries(ctx)) {
6753 needs_sched = false;
6754 break;
6755 }
6756 }
6757
37d1e2e3 6758 if (needs_sched && !io_sq_thread_should_park(sqd)) {
69fb2131
JA
6759 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6760 io_ring_set_wakeup_flag(ctx);
08369246 6761
69fb2131 6762 schedule();
69fb2131
JA
6763 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6764 io_ring_clear_wakeup_flag(ctx);
6c271ce2 6765 }
08369246
XW
6766
6767 finish_wait(&sqd->wait, &wait);
6768 timeout = jiffies + sqd->sq_thread_idle;
6c271ce2
JA
6769 }
6770
37d1e2e3
JA
6771 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6772 io_uring_cancel_sqpoll(ctx);
06058632 6773
37d1e2e3 6774 io_run_task_work();
28cea78a 6775
8629397e
JA
6776 if (io_sq_thread_should_park(sqd))
6777 io_sq_thread_parkme(sqd);
6778
37d1e2e3
JA
6779 /*
6780 * Clear thread under lock so that concurrent parks work correctly
6781 */
8629397e 6782 complete(&sqd->completion);
37d1e2e3
JA
6783 mutex_lock(&sqd->lock);
6784 sqd->thread = NULL;
5f3f26f9
JA
6785 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6786 ctx->sqo_exec = 1;
6787 io_ring_set_wakeup_flag(ctx);
6788 }
06058632 6789
37d1e2e3 6790 complete(&sqd->exited);
e54945ae 6791 mutex_unlock(&sqd->lock);
37d1e2e3 6792 do_exit(0);
6c271ce2
JA
6793}
6794
bda52162
JA
6795struct io_wait_queue {
6796 struct wait_queue_entry wq;
6797 struct io_ring_ctx *ctx;
6798 unsigned to_wait;
6799 unsigned nr_timeouts;
6800};
6801
6c503150 6802static inline bool io_should_wake(struct io_wait_queue *iowq)
bda52162
JA
6803{
6804 struct io_ring_ctx *ctx = iowq->ctx;
6805
6806 /*
d195a66e 6807 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
6808 * started waiting. For timeouts, we always want to return to userspace,
6809 * regardless of event count.
6810 */
6c503150 6811 return io_cqring_events(ctx) >= iowq->to_wait ||
bda52162
JA
6812 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6813}
6814
6815static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6816 int wake_flags, void *key)
6817{
6818 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6819 wq);
6820
6c503150
PB
6821 /*
6822 * Cannot safely flush overflowed CQEs from here, ensure we wake up
6823 * the task, and the next invocation will do it.
6824 */
6825 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
6826 return autoremove_wake_function(curr, mode, wake_flags, key);
6827 return -1;
bda52162
JA
6828}
6829
af9c1a44
JA
6830static int io_run_task_work_sig(void)
6831{
6832 if (io_run_task_work())
6833 return 1;
6834 if (!signal_pending(current))
6835 return 0;
792ee0f6
JA
6836 if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
6837 return -ERESTARTSYS;
af9c1a44
JA
6838 return -EINTR;
6839}
6840
eeb60b9a
PB
 6841/* when this returns > 0, the caller should retry */
6842static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
6843 struct io_wait_queue *iowq,
6844 signed long *timeout)
6845{
6846 int ret;
6847
6848 /* make sure we run task_work before checking for signals */
6849 ret = io_run_task_work_sig();
6850 if (ret || io_should_wake(iowq))
6851 return ret;
6852 /* let the caller flush overflows, retry */
6853 if (test_bit(0, &ctx->cq_check_overflow))
6854 return 1;
6855
6856 *timeout = schedule_timeout(*timeout);
6857 return !*timeout ? -ETIME : 1;
6858}
6859
2b188cc1
JA
6860/*
6861 * Wait until events become available, if we don't already have some. The
6862 * application must reap them itself, as they reside on the shared cq ring.
6863 */
6864static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
c73ebb68
HX
6865 const sigset_t __user *sig, size_t sigsz,
6866 struct __kernel_timespec __user *uts)
2b188cc1 6867{
bda52162
JA
6868 struct io_wait_queue iowq = {
6869 .wq = {
6870 .private = current,
6871 .func = io_wake_function,
6872 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6873 },
6874 .ctx = ctx,
6875 .to_wait = min_events,
6876 };
75b28aff 6877 struct io_rings *rings = ctx->rings;
c1d5a224
PB
6878 signed long timeout = MAX_SCHEDULE_TIMEOUT;
6879 int ret;
2b188cc1 6880
b41e9852 6881 do {
6c503150
PB
6882 io_cqring_overflow_flush(ctx, false, NULL, NULL);
6883 if (io_cqring_events(ctx) >= min_events)
b41e9852 6884 return 0;
4c6e277c 6885 if (!io_run_task_work())
b41e9852 6886 break;
b41e9852 6887 } while (1);
2b188cc1
JA
6888
6889 if (sig) {
9e75ad5d
AB
6890#ifdef CONFIG_COMPAT
6891 if (in_compat_syscall())
6892 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 6893 sigsz);
9e75ad5d
AB
6894 else
6895#endif
b772434b 6896 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 6897
2b188cc1
JA
6898 if (ret)
6899 return ret;
6900 }
6901
c73ebb68 6902 if (uts) {
c1d5a224
PB
6903 struct timespec64 ts;
6904
c73ebb68
HX
6905 if (get_timespec64(&ts, uts))
6906 return -EFAULT;
6907 timeout = timespec64_to_jiffies(&ts);
6908 }
6909
bda52162 6910 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 6911 trace_io_uring_cqring_wait(ctx, min_events);
bda52162 6912 do {
6c503150 6913 io_cqring_overflow_flush(ctx, false, NULL, NULL);
bda52162
JA
6914 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6915 TASK_INTERRUPTIBLE);
eeb60b9a
PB
6916 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
6917 finish_wait(&ctx->wait, &iowq.wq);
6918 } while (ret > 0);
bda52162 6919
b7db41c9 6920 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 6921
75b28aff 6922 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
6923}
6924
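/*
 * A minimal userspace sketch (assuming liburing is available): waiting for a
 * completion goes through io_uring_enter(2) with IORING_ENTER_GETEVENTS and,
 * for non-polled rings, ends up in io_cqring_wait() above. Depending on the
 * kernel, liburing implements the timeout either with an internal timeout SQE
 * or, when IORING_FEAT_EXT_ARG is available, by passing the timespec straight
 * through to the uts argument parsed here.
 */
#include <liburing.h>
#include <stdio.h>

static int wait_one_completion(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int ret;

	/* block for up to one second for a single CQE */
	ret = io_uring_wait_cqe_timeout(ring, &cqe, &ts);
	if (ret < 0)
		return ret;			/* e.g. -ETIME or -EINTR */

	printf("cqe res=%d\n", cqe->res);
	io_uring_cqe_seen(ring, cqe);		/* advance the CQ head */
	return 0;
}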
6b06314c
JA
6925static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6926{
6927#if defined(CONFIG_UNIX)
6928 if (ctx->ring_sock) {
6929 struct sock *sock = ctx->ring_sock->sk;
6930 struct sk_buff *skb;
6931
6932 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6933 kfree_skb(skb);
6934 }
6935#else
6936 int i;
6937
65e19f54
JA
6938 for (i = 0; i < ctx->nr_user_files; i++) {
6939 struct file *file;
6940
6941 file = io_file_from_index(ctx, i);
6942 if (file)
6943 fput(file);
6944 }
6b06314c
JA
6945#endif
6946}
6947
00835dce 6948static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
05f3fb3c 6949{
269bbe5f 6950 struct fixed_rsrc_data *data;
05f3fb3c 6951
269bbe5f 6952 data = container_of(ref, struct fixed_rsrc_data, refs);
05f3fb3c
JA
6953 complete(&data->done);
6954}
6955
2a63b2d9 6956static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
1642b445 6957{
2a63b2d9 6958 spin_lock_bh(&ctx->rsrc_ref_lock);
1642b445
PB
6959}
6960
2a63b2d9 6961static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
6b06314c 6962{
2a63b2d9
BM
6963 spin_unlock_bh(&ctx->rsrc_ref_lock);
6964}
65e19f54 6965
d67d2263
BM
6966static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
6967 struct fixed_rsrc_data *rsrc_data,
269bbe5f 6968 struct fixed_rsrc_ref_node *ref_node)
1642b445 6969{
2a63b2d9 6970 io_rsrc_ref_lock(ctx);
269bbe5f 6971 rsrc_data->node = ref_node;
d67d2263 6972 list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
2a63b2d9 6973 io_rsrc_ref_unlock(ctx);
269bbe5f 6974 percpu_ref_get(&rsrc_data->refs);
1642b445
PB
6975}
6976
8bad28d8 6977static void io_sqe_rsrc_kill_node(struct io_ring_ctx *ctx, struct fixed_rsrc_data *data)
6b06314c 6978{
8bad28d8 6979 struct fixed_rsrc_ref_node *ref_node = NULL;
6b06314c 6980
2a63b2d9 6981 io_rsrc_ref_lock(ctx);
1e5d770b 6982 ref_node = data->node;
e6cb007c 6983 data->node = NULL;
2a63b2d9 6984 io_rsrc_ref_unlock(ctx);
05589553
XW
6985 if (ref_node)
6986 percpu_ref_kill(&ref_node->refs);
8bad28d8
HX
6987}
6988
6989static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
6990 struct io_ring_ctx *ctx,
f2303b1f
PB
6991 void (*rsrc_put)(struct io_ring_ctx *ctx,
6992 struct io_rsrc_put *prsrc))
8bad28d8 6993{
f2303b1f 6994 struct fixed_rsrc_ref_node *backup_node;
8bad28d8 6995 int ret;
05589553 6996
8bad28d8
HX
6997 if (data->quiesce)
6998 return -ENXIO;
05589553 6999
8bad28d8 7000 data->quiesce = true;
1ffc5422 7001 do {
f2303b1f
PB
7002 ret = -ENOMEM;
7003 backup_node = alloc_fixed_rsrc_ref_node(ctx);
7004 if (!backup_node)
7005 break;
7006 backup_node->rsrc_data = data;
7007 backup_node->rsrc_put = rsrc_put;
7008
8bad28d8
HX
7009 io_sqe_rsrc_kill_node(ctx, data);
7010 percpu_ref_kill(&data->refs);
7011 flush_delayed_work(&ctx->rsrc_put_work);
7012
1ffc5422
PB
7013 ret = wait_for_completion_interruptible(&data->done);
7014 if (!ret)
7015 break;
8bad28d8 7016
cb5e1b81 7017 percpu_ref_resurrect(&data->refs);
8bad28d8
HX
7018 io_sqe_rsrc_set_node(ctx, data, backup_node);
7019 backup_node = NULL;
cb5e1b81 7020 reinit_completion(&data->done);
8bad28d8 7021 mutex_unlock(&ctx->uring_lock);
1ffc5422 7022 ret = io_run_task_work_sig();
8bad28d8 7023 mutex_lock(&ctx->uring_lock);
f2303b1f 7024 } while (ret >= 0);
8bad28d8 7025 data->quiesce = false;
05f3fb3c 7026
8bad28d8
HX
7027 if (backup_node)
7028 destroy_fixed_rsrc_ref_node(backup_node);
7029 return ret;
d7954b2b
BM
7030}
7031
1ad555c6
BM
7032static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
7033{
7034 struct fixed_rsrc_data *data;
7035
7036 data = kzalloc(sizeof(*data), GFP_KERNEL);
7037 if (!data)
7038 return NULL;
7039
00835dce 7040 if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
1ad555c6
BM
7041 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
7042 kfree(data);
7043 return NULL;
7044 }
7045 data->ctx = ctx;
7046 init_completion(&data->done);
7047 return data;
7048}
7049
7050static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
7051{
7052 percpu_ref_exit(&data->refs);
7053 kfree(data->table);
7054 kfree(data);
7055}
7056
d7954b2b
BM
7057static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7058{
7059 struct fixed_rsrc_data *data = ctx->file_data;
d7954b2b
BM
7060 unsigned nr_tables, i;
7061 int ret;
7062
8bad28d8
HX
7063 /*
 7064 * The percpu_ref_is_dying() check stops a parallel files unregister,
 7065 * since we may drop the uring lock later in this function to
 7066 * run task work.
7067 */
7068 if (!data || percpu_ref_is_dying(&data->refs))
d7954b2b 7069 return -ENXIO;
f2303b1f 7070 ret = io_rsrc_ref_quiesce(data, ctx, io_ring_file_put);
d7954b2b
BM
7071 if (ret)
7072 return ret;
7073
6b06314c 7074 __io_sqe_files_unregister(ctx);
65e19f54
JA
7075 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7076 for (i = 0; i < nr_tables; i++)
05f3fb3c 7077 kfree(data->table[i].files);
1ad555c6 7078 free_fixed_rsrc_data(data);
05f3fb3c 7079 ctx->file_data = NULL;
6b06314c
JA
7080 ctx->nr_user_files = 0;
7081 return 0;
7082}
7083
37d1e2e3
JA
7084static void io_sq_thread_unpark(struct io_sq_data *sqd)
7085 __releases(&sqd->lock)
7086{
7087 if (!sqd->thread)
7088 return;
7089 if (sqd->thread == current)
7090 return;
7091 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
7092 wake_up_state(sqd->thread, TASK_PARKED);
7093 mutex_unlock(&sqd->lock);
7094}
7095
7096static bool io_sq_thread_park(struct io_sq_data *sqd)
7097 __acquires(&sqd->lock)
7098{
7099 if (sqd->thread == current)
7100 return true;
7101 mutex_lock(&sqd->lock);
7102 if (!sqd->thread) {
7103 mutex_unlock(&sqd->lock);
7104 return false;
7105 }
7106 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
7107 wake_up_process(sqd->thread);
7108 wait_for_completion(&sqd->completion);
7109 return true;
7110}
7111
7112static void io_sq_thread_stop(struct io_sq_data *sqd)
7113{
e54945ae 7114 if (test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state))
37d1e2e3 7115 return;
e54945ae
JA
7116 mutex_lock(&sqd->lock);
7117 if (sqd->thread) {
7118 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
7119 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state));
7120 wake_up_process(sqd->thread);
7121 mutex_unlock(&sqd->lock);
7122 wait_for_completion(&sqd->exited);
7123 WARN_ON_ONCE(sqd->thread);
7124 } else {
7125 mutex_unlock(&sqd->lock);
7126 }
37d1e2e3
JA
7127}
7128
534ca6d6 7129static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 7130{
534ca6d6 7131 if (refcount_dec_and_test(&sqd->refs)) {
37d1e2e3
JA
7132 io_sq_thread_stop(sqd);
7133 kfree(sqd);
7134 }
7135}
7136
7137static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7138{
7139 struct io_sq_data *sqd = ctx->sq_data;
7140
7141 if (sqd) {
eb85890b 7142 complete(&sqd->startup);
534ca6d6 7143 if (sqd->thread) {
37d1e2e3
JA
7144 wait_for_completion(&ctx->sq_thread_comp);
7145 io_sq_thread_park(sqd);
534ca6d6
JA
7146 }
7147
37d1e2e3
JA
7148 mutex_lock(&sqd->ctx_lock);
7149 list_del(&ctx->sqd_list);
7150 io_sqd_update_thread_idle(sqd);
7151 mutex_unlock(&sqd->ctx_lock);
7152
7153 if (sqd->thread)
7154 io_sq_thread_unpark(sqd);
7155
7156 io_put_sq_data(sqd);
7157 ctx->sq_data = NULL;
534ca6d6
JA
7158 }
7159}
7160
aa06165d
JA
7161static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7162{
7163 struct io_ring_ctx *ctx_attach;
7164 struct io_sq_data *sqd;
7165 struct fd f;
7166
7167 f = fdget(p->wq_fd);
7168 if (!f.file)
7169 return ERR_PTR(-ENXIO);
7170 if (f.file->f_op != &io_uring_fops) {
7171 fdput(f);
7172 return ERR_PTR(-EINVAL);
7173 }
7174
7175 ctx_attach = f.file->private_data;
7176 sqd = ctx_attach->sq_data;
7177 if (!sqd) {
7178 fdput(f);
7179 return ERR_PTR(-EINVAL);
7180 }
7181
7182 refcount_inc(&sqd->refs);
7183 fdput(f);
7184 return sqd;
7185}
7186
534ca6d6
JA
7187static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7188{
7189 struct io_sq_data *sqd;
7190
aa06165d
JA
7191 if (p->flags & IORING_SETUP_ATTACH_WQ)
7192 return io_attach_sq_data(p);
7193
534ca6d6
JA
7194 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7195 if (!sqd)
7196 return ERR_PTR(-ENOMEM);
7197
7198 refcount_set(&sqd->refs, 1);
69fb2131
JA
7199 INIT_LIST_HEAD(&sqd->ctx_list);
7200 INIT_LIST_HEAD(&sqd->ctx_new_list);
7201 mutex_init(&sqd->ctx_lock);
7202 mutex_init(&sqd->lock);
534ca6d6 7203 init_waitqueue_head(&sqd->wait);
37d1e2e3
JA
7204 init_completion(&sqd->startup);
7205 init_completion(&sqd->completion);
7206 init_completion(&sqd->exited);
534ca6d6
JA
7207 return sqd;
7208}
7209
6b06314c 7210#if defined(CONFIG_UNIX)
6b06314c
JA
7211/*
7212 * Ensure the UNIX gc is aware of our file set, so we are certain that
7213 * the io_uring can be safely unregistered on process exit, even if we have
7214 * loops in the file referencing.
7215 */
7216static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7217{
7218 struct sock *sk = ctx->ring_sock->sk;
7219 struct scm_fp_list *fpl;
7220 struct sk_buff *skb;
08a45173 7221 int i, nr_files;
6b06314c 7222
6b06314c
JA
7223 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7224 if (!fpl)
7225 return -ENOMEM;
7226
7227 skb = alloc_skb(0, GFP_KERNEL);
7228 if (!skb) {
7229 kfree(fpl);
7230 return -ENOMEM;
7231 }
7232
7233 skb->sk = sk;
6b06314c 7234
08a45173 7235 nr_files = 0;
62e398be 7236 fpl->user = get_uid(current_user());
6b06314c 7237 for (i = 0; i < nr; i++) {
65e19f54
JA
7238 struct file *file = io_file_from_index(ctx, i + offset);
7239
7240 if (!file)
08a45173 7241 continue;
65e19f54 7242 fpl->fp[nr_files] = get_file(file);
08a45173
JA
7243 unix_inflight(fpl->user, fpl->fp[nr_files]);
7244 nr_files++;
6b06314c
JA
7245 }
7246
08a45173
JA
7247 if (nr_files) {
7248 fpl->max = SCM_MAX_FD;
7249 fpl->count = nr_files;
7250 UNIXCB(skb).fp = fpl;
05f3fb3c 7251 skb->destructor = unix_destruct_scm;
08a45173
JA
7252 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7253 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 7254
08a45173
JA
7255 for (i = 0; i < nr_files; i++)
7256 fput(fpl->fp[i]);
7257 } else {
7258 kfree_skb(skb);
7259 kfree(fpl);
7260 }
6b06314c
JA
7261
7262 return 0;
7263}
7264
7265/*
7266 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7267 * causes regular reference counting to break down. We rely on the UNIX
7268 * garbage collection to take care of this problem for us.
7269 */
7270static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7271{
7272 unsigned left, total;
7273 int ret = 0;
7274
7275 total = 0;
7276 left = ctx->nr_user_files;
7277 while (left) {
7278 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
7279
7280 ret = __io_sqe_files_scm(ctx, this_files, total);
7281 if (ret)
7282 break;
7283 left -= this_files;
7284 total += this_files;
7285 }
7286
7287 if (!ret)
7288 return 0;
7289
7290 while (total < ctx->nr_user_files) {
65e19f54
JA
7291 struct file *file = io_file_from_index(ctx, total);
7292
7293 if (file)
7294 fput(file);
6b06314c
JA
7295 total++;
7296 }
7297
7298 return ret;
7299}
7300#else
7301static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7302{
7303 return 0;
7304}
7305#endif
7306
269bbe5f 7307static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
5398ae69 7308 unsigned nr_tables, unsigned nr_files)
65e19f54
JA
7309{
7310 int i;
7311
7312 for (i = 0; i < nr_tables; i++) {
269bbe5f 7313 struct fixed_rsrc_table *table = &file_data->table[i];
65e19f54
JA
7314 unsigned this_files;
7315
7316 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7317 table->files = kcalloc(this_files, sizeof(struct file *),
7318 GFP_KERNEL);
7319 if (!table->files)
7320 break;
7321 nr_files -= this_files;
7322 }
7323
7324 if (i == nr_tables)
7325 return 0;
7326
7327 for (i = 0; i < nr_tables; i++) {
269bbe5f 7328 struct fixed_rsrc_table *table = &file_data->table[i];
65e19f54
JA
7329 kfree(table->files);
7330 }
7331 return 1;
7332}
7333
50238531 7334static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
05f3fb3c 7335{
50238531 7336 struct file *file = prsrc->file;
05f3fb3c
JA
7337#if defined(CONFIG_UNIX)
7338 struct sock *sock = ctx->ring_sock->sk;
7339 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7340 struct sk_buff *skb;
7341 int i;
7342
7343 __skb_queue_head_init(&list);
7344
7345 /*
7346 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7347 * remove this entry and rearrange the file array.
7348 */
7349 skb = skb_dequeue(head);
7350 while (skb) {
7351 struct scm_fp_list *fp;
7352
7353 fp = UNIXCB(skb).fp;
7354 for (i = 0; i < fp->count; i++) {
7355 int left;
7356
7357 if (fp->fp[i] != file)
7358 continue;
7359
7360 unix_notinflight(fp->user, fp->fp[i]);
7361 left = fp->count - 1 - i;
7362 if (left) {
7363 memmove(&fp->fp[i], &fp->fp[i + 1],
7364 left * sizeof(struct file *));
7365 }
7366 fp->count--;
7367 if (!fp->count) {
7368 kfree_skb(skb);
7369 skb = NULL;
7370 } else {
7371 __skb_queue_tail(&list, skb);
7372 }
7373 fput(file);
7374 file = NULL;
7375 break;
7376 }
7377
7378 if (!file)
7379 break;
7380
7381 __skb_queue_tail(&list, skb);
7382
7383 skb = skb_dequeue(head);
7384 }
7385
7386 if (skb_peek(&list)) {
7387 spin_lock_irq(&head->lock);
7388 while ((skb = __skb_dequeue(&list)) != NULL)
7389 __skb_queue_tail(head, skb);
7390 spin_unlock_irq(&head->lock);
7391 }
7392#else
7393 fput(file);
7394#endif
7395}
7396
269bbe5f 7397static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
65e19f54 7398{
269bbe5f
BM
7399 struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
7400 struct io_ring_ctx *ctx = rsrc_data->ctx;
7401 struct io_rsrc_put *prsrc, *tmp;
05589553 7402
269bbe5f
BM
7403 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7404 list_del(&prsrc->list);
50238531 7405 ref_node->rsrc_put(ctx, prsrc);
269bbe5f 7406 kfree(prsrc);
65e19f54 7407 }
05589553 7408
05589553
XW
7409 percpu_ref_exit(&ref_node->refs);
7410 kfree(ref_node);
269bbe5f 7411 percpu_ref_put(&rsrc_data->refs);
2faf852d 7412}
65e19f54 7413
269bbe5f 7414static void io_rsrc_put_work(struct work_struct *work)
4a38aed2
JA
7415{
7416 struct io_ring_ctx *ctx;
7417 struct llist_node *node;
7418
269bbe5f
BM
7419 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7420 node = llist_del_all(&ctx->rsrc_put_llist);
4a38aed2
JA
7421
7422 while (node) {
269bbe5f 7423 struct fixed_rsrc_ref_node *ref_node;
4a38aed2
JA
7424 struct llist_node *next = node->next;
7425
269bbe5f
BM
7426 ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
7427 __io_rsrc_put_work(ref_node);
4a38aed2
JA
7428 node = next;
7429 }
7430}
7431
ea64ec02
PB
7432static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
7433 unsigned i)
2faf852d 7434{
ea64ec02
PB
7435 struct fixed_rsrc_table *table;
7436
7437 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7438 return &table->files[i & IORING_FILE_TABLE_MASK];
7439}
7440
00835dce 7441static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
2faf852d 7442{
269bbe5f
BM
7443 struct fixed_rsrc_ref_node *ref_node;
7444 struct fixed_rsrc_data *data;
4a38aed2 7445 struct io_ring_ctx *ctx;
e297822b 7446 bool first_add = false;
4a38aed2 7447 int delay = HZ;
65e19f54 7448
269bbe5f
BM
7449 ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
7450 data = ref_node->rsrc_data;
e297822b
PB
7451 ctx = data->ctx;
7452
2a63b2d9 7453 io_rsrc_ref_lock(ctx);
e297822b
PB
7454 ref_node->done = true;
7455
d67d2263
BM
7456 while (!list_empty(&ctx->rsrc_ref_list)) {
7457 ref_node = list_first_entry(&ctx->rsrc_ref_list,
269bbe5f 7458 struct fixed_rsrc_ref_node, node);
e297822b
PB
7459 /* recycle ref nodes in order */
7460 if (!ref_node->done)
7461 break;
7462 list_del(&ref_node->node);
269bbe5f 7463 first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
e297822b 7464 }
2a63b2d9 7465 io_rsrc_ref_unlock(ctx);
05589553 7466
e297822b 7467 if (percpu_ref_is_dying(&data->refs))
4a38aed2 7468 delay = 0;
05589553 7469
4a38aed2 7470 if (!delay)
269bbe5f 7471 mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
4a38aed2 7472 else if (first_add)
269bbe5f 7473 queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
05f3fb3c 7474}
65e19f54 7475
6802535d 7476static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
05589553 7477 struct io_ring_ctx *ctx)
05f3fb3c 7478{
269bbe5f 7479 struct fixed_rsrc_ref_node *ref_node;
05f3fb3c 7480
05589553
XW
7481 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7482 if (!ref_node)
3e2224c5 7483 return NULL;
05f3fb3c 7484
00835dce 7485 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
05589553
XW
7486 0, GFP_KERNEL)) {
7487 kfree(ref_node);
3e2224c5 7488 return NULL;
05589553
XW
7489 }
7490 INIT_LIST_HEAD(&ref_node->node);
269bbe5f 7491 INIT_LIST_HEAD(&ref_node->rsrc_list);
e297822b 7492 ref_node->done = false;
05589553 7493 return ref_node;
05589553
XW
7494}
7495
bc9744cd
PB
7496static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
7497 struct fixed_rsrc_ref_node *ref_node)
6802535d 7498{
269bbe5f 7499 ref_node->rsrc_data = ctx->file_data;
50238531 7500 ref_node->rsrc_put = io_ring_file_put;
05589553
XW
7501}
7502
269bbe5f 7503static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
05589553
XW
7504{
7505 percpu_ref_exit(&ref_node->refs);
7506 kfree(ref_node);
65e19f54
JA
7507}
7508
ea64ec02 7509
6b06314c
JA
7510static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7511 unsigned nr_args)
7512{
7513 __s32 __user *fds = (__s32 __user *) arg;
600cf3f8 7514 unsigned nr_tables, i;
05f3fb3c 7515 struct file *file;
600cf3f8 7516 int fd, ret = -ENOMEM;
269bbe5f
BM
7517 struct fixed_rsrc_ref_node *ref_node;
7518 struct fixed_rsrc_data *file_data;
6b06314c 7519
05f3fb3c 7520 if (ctx->file_data)
6b06314c
JA
7521 return -EBUSY;
7522 if (!nr_args)
7523 return -EINVAL;
7524 if (nr_args > IORING_MAX_FIXED_FILES)
7525 return -EMFILE;
7526
1ad555c6 7527 file_data = alloc_fixed_rsrc_data(ctx);
5398ae69 7528 if (!file_data)
05f3fb3c 7529 return -ENOMEM;
13770a71 7530 ctx->file_data = file_data;
05f3fb3c 7531
65e19f54 7532 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
035fbafc 7533 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
5398ae69 7534 GFP_KERNEL);
600cf3f8
PB
7535 if (!file_data->table)
7536 goto out_free;
05f3fb3c 7537
600cf3f8 7538 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
1ad555c6 7539 goto out_free;
65e19f54 7540
08a45173 7541 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
600cf3f8
PB
7542 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7543 ret = -EFAULT;
7544 goto out_fput;
7545 }
08a45173 7546 /* allow sparse sets */
600cf3f8 7547 if (fd == -1)
08a45173 7548 continue;
6b06314c 7549
05f3fb3c 7550 file = fget(fd);
6b06314c 7551 ret = -EBADF;
05f3fb3c 7552 if (!file)
600cf3f8 7553 goto out_fput;
05f3fb3c 7554
6b06314c
JA
7555 /*
7556 * Don't allow io_uring instances to be registered. If UNIX
7557 * isn't enabled, then this causes a reference cycle and this
7558 * instance can never get freed. If UNIX is enabled we'll
7559 * handle it just fine, but there's still no point in allowing
7560 * a ring fd as it doesn't support regular read/write anyway.
7561 */
05f3fb3c
JA
7562 if (file->f_op == &io_uring_fops) {
7563 fput(file);
600cf3f8 7564 goto out_fput;
6b06314c 7565 }
ea64ec02 7566 *io_fixed_file_slot(file_data, i) = file;
6b06314c
JA
7567 }
7568
6b06314c 7569 ret = io_sqe_files_scm(ctx);
05589553 7570 if (ret) {
6b06314c 7571 io_sqe_files_unregister(ctx);
05589553
XW
7572 return ret;
7573 }
6b06314c 7574
bc9744cd 7575 ref_node = alloc_fixed_rsrc_ref_node(ctx);
3e2224c5 7576 if (!ref_node) {
05589553 7577 io_sqe_files_unregister(ctx);
3e2224c5 7578 return -ENOMEM;
05589553 7579 }
bc9744cd 7580 init_fixed_file_ref_node(ctx, ref_node);
05589553 7581
d67d2263 7582 io_sqe_rsrc_set_node(ctx, file_data, ref_node);
6b06314c 7583 return ret;
600cf3f8
PB
7584out_fput:
7585 for (i = 0; i < ctx->nr_user_files; i++) {
7586 file = io_file_from_index(ctx, i);
7587 if (file)
7588 fput(file);
7589 }
7590 for (i = 0; i < nr_tables; i++)
7591 kfree(file_data->table[i].files);
7592 ctx->nr_user_files = 0;
600cf3f8 7593out_free:
1ad555c6 7594 free_fixed_rsrc_data(ctx->file_data);
55cbc256 7595 ctx->file_data = NULL;
6b06314c
JA
7596 return ret;
7597}
7598
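/*
 * A minimal sketch of the userspace side (assuming liburing): registering a
 * fixed file set is what io_sqe_files_register() above installs. Entries of
 * -1 create sparse slots, matching the "allow sparse sets" check in the loop.
 * The file paths below are purely illustrative.
 */
#include <liburing.h>
#include <fcntl.h>

static int register_file_set(struct io_uring *ring)
{
	int fds[4];

	fds[0] = open("/tmp/a", O_RDONLY);
	fds[1] = open("/tmp/b", O_RDONLY);
	fds[2] = -1;	/* sparse slot, can be filled later via files_update */
	fds[3] = -1;
	if (fds[0] < 0 || fds[1] < 0)
		return -1;

	/* SQEs can now use IOSQE_FIXED_FILE with fd set to the slot index */
	return io_uring_register_files(ring, fds, 4);
}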
c3a31e60
JA
7599static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7600 int index)
7601{
7602#if defined(CONFIG_UNIX)
7603 struct sock *sock = ctx->ring_sock->sk;
7604 struct sk_buff_head *head = &sock->sk_receive_queue;
7605 struct sk_buff *skb;
7606
7607 /*
7608 * See if we can merge this file into an existing skb SCM_RIGHTS
7609 * file set. If there's no room, fall back to allocating a new skb
7610 * and filling it in.
7611 */
7612 spin_lock_irq(&head->lock);
7613 skb = skb_peek(head);
7614 if (skb) {
7615 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7616
7617 if (fpl->count < SCM_MAX_FD) {
7618 __skb_unlink(skb, head);
7619 spin_unlock_irq(&head->lock);
7620 fpl->fp[fpl->count] = get_file(file);
7621 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7622 fpl->count++;
7623 spin_lock_irq(&head->lock);
7624 __skb_queue_head(head, skb);
7625 } else {
7626 skb = NULL;
7627 }
7628 }
7629 spin_unlock_irq(&head->lock);
7630
7631 if (skb) {
7632 fput(file);
7633 return 0;
7634 }
7635
7636 return __io_sqe_files_scm(ctx, 1, index);
7637#else
7638 return 0;
7639#endif
7640}
7641
50238531 7642static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
05f3fb3c 7643{
269bbe5f
BM
7644 struct io_rsrc_put *prsrc;
7645 struct fixed_rsrc_ref_node *ref_node = data->node;
05f3fb3c 7646
269bbe5f
BM
7647 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7648 if (!prsrc)
a5318d3c 7649 return -ENOMEM;
05f3fb3c 7650
50238531 7651 prsrc->rsrc = rsrc;
269bbe5f 7652 list_add(&prsrc->list, &ref_node->rsrc_list);
05589553 7653
a5318d3c 7654 return 0;
05f3fb3c
JA
7655}
7656
269bbe5f
BM
7657static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
7658 struct file *file)
7659{
50238531 7660 return io_queue_rsrc_removal(data, (void *)file);
269bbe5f
BM
7661}
7662
05f3fb3c 7663static int __io_sqe_files_update(struct io_ring_ctx *ctx,
269bbe5f 7664 struct io_uring_rsrc_update *up,
05f3fb3c
JA
7665 unsigned nr_args)
7666{
269bbe5f
BM
7667 struct fixed_rsrc_data *data = ctx->file_data;
7668 struct fixed_rsrc_ref_node *ref_node;
ea64ec02 7669 struct file *file, **file_slot;
c3a31e60
JA
7670 __s32 __user *fds;
7671 int fd, i, err;
7672 __u32 done;
05589553 7673 bool needs_switch = false;
c3a31e60 7674
05f3fb3c 7675 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
7676 return -EOVERFLOW;
7677 if (done > ctx->nr_user_files)
7678 return -EINVAL;
7679
bc9744cd 7680 ref_node = alloc_fixed_rsrc_ref_node(ctx);
3e2224c5
MWO
7681 if (!ref_node)
7682 return -ENOMEM;
bc9744cd 7683 init_fixed_file_ref_node(ctx, ref_node);
05589553 7684
269bbe5f 7685 fds = u64_to_user_ptr(up->data);
67973b93 7686 for (done = 0; done < nr_args; done++) {
c3a31e60
JA
7687 err = 0;
7688 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7689 err = -EFAULT;
7690 break;
7691 }
4e0377a1 7692 if (fd == IORING_REGISTER_FILES_SKIP)
7693 continue;
7694
67973b93 7695 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
ea64ec02
PB
7696 file_slot = io_fixed_file_slot(ctx->file_data, i);
7697
7698 if (*file_slot) {
7699 err = io_queue_file_removal(data, *file_slot);
a5318d3c
HD
7700 if (err)
7701 break;
ea64ec02 7702 *file_slot = NULL;
05589553 7703 needs_switch = true;
c3a31e60
JA
7704 }
7705 if (fd != -1) {
c3a31e60
JA
7706 file = fget(fd);
7707 if (!file) {
7708 err = -EBADF;
7709 break;
7710 }
7711 /*
7712 * Don't allow io_uring instances to be registered. If
7713 * UNIX isn't enabled, then this causes a reference
7714 * cycle and this instance can never get freed. If UNIX
7715 * is enabled we'll handle it just fine, but there's
7716 * still no point in allowing a ring fd as it doesn't
7717 * support regular read/write anyway.
7718 */
7719 if (file->f_op == &io_uring_fops) {
7720 fput(file);
7721 err = -EBADF;
7722 break;
7723 }
e68a3ff8 7724 *file_slot = file;
c3a31e60 7725 err = io_sqe_file_register(ctx, file, i);
f3bd9dae 7726 if (err) {
e68a3ff8 7727 *file_slot = NULL;
f3bd9dae 7728 fput(file);
c3a31e60 7729 break;
f3bd9dae 7730 }
c3a31e60 7731 }
05f3fb3c
JA
7732 }
7733
05589553 7734 if (needs_switch) {
b2e96852 7735 percpu_ref_kill(&data->node->refs);
d67d2263 7736 io_sqe_rsrc_set_node(ctx, data, ref_node);
05589553 7737 } else
269bbe5f 7738 destroy_fixed_rsrc_ref_node(ref_node);
c3a31e60
JA
7739
7740 return done ? done : err;
7741}
05589553 7742
05f3fb3c
JA
7743static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7744 unsigned nr_args)
7745{
269bbe5f 7746 struct io_uring_rsrc_update up;
05f3fb3c
JA
7747
7748 if (!ctx->file_data)
7749 return -ENXIO;
7750 if (!nr_args)
7751 return -EINVAL;
7752 if (copy_from_user(&up, arg, sizeof(up)))
7753 return -EFAULT;
7754 if (up.resv)
7755 return -EINVAL;
7756
7757 return __io_sqe_files_update(ctx, &up, nr_args);
7758}
c3a31e60 7759
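/*
 * A hedged userspace sketch (assuming liburing): updating registered file
 * slots reaches __io_sqe_files_update() above. An fd of -1 clears a slot,
 * and IORING_REGISTER_FILES_SKIP leaves a slot untouched.
 */
#include <liburing.h>

static int update_file_slots(struct io_uring *ring, int new_fd)
{
	int fds[3] = { IORING_REGISTER_FILES_SKIP, new_fd, -1 };

	/* update three consecutive slots starting at index 0 */
	return io_uring_register_files_update(ring, 0, fds, 3);
}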
5280f7e5 7760static struct io_wq_work *io_free_work(struct io_wq_work *work)
7d723065
JA
7761{
7762 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7763
5280f7e5
PB
7764 req = io_put_req_find_next(req);
7765 return req ? &req->work : NULL;
7d723065
JA
7766}
7767
5aa75ed5 7768static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx)
24369c2e 7769{
e941894e 7770 struct io_wq_hash *hash;
24369c2e 7771 struct io_wq_data data;
24369c2e 7772 unsigned int concurrency;
24369c2e 7773
e941894e
JA
7774 hash = ctx->hash_map;
7775 if (!hash) {
7776 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
7777 if (!hash)
7778 return ERR_PTR(-ENOMEM);
7779 refcount_set(&hash->refs, 1);
7780 init_waitqueue_head(&hash->wait);
7781 ctx->hash_map = hash;
24369c2e
PB
7782 }
7783
e941894e 7784 data.hash = hash;
e9fd9396 7785 data.free_work = io_free_work;
f5fa38c5 7786 data.do_work = io_wq_submit_work;
24369c2e 7787
d25e3a3d
JA
7788 /* Do QD, or 4 * CPUS, whatever is smallest */
7789 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
24369c2e 7790
5aa75ed5 7791 return io_wq_create(concurrency, &data);
24369c2e
PB
7792}
7793
5aa75ed5
JA
7794static int io_uring_alloc_task_context(struct task_struct *task,
7795 struct io_ring_ctx *ctx)
0f212204
JA
7796{
7797 struct io_uring_task *tctx;
d8a6df10 7798 int ret;
0f212204
JA
7799
7800 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7801 if (unlikely(!tctx))
7802 return -ENOMEM;
7803
d8a6df10
JA
7804 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7805 if (unlikely(ret)) {
7806 kfree(tctx);
7807 return ret;
7808 }
7809
5aa75ed5
JA
7810 tctx->io_wq = io_init_wq_offload(ctx);
7811 if (IS_ERR(tctx->io_wq)) {
7812 ret = PTR_ERR(tctx->io_wq);
7813 percpu_counter_destroy(&tctx->inflight);
7814 kfree(tctx);
7815 return ret;
7816 }
7817
0f212204
JA
7818 xa_init(&tctx->xa);
7819 init_waitqueue_head(&tctx->wait);
7820 tctx->last = NULL;
fdaf083c
JA
7821 atomic_set(&tctx->in_idle, 0);
7822 tctx->sqpoll = false;
0f212204 7823 task->io_uring = tctx;
7cbf1722
JA
7824 spin_lock_init(&tctx->task_lock);
7825 INIT_WQ_LIST(&tctx->task_list);
7826 tctx->task_state = 0;
7827 init_task_work(&tctx->task_work, tctx_task_work);
0f212204
JA
7828 return 0;
7829}
7830
7831void __io_uring_free(struct task_struct *tsk)
7832{
7833 struct io_uring_task *tctx = tsk->io_uring;
7834
7835 WARN_ON_ONCE(!xa_empty(&tctx->xa));
ef8eaa4e
PB
7836 WARN_ON_ONCE(tctx->io_wq);
7837
d8a6df10 7838 percpu_counter_destroy(&tctx->inflight);
0f212204
JA
7839 kfree(tctx);
7840 tsk->io_uring = NULL;
7841}
7842
5f3f26f9
JA
7843static int io_sq_thread_fork(struct io_sq_data *sqd, struct io_ring_ctx *ctx)
7844{
7845 int ret;
7846
7847 clear_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
7848 reinit_completion(&sqd->completion);
7849 ctx->sqo_dead = ctx->sqo_exec = 0;
7850 sqd->task_pid = current->pid;
7851 current->flags |= PF_IO_WORKER;
7852 ret = io_wq_fork_thread(io_sq_thread, sqd);
7853 current->flags &= ~PF_IO_WORKER;
7854 if (ret < 0) {
7855 sqd->thread = NULL;
7856 return ret;
7857 }
7858 wait_for_completion(&sqd->completion);
7859 return io_uring_alloc_task_context(sqd->thread, ctx);
7860}
7861
7e84e1c7
SG
7862static int io_sq_offload_create(struct io_ring_ctx *ctx,
7863 struct io_uring_params *p)
2b188cc1
JA
7864{
7865 int ret;
7866
d25e3a3d
JA
7867 /* Retain compatibility with failing for an invalid attach attempt */
7868 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
7869 IORING_SETUP_ATTACH_WQ) {
7870 struct fd f;
7871
7872 f = fdget(p->wq_fd);
7873 if (!f.file)
7874 return -ENXIO;
7875 if (f.file->f_op != &io_uring_fops) {
7876 fdput(f);
7877 return -EINVAL;
7878 }
7879 fdput(f);
7880 }
6c271ce2 7881 if (ctx->flags & IORING_SETUP_SQPOLL) {
534ca6d6
JA
7882 struct io_sq_data *sqd;
7883
3ec482d1 7884 ret = -EPERM;
ce59fc69 7885 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
3ec482d1
JA
7886 goto err;
7887
534ca6d6
JA
7888 sqd = io_get_sq_data(p);
7889 if (IS_ERR(sqd)) {
7890 ret = PTR_ERR(sqd);
7891 goto err;
7892 }
69fb2131 7893
534ca6d6 7894 ctx->sq_data = sqd;
69fb2131
JA
7895 io_sq_thread_park(sqd);
7896 mutex_lock(&sqd->ctx_lock);
7897 list_add(&ctx->sqd_list, &sqd->ctx_new_list);
7898 mutex_unlock(&sqd->ctx_lock);
7899 io_sq_thread_unpark(sqd);
534ca6d6 7900
917257da
JA
7901 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7902 if (!ctx->sq_thread_idle)
7903 ctx->sq_thread_idle = HZ;
7904
aa06165d 7905 if (sqd->thread)
5aa75ed5 7906 return 0;
aa06165d 7907
6c271ce2 7908 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 7909 int cpu = p->sq_thread_cpu;
6c271ce2 7910
917257da 7911 ret = -EINVAL;
44a9bd18
JA
7912 if (cpu >= nr_cpu_ids)
7913 goto err;
7889f44d 7914 if (!cpu_online(cpu))
917257da
JA
7915 goto err;
7916
37d1e2e3 7917 sqd->sq_cpu = cpu;
6c271ce2 7918 } else {
37d1e2e3 7919 sqd->sq_cpu = -1;
6c271ce2 7920 }
37d1e2e3
JA
7921
7922 sqd->task_pid = current->pid;
7923 current->flags |= PF_IO_WORKER;
7924 ret = io_wq_fork_thread(io_sq_thread, sqd);
7925 current->flags &= ~PF_IO_WORKER;
7926 if (ret < 0) {
534ca6d6 7927 sqd->thread = NULL;
6c271ce2
JA
7928 goto err;
7929 }
37d1e2e3 7930 wait_for_completion(&sqd->completion);
5aa75ed5 7931 ret = io_uring_alloc_task_context(sqd->thread, ctx);
0f212204
JA
7932 if (ret)
7933 goto err;
6c271ce2
JA
7934 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7935 /* Can't have SQ_AFF without SQPOLL */
7936 ret = -EINVAL;
7937 goto err;
7938 }
7939
2b188cc1
JA
7940 return 0;
7941err:
37d1e2e3 7942 io_sq_thread_finish(ctx);
2b188cc1
JA
7943 return ret;
7944}
7945
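/*
 * A hedged userspace sketch (assuming liburing): the flags parsed by
 * io_sq_offload_create() above. IORING_SETUP_SQPOLL requests the SQ poll
 * thread, IORING_SETUP_SQ_AFF pins it to sq_thread_cpu, and
 * IORING_SETUP_ATTACH_WQ (not shown) shares the backend of an existing ring
 * via wq_fd.
 */
#include <liburing.h>
#include <string.h>

static int setup_sqpoll_ring(struct io_uring *ring)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
	p.sq_thread_cpu = 0;		/* pin the poll thread to CPU 0 */
	p.sq_thread_idle = 2000;	/* poll for 2000ms before sleeping */

	/* without CAP_SYS_ADMIN or CAP_SYS_NICE this fails with -EPERM here */
	return io_uring_queue_init_params(8, ring, &p);
}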
7e84e1c7
SG
7946static void io_sq_offload_start(struct io_ring_ctx *ctx)
7947{
534ca6d6
JA
7948 struct io_sq_data *sqd = ctx->sq_data;
7949
eb85890b 7950 if (ctx->flags & IORING_SETUP_SQPOLL)
37d1e2e3 7951 complete(&sqd->startup);
7e84e1c7
SG
7952}
7953
a087e2b5
BM
7954static inline void __io_unaccount_mem(struct user_struct *user,
7955 unsigned long nr_pages)
2b188cc1
JA
7956{
7957 atomic_long_sub(nr_pages, &user->locked_vm);
7958}
7959
a087e2b5
BM
7960static inline int __io_account_mem(struct user_struct *user,
7961 unsigned long nr_pages)
2b188cc1
JA
7962{
7963 unsigned long page_limit, cur_pages, new_pages;
7964
7965 /* Don't allow more pages than we can safely lock */
7966 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7967
7968 do {
7969 cur_pages = atomic_long_read(&user->locked_vm);
7970 new_pages = cur_pages + nr_pages;
7971 if (new_pages > page_limit)
7972 return -ENOMEM;
7973 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7974 new_pages) != cur_pages);
7975
7976 return 0;
7977}
7978
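/*
 * A hedged userspace sketch: __io_account_mem() above charges pinned pages
 * against RLIMIT_MEMLOCK, so buffer registration can fail with -ENOMEM on a
 * small default limit. Raising the limit (privileges permitting) is one way
 * an application can make room before registering large buffers.
 */
#include <sys/resource.h>

static int raise_memlock_limit(rlim_t bytes)
{
	struct rlimit rl = { .rlim_cur = bytes, .rlim_max = bytes };

	/* raising the hard limit requires CAP_SYS_RESOURCE */
	return setrlimit(RLIMIT_MEMLOCK, &rl);
}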
26bfa89e 7979static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 7980{
62e398be 7981 if (ctx->user)
a087e2b5 7982 __io_unaccount_mem(ctx->user, nr_pages);
30975825 7983
26bfa89e
JA
7984 if (ctx->mm_account)
7985 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
7986}
7987
26bfa89e 7988static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 7989{
30975825
BM
7990 int ret;
7991
62e398be 7992 if (ctx->user) {
30975825
BM
7993 ret = __io_account_mem(ctx->user, nr_pages);
7994 if (ret)
7995 return ret;
7996 }
7997
26bfa89e
JA
7998 if (ctx->mm_account)
7999 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
8000
8001 return 0;
8002}
8003
2b188cc1
JA
8004static void io_mem_free(void *ptr)
8005{
52e04ef4
MR
8006 struct page *page;
8007
8008 if (!ptr)
8009 return;
2b188cc1 8010
52e04ef4 8011 page = virt_to_head_page(ptr);
2b188cc1
JA
8012 if (put_page_testzero(page))
8013 free_compound_page(page);
8014}
8015
8016static void *io_mem_alloc(size_t size)
8017{
8018 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
26bfa89e 8019 __GFP_NORETRY | __GFP_ACCOUNT;
2b188cc1
JA
8020
8021 return (void *) __get_free_pages(gfp_flags, get_order(size));
8022}
8023
75b28aff
HV
8024static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8025 size_t *sq_offset)
8026{
8027 struct io_rings *rings;
8028 size_t off, sq_array_size;
8029
8030 off = struct_size(rings, cqes, cq_entries);
8031 if (off == SIZE_MAX)
8032 return SIZE_MAX;
8033
8034#ifdef CONFIG_SMP
8035 off = ALIGN(off, SMP_CACHE_BYTES);
8036 if (off == 0)
8037 return SIZE_MAX;
8038#endif
8039
b36200f5
DV
8040 if (sq_offset)
8041 *sq_offset = off;
8042
75b28aff
HV
8043 sq_array_size = array_size(sizeof(u32), sq_entries);
8044 if (sq_array_size == SIZE_MAX)
8045 return SIZE_MAX;
8046
8047 if (check_add_overflow(off, sq_array_size, &off))
8048 return SIZE_MAX;
8049
75b28aff
HV
8050 return off;
8051}
8052
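/*
 * A hedged userspace sketch (raw io_uring_setup(2) interface): the layout
 * sized by rings_size() above is what the application maps. The SQ ring, CQ
 * ring and SQ index array share one allocation, and the offsets are reported
 * back through io_uring_params.
 */
#include <linux/io_uring.h>
#include <sys/mman.h>
#include <stdint.h>

static void *map_sq_ring(int ring_fd, const struct io_uring_params *p)
{
	/* sq_off.array is the offset of the SQ index array in the mapping */
	size_t sz = p->sq_off.array + p->sq_entries * sizeof(uint32_t);

	return mmap(NULL, sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
}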
0a96bbe4 8053static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
edafccee
JA
8054{
8055 int i, j;
8056
8057 if (!ctx->user_bufs)
8058 return -ENXIO;
8059
8060 for (i = 0; i < ctx->nr_user_bufs; i++) {
8061 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8062
8063 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 8064 unpin_user_page(imu->bvec[j].bv_page);
edafccee 8065
de293938 8066 if (imu->acct_pages)
26bfa89e 8067 io_unaccount_mem(ctx, imu->acct_pages);
d4ef6475 8068 kvfree(imu->bvec);
edafccee
JA
8069 imu->nr_bvecs = 0;
8070 }
8071
8072 kfree(ctx->user_bufs);
8073 ctx->user_bufs = NULL;
8074 ctx->nr_user_bufs = 0;
8075 return 0;
8076}
8077
8078static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8079 void __user *arg, unsigned index)
8080{
8081 struct iovec __user *src;
8082
8083#ifdef CONFIG_COMPAT
8084 if (ctx->compat) {
8085 struct compat_iovec __user *ciovs;
8086 struct compat_iovec ciov;
8087
8088 ciovs = (struct compat_iovec __user *) arg;
8089 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8090 return -EFAULT;
8091
d55e5f5b 8092 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
8093 dst->iov_len = ciov.iov_len;
8094 return 0;
8095 }
8096#endif
8097 src = (struct iovec __user *) arg;
8098 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8099 return -EFAULT;
8100 return 0;
8101}
8102
de293938
JA
8103/*
 8104 * Not super efficient, but this only runs at registration time. And we do cache
8105 * the last compound head, so generally we'll only do a full search if we don't
8106 * match that one.
8107 *
8108 * We check if the given compound head page has already been accounted, to
8109 * avoid double accounting it. This allows us to account the full size of the
8110 * page, not just the constituent pages of a huge page.
8111 */
8112static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8113 int nr_pages, struct page *hpage)
8114{
8115 int i, j;
8116
8117 /* check current page array */
8118 for (i = 0; i < nr_pages; i++) {
8119 if (!PageCompound(pages[i]))
8120 continue;
8121 if (compound_head(pages[i]) == hpage)
8122 return true;
8123 }
8124
8125 /* check previously registered pages */
8126 for (i = 0; i < ctx->nr_user_bufs; i++) {
8127 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8128
8129 for (j = 0; j < imu->nr_bvecs; j++) {
8130 if (!PageCompound(imu->bvec[j].bv_page))
8131 continue;
8132 if (compound_head(imu->bvec[j].bv_page) == hpage)
8133 return true;
8134 }
8135 }
8136
8137 return false;
8138}
8139
8140static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8141 int nr_pages, struct io_mapped_ubuf *imu,
8142 struct page **last_hpage)
8143{
8144 int i, ret;
8145
8146 for (i = 0; i < nr_pages; i++) {
8147 if (!PageCompound(pages[i])) {
8148 imu->acct_pages++;
8149 } else {
8150 struct page *hpage;
8151
8152 hpage = compound_head(pages[i]);
8153 if (hpage == *last_hpage)
8154 continue;
8155 *last_hpage = hpage;
8156 if (headpage_already_acct(ctx, pages, i, hpage))
8157 continue;
8158 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8159 }
8160 }
8161
8162 if (!imu->acct_pages)
8163 return 0;
8164
26bfa89e 8165 ret = io_account_mem(ctx, imu->acct_pages);
de293938
JA
8166 if (ret)
8167 imu->acct_pages = 0;
8168 return ret;
8169}
8170
0a96bbe4
BM
8171static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8172 struct io_mapped_ubuf *imu,
8173 struct page **last_hpage)
edafccee
JA
8174{
8175 struct vm_area_struct **vmas = NULL;
8176 struct page **pages = NULL;
0a96bbe4
BM
8177 unsigned long off, start, end, ubuf;
8178 size_t size;
8179 int ret, pret, nr_pages, i;
8180
8181 ubuf = (unsigned long) iov->iov_base;
8182 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8183 start = ubuf >> PAGE_SHIFT;
8184 nr_pages = end - start;
8185
8186 ret = -ENOMEM;
8187
8188 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8189 if (!pages)
8190 goto done;
8191
8192 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8193 GFP_KERNEL);
8194 if (!vmas)
8195 goto done;
edafccee 8196
0a96bbe4
BM
8197 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8198 GFP_KERNEL);
8199 if (!imu->bvec)
8200 goto done;
8201
8202 ret = 0;
8203 mmap_read_lock(current->mm);
8204 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8205 pages, vmas);
8206 if (pret == nr_pages) {
8207 /* don't support file backed memory */
8208 for (i = 0; i < nr_pages; i++) {
8209 struct vm_area_struct *vma = vmas[i];
8210
8211 if (vma->vm_file &&
8212 !is_file_hugepages(vma->vm_file)) {
8213 ret = -EOPNOTSUPP;
8214 break;
8215 }
8216 }
8217 } else {
8218 ret = pret < 0 ? pret : -EFAULT;
8219 }
8220 mmap_read_unlock(current->mm);
8221 if (ret) {
8222 /*
8223 * if we did partial map, or found file backed vmas,
8224 * release any pages we did get
8225 */
8226 if (pret > 0)
8227 unpin_user_pages(pages, pret);
8228 kvfree(imu->bvec);
8229 goto done;
8230 }
8231
8232 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8233 if (ret) {
8234 unpin_user_pages(pages, pret);
8235 kvfree(imu->bvec);
8236 goto done;
8237 }
8238
8239 off = ubuf & ~PAGE_MASK;
8240 size = iov->iov_len;
8241 for (i = 0; i < nr_pages; i++) {
8242 size_t vec_len;
8243
8244 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8245 imu->bvec[i].bv_page = pages[i];
8246 imu->bvec[i].bv_len = vec_len;
8247 imu->bvec[i].bv_offset = off;
8248 off = 0;
8249 size -= vec_len;
8250 }
8251 /* store original address for later verification */
8252 imu->ubuf = ubuf;
8253 imu->len = iov->iov_len;
8254 imu->nr_bvecs = nr_pages;
8255 ret = 0;
8256done:
8257 kvfree(pages);
8258 kvfree(vmas);
8259 return ret;
8260}
8261
2b358604 8262static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
0a96bbe4 8263{
edafccee
JA
8264 if (ctx->user_bufs)
8265 return -EBUSY;
8266 if (!nr_args || nr_args > UIO_MAXIOV)
8267 return -EINVAL;
8268
8269 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8270 GFP_KERNEL);
8271 if (!ctx->user_bufs)
8272 return -ENOMEM;
8273
2b358604
BM
8274 return 0;
8275}
edafccee 8276
2b358604
BM
8277static int io_buffer_validate(struct iovec *iov)
8278{
8279 /*
8280 * Don't impose further limits on the size and buffer
8281 * constraints here, we'll -EINVAL later when IO is
8282 * submitted if they are wrong.
8283 */
8284 if (!iov->iov_base || !iov->iov_len)
8285 return -EFAULT;
edafccee 8286
2b358604
BM
8287 /* arbitrary limit, but we need something */
8288 if (iov->iov_len > SZ_1G)
8289 return -EFAULT;
edafccee 8290
2b358604
BM
8291 return 0;
8292}
edafccee 8293
2b358604
BM
8294static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8295 unsigned int nr_args)
8296{
8297 int i, ret;
8298 struct iovec iov;
8299 struct page *last_hpage = NULL;
edafccee 8300
2b358604
BM
8301 ret = io_buffers_map_alloc(ctx, nr_args);
8302 if (ret)
8303 return ret;
edafccee 8304
edafccee
JA
8305 for (i = 0; i < nr_args; i++) {
8306 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
edafccee 8307
edafccee
JA
8308 ret = io_copy_iov(ctx, &iov, arg, i);
8309 if (ret)
0a96bbe4 8310 break;
de293938 8311
2b358604
BM
8312 ret = io_buffer_validate(&iov);
8313 if (ret)
0a96bbe4 8314 break;
edafccee 8315
0a96bbe4
BM
8316 ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
8317 if (ret)
8318 break;
edafccee
JA
8319
8320 ctx->nr_user_bufs++;
8321 }
0a96bbe4
BM
8322
8323 if (ret)
8324 io_sqe_buffers_unregister(ctx);
8325
edafccee
JA
8326 return ret;
8327}
8328
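/*
 * A minimal sketch of the userspace counterpart (assuming liburing):
 * registering fixed buffers is what io_sqe_buffers_register() above
 * validates, pins and accounts. The buffers can then be used with
 * IORING_OP_READ_FIXED / IORING_OP_WRITE_FIXED.
 */
#include <liburing.h>
#include <stdlib.h>
#include <sys/uio.h>

static int register_one_buffer(struct io_uring *ring, size_t len)
{
	struct iovec iov;

	iov.iov_base = malloc(len);
	iov.iov_len = len;
	if (!iov.iov_base)
		return -1;

	/* pins the pages and charges them against RLIMIT_MEMLOCK */
	return io_uring_register_buffers(ring, &iov, 1);
}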
9b402849
JA
8329static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8330{
8331 __s32 __user *fds = arg;
8332 int fd;
8333
8334 if (ctx->cq_ev_fd)
8335 return -EBUSY;
8336
8337 if (copy_from_user(&fd, fds, sizeof(*fds)))
8338 return -EFAULT;
8339
8340 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8341 if (IS_ERR(ctx->cq_ev_fd)) {
8342 int ret = PTR_ERR(ctx->cq_ev_fd);
8343 ctx->cq_ev_fd = NULL;
8344 return ret;
8345 }
8346
8347 return 0;
8348}
8349
8350static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8351{
8352 if (ctx->cq_ev_fd) {
8353 eventfd_ctx_put(ctx->cq_ev_fd);
8354 ctx->cq_ev_fd = NULL;
8355 return 0;
8356 }
8357
8358 return -ENXIO;
8359}
8360
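/*
 * A hedged userspace sketch (assuming liburing): registering an eventfd for
 * completion notification is handled by io_eventfd_register() above. The
 * eventfd is signalled as completions are posted to the CQ ring.
 */
#include <liburing.h>
#include <sys/eventfd.h>

static int attach_eventfd(struct io_uring *ring)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	/* a read(2) or poll(2) on efd now observes CQE arrivals */
	return io_uring_register_eventfd(ring, efd);
}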
5a2e745d
JA
8361static int __io_destroy_buffers(int id, void *p, void *data)
8362{
8363 struct io_ring_ctx *ctx = data;
8364 struct io_buffer *buf = p;
8365
067524e9 8366 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
8367 return 0;
8368}
8369
8370static void io_destroy_buffers(struct io_ring_ctx *ctx)
8371{
8372 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
8373 idr_destroy(&ctx->io_buffer_idr);
8374}
8375
68e68ee6 8376static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
1b4c351f 8377{
68e68ee6 8378 struct io_kiocb *req, *nxt;
1b4c351f 8379
68e68ee6
JA
8380 list_for_each_entry_safe(req, nxt, list, compl.list) {
8381 if (tsk && req->task != tsk)
8382 continue;
1b4c351f
JA
8383 list_del(&req->compl.list);
8384 kmem_cache_free(req_cachep, req);
8385 }
8386}
8387
4010fec4 8388static void io_req_caches_free(struct io_ring_ctx *ctx)
2b188cc1 8389{
bf019da7 8390 struct io_submit_state *submit_state = &ctx->submit_state;
e5547d2c 8391 struct io_comp_state *cs = &ctx->submit_state.comp;
bf019da7 8392
9a4fdbd8
JA
8393 mutex_lock(&ctx->uring_lock);
8394
8e5c66c4 8395 if (submit_state->free_reqs) {
9a4fdbd8
JA
8396 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8397 submit_state->reqs);
8e5c66c4
PB
8398 submit_state->free_reqs = 0;
8399 }
9a4fdbd8
JA
8400
8401 spin_lock_irq(&ctx->completion_lock);
e5547d2c
PB
8402 list_splice_init(&cs->locked_free_list, &cs->free_list);
8403 cs->locked_free_nr = 0;
9a4fdbd8
JA
8404 spin_unlock_irq(&ctx->completion_lock);
8405
e5547d2c
PB
8406 io_req_cache_free(&cs->free_list, NULL);
8407
9a4fdbd8
JA
8408 mutex_unlock(&ctx->uring_lock);
8409}
8410
2b188cc1
JA
8411static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8412{
04fc6c80
PB
8413 /*
 8414 * Some tasks may use the context even when all refs and requests have been put,
8415 * and they are free to do so while still holding uring_lock, see
8416 * __io_req_task_submit(). Wait for them to finish.
8417 */
8418 mutex_lock(&ctx->uring_lock);
8419 mutex_unlock(&ctx->uring_lock);
8420
37d1e2e3 8421 io_sq_thread_finish(ctx);
0a96bbe4 8422 io_sqe_buffers_unregister(ctx);
2aede0e4 8423
37d1e2e3 8424 if (ctx->mm_account) {
2aede0e4
JA
8425 mmdrop(ctx->mm_account);
8426 ctx->mm_account = NULL;
30975825 8427 }
def596e9 8428
8bad28d8 8429 mutex_lock(&ctx->uring_lock);
6b06314c 8430 io_sqe_files_unregister(ctx);
8bad28d8 8431 mutex_unlock(&ctx->uring_lock);
9b402849 8432 io_eventfd_unregister(ctx);
5a2e745d 8433 io_destroy_buffers(ctx);
41726c9a 8434 idr_destroy(&ctx->personality_idr);
def596e9 8435
2b188cc1 8436#if defined(CONFIG_UNIX)
355e8d26
EB
8437 if (ctx->ring_sock) {
8438 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 8439 sock_release(ctx->ring_sock);
355e8d26 8440 }
2b188cc1
JA
8441#endif
8442
75b28aff 8443 io_mem_free(ctx->rings);
2b188cc1 8444 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
8445
8446 percpu_ref_exit(&ctx->refs);
2b188cc1 8447 free_uid(ctx->user);
4010fec4 8448 io_req_caches_free(ctx);
e941894e
JA
8449 if (ctx->hash_map)
8450 io_wq_put_hash(ctx->hash_map);
78076bb6 8451 kfree(ctx->cancel_hash);
2b188cc1
JA
8452 kfree(ctx);
8453}
8454
8455static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8456{
8457 struct io_ring_ctx *ctx = file->private_data;
8458 __poll_t mask = 0;
8459
8460 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
8461 /*
8462 * synchronizes with barrier from wq_has_sleeper call in
8463 * io_commit_cqring
8464 */
2b188cc1 8465 smp_rmb();
90554200 8466 if (!io_sqring_full(ctx))
2b188cc1 8467 mask |= EPOLLOUT | EPOLLWRNORM;
ed670c3f
HX
8468
8469 /*
8470 * Don't flush cqring overflow list here, just do a simple check.
 8471 * Otherwise there could possibly be an ABBA deadlock:
 8472 *      CPU0                    CPU1
 8473 *      ----                    ----
 8474 * lock(&ctx->uring_lock);
 8475 *                              lock(&ep->mtx);
 8476 *                              lock(&ctx->uring_lock);
 8477 * lock(&ep->mtx);
 8478 *
 8479 * Users may get EPOLLIN but then see nothing in the cqring; this
 8480 * pushes them to do the flush.
8481 */
8482 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
2b188cc1
JA
8483 mask |= EPOLLIN | EPOLLRDNORM;
8484
8485 return mask;
8486}
8487
8488static int io_uring_fasync(int fd, struct file *file, int on)
8489{
8490 struct io_ring_ctx *ctx = file->private_data;
8491
8492 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8493}
8494
0bead8cd 8495static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
071698e1 8496{
4379bf8b 8497 const struct cred *creds;
071698e1 8498
4379bf8b
JA
8499 creds = idr_remove(&ctx->personality_idr, id);
8500 if (creds) {
8501 put_cred(creds);
0bead8cd 8502 return 0;
1e6fa521 8503 }
0bead8cd
YD
8504
8505 return -EINVAL;
8506}
8507
8508static int io_remove_personalities(int id, void *p, void *data)
8509{
8510 struct io_ring_ctx *ctx = data;
8511
8512 io_unregister_personality(ctx, id);
071698e1
JA
8513 return 0;
8514}
8515
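/*
 * A minimal sketch (assuming liburing): registering a personality snapshots
 * the caller's credentials under an id, and io_unregister_personality()
 * above drops it again. Individual SQEs select the saved credentials via
 * sqe->personality.
 */
#include <liburing.h>

static int submit_with_personality(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;
	int id;

	id = io_uring_register_personality(ring);	/* returns id or -errno */
	if (id < 0)
		return id;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	io_uring_prep_nop(sqe);
	sqe->personality = id;		/* run this request with the saved creds */
	return io_uring_submit(ring);
}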
ba50a036 8516static bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
7c25c0d1
JA
8517{
8518 struct callback_head *work, *head, *next;
ba50a036 8519 bool executed = false;
7c25c0d1
JA
8520
8521 do {
8522 do {
8523 head = NULL;
8524 work = READ_ONCE(ctx->exit_task_work);
8525 } while (cmpxchg(&ctx->exit_task_work, work, head) != work);
8526
8527 if (!work)
8528 break;
8529
8530 do {
8531 next = work->next;
8532 work->func(work);
8533 work = next;
8534 cond_resched();
8535 } while (work);
ba50a036 8536 executed = true;
7c25c0d1 8537 } while (1);
ba50a036
PB
8538
8539 return executed;
7c25c0d1
JA
8540}
8541
85faa7b8
JA
8542static void io_ring_exit_work(struct work_struct *work)
8543{
b2edc0a7
PB
8544 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8545 exit_work);
85faa7b8 8546
56952e91
JA
8547 /*
8548 * If we're doing polled IO and end up having requests being
8549 * submitted async (out-of-line), then completions can come in while
8550 * we're waiting for refs to drop. We need to reap these manually,
8551 * as nobody else will be looking for them.
8552 */
b2edc0a7 8553 do {
9936c7c2 8554 io_uring_try_cancel_requests(ctx, NULL, NULL);
7c25c0d1 8555 io_run_ctx_fallback(ctx);
b2edc0a7 8556 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
85faa7b8
JA
8557 io_ring_ctx_free(ctx);
8558}
8559
2b188cc1
JA
8560static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8561{
8562 mutex_lock(&ctx->uring_lock);
8563 percpu_ref_kill(&ctx->refs);
d9d05217
PB
8564
8565 if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
8566 ctx->sqo_dead = 1;
8567
cda286f0
PB
8568 /* if force is set, the ring is going away. always drop after that */
8569 ctx->cq_overflow_flushed = 1;
634578f8 8570 if (ctx->rings)
6c503150 8571 __io_cqring_overflow_flush(ctx, true, NULL, NULL);
5c766a90 8572 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
2b188cc1
JA
8573 mutex_unlock(&ctx->uring_lock);
8574
6b81928d
PB
8575 io_kill_timeouts(ctx, NULL, NULL);
8576 io_poll_remove_all(ctx, NULL, NULL);
561fb04a 8577
15dff286 8578 /* if we failed setting up the ctx, we might not have any rings */
b2edc0a7 8579 io_iopoll_try_reap_events(ctx);
309fc03a 8580
85faa7b8 8581 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
8582 /*
8583 * Use system_unbound_wq to avoid spawning tons of event kworkers
8584 * if we're exiting a ton of rings at the same time. It just adds
 8585 * noise and overhead; there's no discernible change in runtime
8586 * over using system_wq.
8587 */
8588 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
8589}
8590
8591static int io_uring_release(struct inode *inode, struct file *file)
8592{
8593 struct io_ring_ctx *ctx = file->private_data;
8594
8595 file->private_data = NULL;
8596 io_ring_ctx_wait_and_kill(ctx);
8597 return 0;
8598}
8599
f6edbabb
PB
8600struct io_task_cancel {
8601 struct task_struct *task;
8602 struct files_struct *files;
8603};
f254ac04 8604
f6edbabb 8605static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
b711d4ea 8606{
9a472ef7 8607 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f6edbabb 8608 struct io_task_cancel *cancel = data;
9a472ef7
PB
8609 bool ret;
8610
f6edbabb 8611 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
9a472ef7
PB
8612 unsigned long flags;
8613 struct io_ring_ctx *ctx = req->ctx;
8614
8615 /* protect against races with linked timeouts */
8616 spin_lock_irqsave(&ctx->completion_lock, flags);
f6edbabb 8617 ret = io_match_task(req, cancel->task, cancel->files);
9a472ef7
PB
8618 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8619 } else {
f6edbabb 8620 ret = io_match_task(req, cancel->task, cancel->files);
9a472ef7
PB
8621 }
8622 return ret;
b711d4ea
JA
8623}
8624
b7ddce3c 8625static void io_cancel_defer_files(struct io_ring_ctx *ctx,
ef9865a4 8626 struct task_struct *task,
b7ddce3c
PB
8627 struct files_struct *files)
8628{
8629 struct io_defer_entry *de = NULL;
8630 LIST_HEAD(list);
8631
8632 spin_lock_irq(&ctx->completion_lock);
8633 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
08d23634 8634 if (io_match_task(de->req, task, files)) {
b7ddce3c
PB
8635 list_cut_position(&list, &ctx->defer_list, &de->list);
8636 break;
8637 }
8638 }
8639 spin_unlock_irq(&ctx->completion_lock);
8640
8641 while (!list_empty(&list)) {
8642 de = list_first_entry(&list, struct io_defer_entry, list);
8643 list_del_init(&de->list);
8644 req_set_fail_links(de->req);
8645 io_put_req(de->req);
8646 io_req_complete(de->req, -ECANCELED);
8647 kfree(de);
8648 }
8649}
8650
9936c7c2
PB
8651static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8652 struct task_struct *task,
8653 struct files_struct *files)
8654{
8655 struct io_task_cancel cancel = { .task = task, .files = files, };
5aa75ed5 8656 struct io_uring_task *tctx = current->io_uring;
9936c7c2
PB
8657
8658 while (1) {
8659 enum io_wq_cancel cret;
8660 bool ret = false;
8661
5aa75ed5
JA
8662 if (tctx && tctx->io_wq) {
8663 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
9936c7c2
PB
8664 &cancel, true);
8665 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8666 }
8667
8668 /* SQPOLL thread does its own polling */
8669 if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
8670 while (!list_empty_careful(&ctx->iopoll_list)) {
8671 io_iopoll_try_reap_events(ctx);
8672 ret = true;
8673 }
8674 }
8675
8676 ret |= io_poll_remove_all(ctx, task, files);
8677 ret |= io_kill_timeouts(ctx, task, files);
8678 ret |= io_run_task_work();
ba50a036 8679 ret |= io_run_ctx_fallback(ctx);
9936c7c2
PB
8680 io_cqring_overflow_flush(ctx, true, task, files);
8681 if (!ret)
8682 break;
8683 cond_resched();
8684 }
8685}
8686
ca70f00b
PB
8687static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8688 struct task_struct *task,
8689 struct files_struct *files)
8690{
8691 struct io_kiocb *req;
8692 int cnt = 0;
8693
8694 spin_lock_irq(&ctx->inflight_lock);
8695 list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8696 cnt += io_match_task(req, task, files);
8697 spin_unlock_irq(&ctx->inflight_lock);
8698 return cnt;
8699}
8700
b52fda00 8701static void io_uring_cancel_files(struct io_ring_ctx *ctx,
df9923f9 8702 struct task_struct *task,
fcb323cc
JA
8703 struct files_struct *files)
8704{
fcb323cc 8705 while (!list_empty_careful(&ctx->inflight_list)) {
d8f1b971 8706 DEFINE_WAIT(wait);
ca70f00b 8707 int inflight;
fcb323cc 8708
ca70f00b
PB
8709 inflight = io_uring_count_inflight(ctx, task, files);
8710 if (!inflight)
fcb323cc 8711 break;
f6edbabb 8712
9936c7c2 8713 io_uring_try_cancel_requests(ctx, task, files);
ca70f00b 8714
34343786
PB
8715 if (ctx->sq_data)
8716 io_sq_thread_unpark(ctx->sq_data);
ca70f00b
PB
8717 prepare_to_wait(&task->io_uring->wait, &wait,
8718 TASK_UNINTERRUPTIBLE);
8719 if (inflight == io_uring_count_inflight(ctx, task, files))
8720 schedule();
c98de08c 8721 finish_wait(&task->io_uring->wait, &wait);
34343786
PB
8722 if (ctx->sq_data)
8723 io_sq_thread_park(ctx->sq_data);
0f212204 8724 }
0f212204
JA
8725}
8726
d9d05217
PB
8727static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
8728{
d9d05217
PB
8729 mutex_lock(&ctx->uring_lock);
8730 ctx->sqo_dead = 1;
8731 mutex_unlock(&ctx->uring_lock);
8732
8733 /* make sure callers enter the ring to get error */
b4411616
PB
8734 if (ctx->rings)
8735 io_ring_set_wakeup_flag(ctx);
d9d05217
PB
8736}
8737
0f212204
JA
8738/*
8739 * We need to iteratively cancel requests, in case a request has dependent
 8740 * hard links. These persist even when individual cancelations fail, so keep
 8741 * looping until none are found.
8742 */
8743static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8744 struct files_struct *files)
8745{
8746 struct task_struct *task = current;
37d1e2e3 8747 bool did_park = false;
0f212204 8748
fdaf083c 8749 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
d9d05217 8750 io_disable_sqo_submit(ctx);
37d1e2e3
JA
8751 did_park = io_sq_thread_park(ctx->sq_data);
8752 if (did_park) {
8753 task = ctx->sq_data->thread;
8754 atomic_inc(&task->io_uring->in_idle);
8755 }
fdaf083c 8756 }
0f212204 8757
df9923f9 8758 io_cancel_defer_files(ctx, task, files);
0f212204 8759
3a7efd1a 8760 io_uring_cancel_files(ctx, task, files);
b52fda00 8761 if (!files)
9936c7c2 8762 io_uring_try_cancel_requests(ctx, task, NULL);
fdaf083c 8763
37d1e2e3 8764 if (did_park) {
fdaf083c 8765 atomic_dec(&task->io_uring->in_idle);
fdaf083c
JA
8766 io_sq_thread_unpark(ctx->sq_data);
8767 }
0f212204
JA
8768}
8769
8770/*
8771 * Note that this task has used io_uring. We use it for cancelation purposes.
8772 */
fdaf083c 8773static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
0f212204 8774{
236434c3 8775 struct io_uring_task *tctx = current->io_uring;
a528b04e 8776 int ret;
236434c3
MWO
8777
8778 if (unlikely(!tctx)) {
5aa75ed5 8779 ret = io_uring_alloc_task_context(current, ctx);
0f212204
JA
8780 if (unlikely(ret))
8781 return ret;
236434c3 8782 tctx = current->io_uring;
0f212204 8783 }
236434c3
MWO
8784 if (tctx->last != file) {
8785 void *old = xa_load(&tctx->xa, (unsigned long)file);
0f212204 8786
236434c3 8787 if (!old) {
0f212204 8788 get_file(file);
a528b04e
PB
8789 ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
8790 file, GFP_KERNEL));
8791 if (ret) {
8792 fput(file);
8793 return ret;
8794 }
ecfc8492
PB
8795
8796 /* one and only SQPOLL file note, held by sqo_task */
8797 WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) &&
8798 current != ctx->sqo_task);
0f212204 8799 }
236434c3 8800 tctx->last = file;
0f212204
JA
8801 }
8802
fdaf083c
JA
8803 /*
8804 * This is race safe in that the task itself is doing this, hence it
8805 * cannot be going through the exit/cancel paths at the same time.
8806 * This cannot be modified while exit/cancel is running.
8807 */
8808 if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
8809 tctx->sqpoll = true;
8810
0f212204
JA
8811 return 0;
8812}
8813
8814/*
8815 * Remove this io_uring_file -> task mapping.
8816 */
8817static void io_uring_del_task_file(struct file *file)
8818{
8819 struct io_uring_task *tctx = current->io_uring;
0f212204
JA
8820
8821 if (tctx->last == file)
8822 tctx->last = NULL;
5e2ed8c4 8823 file = xa_erase(&tctx->xa, (unsigned long)file);
0f212204
JA
8824 if (file)
8825 fput(file);
8826}
8827
8452d4a6 8828static void io_uring_clean_tctx(struct io_uring_task *tctx)
de7f1d9e
PB
8829{
8830 struct file *file;
8831 unsigned long index;
8832
8833 xa_for_each(&tctx->xa, index, file)
8834 io_uring_del_task_file(file);
8452d4a6
PB
8835 if (tctx->io_wq) {
8836 io_wq_put_and_exit(tctx->io_wq);
8837 tctx->io_wq = NULL;
8838 }
de7f1d9e
PB
8839}
8840
0f212204
JA
8841void __io_uring_files_cancel(struct files_struct *files)
8842{
8843 struct io_uring_task *tctx = current->io_uring;
ce765372
MWO
8844 struct file *file;
8845 unsigned long index;
0f212204
JA
8846
8847 /* make sure overflow events are dropped */
fdaf083c 8848 atomic_inc(&tctx->in_idle);
de7f1d9e
PB
8849 xa_for_each(&tctx->xa, index, file)
8850 io_uring_cancel_task_requests(file->private_data, files);
fdaf083c 8851 atomic_dec(&tctx->in_idle);
de7f1d9e 8852
8452d4a6
PB
8853 if (files)
8854 io_uring_clean_tctx(tctx);
fdaf083c
JA
8855}
8856
8857static s64 tctx_inflight(struct io_uring_task *tctx)
8858{
0e9ddb39
PB
8859 return percpu_counter_sum(&tctx->inflight);
8860}
fdaf083c 8861
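/*
 * Cancel all requests issued on behalf of the SQPOLL thread for this ring,
 * waiting until its io_uring context has no more requests in flight.
 */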
0e9ddb39
PB
8862static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
8863{
37d1e2e3 8864 struct io_sq_data *sqd = ctx->sq_data;
0e9ddb39
PB
8865 struct io_uring_task *tctx;
8866 s64 inflight;
8867 DEFINE_WAIT(wait);
fdaf083c 8868
37d1e2e3 8869 if (!sqd)
0e9ddb39 8870 return;
0e9ddb39 8871 io_disable_sqo_submit(ctx);
37d1e2e3
JA
8872 if (!io_sq_thread_park(sqd))
8873 return;
8874 tctx = ctx->sq_data->thread->io_uring;
e54945ae
JA
8875 /* can happen on fork/alloc failure, just ignore that state */
8876 if (!tctx) {
8877 io_sq_thread_unpark(sqd);
8878 return;
8879 }
fdaf083c 8880
0e9ddb39
PB
8881 atomic_inc(&tctx->in_idle);
8882 do {
8883 /* read completions before cancelations */
8884 inflight = tctx_inflight(tctx);
8885 if (!inflight)
8886 break;
8887 io_uring_cancel_task_requests(ctx, NULL);
fdaf083c 8888
0e9ddb39
PB
8889 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8890 /*
8891 * If we've seen completions, retry without waiting. This
8892 * avoids a race where a completion comes in before we did
8893 * prepare_to_wait().
8894 */
8895 if (inflight == tctx_inflight(tctx))
8896 schedule();
8897 finish_wait(&tctx->wait, &wait);
8898 } while (1);
8899 atomic_dec(&tctx->in_idle);
37d1e2e3 8900 io_sq_thread_unpark(sqd);
0f212204
JA
8901}
8902
0f212204
JA
8903/*
8904 * Find any io_uring fd that this task has registered or done IO on, and cancel
8905 * requests.
8906 */
8907void __io_uring_task_cancel(void)
8908{
8909 struct io_uring_task *tctx = current->io_uring;
8910 DEFINE_WAIT(wait);
d8a6df10 8911 s64 inflight;
0f212204
JA
8912
8913 /* make sure overflow events are dropped */
fdaf083c 8914 atomic_inc(&tctx->in_idle);
0f212204 8915
0b5cd6c3 8916 /* trigger io_disable_sqo_submit() */
0e9ddb39
PB
8917 if (tctx->sqpoll) {
8918 struct file *file;
8919 unsigned long index;
8920
8921 xa_for_each(&tctx->xa, index, file)
8922 io_uring_cancel_sqpoll(file->private_data);
8923 }
0b5cd6c3 8924
d8a6df10 8925 do {
0f212204 8926 /* read completions before cancelations */
fdaf083c 8927 inflight = tctx_inflight(tctx);
d8a6df10
JA
8928 if (!inflight)
8929 break;
0f212204
JA
8930 __io_uring_files_cancel(NULL);
8931
8932 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8933
8934 /*
a1bb3cd5
PB
8935 * If we've seen completions, retry without waiting. This
8936 * avoids a race where a completion comes in before we did
8937 * prepare_to_wait().
0f212204 8938 */
a1bb3cd5
PB
8939 if (inflight == tctx_inflight(tctx))
8940 schedule();
f57555ed 8941 finish_wait(&tctx->wait, &wait);
d8a6df10 8942 } while (1);
0f212204 8943
fdaf083c 8944 atomic_dec(&tctx->in_idle);
de7f1d9e 8945
8452d4a6
PB
8946 io_uring_clean_tctx(tctx);
8947 /* all current's requests should be gone, we can kill tctx */
8948 __io_uring_free(current);
44e728b8
PB
8949}
8950
fcb323cc
JA
8951static int io_uring_flush(struct file *file, void *data)
8952{
6b5733eb 8953 struct io_uring_task *tctx = current->io_uring;
d9d05217 8954 struct io_ring_ctx *ctx = file->private_data;
6b5733eb 8955
3bfe6106
JA
8956 /* Ignore helper thread files exit */
8957 if (current->flags & PF_IO_WORKER)
8958 return 0;
8959
41be53e9 8960 if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
84965ff8 8961 io_uring_cancel_task_requests(ctx, NULL);
4010fec4 8962 io_req_caches_free(ctx);
41be53e9 8963 }
84965ff8 8964
7c25c0d1
JA
8965 io_run_ctx_fallback(ctx);
8966
6b5733eb 8967 if (!tctx)
4f793dc4
PB
8968 return 0;
8969
6b5733eb
PB
8970 /* we should have cancelled and erased it before PF_EXITING */
8971 WARN_ON_ONCE((current->flags & PF_EXITING) &&
8972 xa_load(&tctx->xa, (unsigned long)file));
8973
4f793dc4
PB
8974 /*
8975 * fput() is pending, so f_count will be 2 if the only other ref is our
8976 * potential task file note. If the task is exiting, drop regardless of count.
8977 */
6b5733eb
PB
8978 if (atomic_long_read(&file->f_count) != 2)
8979 return 0;
4f793dc4 8980
d9d05217
PB
8981 if (ctx->flags & IORING_SETUP_SQPOLL) {
8982 /* there is only one file note, which is owned by sqo_task */
4325cb49
PB
8983 WARN_ON_ONCE(ctx->sqo_task != current &&
8984 xa_load(&tctx->xa, (unsigned long)file));
8985 /* sqo_dead check is for when this happens after cancellation */
8986 WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
d9d05217
PB
8987 !xa_load(&tctx->xa, (unsigned long)file));
8988
8989 io_disable_sqo_submit(ctx);
8990 }
8991
8992 if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
8993 io_uring_del_task_file(file);
fcb323cc
JA
8994 return 0;
8995}
8996
6c5c240e
RP
8997static void *io_uring_validate_mmap_request(struct file *file,
8998 loff_t pgoff, size_t sz)
2b188cc1 8999{
2b188cc1 9000 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 9001 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
9002 struct page *page;
9003 void *ptr;
9004
9005 switch (offset) {
9006 case IORING_OFF_SQ_RING:
75b28aff
HV
9007 case IORING_OFF_CQ_RING:
9008 ptr = ctx->rings;
2b188cc1
JA
9009 break;
9010 case IORING_OFF_SQES:
9011 ptr = ctx->sq_sqes;
9012 break;
2b188cc1 9013 default:
6c5c240e 9014 return ERR_PTR(-EINVAL);
2b188cc1
JA
9015 }
9016
9017 page = virt_to_head_page(ptr);
a50b854e 9018 if (sz > page_size(page))
6c5c240e
RP
9019 return ERR_PTR(-EINVAL);
9020
9021 return ptr;
9022}
9023
9024#ifdef CONFIG_MMU
9025
9026static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9027{
9028 size_t sz = vma->vm_end - vma->vm_start;
9029 unsigned long pfn;
9030 void *ptr;
9031
9032 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9033 if (IS_ERR(ptr))
9034 return PTR_ERR(ptr);
2b188cc1
JA
9035
9036 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9037 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9038}
9039
6c5c240e
RP
9040#else /* !CONFIG_MMU */
9041
9042static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9043{
9044 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9045}
9046
9047static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9048{
9049 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9050}
9051
9052static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9053 unsigned long addr, unsigned long len,
9054 unsigned long pgoff, unsigned long flags)
9055{
9056 void *ptr;
9057
9058 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9059 if (IS_ERR(ptr))
9060 return PTR_ERR(ptr);
9061
9062 return (unsigned long) ptr;
9063}
9064
9065#endif /* !CONFIG_MMU */
9066
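/*
 * Illustrative userspace sketch (not part of this file): mapping the rings
 * with the offsets handled by io_uring_validate_mmap_request() above.
 * Assumes the raw syscall number from <sys/syscall.h> and the UAPI
 * definitions from <linux/io_uring.h>; error handling is omitted.
 */
#include <linux/io_uring.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int ring_mmap_example(void)
{
	struct io_uring_params p = { 0 };
	int fd = syscall(__NR_io_uring_setup, 64, &p);
	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);

	/* SQ and CQ ring heads, tails and arrays live at fixed offsets */
	void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
			     MAP_SHARED, fd, IORING_OFF_SQ_RING);
	void *cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
			     MAP_SHARED, fd, IORING_OFF_CQ_RING);
	/* the SQE array is a separate mapping */
	struct io_uring_sqe *sqes = mmap(NULL, p.sq_entries * sizeof(*sqes),
					 PROT_READ | PROT_WRITE, MAP_SHARED,
					 fd, IORING_OFF_SQES);

	(void)sq_ring; (void)cq_ring; (void)sqes;
	return fd;
}
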
d9d05217 9067static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
90554200 9068{
d9d05217 9069 int ret = 0;
90554200
JA
9070 DEFINE_WAIT(wait);
9071
9072 do {
9073 if (!io_sqring_full(ctx))
9074 break;
9075
9076 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9077
d9d05217
PB
9078 if (unlikely(ctx->sqo_dead)) {
9079 ret = -EOWNERDEAD;
9080 goto out;
9081 }
9082
90554200
JA
9083 if (!io_sqring_full(ctx))
9084 break;
9085
9086 schedule();
9087 } while (!signal_pending(current));
9088
9089 finish_wait(&ctx->sqo_sq_wait, &wait);
d9d05217
PB
9090out:
9091 return ret;
90554200
JA
9092}
9093
c73ebb68
HX
9094static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9095 struct __kernel_timespec __user **ts,
9096 const sigset_t __user **sig)
9097{
9098 struct io_uring_getevents_arg arg;
9099
9100 /*
9101 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9102 * is just a pointer to the sigset_t.
9103 */
9104 if (!(flags & IORING_ENTER_EXT_ARG)) {
9105 *sig = (const sigset_t __user *) argp;
9106 *ts = NULL;
9107 return 0;
9108 }
9109
9110 /*
9111 * EXT_ARG is set - ensure we agree on the size of the argument, and if
9112 * so, copy in the timespec and sigset_t pointers.
9113 */
9114 if (*argsz != sizeof(arg))
9115 return -EINVAL;
9116 if (copy_from_user(&arg, argp, sizeof(arg)))
9117 return -EFAULT;
9118 *sig = u64_to_user_ptr(arg.sigmask);
9119 *argsz = arg.sigmask_sz;
9120 *ts = u64_to_user_ptr(arg.ts);
9121 return 0;
9122}
9123
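/*
 * Illustrative userspace sketch (not part of this file): waiting for a
 * completion with a timeout via IORING_ENTER_EXT_ARG, using the argument
 * layout parsed by io_get_ext_arg() above. Assumes the raw syscall number
 * from <sys/syscall.h> and the UAPI definitions from <linux/io_uring.h>;
 * error handling is omitted.
 */
#include <linux/io_uring.h>
#include <linux/time_types.h>
#include <sys/syscall.h>
#include <unistd.h>

static int wait_cqe_timeout_example(int ring_fd)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_getevents_arg arg = {
		.sigmask	= 0,	/* no signal mask change */
		.sigmask_sz	= 0,
		.ts		= (__u64)(unsigned long)&ts,
	};

	/* argp points at the struct and argsz must be sizeof(arg) */
	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}
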
2b188cc1 9124SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
c73ebb68
HX
9125 u32, min_complete, u32, flags, const void __user *, argp,
9126 size_t, argsz)
2b188cc1
JA
9127{
9128 struct io_ring_ctx *ctx;
9129 long ret = -EBADF;
9130 int submitted = 0;
9131 struct fd f;
9132
4c6e277c 9133 io_run_task_work();
b41e9852 9134
90554200 9135 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
c73ebb68 9136 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
2b188cc1
JA
9137 return -EINVAL;
9138
9139 f = fdget(fd);
9140 if (!f.file)
9141 return -EBADF;
9142
9143 ret = -EOPNOTSUPP;
9144 if (f.file->f_op != &io_uring_fops)
9145 goto out_fput;
9146
9147 ret = -ENXIO;
9148 ctx = f.file->private_data;
9149 if (!percpu_ref_tryget(&ctx->refs))
9150 goto out_fput;
9151
7e84e1c7
SG
9152 ret = -EBADFD;
9153 if (ctx->flags & IORING_SETUP_R_DISABLED)
9154 goto out;
9155
6c271ce2
JA
9156 /*
9157 * For SQ polling, the thread will do all submissions and completions.
9158 * Just return the requested submit count, and wake the thread if
9159 * we were asked to.
9160 */
b2a9eada 9161 ret = 0;
6c271ce2 9162 if (ctx->flags & IORING_SETUP_SQPOLL) {
6c503150 9163 io_cqring_overflow_flush(ctx, false, NULL, NULL);
89448c47 9164
5f3f26f9
JA
9165 if (unlikely(ctx->sqo_exec)) {
9166 ret = io_sq_thread_fork(ctx->sq_data, ctx);
9167 if (ret)
9168 goto out;
9169 ctx->sqo_exec = 0;
9170 }
d9d05217
PB
9171 ret = -EOWNERDEAD;
9172 if (unlikely(ctx->sqo_dead))
9173 goto out;
6c271ce2 9174 if (flags & IORING_ENTER_SQ_WAKEUP)
534ca6d6 9175 wake_up(&ctx->sq_data->wait);
d9d05217
PB
9176 if (flags & IORING_ENTER_SQ_WAIT) {
9177 ret = io_sqpoll_wait_sq(ctx);
9178 if (ret)
9179 goto out;
9180 }
6c271ce2 9181 submitted = to_submit;
b2a9eada 9182 } else if (to_submit) {
fdaf083c 9183 ret = io_uring_add_task_file(ctx, f.file);
0f212204
JA
9184 if (unlikely(ret))
9185 goto out;
2b188cc1 9186 mutex_lock(&ctx->uring_lock);
0f212204 9187 submitted = io_submit_sqes(ctx, to_submit);
2b188cc1 9188 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
9189
9190 if (submitted != to_submit)
9191 goto out;
2b188cc1
JA
9192 }
9193 if (flags & IORING_ENTER_GETEVENTS) {
c73ebb68
HX
9194 const sigset_t __user *sig;
9195 struct __kernel_timespec __user *ts;
9196
9197 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9198 if (unlikely(ret))
9199 goto out;
9200
2b188cc1
JA
9201 min_complete = min(min_complete, ctx->cq_entries);
9202
32b2244a
XW
9203 /*
9204 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
9205 * applications don't need to poll for completion events themselves;
9206 * they can rely on io_sq_thread to do the polling, which reduces CPU
9207 * usage and uring_lock contention.
9208 */
9209 if (ctx->flags & IORING_SETUP_IOPOLL &&
9210 !(ctx->flags & IORING_SETUP_SQPOLL)) {
7668b92a 9211 ret = io_iopoll_check(ctx, min_complete);
def596e9 9212 } else {
c73ebb68 9213 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
def596e9 9214 }
2b188cc1
JA
9215 }
9216
7c504e65 9217out:
6805b32e 9218 percpu_ref_put(&ctx->refs);
2b188cc1
JA
9219out_fput:
9220 fdput(f);
9221 return submitted ? submitted : ret;
9222}
9223
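/*
 * Illustrative userspace sketch (not part of this file): with
 * IORING_SETUP_SQPOLL the application normally only advances the SQ tail.
 * If the poll thread has gone idle, the kernel sets IORING_SQ_NEED_WAKEUP in
 * the SQ ring flags and the application must call io_uring_enter() with
 * IORING_ENTER_SQ_WAKEUP, which reaches the wake_up() in the SQPOLL branch
 * above. 'sq_flags' is assumed to point at the flags word inside the
 * mmap'ed SQ ring (offset p.sq_off.flags); memory barriers are elided for
 * brevity.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static void sqpoll_wakeup_example(int ring_fd,
				  const volatile unsigned int *sq_flags)
{
	/* ... SQEs filled in and the SQ tail advanced before this point ... */
	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
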
bebdb65e 9224#ifdef CONFIG_PROC_FS
87ce955b
JA
9225static int io_uring_show_cred(int id, void *p, void *data)
9226{
4379bf8b 9227 const struct cred *cred = p;
87ce955b
JA
9228 struct seq_file *m = data;
9229 struct user_namespace *uns = seq_user_ns(m);
9230 struct group_info *gi;
9231 kernel_cap_t cap;
9232 unsigned __capi;
9233 int g;
9234
9235 seq_printf(m, "%5d\n", id);
9236 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9237 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9238 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9239 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9240 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9241 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9242 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9243 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9244 seq_puts(m, "\n\tGroups:\t");
9245 gi = cred->group_info;
9246 for (g = 0; g < gi->ngroups; g++) {
9247 seq_put_decimal_ull(m, g ? " " : "",
9248 from_kgid_munged(uns, gi->gid[g]));
9249 }
9250 seq_puts(m, "\n\tCapEff:\t");
9251 cap = cred->cap_effective;
9252 CAP_FOR_EACH_U32(__capi)
9253 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9254 seq_putc(m, '\n');
9255 return 0;
9256}
9257
9258static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9259{
dbbe9c64 9260 struct io_sq_data *sq = NULL;
fad8e0de 9261 bool has_lock;
87ce955b
JA
9262 int i;
9263
fad8e0de
JA
9264 /*
9265 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9266 * since the fdinfo case grabs it in the opposite order from normal use
9267 * cases. If we fail to get the lock, we just don't iterate any
9268 * structures that could be going away outside the io_uring mutex.
9269 */
9270 has_lock = mutex_trylock(&ctx->uring_lock);
9271
5f3f26f9 9272 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
dbbe9c64 9273 sq = ctx->sq_data;
5f3f26f9
JA
9274 if (!sq->thread)
9275 sq = NULL;
9276 }
dbbe9c64
JQ
9277
9278 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9279 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
87ce955b 9280 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
fad8e0de 9281 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
ea64ec02 9282 struct file *f = *io_fixed_file_slot(ctx->file_data, i);
87ce955b 9283
87ce955b
JA
9284 if (f)
9285 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9286 else
9287 seq_printf(m, "%5u: <none>\n", i);
9288 }
9289 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
fad8e0de 9290 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
87ce955b
JA
9291 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9292
9293 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9294 (unsigned int) buf->len);
9295 }
fad8e0de 9296 if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
87ce955b
JA
9297 seq_printf(m, "Personalities:\n");
9298 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
9299 }
d7718a9d
JA
9300 seq_printf(m, "PollList:\n");
9301 spin_lock_irq(&ctx->completion_lock);
9302 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9303 struct hlist_head *list = &ctx->cancel_hash[i];
9304 struct io_kiocb *req;
9305
9306 hlist_for_each_entry(req, list, hash_node)
9307 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9308 req->task->task_works != NULL);
9309 }
9310 spin_unlock_irq(&ctx->completion_lock);
fad8e0de
JA
9311 if (has_lock)
9312 mutex_unlock(&ctx->uring_lock);
87ce955b
JA
9313}
9314
9315static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9316{
9317 struct io_ring_ctx *ctx = f->private_data;
9318
9319 if (percpu_ref_tryget(&ctx->refs)) {
9320 __io_uring_show_fdinfo(ctx, m);
9321 percpu_ref_put(&ctx->refs);
9322 }
9323}
bebdb65e 9324#endif
87ce955b 9325
2b188cc1
JA
9326static const struct file_operations io_uring_fops = {
9327 .release = io_uring_release,
fcb323cc 9328 .flush = io_uring_flush,
2b188cc1 9329 .mmap = io_uring_mmap,
6c5c240e
RP
9330#ifndef CONFIG_MMU
9331 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9332 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9333#endif
2b188cc1
JA
9334 .poll = io_uring_poll,
9335 .fasync = io_uring_fasync,
bebdb65e 9336#ifdef CONFIG_PROC_FS
87ce955b 9337 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 9338#endif
2b188cc1
JA
9339};
9340
9341static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9342 struct io_uring_params *p)
9343{
75b28aff
HV
9344 struct io_rings *rings;
9345 size_t size, sq_array_offset;
2b188cc1 9346
bd740481
JA
9347 /* make sure these are sane, as we already accounted them */
9348 ctx->sq_entries = p->sq_entries;
9349 ctx->cq_entries = p->cq_entries;
9350
75b28aff
HV
9351 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9352 if (size == SIZE_MAX)
9353 return -EOVERFLOW;
9354
9355 rings = io_mem_alloc(size);
9356 if (!rings)
2b188cc1
JA
9357 return -ENOMEM;
9358
75b28aff
HV
9359 ctx->rings = rings;
9360 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9361 rings->sq_ring_mask = p->sq_entries - 1;
9362 rings->cq_ring_mask = p->cq_entries - 1;
9363 rings->sq_ring_entries = p->sq_entries;
9364 rings->cq_ring_entries = p->cq_entries;
9365 ctx->sq_mask = rings->sq_ring_mask;
9366 ctx->cq_mask = rings->cq_ring_mask;
2b188cc1
JA
9367
9368 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
9369 if (size == SIZE_MAX) {
9370 io_mem_free(ctx->rings);
9371 ctx->rings = NULL;
2b188cc1 9372 return -EOVERFLOW;
eb065d30 9373 }
2b188cc1
JA
9374
9375 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
9376 if (!ctx->sq_sqes) {
9377 io_mem_free(ctx->rings);
9378 ctx->rings = NULL;
2b188cc1 9379 return -ENOMEM;
eb065d30 9380 }
2b188cc1 9381
2b188cc1
JA
9382 return 0;
9383}
9384
9faadcc8
PB
9385static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9386{
9387 int ret, fd;
9388
9389 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9390 if (fd < 0)
9391 return fd;
9392
9393 ret = io_uring_add_task_file(ctx, file);
9394 if (ret) {
9395 put_unused_fd(fd);
9396 return ret;
9397 }
9398 fd_install(fd, file);
9399 return fd;
9400}
9401
2b188cc1
JA
9402/*
9403 * Allocate an anonymous fd; this is what constitutes the application-
9404 * visible backing of an io_uring instance. The application mmaps this
9405 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9406 * we have to tie this fd to a socket for file garbage collection purposes.
9407 */
9faadcc8 9408static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
2b188cc1
JA
9409{
9410 struct file *file;
9faadcc8 9411#if defined(CONFIG_UNIX)
2b188cc1
JA
9412 int ret;
9413
2b188cc1
JA
9414 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9415 &ctx->ring_sock);
9416 if (ret)
9faadcc8 9417 return ERR_PTR(ret);
2b188cc1
JA
9418#endif
9419
2b188cc1
JA
9420 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9421 O_RDWR | O_CLOEXEC);
2b188cc1 9422#if defined(CONFIG_UNIX)
9faadcc8
PB
9423 if (IS_ERR(file)) {
9424 sock_release(ctx->ring_sock);
9425 ctx->ring_sock = NULL;
9426 } else {
9427 ctx->ring_sock->file = file;
0f212204 9428 }
2b188cc1 9429#endif
9faadcc8 9430 return file;
2b188cc1
JA
9431}
9432
7f13657d
XW
9433static int io_uring_create(unsigned entries, struct io_uring_params *p,
9434 struct io_uring_params __user *params)
2b188cc1 9435{
2b188cc1 9436 struct io_ring_ctx *ctx;
9faadcc8 9437 struct file *file;
2b188cc1
JA
9438 int ret;
9439
8110c1a6 9440 if (!entries)
2b188cc1 9441 return -EINVAL;
8110c1a6
JA
9442 if (entries > IORING_MAX_ENTRIES) {
9443 if (!(p->flags & IORING_SETUP_CLAMP))
9444 return -EINVAL;
9445 entries = IORING_MAX_ENTRIES;
9446 }
2b188cc1
JA
9447
9448 /*
9449 * Use twice as many entries for the CQ ring. It's possible for the
9450 * application to drive a higher depth than the size of the SQ ring,
9451 * since the sqes are only used at submission time. This allows for
33a107f0
JA
9452 * some flexibility in overcommitting a bit. If the application has
9453 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9454 * of CQ ring entries manually.
2b188cc1
JA
9455 */
9456 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
9457 if (p->flags & IORING_SETUP_CQSIZE) {
9458 /*
9459 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9460 * to a power-of-two, if it isn't already. We do NOT impose
9461 * any cq vs sq ring sizing.
9462 */
eb2667b3 9463 if (!p->cq_entries)
33a107f0 9464 return -EINVAL;
8110c1a6
JA
9465 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9466 if (!(p->flags & IORING_SETUP_CLAMP))
9467 return -EINVAL;
9468 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9469 }
eb2667b3
JQ
9470 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9471 if (p->cq_entries < p->sq_entries)
9472 return -EINVAL;
33a107f0
JA
9473 } else {
9474 p->cq_entries = 2 * p->sq_entries;
9475 }
2b188cc1 9476
2b188cc1 9477 ctx = io_ring_ctx_alloc(p);
62e398be 9478 if (!ctx)
2b188cc1 9479 return -ENOMEM;
2b188cc1 9480 ctx->compat = in_compat_syscall();
62e398be
JA
9481 if (!capable(CAP_IPC_LOCK))
9482 ctx->user = get_uid(current_user());
37d1e2e3 9483 ctx->sqo_task = current;
2aede0e4
JA
9484
9485 /*
9486 * This is just grabbed for accounting purposes. When a process exits,
9487 * the mm is exited and dropped before the files, hence we need to hang
9488 * on to this mm purely for the purposes of being able to unaccount
9489 * memory (locked/pinned vm). It's not used for anything else.
9490 */
6b7898eb 9491 mmgrab(current->mm);
2aede0e4 9492 ctx->mm_account = current->mm;
6b7898eb 9493
2b188cc1
JA
9494 ret = io_allocate_scq_urings(ctx, p);
9495 if (ret)
9496 goto err;
9497
7e84e1c7 9498 ret = io_sq_offload_create(ctx, p);
2b188cc1
JA
9499 if (ret)
9500 goto err;
9501
7e84e1c7
SG
9502 if (!(p->flags & IORING_SETUP_R_DISABLED))
9503 io_sq_offload_start(ctx);
9504
2b188cc1 9505 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
9506 p->sq_off.head = offsetof(struct io_rings, sq.head);
9507 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9508 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9509 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9510 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9511 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9512 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
9513
9514 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
9515 p->cq_off.head = offsetof(struct io_rings, cq.head);
9516 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9517 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9518 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9519 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9520 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 9521 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 9522
7f13657d
XW
9523 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9524 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351 9525 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
c73ebb68 9526 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
1c0aa1fa 9527 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
7f13657d
XW
9528
9529 if (copy_to_user(params, p, sizeof(*p))) {
9530 ret = -EFAULT;
9531 goto err;
9532 }
d1719f70 9533
9faadcc8
PB
9534 file = io_uring_get_file(ctx);
9535 if (IS_ERR(file)) {
9536 ret = PTR_ERR(file);
9537 goto err;
9538 }
9539
044c1ab3
JA
9540 /*
9541 * Install ring fd as the very last thing, so we don't risk someone
9542 * having closed it before we finish setup
9543 */
9faadcc8
PB
9544 ret = io_uring_install_fd(ctx, file);
9545 if (ret < 0) {
06585c49 9546 io_disable_sqo_submit(ctx);
9faadcc8
PB
9547 /* fput will clean it up */
9548 fput(file);
9549 return ret;
9550 }
044c1ab3 9551
c826bd7a 9552 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
9553 return ret;
9554err:
d9d05217 9555 io_disable_sqo_submit(ctx);
2b188cc1
JA
9556 io_ring_ctx_wait_and_kill(ctx);
9557 return ret;
9558}
9559
9560/*
9561 * Sets up an io_uring context and returns the fd. The application asks for a
9562 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9563 * params structure passed in.
9564 */
9565static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9566{
9567 struct io_uring_params p;
2b188cc1
JA
9568 int i;
9569
9570 if (copy_from_user(&p, params, sizeof(p)))
9571 return -EFAULT;
9572 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9573 if (p.resv[i])
9574 return -EINVAL;
9575 }
9576
6c271ce2 9577 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 9578 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
7e84e1c7
SG
9579 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9580 IORING_SETUP_R_DISABLED))
2b188cc1
JA
9581 return -EINVAL;
9582
7f13657d 9583 return io_uring_create(entries, &p, params);
2b188cc1
JA
9584}
9585
9586SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9587 struct io_uring_params __user *, params)
9588{
9589 return io_uring_setup(entries, params);
9590}
9591
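/*
 * Illustrative userspace sketch (not part of this file): asking for a larger
 * CQ ring than the default 2*SQ via IORING_SETUP_CQSIZE, with
 * IORING_SETUP_CLAMP so an over-sized request is clamped instead of rejected,
 * matching the sizing logic in io_uring_create() above. Error handling is
 * omitted.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_big_cq_example(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
	p.cq_entries = 4096;

	fd = syscall(__NR_io_uring_setup, 128, &p);
	/* p.sq_entries/p.cq_entries now hold the rounded-up actual sizes */
	return fd;
}
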
66f4af93
JA
9592static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9593{
9594 struct io_uring_probe *p;
9595 size_t size;
9596 int i, ret;
9597
9598 size = struct_size(p, ops, nr_args);
9599 if (size == SIZE_MAX)
9600 return -EOVERFLOW;
9601 p = kzalloc(size, GFP_KERNEL);
9602 if (!p)
9603 return -ENOMEM;
9604
9605 ret = -EFAULT;
9606 if (copy_from_user(p, arg, size))
9607 goto out;
9608 ret = -EINVAL;
9609 if (memchr_inv(p, 0, size))
9610 goto out;
9611
9612 p->last_op = IORING_OP_LAST - 1;
9613 if (nr_args > IORING_OP_LAST)
9614 nr_args = IORING_OP_LAST;
9615
9616 for (i = 0; i < nr_args; i++) {
9617 p->ops[i].op = i;
9618 if (!io_op_defs[i].not_supported)
9619 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9620 }
9621 p->ops_len = i;
9622
9623 ret = 0;
9624 if (copy_to_user(arg, p, size))
9625 ret = -EFAULT;
9626out:
9627 kfree(p);
9628 return ret;
9629}
9630
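/*
 * Illustrative userspace sketch (not part of this file): using
 * IORING_REGISTER_PROBE, handled by io_probe() above, to discover which
 * opcodes this kernel supports. Error handling is omitted.
 */
#include <linux/io_uring.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static void probe_ops_example(int ring_fd)
{
	size_t len = sizeof(struct io_uring_probe) +
		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *probe = calloc(1, len);
	int i;

	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		probe, IORING_OP_LAST);

	for (i = 0; i < probe->ops_len; i++)
		if (probe->ops[i].flags & IO_URING_OP_SUPPORTED)
			printf("op %d supported\n", probe->ops[i].op);
	free(probe);
}
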
071698e1
JA
9631static int io_register_personality(struct io_ring_ctx *ctx)
9632{
4379bf8b 9633 const struct cred *creds;
1e6fa521 9634 int ret;
071698e1 9635
4379bf8b 9636 creds = get_current_cred();
1e6fa521 9637
4379bf8b
JA
9638 ret = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
9639 USHRT_MAX, GFP_KERNEL);
9640 if (ret < 0)
9641 put_cred(creds);
1e6fa521 9642 return ret;
071698e1
JA
9643}
9644
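/*
 * Illustrative userspace sketch (not part of this file): registering the
 * current credentials as a personality, as done by io_register_personality()
 * above. The returned id can later be placed in sqe->personality so that an
 * individual request runs with those credentials.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_personality_example(int ring_fd)
{
	/* arg must be NULL and nr_args 0 for this opcode */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}
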
21b55dbc
SG
9645static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9646 unsigned int nr_args)
9647{
9648 struct io_uring_restriction *res;
9649 size_t size;
9650 int i, ret;
9651
7e84e1c7
SG
9652 /* Restrictions allowed only if rings started disabled */
9653 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9654 return -EBADFD;
9655
21b55dbc 9656 /* We allow only a single restrictions registration */
7e84e1c7 9657 if (ctx->restrictions.registered)
21b55dbc
SG
9658 return -EBUSY;
9659
9660 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9661 return -EINVAL;
9662
9663 size = array_size(nr_args, sizeof(*res));
9664 if (size == SIZE_MAX)
9665 return -EOVERFLOW;
9666
9667 res = memdup_user(arg, size);
9668 if (IS_ERR(res))
9669 return PTR_ERR(res);
9670
9671 ret = 0;
9672
9673 for (i = 0; i < nr_args; i++) {
9674 switch (res[i].opcode) {
9675 case IORING_RESTRICTION_REGISTER_OP:
9676 if (res[i].register_op >= IORING_REGISTER_LAST) {
9677 ret = -EINVAL;
9678 goto out;
9679 }
9680
9681 __set_bit(res[i].register_op,
9682 ctx->restrictions.register_op);
9683 break;
9684 case IORING_RESTRICTION_SQE_OP:
9685 if (res[i].sqe_op >= IORING_OP_LAST) {
9686 ret = -EINVAL;
9687 goto out;
9688 }
9689
9690 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9691 break;
9692 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9693 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9694 break;
9695 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9696 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9697 break;
9698 default:
9699 ret = -EINVAL;
9700 goto out;
9701 }
9702 }
9703
9704out:
9705 /* Reset all restrictions if an error happened */
9706 if (ret != 0)
9707 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9708 else
7e84e1c7 9709 ctx->restrictions.registered = true;
21b55dbc
SG
9710
9711 kfree(res);
9712 return ret;
9713}
9714
7e84e1c7
SG
9715static int io_register_enable_rings(struct io_ring_ctx *ctx)
9716{
9717 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9718 return -EBADFD;
9719
9720 if (ctx->restrictions.registered)
9721 ctx->restricted = 1;
9722
9723 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9724
9725 io_sq_offload_start(ctx);
9726
9727 return 0;
9728}
9729
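/*
 * Illustrative userspace sketch (not part of this file): setting up a ring
 * with IORING_SETUP_R_DISABLED, restricting it to a whitelist of SQE opcodes
 * via IORING_REGISTER_RESTRICTIONS (io_register_restrictions() above), and
 * only then enabling it with IORING_REGISTER_ENABLE_RINGS. Error handling is
 * omitted.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int restricted_ring_example(void)
{
	struct io_uring_params p;
	struct io_uring_restriction res[2];
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_R_DISABLED;
	fd = syscall(__NR_io_uring_setup, 64, &p);

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_READV;
	res[1].opcode = IORING_RESTRICTION_SQE_OP;
	res[1].sqe_op = IORING_OP_WRITEV;

	/* restrictions may only be registered while the ring is disabled */
	syscall(__NR_io_uring_register, fd, IORING_REGISTER_RESTRICTIONS,
		res, 2);
	syscall(__NR_io_uring_register, fd, IORING_REGISTER_ENABLE_RINGS,
		NULL, 0);
	return fd;
}
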
071698e1
JA
9730static bool io_register_op_must_quiesce(int op)
9731{
9732 switch (op) {
9733 case IORING_UNREGISTER_FILES:
9734 case IORING_REGISTER_FILES_UPDATE:
9735 case IORING_REGISTER_PROBE:
9736 case IORING_REGISTER_PERSONALITY:
9737 case IORING_UNREGISTER_PERSONALITY:
9738 return false;
9739 default:
9740 return true;
9741 }
9742}
9743
edafccee
JA
9744static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9745 void __user *arg, unsigned nr_args)
b19062a5
JA
9746 __releases(ctx->uring_lock)
9747 __acquires(ctx->uring_lock)
edafccee
JA
9748{
9749 int ret;
9750
35fa71a0
JA
9751 /*
9752 * We're inside the ring mutex; if the ref is already dying, then
9753 * someone else killed the ctx or is already going through
9754 * io_uring_register().
9755 */
9756 if (percpu_ref_is_dying(&ctx->refs))
9757 return -ENXIO;
9758
071698e1 9759 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 9760 percpu_ref_kill(&ctx->refs);
b19062a5 9761
05f3fb3c
JA
9762 /*
9763 * Drop uring mutex before waiting for references to exit. If
9764 * another thread is currently inside io_uring_enter() it might
9765 * need to grab the uring_lock to make progress. If we hold it
9766 * here across the drain wait, then we can deadlock. It's safe
9767 * to drop the mutex here, since no new references will come in
9768 * after we've killed the percpu ref.
9769 */
9770 mutex_unlock(&ctx->uring_lock);
af9c1a44
JA
9771 do {
9772 ret = wait_for_completion_interruptible(&ctx->ref_comp);
9773 if (!ret)
9774 break;
ed6930c9
JA
9775 ret = io_run_task_work_sig();
9776 if (ret < 0)
9777 break;
af9c1a44
JA
9778 } while (1);
9779
05f3fb3c 9780 mutex_lock(&ctx->uring_lock);
af9c1a44 9781
c150368b
JA
9782 if (ret) {
9783 percpu_ref_resurrect(&ctx->refs);
21b55dbc
SG
9784 goto out_quiesce;
9785 }
9786 }
9787
9788 if (ctx->restricted) {
9789 if (opcode >= IORING_REGISTER_LAST) {
9790 ret = -EINVAL;
9791 goto out;
9792 }
9793
9794 if (!test_bit(opcode, ctx->restrictions.register_op)) {
9795 ret = -EACCES;
c150368b
JA
9796 goto out;
9797 }
05f3fb3c 9798 }
edafccee
JA
9799
9800 switch (opcode) {
9801 case IORING_REGISTER_BUFFERS:
0a96bbe4 9802 ret = io_sqe_buffers_register(ctx, arg, nr_args);
edafccee
JA
9803 break;
9804 case IORING_UNREGISTER_BUFFERS:
9805 ret = -EINVAL;
9806 if (arg || nr_args)
9807 break;
0a96bbe4 9808 ret = io_sqe_buffers_unregister(ctx);
edafccee 9809 break;
6b06314c
JA
9810 case IORING_REGISTER_FILES:
9811 ret = io_sqe_files_register(ctx, arg, nr_args);
9812 break;
9813 case IORING_UNREGISTER_FILES:
9814 ret = -EINVAL;
9815 if (arg || nr_args)
9816 break;
9817 ret = io_sqe_files_unregister(ctx);
9818 break;
c3a31e60
JA
9819 case IORING_REGISTER_FILES_UPDATE:
9820 ret = io_sqe_files_update(ctx, arg, nr_args);
9821 break;
9b402849 9822 case IORING_REGISTER_EVENTFD:
f2842ab5 9823 case IORING_REGISTER_EVENTFD_ASYNC:
9b402849
JA
9824 ret = -EINVAL;
9825 if (nr_args != 1)
9826 break;
9827 ret = io_eventfd_register(ctx, arg);
f2842ab5
JA
9828 if (ret)
9829 break;
9830 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
9831 ctx->eventfd_async = 1;
9832 else
9833 ctx->eventfd_async = 0;
9b402849
JA
9834 break;
9835 case IORING_UNREGISTER_EVENTFD:
9836 ret = -EINVAL;
9837 if (arg || nr_args)
9838 break;
9839 ret = io_eventfd_unregister(ctx);
9840 break;
66f4af93
JA
9841 case IORING_REGISTER_PROBE:
9842 ret = -EINVAL;
9843 if (!arg || nr_args > 256)
9844 break;
9845 ret = io_probe(ctx, arg, nr_args);
9846 break;
071698e1
JA
9847 case IORING_REGISTER_PERSONALITY:
9848 ret = -EINVAL;
9849 if (arg || nr_args)
9850 break;
9851 ret = io_register_personality(ctx);
9852 break;
9853 case IORING_UNREGISTER_PERSONALITY:
9854 ret = -EINVAL;
9855 if (arg)
9856 break;
9857 ret = io_unregister_personality(ctx, nr_args);
9858 break;
7e84e1c7
SG
9859 case IORING_REGISTER_ENABLE_RINGS:
9860 ret = -EINVAL;
9861 if (arg || nr_args)
9862 break;
9863 ret = io_register_enable_rings(ctx);
9864 break;
21b55dbc
SG
9865 case IORING_REGISTER_RESTRICTIONS:
9866 ret = io_register_restrictions(ctx, arg, nr_args);
9867 break;
edafccee
JA
9868 default:
9869 ret = -EINVAL;
9870 break;
9871 }
9872
21b55dbc 9873out:
071698e1 9874 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 9875 /* bring the ctx back to life */
05f3fb3c 9876 percpu_ref_reinit(&ctx->refs);
21b55dbc 9877out_quiesce:
0f158b4c 9878 reinit_completion(&ctx->ref_comp);
05f3fb3c 9879 }
edafccee
JA
9880 return ret;
9881}
9882
9883SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
9884 void __user *, arg, unsigned int, nr_args)
9885{
9886 struct io_ring_ctx *ctx;
9887 long ret = -EBADF;
9888 struct fd f;
9889
9890 f = fdget(fd);
9891 if (!f.file)
9892 return -EBADF;
9893
9894 ret = -EOPNOTSUPP;
9895 if (f.file->f_op != &io_uring_fops)
9896 goto out_fput;
9897
9898 ctx = f.file->private_data;
9899
b6c23dd5
PB
9900 io_run_task_work();
9901
edafccee
JA
9902 mutex_lock(&ctx->uring_lock);
9903 ret = __io_uring_register(ctx, opcode, arg, nr_args);
9904 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
9905 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
9906 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
9907out_fput:
9908 fdput(f);
9909 return ret;
9910}
9911
2b188cc1
JA
9912static int __init io_uring_init(void)
9913{
d7f62e82
SM
9914#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
9915 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
9916 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
9917} while (0)
9918
9919#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
9920 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
9921 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
9922 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
9923 BUILD_BUG_SQE_ELEM(1, __u8, flags);
9924 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
9925 BUILD_BUG_SQE_ELEM(4, __s32, fd);
9926 BUILD_BUG_SQE_ELEM(8, __u64, off);
9927 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
9928 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 9929 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
9930 BUILD_BUG_SQE_ELEM(24, __u32, len);
9931 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
9932 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
9933 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
9934 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
5769a351
JX
9935 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
9936 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
d7f62e82
SM
9937 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
9938 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
9939 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
9940 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
9941 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
9942 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
9943 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
9944 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 9945 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
9946 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
9947 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
9948 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 9949 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
d7f62e82 9950
d3656344 9951 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
84557871 9952 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
91f245d5
JA
9953 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
9954 SLAB_ACCOUNT);
2b188cc1
JA
9955 return 0;
9956};
9957__initcall(io_uring_init);