]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - fs/io_uring.c
io_uring: kill fictitious submit iteration index
[mirror_ubuntu-jammy-kernel.git] / fs / io_uring.c
CommitLineData
2b188cc1
JA
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
1e84b97b
SB
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
2b188cc1
JA
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
c992fe29 40 * Copyright (c) 2018-2019 Christoph Hellwig
2b188cc1
JA
41 */
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
52de1fe1 47#include <net/compat.h>
2b188cc1
JA
48#include <linux/refcount.h>
49#include <linux/uio.h>
6b47ee6e 50#include <linux/bits.h>
2b188cc1
JA
51
52#include <linux/sched/signal.h>
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/fdtable.h>
56#include <linux/mm.h>
57#include <linux/mman.h>
2b188cc1
JA
58#include <linux/percpu.h>
59#include <linux/slab.h>
6c271ce2 60#include <linux/kthread.h>
2b188cc1 61#include <linux/blkdev.h>
edafccee 62#include <linux/bvec.h>
2b188cc1
JA
63#include <linux/net.h>
64#include <net/sock.h>
65#include <net/af_unix.h>
6b06314c 66#include <net/scm.h>
2b188cc1
JA
67#include <linux/anon_inodes.h>
68#include <linux/sched/mm.h>
69#include <linux/uaccess.h>
70#include <linux/nospec.h>
edafccee
JA
71#include <linux/sizes.h>
72#include <linux/hugetlb.h>
aa4c3967 73#include <linux/highmem.h>
15b71abe
JA
74#include <linux/namei.h>
75#include <linux/fsnotify.h>
4840e418 76#include <linux/fadvise.h>
3e4827b0 77#include <linux/eventpoll.h>
ff002b30 78#include <linux/fs_struct.h>
7d67af2c 79#include <linux/splice.h>
b41e9852 80#include <linux/task_work.h>
bcf5a063 81#include <linux/pagemap.h>
0f212204 82#include <linux/io_uring.h>
91d8f519 83#include <linux/blk-cgroup.h>
4ea33a97 84#include <linux/audit.h>
2b188cc1 85
c826bd7a
DD
86#define CREATE_TRACE_POINTS
87#include <trace/events/io_uring.h>
88
2b188cc1
JA
89#include <uapi/linux/io_uring.h>
90
91#include "internal.h"
561fb04a 92#include "io-wq.h"
2b188cc1 93
5277deaa 94#define IORING_MAX_ENTRIES 32768
33a107f0 95#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
65e19f54
JA
96
97/*
98 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
99 */
100#define IORING_FILE_TABLE_SHIFT 9
101#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
102#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
103#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
21b55dbc
SG
104#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
105 IORING_REGISTER_LAST + IORING_OP_LAST)
2b188cc1
JA
106
107struct io_uring {
108 u32 head ____cacheline_aligned_in_smp;
109 u32 tail ____cacheline_aligned_in_smp;
110};
111
1e84b97b 112/*
75b28aff
HV
113 * This data is shared with the application through the mmap at offsets
114 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
1e84b97b
SB
115 *
116 * The offsets to the member fields are published through struct
117 * io_sqring_offsets when calling io_uring_setup.
118 */
75b28aff 119struct io_rings {
1e84b97b
SB
120 /*
121 * Head and tail offsets into the ring; the offsets need to be
122 * masked to get valid indices.
123 *
75b28aff
HV
124 * The kernel controls head of the sq ring and the tail of the cq ring,
125 * and the application controls tail of the sq ring and the head of the
126 * cq ring.
1e84b97b 127 */
75b28aff 128 struct io_uring sq, cq;
1e84b97b 129 /*
75b28aff 130 * Bitmasks to apply to head and tail offsets (constant, equals
1e84b97b
SB
131 * ring_entries - 1)
132 */
75b28aff
HV
133 u32 sq_ring_mask, cq_ring_mask;
134 /* Ring sizes (constant, power of 2) */
135 u32 sq_ring_entries, cq_ring_entries;
1e84b97b
SB
136 /*
137 * Number of invalid entries dropped by the kernel due to
138 * invalid index stored in array
139 *
140 * Written by the kernel, shouldn't be modified by the
141 * application (i.e. get number of "new events" by comparing to
142 * cached value).
143 *
144 * After a new SQ head value was read by the application this
145 * counter includes all submissions that were dropped reaching
146 * the new SQ head (and possibly more).
147 */
75b28aff 148 u32 sq_dropped;
1e84b97b 149 /*
0d9b5b3a 150 * Runtime SQ flags
1e84b97b
SB
151 *
152 * Written by the kernel, shouldn't be modified by the
153 * application.
154 *
155 * The application needs a full memory barrier before checking
156 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
157 */
75b28aff 158 u32 sq_flags;
0d9b5b3a
SG
159 /*
160 * Runtime CQ flags
161 *
162 * Written by the application, shouldn't be modified by the
163 * kernel.
164 */
165 u32 cq_flags;
1e84b97b
SB
166 /*
167 * Number of completion events lost because the queue was full;
168 * this should be avoided by the application by making sure
0b4295b5 169 * there are not more requests pending than there is space in
1e84b97b
SB
170 * the completion queue.
171 *
172 * Written by the kernel, shouldn't be modified by the
173 * application (i.e. get number of "new events" by comparing to
174 * cached value).
175 *
176 * As completion events come in out of order this counter is not
177 * ordered with any other data.
178 */
75b28aff 179 u32 cq_overflow;
1e84b97b
SB
180 /*
181 * Ring buffer of completion events.
182 *
183 * The kernel writes completion events fresh every time they are
184 * produced, so the application is allowed to modify pending
185 * entries.
186 */
75b28aff 187 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
2b188cc1
JA
188};
189
45d189c6
PB
190enum io_uring_cmd_flags {
191 IO_URING_F_NONBLOCK = 1,
889fca73 192 IO_URING_F_COMPLETE_DEFER = 2,
45d189c6
PB
193};
194
edafccee
JA
195struct io_mapped_ubuf {
196 u64 ubuf;
197 size_t len;
198 struct bio_vec *bvec;
199 unsigned int nr_bvecs;
de293938 200 unsigned long acct_pages;
edafccee
JA
201};
202
50238531
BM
203struct io_ring_ctx;
204
269bbe5f
BM
205struct io_rsrc_put {
206 struct list_head list;
50238531
BM
207 union {
208 void *rsrc;
209 struct file *file;
210 };
269bbe5f
BM
211};
212
213struct fixed_rsrc_table {
65e19f54 214 struct file **files;
31b51510
JA
215};
216
269bbe5f 217struct fixed_rsrc_ref_node {
05589553
XW
218 struct percpu_ref refs;
219 struct list_head node;
269bbe5f
BM
220 struct list_head rsrc_list;
221 struct fixed_rsrc_data *rsrc_data;
50238531
BM
222 void (*rsrc_put)(struct io_ring_ctx *ctx,
223 struct io_rsrc_put *prsrc);
4a38aed2 224 struct llist_node llist;
e297822b 225 bool done;
05589553
XW
226};
227
269bbe5f
BM
228struct fixed_rsrc_data {
229 struct fixed_rsrc_table *table;
05f3fb3c
JA
230 struct io_ring_ctx *ctx;
231
269bbe5f 232 struct fixed_rsrc_ref_node *node;
05f3fb3c 233 struct percpu_ref refs;
05f3fb3c
JA
234 struct completion done;
235};
236
5a2e745d
JA
237struct io_buffer {
238 struct list_head list;
239 __u64 addr;
240 __s32 len;
241 __u16 bid;
242};
243
21b55dbc
SG
244struct io_restriction {
245 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
246 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
247 u8 sqe_flags_allowed;
248 u8 sqe_flags_required;
7e84e1c7 249 bool registered;
21b55dbc
SG
250};
251
534ca6d6
JA
252struct io_sq_data {
253 refcount_t refs;
69fb2131
JA
254 struct mutex lock;
255
256 /* ctx's that are using this sqd */
257 struct list_head ctx_list;
258 struct list_head ctx_new_list;
259 struct mutex ctx_lock;
260
534ca6d6
JA
261 struct task_struct *thread;
262 struct wait_queue_head wait;
08369246
XW
263
264 unsigned sq_thread_idle;
534ca6d6
JA
265};
266
258b29a9 267#define IO_IOPOLL_BATCH 8
6dd0be1e 268#define IO_COMPL_BATCH 32
6ff119a6 269#define IO_REQ_CACHE_SIZE 32
bf019da7 270#define IO_REQ_ALLOC_BATCH 8
258b29a9
PB
271
272struct io_comp_state {
6dd0be1e 273 struct io_kiocb *reqs[IO_COMPL_BATCH];
1b4c351f 274 unsigned int nr;
c7dae4ba
JA
275 unsigned int locked_free_nr;
276 /* inline/task_work completion list, under ->uring_lock */
1b4c351f 277 struct list_head free_list;
c7dae4ba
JA
278 /* IRQ completion list, under ->completion_lock */
279 struct list_head locked_free_list;
258b29a9
PB
280};
281
282struct io_submit_state {
283 struct blk_plug plug;
284
285 /*
286 * io_kiocb alloc cache
287 */
bf019da7 288 void *reqs[IO_REQ_CACHE_SIZE];
258b29a9
PB
289 unsigned int free_reqs;
290
291 bool plug_started;
292
293 /*
294 * Batch completion logic
295 */
296 struct io_comp_state comp;
297
298 /*
299 * File reference cache
300 */
301 struct file *file;
302 unsigned int fd;
303 unsigned int file_refs;
304 unsigned int ios_left;
305};
306
2b188cc1
JA
307struct io_ring_ctx {
308 struct {
309 struct percpu_ref refs;
310 } ____cacheline_aligned_in_smp;
311
312 struct {
313 unsigned int flags;
e1d85334 314 unsigned int compat: 1;
aad5d8da 315 unsigned int limit_mem: 1;
e1d85334
RD
316 unsigned int cq_overflow_flushed: 1;
317 unsigned int drain_next: 1;
318 unsigned int eventfd_async: 1;
21b55dbc 319 unsigned int restricted: 1;
d9d05217 320 unsigned int sqo_dead: 1;
2b188cc1 321
75b28aff
HV
322 /*
323 * Ring buffer of indices into array of io_uring_sqe, which is
324 * mmapped by the application using the IORING_OFF_SQES offset.
325 *
326 * This indirection could e.g. be used to assign fixed
327 * io_uring_sqe entries to operations and only submit them to
328 * the queue when needed.
329 *
330 * The kernel modifies neither the indices array nor the entries
331 * array.
332 */
333 u32 *sq_array;
2b188cc1
JA
334 unsigned cached_sq_head;
335 unsigned sq_entries;
336 unsigned sq_mask;
6c271ce2 337 unsigned sq_thread_idle;
498ccd9e 338 unsigned cached_sq_dropped;
2c3bac6d 339 unsigned cached_cq_overflow;
ad3eb2c8 340 unsigned long sq_check_overflow;
de0617e4
JA
341
342 struct list_head defer_list;
5262f567 343 struct list_head timeout_list;
1d7bb1d5 344 struct list_head cq_overflow_list;
fcb323cc 345
ad3eb2c8 346 struct io_uring_sqe *sq_sqes;
2b188cc1
JA
347 } ____cacheline_aligned_in_smp;
348
3c1a2ead
JA
349 struct {
350 struct mutex uring_lock;
351 wait_queue_head_t wait;
352 } ____cacheline_aligned_in_smp;
353
354 struct io_submit_state submit_state;
355
206aefde
JA
356 struct io_rings *rings;
357
2b188cc1 358 /* IO offload */
561fb04a 359 struct io_wq *io_wq;
2aede0e4
JA
360
361 /*
362 * For SQPOLL usage - we hold a reference to the parent task, so we
363 * have access to the ->files
364 */
365 struct task_struct *sqo_task;
366
367 /* Only used for accounting purposes */
368 struct mm_struct *mm_account;
369
91d8f519
DZ
370#ifdef CONFIG_BLK_CGROUP
371 struct cgroup_subsys_state *sqo_blkcg_css;
372#endif
373
534ca6d6
JA
374 struct io_sq_data *sq_data; /* if using sq thread polling */
375
90554200 376 struct wait_queue_head sqo_sq_wait;
69fb2131 377 struct list_head sqd_list;
75b28aff 378
6b06314c
JA
379 /*
380 * If used, fixed file set. Writers must ensure that ->refs is dead,
381 * readers must ensure that ->refs is alive as long as the file* is
382 * used. Only updated through io_uring_register(2).
383 */
269bbe5f 384 struct fixed_rsrc_data *file_data;
6b06314c
JA
385 unsigned nr_user_files;
386
edafccee
JA
387 /* if used, fixed mapped user buffers */
388 unsigned nr_user_bufs;
389 struct io_mapped_ubuf *user_bufs;
390
2b188cc1
JA
391 struct user_struct *user;
392
0b8c0ec7 393 const struct cred *creds;
181e448d 394
4ea33a97
JA
395#ifdef CONFIG_AUDIT
396 kuid_t loginuid;
397 unsigned int sessionid;
398#endif
399
0f158b4c
JA
400 struct completion ref_comp;
401 struct completion sq_thread_comp;
206aefde
JA
402
403#if defined(CONFIG_UNIX)
404 struct socket *ring_sock;
405#endif
406
5a2e745d
JA
407 struct idr io_buffer_idr;
408
071698e1
JA
409 struct idr personality_idr;
410
206aefde
JA
411 struct {
412 unsigned cached_cq_tail;
413 unsigned cq_entries;
414 unsigned cq_mask;
415 atomic_t cq_timeouts;
f010505b 416 unsigned cq_last_tm_flush;
ad3eb2c8 417 unsigned long cq_check_overflow;
206aefde
JA
418 struct wait_queue_head cq_wait;
419 struct fasync_struct *cq_fasync;
420 struct eventfd_ctx *cq_ev_fd;
421 } ____cacheline_aligned_in_smp;
2b188cc1 422
2b188cc1
JA
423 struct {
424 spinlock_t completion_lock;
e94f141b 425
def596e9 426 /*
540e32a0 427 * ->iopoll_list is protected by the ctx->uring_lock for
def596e9
JA
428 * io_uring instances that don't use IORING_SETUP_SQPOLL.
429 * For SQPOLL, only the single threaded io_sq_thread() will
430 * manipulate the list, hence no extra locking is needed there.
431 */
540e32a0 432 struct list_head iopoll_list;
78076bb6
JA
433 struct hlist_head *cancel_hash;
434 unsigned cancel_hash_bits;
e94f141b 435 bool poll_multi_file;
31b51510 436
fcb323cc
JA
437 spinlock_t inflight_lock;
438 struct list_head inflight_list;
2b188cc1 439 } ____cacheline_aligned_in_smp;
85faa7b8 440
269bbe5f
BM
441 struct delayed_work rsrc_put_work;
442 struct llist_head rsrc_put_llist;
d67d2263
BM
443 struct list_head rsrc_ref_list;
444 spinlock_t rsrc_ref_lock;
4a38aed2 445
21b55dbc 446 struct io_restriction restrictions;
3c1a2ead
JA
447
448 /* Keep this last, we don't need it for the fast path */
449 struct work_struct exit_work;
2b188cc1
JA
450};
451
09bb8394
JA
452/*
453 * First field must be the file pointer in all the
454 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
455 */
221c5eb2
JA
456struct io_poll_iocb {
457 struct file *file;
018043be 458 struct wait_queue_head *head;
221c5eb2 459 __poll_t events;
8c838788 460 bool done;
221c5eb2 461 bool canceled;
392edb45 462 struct wait_queue_entry wait;
221c5eb2
JA
463};
464
018043be
PB
465struct io_poll_remove {
466 struct file *file;
467 u64 addr;
468};
469
b5dba59e
JA
470struct io_close {
471 struct file *file;
b5dba59e
JA
472 int fd;
473};
474
ad8a48ac
JA
475struct io_timeout_data {
476 struct io_kiocb *req;
477 struct hrtimer timer;
478 struct timespec64 ts;
479 enum hrtimer_mode mode;
480};
481
8ed8d3c3
JA
482struct io_accept {
483 struct file *file;
484 struct sockaddr __user *addr;
485 int __user *addr_len;
486 int flags;
09952e3e 487 unsigned long nofile;
8ed8d3c3
JA
488};
489
490struct io_sync {
491 struct file *file;
492 loff_t len;
493 loff_t off;
494 int flags;
d63d1b5e 495 int mode;
8ed8d3c3
JA
496};
497
fbf23849
JA
498struct io_cancel {
499 struct file *file;
500 u64 addr;
501};
502
b29472ee
JA
503struct io_timeout {
504 struct file *file;
bfe68a22
PB
505 u32 off;
506 u32 target_seq;
135fcde8 507 struct list_head list;
90cd7e42
PB
508 /* head of the link, used by linked timeouts only */
509 struct io_kiocb *head;
b29472ee
JA
510};
511
0bdf7a2d
PB
512struct io_timeout_rem {
513 struct file *file;
514 u64 addr;
9c8e11b3
PB
515
516 /* timeout update */
517 struct timespec64 ts;
518 u32 flags;
0bdf7a2d
PB
519};
520
9adbd45d
JA
521struct io_rw {
522 /* NOTE: kiocb has the file as the first member, so don't do it here */
523 struct kiocb kiocb;
524 u64 addr;
525 u64 len;
526};
527
3fbb51c1
JA
528struct io_connect {
529 struct file *file;
530 struct sockaddr __user *addr;
531 int addr_len;
532};
533
e47293fd
JA
534struct io_sr_msg {
535 struct file *file;
fddaface 536 union {
270a5940 537 struct user_msghdr __user *umsg;
fddaface
JA
538 void __user *buf;
539 };
e47293fd 540 int msg_flags;
bcda7baa 541 int bgid;
fddaface 542 size_t len;
bcda7baa 543 struct io_buffer *kbuf;
e47293fd
JA
544};
545
15b71abe
JA
546struct io_open {
547 struct file *file;
548 int dfd;
15b71abe 549 struct filename *filename;
c12cedf2 550 struct open_how how;
4022e7af 551 unsigned long nofile;
15b71abe
JA
552};
553
269bbe5f 554struct io_rsrc_update {
05f3fb3c
JA
555 struct file *file;
556 u64 arg;
557 u32 nr_args;
558 u32 offset;
559};
560
4840e418
JA
561struct io_fadvise {
562 struct file *file;
563 u64 offset;
564 u32 len;
565 u32 advice;
566};
567
c1ca757b
JA
568struct io_madvise {
569 struct file *file;
570 u64 addr;
571 u32 len;
572 u32 advice;
573};
574
3e4827b0
JA
575struct io_epoll {
576 struct file *file;
577 int epfd;
578 int op;
579 int fd;
580 struct epoll_event event;
e47293fd
JA
581};
582
7d67af2c
PB
583struct io_splice {
584 struct file *file_out;
585 struct file *file_in;
586 loff_t off_out;
587 loff_t off_in;
588 u64 len;
589 unsigned int flags;
590};
591
ddf0322d
JA
592struct io_provide_buf {
593 struct file *file;
594 __u64 addr;
595 __s32 len;
596 __u32 bgid;
597 __u16 nbufs;
598 __u16 bid;
599};
600
1d9e1288
BM
601struct io_statx {
602 struct file *file;
603 int dfd;
604 unsigned int mask;
605 unsigned int flags;
e62753e4 606 const char __user *filename;
1d9e1288
BM
607 struct statx __user *buffer;
608};
609
36f4fa68
JA
610struct io_shutdown {
611 struct file *file;
612 int how;
613};
614
80a261fd
JA
615struct io_rename {
616 struct file *file;
617 int old_dfd;
618 int new_dfd;
619 struct filename *oldpath;
620 struct filename *newpath;
621 int flags;
622};
623
14a1143b
JA
624struct io_unlink {
625 struct file *file;
626 int dfd;
627 int flags;
628 struct filename *filename;
629};
630
3ca405eb
PB
631struct io_completion {
632 struct file *file;
633 struct list_head list;
0f7e466b 634 int cflags;
3ca405eb
PB
635};
636
f499a021
JA
637struct io_async_connect {
638 struct sockaddr_storage address;
639};
640
03b1230c
JA
641struct io_async_msghdr {
642 struct iovec fast_iov[UIO_FASTIOV];
257e84a5
PB
643 /* points to an allocated iov, if NULL we use fast_iov instead */
644 struct iovec *free_iov;
03b1230c
JA
645 struct sockaddr __user *uaddr;
646 struct msghdr msg;
b537916c 647 struct sockaddr_storage addr;
03b1230c
JA
648};
649
f67676d1
JA
650struct io_async_rw {
651 struct iovec fast_iov[UIO_FASTIOV];
ff6165b2
JA
652 const struct iovec *free_iovec;
653 struct iov_iter iter;
227c0c96 654 size_t bytes_done;
bcf5a063 655 struct wait_page_queue wpq;
f67676d1
JA
656};
657
6b47ee6e
PB
658enum {
659 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
660 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
661 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
662 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
663 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
bcda7baa 664 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
6b47ee6e 665
6b47ee6e
PB
666 REQ_F_FAIL_LINK_BIT,
667 REQ_F_INFLIGHT_BIT,
668 REQ_F_CUR_POS_BIT,
669 REQ_F_NOWAIT_BIT,
6b47ee6e 670 REQ_F_LINK_TIMEOUT_BIT,
6b47ee6e 671 REQ_F_ISREG_BIT,
99bc4c38 672 REQ_F_NEED_CLEANUP_BIT,
d7718a9d 673 REQ_F_POLLED_BIT,
bcda7baa 674 REQ_F_BUFFER_SELECTED_BIT,
5b0bbee4 675 REQ_F_NO_FILE_TABLE_BIT,
7cdaf587 676 REQ_F_WORK_INITIALIZED_BIT,
900fad45 677 REQ_F_LTIMEOUT_ACTIVE_BIT,
e342c807 678 REQ_F_COMPLETE_INLINE_BIT,
84557871
JA
679
680 /* not a real bit, just to check we're not overflowing the space */
681 __REQ_F_LAST_BIT,
6b47ee6e
PB
682};
683
684enum {
685 /* ctx owns file */
686 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
687 /* drain existing IO first */
688 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
689 /* linked sqes */
690 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
691 /* doesn't sever on completion < 0 */
692 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
693 /* IOSQE_ASYNC */
694 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
bcda7baa
JA
695 /* IOSQE_BUFFER_SELECT */
696 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
6b47ee6e 697
6b47ee6e
PB
698 /* fail rest of links */
699 REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
700 /* on inflight list */
701 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
702 /* read/write uses file position */
703 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
704 /* must not punt to workers */
705 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
900fad45 706 /* has or had linked timeout */
6b47ee6e 707 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
6b47ee6e
PB
708 /* regular file */
709 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
99bc4c38
PB
710 /* needs cleanup */
711 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
d7718a9d
JA
712 /* already went through poll handler */
713 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
bcda7baa
JA
714 /* buffer already selected */
715 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
5b0bbee4
JA
716 /* doesn't need file table for this request */
717 REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
7cdaf587
XW
718 /* io_wq_work is initialized */
719 REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
900fad45
PB
720 /* linked timeout is active, i.e. prepared by link's head */
721 REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
e342c807
PB
722 /* completion is deferred through io_comp_state */
723 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
d7718a9d
JA
724};
725
726struct async_poll {
727 struct io_poll_iocb poll;
807abcb0 728 struct io_poll_iocb *double_poll;
6b47ee6e
PB
729};
730
7cbf1722
JA
731struct io_task_work {
732 struct io_wq_work_node node;
733 task_work_func_t func;
734};
735
09bb8394
JA
736/*
737 * NOTE! Each of the iocb union members has the file pointer
738 * as the first entry in their struct definition. So you can
739 * access the file pointer through any of the sub-structs,
740 * or directly as just 'ki_filp' in this struct.
741 */
2b188cc1 742struct io_kiocb {
221c5eb2 743 union {
09bb8394 744 struct file *file;
9adbd45d 745 struct io_rw rw;
221c5eb2 746 struct io_poll_iocb poll;
018043be 747 struct io_poll_remove poll_remove;
8ed8d3c3
JA
748 struct io_accept accept;
749 struct io_sync sync;
fbf23849 750 struct io_cancel cancel;
b29472ee 751 struct io_timeout timeout;
0bdf7a2d 752 struct io_timeout_rem timeout_rem;
3fbb51c1 753 struct io_connect connect;
e47293fd 754 struct io_sr_msg sr_msg;
15b71abe 755 struct io_open open;
b5dba59e 756 struct io_close close;
269bbe5f 757 struct io_rsrc_update rsrc_update;
4840e418 758 struct io_fadvise fadvise;
c1ca757b 759 struct io_madvise madvise;
3e4827b0 760 struct io_epoll epoll;
7d67af2c 761 struct io_splice splice;
ddf0322d 762 struct io_provide_buf pbuf;
1d9e1288 763 struct io_statx statx;
36f4fa68 764 struct io_shutdown shutdown;
80a261fd 765 struct io_rename rename;
14a1143b 766 struct io_unlink unlink;
3ca405eb
PB
767 /* use only after cleaning per-op data, see io_clean_op() */
768 struct io_completion compl;
221c5eb2 769 };
2b188cc1 770
e8c2bc1f
JA
771 /* opcode allocated if it needs to store data for async defer */
772 void *async_data;
d625c6ee 773 u8 opcode;
65a6543d
XW
774 /* polled IO has completed */
775 u8 iopoll_completed;
2b188cc1 776
4f4eeba8 777 u16 buf_index;
9cf7c104 778 u32 result;
4f4eeba8 779
010e8e6b
PB
780 struct io_ring_ctx *ctx;
781 unsigned int flags;
782 refcount_t refs;
783 struct task_struct *task;
784 u64 user_data;
d7718a9d 785
f2f87370 786 struct io_kiocb *link;
269bbe5f 787 struct percpu_ref *fixed_rsrc_refs;
fcb323cc 788
d21ffe7e
PB
789 /*
790 * 1. used with ctx->iopoll_list with reads/writes
791 * 2. to track reqs with ->files (see io_op_def::file_table)
792 */
010e8e6b 793 struct list_head inflight_entry;
7cbf1722
JA
794 union {
795 struct io_task_work io_task_work;
796 struct callback_head task_work;
797 };
010e8e6b
PB
798 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
799 struct hlist_node hash_node;
800 struct async_poll *apoll;
801 struct io_wq_work work;
2b188cc1 802};
05589553 803
27dc8338
PB
804struct io_defer_entry {
805 struct list_head list;
806 struct io_kiocb *req;
9cf7c104 807 u32 seq;
2b188cc1
JA
808};
809
d3656344 810struct io_op_def {
d3656344
JA
811 /* needs req->file assigned */
812 unsigned needs_file : 1;
d3656344
JA
813 /* hash wq insertion if file is a regular file */
814 unsigned hash_reg_file : 1;
815 /* unbound wq insertion if file is a non-regular file */
816 unsigned unbound_nonreg_file : 1;
66f4af93
JA
817 /* opcode is not supported by this kernel */
818 unsigned not_supported : 1;
8a72758c
JA
819 /* set if opcode supports polled "wait" */
820 unsigned pollin : 1;
821 unsigned pollout : 1;
bcda7baa
JA
822 /* op supports buffer selection */
823 unsigned buffer_select : 1;
e8c2bc1f
JA
824 /* must always have async data allocated */
825 unsigned needs_async_data : 1;
27926b68
JA
826 /* should block plug */
827 unsigned plug : 1;
e8c2bc1f
JA
828 /* size of async data needed, if any */
829 unsigned short async_size;
0f203765 830 unsigned work_flags;
d3656344
JA
831};
832
0918682b 833static const struct io_op_def io_op_defs[] = {
0463b6c5
PB
834 [IORING_OP_NOP] = {},
835 [IORING_OP_READV] = {
d3656344
JA
836 .needs_file = 1,
837 .unbound_nonreg_file = 1,
8a72758c 838 .pollin = 1,
4d954c25 839 .buffer_select = 1,
e8c2bc1f 840 .needs_async_data = 1,
27926b68 841 .plug = 1,
e8c2bc1f 842 .async_size = sizeof(struct io_async_rw),
0f203765 843 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
d3656344 844 },
0463b6c5 845 [IORING_OP_WRITEV] = {
d3656344
JA
846 .needs_file = 1,
847 .hash_reg_file = 1,
848 .unbound_nonreg_file = 1,
8a72758c 849 .pollout = 1,
e8c2bc1f 850 .needs_async_data = 1,
27926b68 851 .plug = 1,
e8c2bc1f 852 .async_size = sizeof(struct io_async_rw),
69228338
JA
853 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
854 IO_WQ_WORK_FSIZE,
d3656344 855 },
0463b6c5 856 [IORING_OP_FSYNC] = {
d3656344 857 .needs_file = 1,
0f203765 858 .work_flags = IO_WQ_WORK_BLKCG,
d3656344 859 },
0463b6c5 860 [IORING_OP_READ_FIXED] = {
d3656344
JA
861 .needs_file = 1,
862 .unbound_nonreg_file = 1,
8a72758c 863 .pollin = 1,
27926b68 864 .plug = 1,
e8c2bc1f 865 .async_size = sizeof(struct io_async_rw),
4017eb91 866 .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
d3656344 867 },
0463b6c5 868 [IORING_OP_WRITE_FIXED] = {
d3656344
JA
869 .needs_file = 1,
870 .hash_reg_file = 1,
871 .unbound_nonreg_file = 1,
8a72758c 872 .pollout = 1,
27926b68 873 .plug = 1,
e8c2bc1f 874 .async_size = sizeof(struct io_async_rw),
4017eb91
JA
875 .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
876 IO_WQ_WORK_MM,
d3656344 877 },
0463b6c5 878 [IORING_OP_POLL_ADD] = {
d3656344
JA
879 .needs_file = 1,
880 .unbound_nonreg_file = 1,
881 },
0463b6c5
PB
882 [IORING_OP_POLL_REMOVE] = {},
883 [IORING_OP_SYNC_FILE_RANGE] = {
d3656344 884 .needs_file = 1,
0f203765 885 .work_flags = IO_WQ_WORK_BLKCG,
d3656344 886 },
0463b6c5 887 [IORING_OP_SENDMSG] = {
d3656344
JA
888 .needs_file = 1,
889 .unbound_nonreg_file = 1,
8a72758c 890 .pollout = 1,
e8c2bc1f
JA
891 .needs_async_data = 1,
892 .async_size = sizeof(struct io_async_msghdr),
10cad2c4 893 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
d3656344 894 },
0463b6c5 895 [IORING_OP_RECVMSG] = {
d3656344
JA
896 .needs_file = 1,
897 .unbound_nonreg_file = 1,
8a72758c 898 .pollin = 1,
52de1fe1 899 .buffer_select = 1,
e8c2bc1f
JA
900 .needs_async_data = 1,
901 .async_size = sizeof(struct io_async_msghdr),
10cad2c4 902 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
d3656344 903 },
0463b6c5 904 [IORING_OP_TIMEOUT] = {
e8c2bc1f
JA
905 .needs_async_data = 1,
906 .async_size = sizeof(struct io_timeout_data),
0f203765 907 .work_flags = IO_WQ_WORK_MM,
d3656344 908 },
9c8e11b3
PB
909 [IORING_OP_TIMEOUT_REMOVE] = {
910 /* used by timeout updates' prep() */
911 .work_flags = IO_WQ_WORK_MM,
912 },
0463b6c5 913 [IORING_OP_ACCEPT] = {
d3656344
JA
914 .needs_file = 1,
915 .unbound_nonreg_file = 1,
8a72758c 916 .pollin = 1,
0f203765 917 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
d3656344 918 },
0463b6c5
PB
919 [IORING_OP_ASYNC_CANCEL] = {},
920 [IORING_OP_LINK_TIMEOUT] = {
e8c2bc1f
JA
921 .needs_async_data = 1,
922 .async_size = sizeof(struct io_timeout_data),
0f203765 923 .work_flags = IO_WQ_WORK_MM,
d3656344 924 },
0463b6c5 925 [IORING_OP_CONNECT] = {
d3656344
JA
926 .needs_file = 1,
927 .unbound_nonreg_file = 1,
8a72758c 928 .pollout = 1,
e8c2bc1f
JA
929 .needs_async_data = 1,
930 .async_size = sizeof(struct io_async_connect),
0f203765 931 .work_flags = IO_WQ_WORK_MM,
d3656344 932 },
0463b6c5 933 [IORING_OP_FALLOCATE] = {
d3656344 934 .needs_file = 1,
69228338 935 .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
d3656344 936 },
0463b6c5 937 [IORING_OP_OPENAT] = {
0f203765 938 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
14587a46 939 IO_WQ_WORK_FS | IO_WQ_WORK_MM,
d3656344 940 },
0463b6c5 941 [IORING_OP_CLOSE] = {
0f203765 942 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
d3656344 943 },
0463b6c5 944 [IORING_OP_FILES_UPDATE] = {
0f203765 945 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
d3656344 946 },
0463b6c5 947 [IORING_OP_STATX] = {
0f203765
JA
948 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
949 IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
d3656344 950 },
0463b6c5 951 [IORING_OP_READ] = {
3a6820f2
JA
952 .needs_file = 1,
953 .unbound_nonreg_file = 1,
8a72758c 954 .pollin = 1,
bcda7baa 955 .buffer_select = 1,
27926b68 956 .plug = 1,
e8c2bc1f 957 .async_size = sizeof(struct io_async_rw),
0f203765 958 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
3a6820f2 959 },
0463b6c5 960 [IORING_OP_WRITE] = {
3a6820f2
JA
961 .needs_file = 1,
962 .unbound_nonreg_file = 1,
8a72758c 963 .pollout = 1,
27926b68 964 .plug = 1,
e8c2bc1f 965 .async_size = sizeof(struct io_async_rw),
69228338
JA
966 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
967 IO_WQ_WORK_FSIZE,
3a6820f2 968 },
0463b6c5 969 [IORING_OP_FADVISE] = {
4840e418 970 .needs_file = 1,
0f203765 971 .work_flags = IO_WQ_WORK_BLKCG,
4840e418 972 },
0463b6c5 973 [IORING_OP_MADVISE] = {
0f203765 974 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
c1ca757b 975 },
0463b6c5 976 [IORING_OP_SEND] = {
fddaface
JA
977 .needs_file = 1,
978 .unbound_nonreg_file = 1,
8a72758c 979 .pollout = 1,
0f203765 980 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
fddaface 981 },
0463b6c5 982 [IORING_OP_RECV] = {
fddaface
JA
983 .needs_file = 1,
984 .unbound_nonreg_file = 1,
8a72758c 985 .pollin = 1,
bcda7baa 986 .buffer_select = 1,
0f203765 987 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
fddaface 988 },
0463b6c5 989 [IORING_OP_OPENAT2] = {
0f203765 990 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
14587a46 991 IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
cebdb986 992 },
3e4827b0
JA
993 [IORING_OP_EPOLL_CTL] = {
994 .unbound_nonreg_file = 1,
0f203765 995 .work_flags = IO_WQ_WORK_FILES,
3e4827b0 996 },
7d67af2c
PB
997 [IORING_OP_SPLICE] = {
998 .needs_file = 1,
999 .hash_reg_file = 1,
1000 .unbound_nonreg_file = 1,
0f203765 1001 .work_flags = IO_WQ_WORK_BLKCG,
ddf0322d
JA
1002 },
1003 [IORING_OP_PROVIDE_BUFFERS] = {},
067524e9 1004 [IORING_OP_REMOVE_BUFFERS] = {},
f2a8d5c7
PB
1005 [IORING_OP_TEE] = {
1006 .needs_file = 1,
1007 .hash_reg_file = 1,
1008 .unbound_nonreg_file = 1,
1009 },
36f4fa68
JA
1010 [IORING_OP_SHUTDOWN] = {
1011 .needs_file = 1,
1012 },
80a261fd
JA
1013 [IORING_OP_RENAMEAT] = {
1014 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
1015 IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
1016 },
14a1143b
JA
1017 [IORING_OP_UNLINKAT] = {
1018 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
1019 IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
1020 },
d3656344
JA
1021};
1022
9936c7c2
PB
1023static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1024 struct task_struct *task,
1025 struct files_struct *files);
269bbe5f 1026static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
bc9744cd 1027static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
1ffc5422 1028 struct io_ring_ctx *ctx);
bc9744cd
PB
1029static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
1030 struct fixed_rsrc_ref_node *ref_node);
1ffc5422 1031
23faba36 1032static bool io_rw_reissue(struct io_kiocb *req);
78e19bbe 1033static void io_cqring_fill_event(struct io_kiocb *req, long res);
ec9c02ad 1034static void io_put_req(struct io_kiocb *req);
216578e5 1035static void io_put_req_deferred(struct io_kiocb *req, int nr);
c40f6379 1036static void io_double_put_req(struct io_kiocb *req);
c7dae4ba
JA
1037static void io_dismantle_req(struct io_kiocb *req);
1038static void io_put_task(struct task_struct *task, int nr);
1039static void io_queue_next(struct io_kiocb *req);
94ae5e77 1040static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
7271ef3a 1041static void __io_queue_linked_timeout(struct io_kiocb *req);
94ae5e77 1042static void io_queue_linked_timeout(struct io_kiocb *req);
05f3fb3c 1043static int __io_sqe_files_update(struct io_ring_ctx *ctx,
269bbe5f 1044 struct io_uring_rsrc_update *ip,
05f3fb3c 1045 unsigned nr_args);
3ca405eb 1046static void __io_clean_op(struct io_kiocb *req);
8371adf5
PB
1047static struct file *io_file_get(struct io_submit_state *state,
1048 struct io_kiocb *req, int fd, bool fixed);
c5eef2b9 1049static void __io_queue_sqe(struct io_kiocb *req);
269bbe5f 1050static void io_rsrc_put_work(struct work_struct *work);
de0617e4 1051
847595de
PB
1052static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
1053 struct iov_iter *iter, bool needs_lock);
ff6165b2
JA
1054static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
1055 const struct iovec *fast_iov,
227c0c96 1056 struct iov_iter *iter, bool force);
907d1df3 1057static void io_req_task_queue(struct io_kiocb *req);
65453d1e
JA
1058static void io_submit_flush_completions(struct io_comp_state *cs,
1059 struct io_ring_ctx *ctx);
de0617e4 1060
2b188cc1
JA
1061static struct kmem_cache *req_cachep;
1062
0918682b 1063static const struct file_operations io_uring_fops;
2b188cc1
JA
1064
1065struct sock *io_uring_get_socket(struct file *file)
1066{
1067#if defined(CONFIG_UNIX)
1068 if (file->f_op == &io_uring_fops) {
1069 struct io_ring_ctx *ctx = file->private_data;
1070
1071 return ctx->ring_sock->sk;
1072 }
1073#endif
1074 return NULL;
1075}
1076EXPORT_SYMBOL(io_uring_get_socket);
1077
f2f87370
PB
1078#define io_for_each_link(pos, head) \
1079 for (pos = (head); pos; pos = pos->link)
1080
3ca405eb
PB
1081static inline void io_clean_op(struct io_kiocb *req)
1082{
9d5c8190 1083 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
3ca405eb
PB
1084 __io_clean_op(req);
1085}
1086
36f72fe2
PB
1087static inline void io_set_resource_node(struct io_kiocb *req)
1088{
1089 struct io_ring_ctx *ctx = req->ctx;
1090
269bbe5f
BM
1091 if (!req->fixed_rsrc_refs) {
1092 req->fixed_rsrc_refs = &ctx->file_data->node->refs;
1093 percpu_ref_get(req->fixed_rsrc_refs);
36f72fe2
PB
1094 }
1095}
1096
08d23634
PB
1097static bool io_match_task(struct io_kiocb *head,
1098 struct task_struct *task,
1099 struct files_struct *files)
1100{
1101 struct io_kiocb *req;
1102
84965ff8
JA
1103 if (task && head->task != task) {
1104 /* in terms of cancelation, always match if req task is dead */
1105 if (head->task->flags & PF_EXITING)
1106 return true;
08d23634 1107 return false;
84965ff8 1108 }
08d23634
PB
1109 if (!files)
1110 return true;
1111
1112 io_for_each_link(req, head) {
02a13674
JA
1113 if (!(req->flags & REQ_F_WORK_INITIALIZED))
1114 continue;
1115 if (req->file && req->file->f_op == &io_uring_fops)
1116 return true;
1117 if ((req->work.flags & IO_WQ_WORK_FILES) &&
08d23634
PB
1118 req->work.identity->files == files)
1119 return true;
1120 }
1121 return false;
1122}
1123
28cea78a 1124static void io_sq_thread_drop_mm_files(void)
c40f6379 1125{
28cea78a 1126 struct files_struct *files = current->files;
c40f6379
JA
1127 struct mm_struct *mm = current->mm;
1128
1129 if (mm) {
1130 kthread_unuse_mm(mm);
1131 mmput(mm);
4b70cf9d 1132 current->mm = NULL;
c40f6379 1133 }
28cea78a
JA
1134 if (files) {
1135 struct nsproxy *nsproxy = current->nsproxy;
1136
1137 task_lock(current);
1138 current->files = NULL;
1139 current->nsproxy = NULL;
1140 task_unlock(current);
1141 put_files_struct(files);
1142 put_nsproxy(nsproxy);
1143 }
1144}
1145
1a38ffc9 1146static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
28cea78a
JA
1147{
1148 if (!current->files) {
1149 struct files_struct *files;
1150 struct nsproxy *nsproxy;
1151
1152 task_lock(ctx->sqo_task);
1153 files = ctx->sqo_task->files;
1154 if (!files) {
1155 task_unlock(ctx->sqo_task);
1a38ffc9 1156 return -EOWNERDEAD;
28cea78a
JA
1157 }
1158 atomic_inc(&files->count);
1159 get_nsproxy(ctx->sqo_task->nsproxy);
1160 nsproxy = ctx->sqo_task->nsproxy;
1161 task_unlock(ctx->sqo_task);
1162
1163 task_lock(current);
1164 current->files = files;
1165 current->nsproxy = nsproxy;
1166 task_unlock(current);
1167 }
1a38ffc9 1168 return 0;
c40f6379
JA
1169}
1170
1171static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
1172{
4b70cf9d
JA
1173 struct mm_struct *mm;
1174
1175 if (current->mm)
1176 return 0;
1177
4b70cf9d
JA
1178 task_lock(ctx->sqo_task);
1179 mm = ctx->sqo_task->mm;
1180 if (unlikely(!mm || !mmget_not_zero(mm)))
1181 mm = NULL;
1182 task_unlock(ctx->sqo_task);
1183
1184 if (mm) {
1185 kthread_use_mm(mm);
1186 return 0;
c40f6379
JA
1187 }
1188
4b70cf9d 1189 return -EFAULT;
c40f6379
JA
1190}
1191
4e326358
PB
1192static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
1193 struct io_kiocb *req)
c40f6379 1194{
28cea78a 1195 const struct io_op_def *def = &io_op_defs[req->opcode];
1a38ffc9 1196 int ret;
28cea78a
JA
1197
1198 if (def->work_flags & IO_WQ_WORK_MM) {
1a38ffc9 1199 ret = __io_sq_thread_acquire_mm(ctx);
28cea78a
JA
1200 if (unlikely(ret))
1201 return ret;
1202 }
1203
1a38ffc9
PB
1204 if (def->needs_file || (def->work_flags & IO_WQ_WORK_FILES)) {
1205 ret = __io_sq_thread_acquire_files(ctx);
1206 if (unlikely(ret))
1207 return ret;
1208 }
28cea78a
JA
1209
1210 return 0;
c40f6379
JA
1211}
1212
4e326358
PB
1213static inline int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
1214 struct io_kiocb *req)
1215{
4e326358
PB
1216 if (!(ctx->flags & IORING_SETUP_SQPOLL))
1217 return 0;
1218 return __io_sq_thread_acquire_mm_files(ctx, req);
1219}
1220
91d8f519
DZ
1221static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
1222 struct cgroup_subsys_state **cur_css)
1223
1224{
1225#ifdef CONFIG_BLK_CGROUP
1226 /* puts the old one when swapping */
1227 if (*cur_css != ctx->sqo_blkcg_css) {
1228 kthread_associate_blkcg(ctx->sqo_blkcg_css);
1229 *cur_css = ctx->sqo_blkcg_css;
1230 }
1231#endif
1232}
1233
1234static void io_sq_thread_unassociate_blkcg(void)
1235{
1236#ifdef CONFIG_BLK_CGROUP
1237 kthread_associate_blkcg(NULL);
1238#endif
1239}
1240
c40f6379
JA
1241static inline void req_set_fail_links(struct io_kiocb *req)
1242{
1243 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1244 req->flags |= REQ_F_FAIL_LINK;
1245}
4a38aed2 1246
1e6fa521
JA
1247/*
1248 * None of these are dereferenced, they are simply used to check if any of
1249 * them have changed. If we're under current and check they are still the
1250 * same, we're fine to grab references to them for actual out-of-line use.
1251 */
1252static void io_init_identity(struct io_identity *id)
1253{
1254 id->files = current->files;
1255 id->mm = current->mm;
1256#ifdef CONFIG_BLK_CGROUP
1257 rcu_read_lock();
1258 id->blkcg_css = blkcg_css();
1259 rcu_read_unlock();
1260#endif
1261 id->creds = current_cred();
1262 id->nsproxy = current->nsproxy;
1263 id->fs = current->fs;
1264 id->fsize = rlimit(RLIMIT_FSIZE);
4ea33a97
JA
1265#ifdef CONFIG_AUDIT
1266 id->loginuid = current->loginuid;
1267 id->sessionid = current->sessionid;
1268#endif
1e6fa521
JA
1269 refcount_set(&id->count, 1);
1270}
1271
ec99ca6c
PB
1272static inline void __io_req_init_async(struct io_kiocb *req)
1273{
1274 memset(&req->work, 0, sizeof(req->work));
1275 req->flags |= REQ_F_WORK_INITIALIZED;
1276}
1277
7cdaf587
XW
1278/*
1279 * Note: must call io_req_init_async() for the first time you
1280 * touch any members of io_wq_work.
1281 */
1282static inline void io_req_init_async(struct io_kiocb *req)
1283{
500a373d
JA
1284 struct io_uring_task *tctx = current->io_uring;
1285
7cdaf587
XW
1286 if (req->flags & REQ_F_WORK_INITIALIZED)
1287 return;
1288
ec99ca6c 1289 __io_req_init_async(req);
500a373d
JA
1290
1291 /* Grab a ref if this isn't our static identity */
1292 req->work.identity = tctx->identity;
1293 if (tctx->identity != &tctx->__identity)
1294 refcount_inc(&req->work.identity->count);
7cdaf587
XW
1295}
1296
2b188cc1
JA
1297static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1298{
1299 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1300
0f158b4c 1301 complete(&ctx->ref_comp);
2b188cc1
JA
1302}
1303
8eb7e2d0
PB
1304static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1305{
1306 return !req->timeout.off;
1307}
1308
2b188cc1
JA
1309static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1310{
1311 struct io_ring_ctx *ctx;
78076bb6 1312 int hash_bits;
2b188cc1
JA
1313
1314 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1315 if (!ctx)
1316 return NULL;
1317
78076bb6
JA
1318 /*
1319 * Use 5 bits less than the max cq entries, that should give us around
1320 * 32 entries per hash list if totally full and uniformly spread.
1321 */
1322 hash_bits = ilog2(p->cq_entries);
1323 hash_bits -= 5;
1324 if (hash_bits <= 0)
1325 hash_bits = 1;
1326 ctx->cancel_hash_bits = hash_bits;
1327 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1328 GFP_KERNEL);
1329 if (!ctx->cancel_hash)
1330 goto err;
1331 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1332
21482896 1333 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
206aefde
JA
1334 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1335 goto err;
2b188cc1
JA
1336
1337 ctx->flags = p->flags;
90554200 1338 init_waitqueue_head(&ctx->sqo_sq_wait);
69fb2131 1339 INIT_LIST_HEAD(&ctx->sqd_list);
2b188cc1 1340 init_waitqueue_head(&ctx->cq_wait);
1d7bb1d5 1341 INIT_LIST_HEAD(&ctx->cq_overflow_list);
0f158b4c
JA
1342 init_completion(&ctx->ref_comp);
1343 init_completion(&ctx->sq_thread_comp);
5a2e745d 1344 idr_init(&ctx->io_buffer_idr);
071698e1 1345 idr_init(&ctx->personality_idr);
2b188cc1
JA
1346 mutex_init(&ctx->uring_lock);
1347 init_waitqueue_head(&ctx->wait);
1348 spin_lock_init(&ctx->completion_lock);
540e32a0 1349 INIT_LIST_HEAD(&ctx->iopoll_list);
de0617e4 1350 INIT_LIST_HEAD(&ctx->defer_list);
5262f567 1351 INIT_LIST_HEAD(&ctx->timeout_list);
fcb323cc
JA
1352 spin_lock_init(&ctx->inflight_lock);
1353 INIT_LIST_HEAD(&ctx->inflight_list);
d67d2263
BM
1354 spin_lock_init(&ctx->rsrc_ref_lock);
1355 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
269bbe5f
BM
1356 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1357 init_llist_head(&ctx->rsrc_put_llist);
1b4c351f 1358 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
c7dae4ba 1359 INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
2b188cc1 1360 return ctx;
206aefde 1361err:
78076bb6 1362 kfree(ctx->cancel_hash);
206aefde
JA
1363 kfree(ctx);
1364 return NULL;
2b188cc1
JA
1365}
1366
9cf7c104 1367static bool req_need_defer(struct io_kiocb *req, u32 seq)
7adf4eaf 1368{
2bc9930e
JA
1369 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1370 struct io_ring_ctx *ctx = req->ctx;
a197f664 1371
9cf7c104 1372 return seq != ctx->cached_cq_tail
2c3bac6d 1373 + READ_ONCE(ctx->cached_cq_overflow);
2bc9930e 1374 }
de0617e4 1375
9d858b21 1376 return false;
de0617e4
JA
1377}
1378
5c3462cf 1379static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
1e6fa521 1380{
500a373d 1381 if (req->work.identity == &tctx->__identity)
1e6fa521
JA
1382 return;
1383 if (refcount_dec_and_test(&req->work.identity->count))
1384 kfree(req->work.identity);
1385}
1386
4edf20f9 1387static void io_req_clean_work(struct io_kiocb *req)
18d9be1a 1388{
7cdaf587 1389 if (!(req->flags & REQ_F_WORK_INITIALIZED))
4edf20f9 1390 return;
51a4cc11 1391
e86d0047 1392 if (req->work.flags & IO_WQ_WORK_MM)
98447d65 1393 mmdrop(req->work.identity->mm);
91d8f519 1394#ifdef CONFIG_BLK_CGROUP
e86d0047 1395 if (req->work.flags & IO_WQ_WORK_BLKCG)
98447d65 1396 css_put(req->work.identity->blkcg_css);
91d8f519 1397#endif
e86d0047 1398 if (req->work.flags & IO_WQ_WORK_CREDS)
98447d65 1399 put_cred(req->work.identity->creds);
dfead8a8 1400 if (req->work.flags & IO_WQ_WORK_FS) {
98447d65 1401 struct fs_struct *fs = req->work.identity->fs;
51a4cc11 1402
98447d65 1403 spin_lock(&req->work.identity->fs->lock);
ff002b30
JA
1404 if (--fs->users)
1405 fs = NULL;
98447d65 1406 spin_unlock(&req->work.identity->fs->lock);
ff002b30
JA
1407 if (fs)
1408 free_fs_struct(fs);
1409 }
34e08fed
PB
1410 if (req->work.flags & IO_WQ_WORK_FILES) {
1411 put_files_struct(req->work.identity->files);
1412 put_nsproxy(req->work.identity->nsproxy);
34e08fed
PB
1413 }
1414 if (req->flags & REQ_F_INFLIGHT) {
1415 struct io_ring_ctx *ctx = req->ctx;
1416 struct io_uring_task *tctx = req->task->io_uring;
1417 unsigned long flags;
1418
1419 spin_lock_irqsave(&ctx->inflight_lock, flags);
1420 list_del(&req->inflight_entry);
1421 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1422 req->flags &= ~REQ_F_INFLIGHT;
1423 if (atomic_read(&tctx->in_idle))
1424 wake_up(&tctx->wait);
1425 }
51a4cc11 1426
e86d0047
PB
1427 req->flags &= ~REQ_F_WORK_INITIALIZED;
1428 req->work.flags &= ~(IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | IO_WQ_WORK_FS |
1429 IO_WQ_WORK_CREDS | IO_WQ_WORK_FILES);
5c3462cf 1430 io_put_identity(req->task->io_uring, req);
561fb04a
JA
1431}
1432
1e6fa521
JA
1433/*
1434 * Create a private copy of io_identity, since some fields don't match
1435 * the current context.
1436 */
1437static bool io_identity_cow(struct io_kiocb *req)
1438{
5c3462cf 1439 struct io_uring_task *tctx = current->io_uring;
1e6fa521
JA
1440 const struct cred *creds = NULL;
1441 struct io_identity *id;
1442
1443 if (req->work.flags & IO_WQ_WORK_CREDS)
1444 creds = req->work.identity->creds;
1445
1446 id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
1447 if (unlikely(!id)) {
1448 req->work.flags |= IO_WQ_WORK_CANCEL;
1449 return false;
1450 }
1451
1452 /*
1453 * We can safely just re-init the creds we copied Either the field
1454 * matches the current one, or we haven't grabbed it yet. The only
1455 * exception is ->creds, through registered personalities, so handle
1456 * that one separately.
1457 */
1458 io_init_identity(id);
1459 if (creds)
e8c954df 1460 id->creds = creds;
1e6fa521
JA
1461
1462 /* add one for this request */
1463 refcount_inc(&id->count);
1464
cb8a8ae3
JA
1465 /* drop tctx and req identity references, if needed */
1466 if (tctx->identity != &tctx->__identity &&
1467 refcount_dec_and_test(&tctx->identity->count))
1468 kfree(tctx->identity);
1469 if (req->work.identity != &tctx->__identity &&
1470 refcount_dec_and_test(&req->work.identity->count))
1e6fa521
JA
1471 kfree(req->work.identity);
1472
1473 req->work.identity = id;
500a373d 1474 tctx->identity = id;
1e6fa521
JA
1475 return true;
1476}
1477
ce3d5aae
PB
1478static void io_req_track_inflight(struct io_kiocb *req)
1479{
1480 struct io_ring_ctx *ctx = req->ctx;
1481
1482 if (!(req->flags & REQ_F_INFLIGHT)) {
1483 io_req_init_async(req);
1484 req->flags |= REQ_F_INFLIGHT;
1485
1486 spin_lock_irq(&ctx->inflight_lock);
1487 list_add(&req->inflight_entry, &ctx->inflight_list);
1488 spin_unlock_irq(&ctx->inflight_lock);
1489 }
1490}
1491
1e6fa521 1492static bool io_grab_identity(struct io_kiocb *req)
18d9be1a 1493{
d3656344 1494 const struct io_op_def *def = &io_op_defs[req->opcode];
5c3462cf 1495 struct io_identity *id = req->work.identity;
54a91f3b 1496
69228338
JA
1497 if (def->work_flags & IO_WQ_WORK_FSIZE) {
1498 if (id->fsize != rlimit(RLIMIT_FSIZE))
1499 return false;
1500 req->work.flags |= IO_WQ_WORK_FSIZE;
1501 }
91d8f519 1502#ifdef CONFIG_BLK_CGROUP
dfead8a8
JA
1503 if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
1504 (def->work_flags & IO_WQ_WORK_BLKCG)) {
91d8f519 1505 rcu_read_lock();
1e6fa521
JA
1506 if (id->blkcg_css != blkcg_css()) {
1507 rcu_read_unlock();
1508 return false;
1509 }
91d8f519
DZ
1510 /*
1511 * This should be rare, either the cgroup is dying or the task
1512 * is moving cgroups. Just punt to root for the handful of ios.
1513 */
1e6fa521 1514 if (css_tryget_online(id->blkcg_css))
dfead8a8 1515 req->work.flags |= IO_WQ_WORK_BLKCG;
91d8f519
DZ
1516 rcu_read_unlock();
1517 }
1518#endif
dfead8a8 1519 if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
1e6fa521
JA
1520 if (id->creds != current_cred())
1521 return false;
1522 get_cred(id->creds);
dfead8a8
JA
1523 req->work.flags |= IO_WQ_WORK_CREDS;
1524 }
4ea33a97
JA
1525#ifdef CONFIG_AUDIT
1526 if (!uid_eq(current->loginuid, id->loginuid) ||
1527 current->sessionid != id->sessionid)
1528 return false;
1529#endif
dfead8a8
JA
1530 if (!(req->work.flags & IO_WQ_WORK_FS) &&
1531 (def->work_flags & IO_WQ_WORK_FS)) {
1e6fa521
JA
1532 if (current->fs != id->fs)
1533 return false;
1534 spin_lock(&id->fs->lock);
1535 if (!id->fs->in_exec) {
1536 id->fs->users++;
dfead8a8 1537 req->work.flags |= IO_WQ_WORK_FS;
dca9cf8b
PB
1538 } else {
1539 req->work.flags |= IO_WQ_WORK_CANCEL;
1540 }
1541 spin_unlock(&current->fs->lock);
1542 }
af604703
PB
1543 if (!(req->work.flags & IO_WQ_WORK_FILES) &&
1544 (def->work_flags & IO_WQ_WORK_FILES) &&
1545 !(req->flags & REQ_F_NO_FILE_TABLE)) {
1546 if (id->files != current->files ||
1547 id->nsproxy != current->nsproxy)
1548 return false;
1549 atomic_inc(&id->files->count);
1550 get_nsproxy(id->nsproxy);
af604703 1551 req->work.flags |= IO_WQ_WORK_FILES;
ce3d5aae 1552 io_req_track_inflight(req);
af604703 1553 }
77788775
JA
1554 if (!(req->work.flags & IO_WQ_WORK_MM) &&
1555 (def->work_flags & IO_WQ_WORK_MM)) {
1556 if (id->mm != current->mm)
1557 return false;
1558 mmgrab(id->mm);
1559 req->work.flags |= IO_WQ_WORK_MM;
1560 }
1e6fa521
JA
1561
1562 return true;
1563}
1564
1565static void io_prep_async_work(struct io_kiocb *req)
1566{
1567 const struct io_op_def *def = &io_op_defs[req->opcode];
1e6fa521
JA
1568 struct io_ring_ctx *ctx = req->ctx;
1569
1570 io_req_init_async(req);
1571
feaadc4f
PB
1572 if (req->flags & REQ_F_FORCE_ASYNC)
1573 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1574
1e6fa521
JA
1575 if (req->flags & REQ_F_ISREG) {
1576 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1577 io_wq_hash_work(&req->work, file_inode(req->file));
1578 } else {
1579 if (def->unbound_nonreg_file)
1580 req->work.flags |= IO_WQ_WORK_UNBOUND;
1581 }
1582
1e6fa521
JA
1583 /* if we fail grabbing identity, we must COW, regrab, and retry */
1584 if (io_grab_identity(req))
1585 return;
1586
1587 if (!io_identity_cow(req))
1588 return;
1589
1590 /* can't fail at this point */
1591 if (!io_grab_identity(req))
1592 WARN_ON(1);
561fb04a 1593}
cccf0ee8 1594
cbdcb435 1595static void io_prep_async_link(struct io_kiocb *req)
561fb04a 1596{
cbdcb435 1597 struct io_kiocb *cur;
54a91f3b 1598
f2f87370
PB
1599 io_for_each_link(cur, req)
1600 io_prep_async_work(cur);
561fb04a
JA
1601}
1602
7271ef3a 1603static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
561fb04a 1604{
a197f664 1605 struct io_ring_ctx *ctx = req->ctx;
cbdcb435 1606 struct io_kiocb *link = io_prep_linked_timeout(req);
561fb04a 1607
8766dd51
PB
1608 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1609 &req->work, req->flags);
1610 io_wq_enqueue(ctx->io_wq, &req->work);
7271ef3a 1611 return link;
18d9be1a
JA
1612}
1613
cbdcb435
PB
1614static void io_queue_async_work(struct io_kiocb *req)
1615{
7271ef3a
JA
1616 struct io_kiocb *link;
1617
cbdcb435
PB
1618 /* init ->work of the whole link before punting */
1619 io_prep_async_link(req);
7271ef3a
JA
1620 link = __io_queue_async_work(req);
1621
1622 if (link)
1623 io_queue_linked_timeout(link);
cbdcb435
PB
1624}
1625
5262f567
JA
1626static void io_kill_timeout(struct io_kiocb *req)
1627{
e8c2bc1f 1628 struct io_timeout_data *io = req->async_data;
5262f567
JA
1629 int ret;
1630
e8c2bc1f 1631 ret = hrtimer_try_to_cancel(&io->timer);
5262f567 1632 if (ret != -1) {
01cec8c1
PB
1633 atomic_set(&req->ctx->cq_timeouts,
1634 atomic_read(&req->ctx->cq_timeouts) + 1);
135fcde8 1635 list_del_init(&req->timeout.list);
78e19bbe 1636 io_cqring_fill_event(req, 0);
216578e5 1637 io_put_req_deferred(req, 1);
5262f567
JA
1638 }
1639}
1640
76e1b642
JA
1641/*
1642 * Returns true if we found and killed one or more timeouts
1643 */
6b81928d
PB
1644static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
1645 struct files_struct *files)
5262f567
JA
1646{
1647 struct io_kiocb *req, *tmp;
76e1b642 1648 int canceled = 0;
5262f567
JA
1649
1650 spin_lock_irq(&ctx->completion_lock);
f3606e3a 1651 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
6b81928d 1652 if (io_match_task(req, tsk, files)) {
f3606e3a 1653 io_kill_timeout(req);
76e1b642
JA
1654 canceled++;
1655 }
f3606e3a 1656 }
5262f567 1657 spin_unlock_irq(&ctx->completion_lock);
76e1b642 1658 return canceled != 0;
5262f567
JA
1659}
1660
04518945 1661static void __io_queue_deferred(struct io_ring_ctx *ctx)
de0617e4 1662{
04518945 1663 do {
27dc8338
PB
1664 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1665 struct io_defer_entry, list);
de0617e4 1666
9cf7c104 1667 if (req_need_defer(de->req, de->seq))
04518945 1668 break;
27dc8338 1669 list_del_init(&de->list);
907d1df3 1670 io_req_task_queue(de->req);
27dc8338 1671 kfree(de);
04518945
PB
1672 } while (!list_empty(&ctx->defer_list));
1673}
1674
360428f8 1675static void io_flush_timeouts(struct io_ring_ctx *ctx)
de0617e4 1676{
f010505b
MDG
1677 u32 seq;
1678
1679 if (list_empty(&ctx->timeout_list))
1680 return;
1681
1682 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1683
1684 do {
1685 u32 events_needed, events_got;
360428f8 1686 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
135fcde8 1687 struct io_kiocb, timeout.list);
de0617e4 1688
8eb7e2d0 1689 if (io_is_timeout_noseq(req))
360428f8 1690 break;
f010505b
MDG
1691
1692 /*
1693 * Since seq can easily wrap around over time, subtract
1694 * the last seq at which timeouts were flushed before comparing.
1695 * Assuming not more than 2^31-1 events have happened since,
1696 * these subtractions won't have wrapped, so we can check if
1697 * target is in [last_seq, current_seq] by comparing the two.
1698 */
1699 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1700 events_got = seq - ctx->cq_last_tm_flush;
1701 if (events_got < events_needed)
360428f8 1702 break;
bfe68a22 1703
135fcde8 1704 list_del_init(&req->timeout.list);
5262f567 1705 io_kill_timeout(req);
f010505b
MDG
1706 } while (!list_empty(&ctx->timeout_list));
1707
1708 ctx->cq_last_tm_flush = seq;
360428f8 1709}
5262f567 1710
360428f8
PB
1711static void io_commit_cqring(struct io_ring_ctx *ctx)
1712{
1713 io_flush_timeouts(ctx);
ec30e04b
PB
1714
1715 /* order cqe stores with ring update */
1716 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
de0617e4 1717
04518945
PB
1718 if (unlikely(!list_empty(&ctx->defer_list)))
1719 __io_queue_deferred(ctx);
de0617e4
JA
1720}
1721
90554200
JA
1722static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1723{
1724 struct io_rings *r = ctx->rings;
1725
1726 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1727}
1728
888aae2e
PB
1729static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1730{
1731 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1732}
1733
2b188cc1
JA
1734static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1735{
75b28aff 1736 struct io_rings *rings = ctx->rings;
2b188cc1
JA
1737 unsigned tail;
1738
115e12e5
SB
1739 /*
1740 * writes to the cq entry need to come after reading head; the
1741 * control dependency is enough as we're using WRITE_ONCE to
1742 * fill the cq entry
1743 */
888aae2e 1744 if (__io_cqring_events(ctx) == rings->cq_ring_entries)
2b188cc1
JA
1745 return NULL;
1746
888aae2e 1747 tail = ctx->cached_cq_tail++;
75b28aff 1748 return &rings->cqes[tail & ctx->cq_mask];
2b188cc1
JA
1749}
1750
f2842ab5
JA
1751static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1752{
f0b493e6
JA
1753 if (!ctx->cq_ev_fd)
1754 return false;
7e55a19c
SG
1755 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1756 return false;
f2842ab5
JA
1757 if (!ctx->eventfd_async)
1758 return true;
b41e9852 1759 return io_wq_current_is_worker();
f2842ab5
JA
1760}
1761
b41e9852 1762static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1d7bb1d5 1763{
b1445e59
PB
1764 /* see waitqueue_active() comment */
1765 smp_mb();
1766
1d7bb1d5
JA
1767 if (waitqueue_active(&ctx->wait))
1768 wake_up(&ctx->wait);
534ca6d6
JA
1769 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1770 wake_up(&ctx->sq_data->wait);
b41e9852 1771 if (io_should_trigger_evfd(ctx))
1d7bb1d5 1772 eventfd_signal(ctx->cq_ev_fd, 1);
b1445e59 1773 if (waitqueue_active(&ctx->cq_wait)) {
4aa84f2f
PB
1774 wake_up_interruptible(&ctx->cq_wait);
1775 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1776 }
1d7bb1d5
JA
1777}
1778
80c18e4a
PB
1779static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1780{
b1445e59
PB
1781 /* see waitqueue_active() comment */
1782 smp_mb();
1783
80c18e4a
PB
1784 if (ctx->flags & IORING_SETUP_SQPOLL) {
1785 if (waitqueue_active(&ctx->wait))
1786 wake_up(&ctx->wait);
1787 }
1788 if (io_should_trigger_evfd(ctx))
1789 eventfd_signal(ctx->cq_ev_fd, 1);
b1445e59 1790 if (waitqueue_active(&ctx->cq_wait)) {
4aa84f2f
PB
1791 wake_up_interruptible(&ctx->cq_wait);
1792 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1793 }
80c18e4a
PB
1794}
1795
c4a2ed72 1796/* Returns true if there are no backlogged entries after the flush */
6c503150
PB
1797static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1798 struct task_struct *tsk,
1799 struct files_struct *files)
1d7bb1d5
JA
1800{
1801 struct io_rings *rings = ctx->rings;
e6c8aa9a 1802 struct io_kiocb *req, *tmp;
1d7bb1d5 1803 struct io_uring_cqe *cqe;
1d7bb1d5 1804 unsigned long flags;
b18032bb 1805 bool all_flushed, posted;
1d7bb1d5
JA
1806 LIST_HEAD(list);
1807
e23de15f
PB
1808 if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
1809 return false;
1d7bb1d5 1810
b18032bb 1811 posted = false;
1d7bb1d5 1812 spin_lock_irqsave(&ctx->completion_lock, flags);
e6c8aa9a 1813 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
08d23634 1814 if (!io_match_task(req, tsk, files))
e6c8aa9a
JA
1815 continue;
1816
1d7bb1d5
JA
1817 cqe = io_get_cqring(ctx);
1818 if (!cqe && !force)
1819 break;
1820
40d8ddd4 1821 list_move(&req->compl.list, &list);
1d7bb1d5
JA
1822 if (cqe) {
1823 WRITE_ONCE(cqe->user_data, req->user_data);
1824 WRITE_ONCE(cqe->res, req->result);
0f7e466b 1825 WRITE_ONCE(cqe->flags, req->compl.cflags);
1d7bb1d5 1826 } else {
2c3bac6d 1827 ctx->cached_cq_overflow++;
1d7bb1d5 1828 WRITE_ONCE(ctx->rings->cq_overflow,
2c3bac6d 1829 ctx->cached_cq_overflow);
1d7bb1d5 1830 }
b18032bb 1831 posted = true;
1d7bb1d5
JA
1832 }
1833
09e88404
PB
1834 all_flushed = list_empty(&ctx->cq_overflow_list);
1835 if (all_flushed) {
1836 clear_bit(0, &ctx->sq_check_overflow);
1837 clear_bit(0, &ctx->cq_check_overflow);
1838 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1839 }
46930143 1840
b18032bb
JA
1841 if (posted)
1842 io_commit_cqring(ctx);
1d7bb1d5 1843 spin_unlock_irqrestore(&ctx->completion_lock, flags);
b18032bb
JA
1844 if (posted)
1845 io_cqring_ev_posted(ctx);
1d7bb1d5
JA
1846
1847 while (!list_empty(&list)) {
40d8ddd4
PB
1848 req = list_first_entry(&list, struct io_kiocb, compl.list);
1849 list_del(&req->compl.list);
ec9c02ad 1850 io_put_req(req);
1d7bb1d5 1851 }
c4a2ed72 1852
09e88404 1853 return all_flushed;
1d7bb1d5
JA
1854}
1855
6c503150
PB
1856static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1857 struct task_struct *tsk,
1858 struct files_struct *files)
1859{
1860 if (test_bit(0, &ctx->cq_check_overflow)) {
1861 /* iopoll syncs against uring_lock, not completion_lock */
1862 if (ctx->flags & IORING_SETUP_IOPOLL)
1863 mutex_lock(&ctx->uring_lock);
1864 __io_cqring_overflow_flush(ctx, force, tsk, files);
1865 if (ctx->flags & IORING_SETUP_IOPOLL)
1866 mutex_unlock(&ctx->uring_lock);
1867 }
1868}
1869
bcda7baa 1870static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
2b188cc1 1871{
78e19bbe 1872 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1873 struct io_uring_cqe *cqe;
1874
78e19bbe 1875 trace_io_uring_complete(ctx, req->user_data, res);
51c3ff62 1876
2b188cc1
JA
1877 /*
1878 * If we can't get a cq entry, userspace overflowed the
1879 * submission (by quite a lot). Increment the overflow count in
1880 * the ring.
1881 */
1882 cqe = io_get_cqring(ctx);
1d7bb1d5 1883 if (likely(cqe)) {
78e19bbe 1884 WRITE_ONCE(cqe->user_data, req->user_data);
2b188cc1 1885 WRITE_ONCE(cqe->res, res);
bcda7baa 1886 WRITE_ONCE(cqe->flags, cflags);
fdaf083c
JA
1887 } else if (ctx->cq_overflow_flushed ||
1888 atomic_read(&req->task->io_uring->in_idle)) {
0f212204
JA
1889 /*
1890 * If we're in ring overflow flush mode, or in task cancel mode,
1891 * then we cannot store the request for later flushing, we need
1892 * to drop it on the floor.
1893 */
2c3bac6d
PB
1894 ctx->cached_cq_overflow++;
1895 WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
1d7bb1d5 1896 } else {
ad3eb2c8
JA
1897 if (list_empty(&ctx->cq_overflow_list)) {
1898 set_bit(0, &ctx->sq_check_overflow);
1899 set_bit(0, &ctx->cq_check_overflow);
6d5f9049 1900 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
ad3eb2c8 1901 }
40d8ddd4 1902 io_clean_op(req);
1d7bb1d5 1903 req->result = res;
0f7e466b 1904 req->compl.cflags = cflags;
40d8ddd4
PB
1905 refcount_inc(&req->refs);
1906 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
2b188cc1
JA
1907 }
1908}
1909
bcda7baa
JA
1910static void io_cqring_fill_event(struct io_kiocb *req, long res)
1911{
1912 __io_cqring_fill_event(req, res, 0);
1913}
1914
c7dae4ba
JA
1915static inline void io_req_complete_post(struct io_kiocb *req, long res,
1916 unsigned int cflags)
2b188cc1 1917{
78e19bbe 1918 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1919 unsigned long flags;
1920
1921 spin_lock_irqsave(&ctx->completion_lock, flags);
bcda7baa 1922 __io_cqring_fill_event(req, res, cflags);
2b188cc1 1923 io_commit_cqring(ctx);
c7dae4ba
JA
1924 /*
1925 * If we're the last reference to this request, add to our locked
1926 * free_list cache.
1927 */
1928 if (refcount_dec_and_test(&req->refs)) {
1929 struct io_comp_state *cs = &ctx->submit_state.comp;
1930
1931 io_dismantle_req(req);
1932 io_put_task(req->task, 1);
1933 list_add(&req->compl.list, &cs->locked_free_list);
1934 cs->locked_free_nr++;
1935 } else
1936 req = NULL;
2b188cc1
JA
1937 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1938
8c838788 1939 io_cqring_ev_posted(ctx);
c7dae4ba
JA
1940 if (req) {
1941 io_queue_next(req);
1942 percpu_ref_put(&ctx->refs);
1943 }
2b188cc1
JA
1944}
1945
a38d68db 1946static void io_req_complete_state(struct io_kiocb *req, long res,
889fca73 1947 unsigned int cflags)
229a7b63 1948{
a38d68db
PB
1949 io_clean_op(req);
1950 req->result = res;
1951 req->compl.cflags = cflags;
e342c807 1952 req->flags |= REQ_F_COMPLETE_INLINE;
a38d68db
PB
1953}
1954
889fca73
PB
1955static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1956 long res, unsigned cflags)
a38d68db 1957{
889fca73
PB
1958 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1959 io_req_complete_state(req, res, cflags);
a38d68db 1960 else
c7dae4ba 1961 io_req_complete_post(req, res, cflags);
e1e16097
JA
1962}
1963
a38d68db 1964static inline void io_req_complete(struct io_kiocb *req, long res)
bcda7baa 1965{
889fca73 1966 __io_req_complete(req, 0, res, 0);
bcda7baa
JA
1967}
1968
c7dae4ba 1969static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
2b188cc1 1970{
c7dae4ba
JA
1971 struct io_submit_state *state = &ctx->submit_state;
1972 struct io_comp_state *cs = &state->comp;
e5d1bc0a 1973 struct io_kiocb *req = NULL;
1b4c351f 1974
c7dae4ba
JA
1975 /*
1976 * If we have more than a batch's worth of requests in our IRQ side
1977 * locked cache, grab the lock and move them over to our submission
1978 * side cache.
1979 */
1980 if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
1981 spin_lock_irq(&ctx->completion_lock);
1982 list_splice_init(&cs->locked_free_list, &cs->free_list);
1983 cs->locked_free_nr = 0;
1984 spin_unlock_irq(&ctx->completion_lock);
1985 }
1986
1987 while (!list_empty(&cs->free_list)) {
1988 req = list_first_entry(&cs->free_list, struct io_kiocb,
1b4c351f
JA
1989 compl.list);
1990 list_del(&req->compl.list);
e5d1bc0a
PB
1991 state->reqs[state->free_reqs++] = req;
1992 if (state->free_reqs == ARRAY_SIZE(state->reqs))
1993 break;
1b4c351f
JA
1994 }
1995
e5d1bc0a
PB
1996 return req != NULL;
1997}
1998
1999static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2000{
2001 struct io_submit_state *state = &ctx->submit_state;
2002
2003 BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
2004
f6b6c7d6 2005 if (!state->free_reqs) {
291b2821 2006 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
2579f913
JA
2007 int ret;
2008
c7dae4ba 2009 if (io_flush_cached_reqs(ctx))
e5d1bc0a
PB
2010 goto got_req;
2011
bf019da7
PB
2012 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
2013 state->reqs);
fd6fab2c
JA
2014
2015 /*
2016 * Bulk alloc is all-or-nothing. If we fail to get a batch,
2017 * retry single alloc to be on the safe side.
2018 */
2019 if (unlikely(ret <= 0)) {
2020 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
2021 if (!state->reqs[0])
3893f39f 2022 return NULL;
fd6fab2c
JA
2023 ret = 1;
2024 }
291b2821 2025 state->free_reqs = ret;
2b188cc1 2026 }
e5d1bc0a 2027got_req:
291b2821
PB
2028 state->free_reqs--;
2029 return state->reqs[state->free_reqs];
2b188cc1
JA
2030}
2031
8da11c19
PB
2032static inline void io_put_file(struct io_kiocb *req, struct file *file,
2033 bool fixed)
2034{
36f72fe2 2035 if (!fixed)
8da11c19
PB
2036 fput(file);
2037}
2038
4edf20f9 2039static void io_dismantle_req(struct io_kiocb *req)
2b188cc1 2040{
3ca405eb 2041 io_clean_op(req);
929a3af9 2042
e8c2bc1f
JA
2043 if (req->async_data)
2044 kfree(req->async_data);
8da11c19
PB
2045 if (req->file)
2046 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
269bbe5f
BM
2047 if (req->fixed_rsrc_refs)
2048 percpu_ref_put(req->fixed_rsrc_refs);
4edf20f9 2049 io_req_clean_work(req);
e65ef56d
JA
2050}
2051
7c660731
PB
2052static inline void io_put_task(struct task_struct *task, int nr)
2053{
2054 struct io_uring_task *tctx = task->io_uring;
2055
2056 percpu_counter_sub(&tctx->inflight, nr);
2057 if (unlikely(atomic_read(&tctx->in_idle)))
2058 wake_up(&tctx->wait);
2059 put_task_struct_many(task, nr);
2060}
2061
216578e5 2062static void __io_free_req(struct io_kiocb *req)
c6ca97b3 2063{
51a4cc11 2064 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 2065
216578e5 2066 io_dismantle_req(req);
7c660731 2067 io_put_task(req->task, 1);
e3bc8e9d 2068
3893f39f 2069 kmem_cache_free(req_cachep, req);
ecfc5177 2070 percpu_ref_put(&ctx->refs);
e65ef56d
JA
2071}
2072
f2f87370
PB
2073static inline void io_remove_next_linked(struct io_kiocb *req)
2074{
2075 struct io_kiocb *nxt = req->link;
2076
2077 req->link = nxt->link;
2078 nxt->link = NULL;
2079}
2080
c9abd7ad 2081static void io_kill_linked_timeout(struct io_kiocb *req)
2665abfd 2082{
a197f664 2083 struct io_ring_ctx *ctx = req->ctx;
7c86ffee 2084 struct io_kiocb *link;
c9abd7ad
PB
2085 bool cancelled = false;
2086 unsigned long flags;
7c86ffee 2087
c9abd7ad 2088 spin_lock_irqsave(&ctx->completion_lock, flags);
f2f87370
PB
2089 link = req->link;
2090
900fad45
PB
2091 /*
2092 * Can happen if a linked timeout fired and link had been like
2093 * req -> link t-out -> link t-out [-> ...]
2094 */
c9abd7ad
PB
2095 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
2096 struct io_timeout_data *io = link->async_data;
2097 int ret;
7c86ffee 2098
f2f87370 2099 io_remove_next_linked(req);
90cd7e42 2100 link->timeout.head = NULL;
c9abd7ad
PB
2101 ret = hrtimer_try_to_cancel(&io->timer);
2102 if (ret != -1) {
2103 io_cqring_fill_event(link, -ECANCELED);
2104 io_commit_cqring(ctx);
2105 cancelled = true;
2106 }
2107 }
7c86ffee 2108 req->flags &= ~REQ_F_LINK_TIMEOUT;
216578e5 2109 spin_unlock_irqrestore(&ctx->completion_lock, flags);
ab0b6451 2110
c9abd7ad 2111 if (cancelled) {
7c86ffee 2112 io_cqring_ev_posted(ctx);
c9abd7ad
PB
2113 io_put_req(link);
2114 }
7c86ffee
PB
2115}
2116
9e645e11 2117
d148ca4b 2118static void io_fail_links(struct io_kiocb *req)
9e645e11 2119{
f2f87370 2120 struct io_kiocb *link, *nxt;
2665abfd 2121 struct io_ring_ctx *ctx = req->ctx;
d148ca4b 2122 unsigned long flags;
9e645e11 2123
d148ca4b 2124 spin_lock_irqsave(&ctx->completion_lock, flags);
f2f87370
PB
2125 link = req->link;
2126 req->link = NULL;
9e645e11 2127
f2f87370
PB
2128 while (link) {
2129 nxt = link->link;
2130 link->link = NULL;
2665abfd 2131
f2f87370 2132 trace_io_uring_fail_link(req, link);
7c86ffee 2133 io_cqring_fill_event(link, -ECANCELED);
216578e5
PB
2134
2135 /*
2136 * It's ok to free under spinlock as they're not linked anymore,
2137 * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
2138 * work.fs->lock.
2139 */
2140 if (link->flags & REQ_F_WORK_INITIALIZED)
2141 io_put_req_deferred(link, 2);
2142 else
2143 io_double_put_req(link);
f2f87370 2144 link = nxt;
9e645e11 2145 }
2665abfd 2146 io_commit_cqring(ctx);
216578e5 2147 spin_unlock_irqrestore(&ctx->completion_lock, flags);
9e645e11 2148
2665abfd 2149 io_cqring_ev_posted(ctx);
9e645e11
JA
2150}
2151
3fa5e0f3 2152static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
c69f8dbe 2153{
7c86ffee
PB
2154 if (req->flags & REQ_F_LINK_TIMEOUT)
2155 io_kill_linked_timeout(req);
944e58bf 2156
9e645e11
JA
2157 /*
2158 * If LINK is set, we have dependent requests in this chain. If we
2159 * didn't fail this request, queue the first one up, moving any other
2160 * dependencies to the next request. In case of failure, fail the rest
2161 * of the chain.
2162 */
f2f87370
PB
2163 if (likely(!(req->flags & REQ_F_FAIL_LINK))) {
2164 struct io_kiocb *nxt = req->link;
2165
2166 req->link = NULL;
2167 return nxt;
2168 }
9b5f7bd9
PB
2169 io_fail_links(req);
2170 return NULL;
4d7dd462 2171}
9e645e11 2172
f2f87370 2173static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
3fa5e0f3 2174{
cdbff982 2175 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
3fa5e0f3
PB
2176 return NULL;
2177 return __io_req_find_next(req);
2178}
2179
7cbf1722
JA
2180static bool __tctx_task_work(struct io_uring_task *tctx)
2181{
65453d1e 2182 struct io_ring_ctx *ctx = NULL;
7cbf1722
JA
2183 struct io_wq_work_list list;
2184 struct io_wq_work_node *node;
2185
2186 if (wq_list_empty(&tctx->task_list))
2187 return false;
2188
0b81e80c 2189 spin_lock_irq(&tctx->task_lock);
7cbf1722
JA
2190 list = tctx->task_list;
2191 INIT_WQ_LIST(&tctx->task_list);
0b81e80c 2192 spin_unlock_irq(&tctx->task_lock);
7cbf1722
JA
2193
2194 node = list.first;
2195 while (node) {
2196 struct io_wq_work_node *next = node->next;
65453d1e 2197 struct io_ring_ctx *this_ctx;
7cbf1722
JA
2198 struct io_kiocb *req;
2199
2200 req = container_of(node, struct io_kiocb, io_task_work.node);
65453d1e 2201 this_ctx = req->ctx;
7cbf1722
JA
2202 req->task_work.func(&req->task_work);
2203 node = next;
65453d1e
JA
2204
2205 if (!ctx) {
2206 ctx = this_ctx;
2207 } else if (ctx != this_ctx) {
2208 mutex_lock(&ctx->uring_lock);
2209 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
2210 mutex_unlock(&ctx->uring_lock);
2211 ctx = this_ctx;
2212 }
2213 }
2214
2215 if (ctx && ctx->submit_state.comp.nr) {
2216 mutex_lock(&ctx->uring_lock);
2217 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
2218 mutex_unlock(&ctx->uring_lock);
7cbf1722
JA
2219 }
2220
2221 return list.first != NULL;
2222}
2223
2224static void tctx_task_work(struct callback_head *cb)
2225{
2226 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
2227
2228 while (__tctx_task_work(tctx))
2229 cond_resched();
2230
2231 clear_bit(0, &tctx->task_state);
2232}
2233
2234static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
2235 enum task_work_notify_mode notify)
2236{
2237 struct io_uring_task *tctx = tsk->io_uring;
2238 struct io_wq_work_node *node, *prev;
0b81e80c 2239 unsigned long flags;
7cbf1722
JA
2240 int ret;
2241
2242 WARN_ON_ONCE(!tctx);
2243
0b81e80c 2244 spin_lock_irqsave(&tctx->task_lock, flags);
7cbf1722 2245 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
0b81e80c 2246 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
2247
2248 /* task_work already pending, we're done */
2249 if (test_bit(0, &tctx->task_state) ||
2250 test_and_set_bit(0, &tctx->task_state))
2251 return 0;
2252
2253 if (!task_work_add(tsk, &tctx->task_work, notify))
2254 return 0;
2255
2256 /*
2257 * Slow path - we failed, find and delete work. if the work is not
2258 * in the list, it got run and we're fine.
2259 */
2260 ret = 0;
0b81e80c 2261 spin_lock_irqsave(&tctx->task_lock, flags);
7cbf1722
JA
2262 wq_list_for_each(node, prev, &tctx->task_list) {
2263 if (&req->io_task_work.node == node) {
2264 wq_list_del(&tctx->task_list, node, prev);
2265 ret = 1;
2266 break;
2267 }
2268 }
0b81e80c 2269 spin_unlock_irqrestore(&tctx->task_lock, flags);
7cbf1722
JA
2270 clear_bit(0, &tctx->task_state);
2271 return ret;
2272}
2273
355fb9e2 2274static int io_req_task_work_add(struct io_kiocb *req)
c2c4c83c
JA
2275{
2276 struct task_struct *tsk = req->task;
2277 struct io_ring_ctx *ctx = req->ctx;
91989c70
JA
2278 enum task_work_notify_mode notify;
2279 int ret;
c2c4c83c 2280
6200b0ae
JA
2281 if (tsk->flags & PF_EXITING)
2282 return -ESRCH;
2283
c2c4c83c 2284 /*
0ba9c9ed
JA
2285 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2286 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2287 * processing task_work. There's no reliable way to tell if TWA_RESUME
2288 * will do the job.
c2c4c83c 2289 */
91989c70 2290 notify = TWA_NONE;
355fb9e2 2291 if (!(ctx->flags & IORING_SETUP_SQPOLL))
c2c4c83c
JA
2292 notify = TWA_SIGNAL;
2293
7cbf1722 2294 ret = io_task_work_add(tsk, req, notify);
c2c4c83c
JA
2295 if (!ret)
2296 wake_up_process(tsk);
0ba9c9ed 2297
c2c4c83c
JA
2298 return ret;
2299}
2300
eab30c4d 2301static void io_req_task_work_add_fallback(struct io_kiocb *req,
7cbf1722 2302 task_work_func_t cb)
eab30c4d
PB
2303{
2304 struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
2305
2306 init_task_work(&req->task_work, cb);
2307 task_work_add(tsk, &req->task_work, TWA_NONE);
2308 wake_up_process(tsk);
2309}
2310
c40f6379
JA
2311static void __io_req_task_cancel(struct io_kiocb *req, int error)
2312{
2313 struct io_ring_ctx *ctx = req->ctx;
2314
2315 spin_lock_irq(&ctx->completion_lock);
2316 io_cqring_fill_event(req, error);
2317 io_commit_cqring(ctx);
2318 spin_unlock_irq(&ctx->completion_lock);
2319
2320 io_cqring_ev_posted(ctx);
2321 req_set_fail_links(req);
2322 io_double_put_req(req);
2323}
2324
2325static void io_req_task_cancel(struct callback_head *cb)
2326{
2327 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
87ceb6a6 2328 struct io_ring_ctx *ctx = req->ctx;
c40f6379
JA
2329
2330 __io_req_task_cancel(req, -ECANCELED);
87ceb6a6 2331 percpu_ref_put(&ctx->refs);
c40f6379
JA
2332}
2333
2334static void __io_req_task_submit(struct io_kiocb *req)
2335{
2336 struct io_ring_ctx *ctx = req->ctx;
2337
04fc6c80 2338 /* ctx stays valid until unlock, even if we drop all ours ctx->refs */
81b6d05c 2339 mutex_lock(&ctx->uring_lock);
dc0eced5
PB
2340 if (!ctx->sqo_dead && !(current->flags & PF_EXITING) &&
2341 !io_sq_thread_acquire_mm_files(ctx, req))
c5eef2b9 2342 __io_queue_sqe(req);
81b6d05c 2343 else
c40f6379 2344 __io_req_task_cancel(req, -EFAULT);
81b6d05c 2345 mutex_unlock(&ctx->uring_lock);
c40f6379
JA
2346}
2347
2348static void io_req_task_submit(struct callback_head *cb)
2349{
2350 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2351
2352 __io_req_task_submit(req);
2353}
2354
2355static void io_req_task_queue(struct io_kiocb *req)
2356{
c40f6379
JA
2357 int ret;
2358
7cbf1722 2359 req->task_work.func = io_req_task_submit;
355fb9e2 2360 ret = io_req_task_work_add(req);
04fc6c80
PB
2361 if (unlikely(ret)) {
2362 percpu_ref_get(&req->ctx->refs);
eab30c4d 2363 io_req_task_work_add_fallback(req, io_req_task_cancel);
04fc6c80 2364 }
c40f6379
JA
2365}
2366
f2f87370 2367static inline void io_queue_next(struct io_kiocb *req)
c69f8dbe 2368{
9b5f7bd9 2369 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf
PB
2370
2371 if (nxt)
906a8c3f 2372 io_req_task_queue(nxt);
c69f8dbe
JL
2373}
2374
c3524383 2375static void io_free_req(struct io_kiocb *req)
7a743e22 2376{
c3524383
PB
2377 io_queue_next(req);
2378 __io_free_req(req);
2379}
8766dd51 2380
2d6500d4 2381struct req_batch {
5af1d13e
PB
2382 struct task_struct *task;
2383 int task_refs;
1b4c351f 2384 int ctx_refs;
2d6500d4
PB
2385};
2386
5af1d13e
PB
2387static inline void io_init_req_batch(struct req_batch *rb)
2388{
5af1d13e 2389 rb->task_refs = 0;
9ae72463 2390 rb->ctx_refs = 0;
5af1d13e
PB
2391 rb->task = NULL;
2392}
2393
2d6500d4
PB
2394static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2395 struct req_batch *rb)
2396{
6e833d53 2397 if (rb->task)
7c660731 2398 io_put_task(rb->task, rb->task_refs);
9ae72463
PB
2399 if (rb->ctx_refs)
2400 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
2d6500d4
PB
2401}
2402
6ff119a6
PB
2403static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2404 struct io_submit_state *state)
2d6500d4 2405{
f2f87370 2406 io_queue_next(req);
2d6500d4 2407
e3bc8e9d 2408 if (req->task != rb->task) {
7c660731
PB
2409 if (rb->task)
2410 io_put_task(rb->task, rb->task_refs);
e3bc8e9d
JA
2411 rb->task = req->task;
2412 rb->task_refs = 0;
5af1d13e 2413 }
e3bc8e9d 2414 rb->task_refs++;
9ae72463 2415 rb->ctx_refs++;
5af1d13e 2416
4edf20f9 2417 io_dismantle_req(req);
bd759045 2418 if (state->free_reqs != ARRAY_SIZE(state->reqs))
6ff119a6 2419 state->reqs[state->free_reqs++] = req;
bd759045
PB
2420 else
2421 list_add(&req->compl.list, &state->comp.free_list);
7a743e22
PB
2422}
2423
905c172f
PB
2424static void io_submit_flush_completions(struct io_comp_state *cs,
2425 struct io_ring_ctx *ctx)
2426{
2427 int i, nr = cs->nr;
2428 struct io_kiocb *req;
2429 struct req_batch rb;
2430
2431 io_init_req_batch(&rb);
2432 spin_lock_irq(&ctx->completion_lock);
2433 for (i = 0; i < nr; i++) {
2434 req = cs->reqs[i];
2435 __io_cqring_fill_event(req, req->result, req->compl.cflags);
2436 }
2437 io_commit_cqring(ctx);
2438 spin_unlock_irq(&ctx->completion_lock);
2439
2440 io_cqring_ev_posted(ctx);
2441 for (i = 0; i < nr; i++) {
2442 req = cs->reqs[i];
2443
2444 /* submission and completion refs */
2445 if (refcount_sub_and_test(2, &req->refs))
6ff119a6 2446 io_req_free_batch(&rb, req, &ctx->submit_state);
905c172f
PB
2447 }
2448
2449 io_req_free_batch_finish(ctx, &rb);
2450 cs->nr = 0;
2451}
2452
ba816ad6
JA
2453/*
2454 * Drop reference to request, return next in chain (if there is one) if this
2455 * was the last reference to this request.
2456 */
9b5f7bd9 2457static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 2458{
9b5f7bd9
PB
2459 struct io_kiocb *nxt = NULL;
2460
2a44f467 2461 if (refcount_dec_and_test(&req->refs)) {
9b5f7bd9 2462 nxt = io_req_find_next(req);
4d7dd462 2463 __io_free_req(req);
2a44f467 2464 }
9b5f7bd9 2465 return nxt;
2b188cc1
JA
2466}
2467
e65ef56d
JA
2468static void io_put_req(struct io_kiocb *req)
2469{
2470 if (refcount_dec_and_test(&req->refs))
2471 io_free_req(req);
2b188cc1
JA
2472}
2473
216578e5
PB
2474static void io_put_req_deferred_cb(struct callback_head *cb)
2475{
2476 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2477
2478 io_free_req(req);
2479}
2480
2481static void io_free_req_deferred(struct io_kiocb *req)
2482{
2483 int ret;
2484
7cbf1722 2485 req->task_work.func = io_put_req_deferred_cb;
355fb9e2 2486 ret = io_req_task_work_add(req);
eab30c4d
PB
2487 if (unlikely(ret))
2488 io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
216578e5
PB
2489}
2490
2491static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2492{
2493 if (refcount_sub_and_test(refs, &req->refs))
2494 io_free_req_deferred(req);
2495}
2496
978db57e
JA
2497static void io_double_put_req(struct io_kiocb *req)
2498{
2499 /* drop both submit and complete references */
2500 if (refcount_sub_and_test(2, &req->refs))
2501 io_free_req(req);
2502}
2503
6c503150 2504static unsigned io_cqring_events(struct io_ring_ctx *ctx)
a3a0e43f
JA
2505{
2506 /* See comment at the top of this file */
2507 smp_rmb();
e23de15f 2508 return __io_cqring_events(ctx);
a3a0e43f
JA
2509}
2510
fb5ccc98
PB
2511static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2512{
2513 struct io_rings *rings = ctx->rings;
2514
2515 /* make sure SQ entry isn't read before tail */
2516 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2517}
2518
8ff069bf 2519static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
e94f141b 2520{
8ff069bf 2521 unsigned int cflags;
e94f141b 2522
bcda7baa
JA
2523 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2524 cflags |= IORING_CQE_F_BUFFER;
0e1b6fe3 2525 req->flags &= ~REQ_F_BUFFER_SELECTED;
bcda7baa
JA
2526 kfree(kbuf);
2527 return cflags;
e94f141b
JA
2528}
2529
8ff069bf 2530static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
bcda7baa 2531{
4d954c25 2532 struct io_buffer *kbuf;
bcda7baa 2533
4d954c25 2534 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
8ff069bf
PB
2535 return io_put_kbuf(req, kbuf);
2536}
2537
4c6e277c
JA
2538static inline bool io_run_task_work(void)
2539{
6200b0ae
JA
2540 /*
2541 * Not safe to run on exiting task, and the task_work handling will
2542 * not add work to such a task.
2543 */
2544 if (unlikely(current->flags & PF_EXITING))
2545 return false;
4c6e277c
JA
2546 if (current->task_works) {
2547 __set_current_state(TASK_RUNNING);
2548 task_work_run();
2549 return true;
2550 }
2551
2552 return false;
bcda7baa
JA
2553}
2554
def596e9
JA
2555/*
2556 * Find and free completed poll iocbs
2557 */
2558static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2559 struct list_head *done)
2560{
8237e045 2561 struct req_batch rb;
def596e9 2562 struct io_kiocb *req;
bbde017a
XW
2563
2564 /* order with ->result store in io_complete_rw_iopoll() */
2565 smp_rmb();
def596e9 2566
5af1d13e 2567 io_init_req_batch(&rb);
def596e9 2568 while (!list_empty(done)) {
bcda7baa
JA
2569 int cflags = 0;
2570
d21ffe7e 2571 req = list_first_entry(done, struct io_kiocb, inflight_entry);
f161340d
PB
2572 list_del(&req->inflight_entry);
2573
bbde017a
XW
2574 if (READ_ONCE(req->result) == -EAGAIN) {
2575 req->iopoll_completed = 0;
23faba36 2576 if (io_rw_reissue(req))
f161340d 2577 continue;
bbde017a 2578 }
def596e9 2579
bcda7baa 2580 if (req->flags & REQ_F_BUFFER_SELECTED)
8ff069bf 2581 cflags = io_put_rw_kbuf(req);
bcda7baa
JA
2582
2583 __io_cqring_fill_event(req, req->result, cflags);
def596e9
JA
2584 (*nr_events)++;
2585
c3524383 2586 if (refcount_dec_and_test(&req->refs))
6ff119a6 2587 io_req_free_batch(&rb, req, &ctx->submit_state);
def596e9 2588 }
def596e9 2589
09bb8394 2590 io_commit_cqring(ctx);
80c18e4a 2591 io_cqring_ev_posted_iopoll(ctx);
2d6500d4 2592 io_req_free_batch_finish(ctx, &rb);
581f9810
BM
2593}
2594
def596e9
JA
2595static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2596 long min)
2597{
2598 struct io_kiocb *req, *tmp;
2599 LIST_HEAD(done);
2600 bool spin;
2601 int ret;
2602
2603 /*
2604 * Only spin for completions if we don't have multiple devices hanging
2605 * off our complete list, and we're under the requested amount.
2606 */
2607 spin = !ctx->poll_multi_file && *nr_events < min;
2608
2609 ret = 0;
d21ffe7e 2610 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
9adbd45d 2611 struct kiocb *kiocb = &req->rw.kiocb;
def596e9
JA
2612
2613 /*
581f9810
BM
2614 * Move completed and retryable entries to our local lists.
2615 * If we find a request that requires polling, break out
2616 * and complete those lists first, if we have entries there.
def596e9 2617 */
65a6543d 2618 if (READ_ONCE(req->iopoll_completed)) {
d21ffe7e 2619 list_move_tail(&req->inflight_entry, &done);
def596e9
JA
2620 continue;
2621 }
2622 if (!list_empty(&done))
2623 break;
2624
2625 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2626 if (ret < 0)
2627 break;
2628
3aadc23e
PB
2629 /* iopoll may have completed current req */
2630 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2631 list_move_tail(&req->inflight_entry, &done);
3aadc23e 2632
def596e9
JA
2633 if (ret && spin)
2634 spin = false;
2635 ret = 0;
2636 }
2637
2638 if (!list_empty(&done))
2639 io_iopoll_complete(ctx, nr_events, &done);
2640
2641 return ret;
2642}
2643
2644/*
d195a66e 2645 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
2646 * non-spinning poll check - we'll still enter the driver poll loop, but only
2647 * as a non-spinning completion check.
2648 */
2649static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2650 long min)
2651{
540e32a0 2652 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
def596e9
JA
2653 int ret;
2654
2655 ret = io_do_iopoll(ctx, nr_events, min);
2656 if (ret < 0)
2657 return ret;
eba0a4dd 2658 if (*nr_events >= min)
def596e9
JA
2659 return 0;
2660 }
2661
2662 return 1;
2663}
2664
2665/*
2666 * We can't just wait for polled events to come to us, we have to actively
2667 * find and complete them.
2668 */
b2edc0a7 2669static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
2670{
2671 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2672 return;
2673
2674 mutex_lock(&ctx->uring_lock);
540e32a0 2675 while (!list_empty(&ctx->iopoll_list)) {
def596e9
JA
2676 unsigned int nr_events = 0;
2677
b2edc0a7 2678 io_do_iopoll(ctx, &nr_events, 0);
08f5439f 2679
b2edc0a7
PB
2680 /* let it sleep and repeat later if can't complete a request */
2681 if (nr_events == 0)
2682 break;
08f5439f
JA
2683 /*
2684 * Ensure we allow local-to-the-cpu processing to take place,
2685 * in this case we need to ensure that we reap all events.
3fcee5a6 2686 * Also let task_work, etc. to progress by releasing the mutex
08f5439f 2687 */
3fcee5a6
PB
2688 if (need_resched()) {
2689 mutex_unlock(&ctx->uring_lock);
2690 cond_resched();
2691 mutex_lock(&ctx->uring_lock);
2692 }
def596e9
JA
2693 }
2694 mutex_unlock(&ctx->uring_lock);
2695}
2696
7668b92a 2697static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 2698{
7668b92a 2699 unsigned int nr_events = 0;
2b2ed975 2700 int iters = 0, ret = 0;
500f9fba 2701
c7849be9
XW
2702 /*
2703 * We disallow the app entering submit/complete with polling, but we
2704 * still need to lock the ring to prevent racing with polled issue
2705 * that got punted to a workqueue.
2706 */
2707 mutex_lock(&ctx->uring_lock);
def596e9 2708 do {
a3a0e43f
JA
2709 /*
2710 * Don't enter poll loop if we already have events pending.
2711 * If we do, we can potentially be spinning for commands that
2712 * already triggered a CQE (eg in error).
2713 */
6c503150
PB
2714 if (test_bit(0, &ctx->cq_check_overflow))
2715 __io_cqring_overflow_flush(ctx, false, NULL, NULL);
2716 if (io_cqring_events(ctx))
a3a0e43f
JA
2717 break;
2718
500f9fba
JA
2719 /*
2720 * If a submit got punted to a workqueue, we can have the
2721 * application entering polling for a command before it gets
2722 * issued. That app will hold the uring_lock for the duration
2723 * of the poll right here, so we need to take a breather every
2724 * now and then to ensure that the issue has a chance to add
2725 * the poll to the issued list. Otherwise we can spin here
2726 * forever, while the workqueue is stuck trying to acquire the
2727 * very same mutex.
2728 */
2729 if (!(++iters & 7)) {
2730 mutex_unlock(&ctx->uring_lock);
4c6e277c 2731 io_run_task_work();
500f9fba
JA
2732 mutex_lock(&ctx->uring_lock);
2733 }
2734
7668b92a 2735 ret = io_iopoll_getevents(ctx, &nr_events, min);
def596e9
JA
2736 if (ret <= 0)
2737 break;
2738 ret = 0;
7668b92a 2739 } while (min && !nr_events && !need_resched());
def596e9 2740
500f9fba 2741 mutex_unlock(&ctx->uring_lock);
def596e9
JA
2742 return ret;
2743}
2744
491381ce 2745static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 2746{
491381ce
JA
2747 /*
2748 * Tell lockdep we inherited freeze protection from submission
2749 * thread.
2750 */
2751 if (req->flags & REQ_F_ISREG) {
2752 struct inode *inode = file_inode(req->file);
2b188cc1 2753
491381ce 2754 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 2755 }
491381ce 2756 file_end_write(req->file);
2b188cc1
JA
2757}
2758
b63534c4 2759#ifdef CONFIG_BLOCK
dc2a6e9a 2760static bool io_resubmit_prep(struct io_kiocb *req)
b63534c4
JA
2761{
2762 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
4a245479 2763 int rw, ret;
b63534c4 2764 struct iov_iter iter;
b63534c4 2765
dc2a6e9a
PB
2766 /* already prepared */
2767 if (req->async_data)
2768 return true;
b63534c4
JA
2769
2770 switch (req->opcode) {
2771 case IORING_OP_READV:
2772 case IORING_OP_READ_FIXED:
2773 case IORING_OP_READ:
2774 rw = READ;
2775 break;
2776 case IORING_OP_WRITEV:
2777 case IORING_OP_WRITE_FIXED:
2778 case IORING_OP_WRITE:
2779 rw = WRITE;
2780 break;
2781 default:
2782 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2783 req->opcode);
dc2a6e9a 2784 return false;
b63534c4
JA
2785 }
2786
dc2a6e9a
PB
2787 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2788 if (ret < 0)
2789 return false;
6bf985dc 2790 return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
b63534c4 2791}
b63534c4
JA
2792#endif
2793
23faba36 2794static bool io_rw_reissue(struct io_kiocb *req)
b63534c4
JA
2795{
2796#ifdef CONFIG_BLOCK
23faba36 2797 umode_t mode = file_inode(req->file)->i_mode;
b63534c4
JA
2798 int ret;
2799
75c668cd
PB
2800 if (!S_ISBLK(mode) && !S_ISREG(mode))
2801 return false;
2802 if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
b63534c4
JA
2803 return false;
2804
55e6ac1e
PB
2805 lockdep_assert_held(&req->ctx->uring_lock);
2806
28cea78a 2807 ret = io_sq_thread_acquire_mm_files(req->ctx, req);
6d816e08 2808
dc2a6e9a 2809 if (!ret && io_resubmit_prep(req)) {
fdee946d
JA
2810 refcount_inc(&req->refs);
2811 io_queue_async_work(req);
b63534c4 2812 return true;
fdee946d 2813 }
dc2a6e9a 2814 req_set_fail_links(req);
b63534c4
JA
2815#endif
2816 return false;
2817}
2818
a1d7c393 2819static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
889fca73 2820 unsigned int issue_flags)
a1d7c393 2821{
2f8e45f1
PB
2822 int cflags = 0;
2823
23faba36
PB
2824 if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
2825 return;
2f8e45f1
PB
2826 if (res != req->result)
2827 req_set_fail_links(req);
23faba36 2828
2f8e45f1
PB
2829 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2830 kiocb_end_write(req);
2831 if (req->flags & REQ_F_BUFFER_SELECTED)
2832 cflags = io_put_rw_kbuf(req);
2833 __io_req_complete(req, issue_flags, res, cflags);
ba816ad6
JA
2834}
2835
2836static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2837{
9adbd45d 2838 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 2839
889fca73 2840 __io_complete_rw(req, res, res2, 0);
2b188cc1
JA
2841}
2842
def596e9
JA
2843static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2844{
9adbd45d 2845 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 2846
491381ce
JA
2847 if (kiocb->ki_flags & IOCB_WRITE)
2848 kiocb_end_write(req);
def596e9 2849
2d7d6792 2850 if (res != -EAGAIN && res != req->result)
4e88d6e7 2851 req_set_fail_links(req);
bbde017a
XW
2852
2853 WRITE_ONCE(req->result, res);
2854 /* order with io_poll_complete() checking ->result */
cd664b0e
PB
2855 smp_wmb();
2856 WRITE_ONCE(req->iopoll_completed, 1);
def596e9
JA
2857}
2858
2859/*
2860 * After the iocb has been issued, it's safe to be found on the poll list.
2861 * Adding the kiocb to the list AFTER submission ensures that we don't
2862 * find it from a io_iopoll_getevents() thread before the issuer is done
2863 * accessing the kiocb cookie.
2864 */
2e9dbe90 2865static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
def596e9
JA
2866{
2867 struct io_ring_ctx *ctx = req->ctx;
2868
2869 /*
2870 * Track whether we have multiple files in our lists. This will impact
2871 * how we do polling eventually, not spinning if we're on potentially
2872 * different devices.
2873 */
540e32a0 2874 if (list_empty(&ctx->iopoll_list)) {
def596e9
JA
2875 ctx->poll_multi_file = false;
2876 } else if (!ctx->poll_multi_file) {
2877 struct io_kiocb *list_req;
2878
540e32a0 2879 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
d21ffe7e 2880 inflight_entry);
9adbd45d 2881 if (list_req->file != req->file)
def596e9
JA
2882 ctx->poll_multi_file = true;
2883 }
2884
2885 /*
2886 * For fast devices, IO may have already completed. If it has, add
2887 * it to the front so we find it first.
2888 */
65a6543d 2889 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2890 list_add(&req->inflight_entry, &ctx->iopoll_list);
def596e9 2891 else
d21ffe7e 2892 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
bdcd3eab 2893
2e9dbe90
XW
2894 /*
2895 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
2896 * task context or in io worker task context. If current task context is
2897 * sq thread, we don't need to check whether should wake up sq thread.
2898 */
2899 if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
534ca6d6
JA
2900 wq_has_sleeper(&ctx->sq_data->wait))
2901 wake_up(&ctx->sq_data->wait);
def596e9
JA
2902}
2903
9f13c35b
PB
2904static inline void io_state_file_put(struct io_submit_state *state)
2905{
02b23a9a
PB
2906 if (state->file_refs) {
2907 fput_many(state->file, state->file_refs);
2908 state->file_refs = 0;
2909 }
9a56a232
JA
2910}
2911
2912/*
2913 * Get as many references to a file as we have IOs left in this submission,
2914 * assuming most submissions are for one file, or at least that each file
2915 * has more than one submission.
2916 */
8da11c19 2917static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
2918{
2919 if (!state)
2920 return fget(fd);
2921
6e1271e6 2922 if (state->file_refs) {
9a56a232 2923 if (state->fd == fd) {
6e1271e6 2924 state->file_refs--;
9a56a232
JA
2925 return state->file;
2926 }
02b23a9a 2927 io_state_file_put(state);
9a56a232
JA
2928 }
2929 state->file = fget_many(fd, state->ios_left);
6e1271e6 2930 if (unlikely(!state->file))
9a56a232
JA
2931 return NULL;
2932
2933 state->fd = fd;
6e1271e6 2934 state->file_refs = state->ios_left - 1;
9a56a232
JA
2935 return state->file;
2936}
2937
4503b767
JA
2938static bool io_bdev_nowait(struct block_device *bdev)
2939{
9ba0d0c8 2940 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
4503b767
JA
2941}
2942
2b188cc1
JA
2943/*
2944 * If we tracked the file through the SCM inflight mechanism, we could support
2945 * any file. For now, just ensure that anything potentially problematic is done
2946 * inline.
2947 */
af197f50 2948static bool io_file_supports_async(struct file *file, int rw)
2b188cc1
JA
2949{
2950 umode_t mode = file_inode(file)->i_mode;
2951
4503b767 2952 if (S_ISBLK(mode)) {
4e7b5671
CH
2953 if (IS_ENABLED(CONFIG_BLOCK) &&
2954 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
4503b767
JA
2955 return true;
2956 return false;
2957 }
2958 if (S_ISCHR(mode) || S_ISSOCK(mode))
2b188cc1 2959 return true;
4503b767 2960 if (S_ISREG(mode)) {
4e7b5671
CH
2961 if (IS_ENABLED(CONFIG_BLOCK) &&
2962 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
4503b767
JA
2963 file->f_op != &io_uring_fops)
2964 return true;
2965 return false;
2966 }
2b188cc1 2967
c5b85625
JA
2968 /* any ->read/write should understand O_NONBLOCK */
2969 if (file->f_flags & O_NONBLOCK)
2970 return true;
2971
af197f50
JA
2972 if (!(file->f_mode & FMODE_NOWAIT))
2973 return false;
2974
2975 if (rw == READ)
2976 return file->f_op->read_iter != NULL;
2977
2978 return file->f_op->write_iter != NULL;
2b188cc1
JA
2979}
2980
a88fc400 2981static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 2982{
def596e9 2983 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2984 struct kiocb *kiocb = &req->rw.kiocb;
75c668cd 2985 struct file *file = req->file;
09bb8394
JA
2986 unsigned ioprio;
2987 int ret;
2b188cc1 2988
75c668cd 2989 if (S_ISREG(file_inode(file)->i_mode))
491381ce
JA
2990 req->flags |= REQ_F_ISREG;
2991
2b188cc1 2992 kiocb->ki_pos = READ_ONCE(sqe->off);
75c668cd 2993 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
ba04291e 2994 req->flags |= REQ_F_CUR_POS;
75c668cd 2995 kiocb->ki_pos = file->f_pos;
ba04291e 2996 }
2b188cc1 2997 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2998 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2999 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
3000 if (unlikely(ret))
3001 return ret;
2b188cc1 3002
75c668cd
PB
3003 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
3004 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
3005 req->flags |= REQ_F_NOWAIT;
3006
2b188cc1
JA
3007 ioprio = READ_ONCE(sqe->ioprio);
3008 if (ioprio) {
3009 ret = ioprio_check_cap(ioprio);
3010 if (ret)
09bb8394 3011 return ret;
2b188cc1
JA
3012
3013 kiocb->ki_ioprio = ioprio;
3014 } else
3015 kiocb->ki_ioprio = get_current_ioprio();
3016
def596e9 3017 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
3018 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
3019 !kiocb->ki_filp->f_op->iopoll)
09bb8394 3020 return -EOPNOTSUPP;
2b188cc1 3021
def596e9
JA
3022 kiocb->ki_flags |= IOCB_HIPRI;
3023 kiocb->ki_complete = io_complete_rw_iopoll;
65a6543d 3024 req->iopoll_completed = 0;
def596e9 3025 } else {
09bb8394
JA
3026 if (kiocb->ki_flags & IOCB_HIPRI)
3027 return -EINVAL;
def596e9
JA
3028 kiocb->ki_complete = io_complete_rw;
3029 }
9adbd45d 3030
3529d8c2
JA
3031 req->rw.addr = READ_ONCE(sqe->addr);
3032 req->rw.len = READ_ONCE(sqe->len);
4f4eeba8 3033 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 3034 return 0;
2b188cc1
JA
3035}
3036
3037static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
3038{
3039 switch (ret) {
3040 case -EIOCBQUEUED:
3041 break;
3042 case -ERESTARTSYS:
3043 case -ERESTARTNOINTR:
3044 case -ERESTARTNOHAND:
3045 case -ERESTART_RESTARTBLOCK:
3046 /*
3047 * We can't just restart the syscall, since previously
3048 * submitted sqes may already be in progress. Just fail this
3049 * IO with EINTR.
3050 */
3051 ret = -EINTR;
df561f66 3052 fallthrough;
2b188cc1
JA
3053 default:
3054 kiocb->ki_complete(kiocb, ret, 0);
3055 }
3056}
3057
a1d7c393 3058static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
889fca73 3059 unsigned int issue_flags)
ba816ad6 3060{
ba04291e 3061 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
e8c2bc1f 3062 struct io_async_rw *io = req->async_data;
ba04291e 3063
227c0c96 3064 /* add previously done IO, if any */
e8c2bc1f 3065 if (io && io->bytes_done > 0) {
227c0c96 3066 if (ret < 0)
e8c2bc1f 3067 ret = io->bytes_done;
227c0c96 3068 else
e8c2bc1f 3069 ret += io->bytes_done;
227c0c96
JA
3070 }
3071
ba04291e
JA
3072 if (req->flags & REQ_F_CUR_POS)
3073 req->file->f_pos = kiocb->ki_pos;
bcaec089 3074 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
889fca73 3075 __io_complete_rw(req, ret, 0, issue_flags);
ba816ad6
JA
3076 else
3077 io_rw_done(kiocb, ret);
3078}
3079
847595de 3080static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
edafccee 3081{
9adbd45d
JA
3082 struct io_ring_ctx *ctx = req->ctx;
3083 size_t len = req->rw.len;
edafccee 3084 struct io_mapped_ubuf *imu;
4be1c615 3085 u16 index, buf_index = req->buf_index;
edafccee
JA
3086 size_t offset;
3087 u64 buf_addr;
3088
edafccee
JA
3089 if (unlikely(buf_index >= ctx->nr_user_bufs))
3090 return -EFAULT;
edafccee
JA
3091 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
3092 imu = &ctx->user_bufs[index];
9adbd45d 3093 buf_addr = req->rw.addr;
edafccee
JA
3094
3095 /* overflow */
3096 if (buf_addr + len < buf_addr)
3097 return -EFAULT;
3098 /* not inside the mapped region */
3099 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
3100 return -EFAULT;
3101
3102 /*
3103 * May not be a start of buffer, set size appropriately
3104 * and advance us to the beginning.
3105 */
3106 offset = buf_addr - imu->ubuf;
3107 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
3108
3109 if (offset) {
3110 /*
3111 * Don't use iov_iter_advance() here, as it's really slow for
3112 * using the latter parts of a big fixed buffer - it iterates
3113 * over each segment manually. We can cheat a bit here, because
3114 * we know that:
3115 *
3116 * 1) it's a BVEC iter, we set it up
3117 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3118 * first and last bvec
3119 *
3120 * So just find our index, and adjust the iterator afterwards.
3121 * If the offset is within the first bvec (or the whole first
3122 * bvec, just use iov_iter_advance(). This makes it easier
3123 * since we can just skip the first segment, which may not
3124 * be PAGE_SIZE aligned.
3125 */
3126 const struct bio_vec *bvec = imu->bvec;
3127
3128 if (offset <= bvec->bv_len) {
3129 iov_iter_advance(iter, offset);
3130 } else {
3131 unsigned long seg_skip;
3132
3133 /* skip first vec */
3134 offset -= bvec->bv_len;
3135 seg_skip = 1 + (offset >> PAGE_SHIFT);
3136
3137 iter->bvec = bvec + seg_skip;
3138 iter->nr_segs -= seg_skip;
99c79f66 3139 iter->count -= bvec->bv_len + offset;
bd11b3a3 3140 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
3141 }
3142 }
3143
847595de 3144 return 0;
edafccee
JA
3145}
3146
bcda7baa
JA
3147static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3148{
3149 if (needs_lock)
3150 mutex_unlock(&ctx->uring_lock);
3151}
3152
3153static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3154{
3155 /*
3156 * "Normal" inline submissions always hold the uring_lock, since we
3157 * grab it from the system call. Same is true for the SQPOLL offload.
3158 * The only exception is when we've detached the request and issue it
3159 * from an async worker thread, grab the lock for that case.
3160 */
3161 if (needs_lock)
3162 mutex_lock(&ctx->uring_lock);
3163}
3164
3165static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3166 int bgid, struct io_buffer *kbuf,
3167 bool needs_lock)
3168{
3169 struct io_buffer *head;
3170
3171 if (req->flags & REQ_F_BUFFER_SELECTED)
3172 return kbuf;
3173
3174 io_ring_submit_lock(req->ctx, needs_lock);
3175
3176 lockdep_assert_held(&req->ctx->uring_lock);
3177
3178 head = idr_find(&req->ctx->io_buffer_idr, bgid);
3179 if (head) {
3180 if (!list_empty(&head->list)) {
3181 kbuf = list_last_entry(&head->list, struct io_buffer,
3182 list);
3183 list_del(&kbuf->list);
3184 } else {
3185 kbuf = head;
3186 idr_remove(&req->ctx->io_buffer_idr, bgid);
3187 }
3188 if (*len > kbuf->len)
3189 *len = kbuf->len;
3190 } else {
3191 kbuf = ERR_PTR(-ENOBUFS);
3192 }
3193
3194 io_ring_submit_unlock(req->ctx, needs_lock);
3195
3196 return kbuf;
3197}
3198
4d954c25
JA
3199static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3200 bool needs_lock)
3201{
3202 struct io_buffer *kbuf;
4f4eeba8 3203 u16 bgid;
4d954c25
JA
3204
3205 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
4f4eeba8 3206 bgid = req->buf_index;
4d954c25
JA
3207 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3208 if (IS_ERR(kbuf))
3209 return kbuf;
3210 req->rw.addr = (u64) (unsigned long) kbuf;
3211 req->flags |= REQ_F_BUFFER_SELECTED;
3212 return u64_to_user_ptr(kbuf->addr);
3213}
3214
3215#ifdef CONFIG_COMPAT
3216static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3217 bool needs_lock)
3218{
3219 struct compat_iovec __user *uiov;
3220 compat_ssize_t clen;
3221 void __user *buf;
3222 ssize_t len;
3223
3224 uiov = u64_to_user_ptr(req->rw.addr);
3225 if (!access_ok(uiov, sizeof(*uiov)))
3226 return -EFAULT;
3227 if (__get_user(clen, &uiov->iov_len))
3228 return -EFAULT;
3229 if (clen < 0)
3230 return -EINVAL;
3231
3232 len = clen;
3233 buf = io_rw_buffer_select(req, &len, needs_lock);
3234 if (IS_ERR(buf))
3235 return PTR_ERR(buf);
3236 iov[0].iov_base = buf;
3237 iov[0].iov_len = (compat_size_t) len;
3238 return 0;
3239}
3240#endif
3241
3242static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3243 bool needs_lock)
3244{
3245 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3246 void __user *buf;
3247 ssize_t len;
3248
3249 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3250 return -EFAULT;
3251
3252 len = iov[0].iov_len;
3253 if (len < 0)
3254 return -EINVAL;
3255 buf = io_rw_buffer_select(req, &len, needs_lock);
3256 if (IS_ERR(buf))
3257 return PTR_ERR(buf);
3258 iov[0].iov_base = buf;
3259 iov[0].iov_len = len;
3260 return 0;
3261}
3262
3263static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3264 bool needs_lock)
3265{
dddb3e26
JA
3266 if (req->flags & REQ_F_BUFFER_SELECTED) {
3267 struct io_buffer *kbuf;
3268
3269 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3270 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3271 iov[0].iov_len = kbuf->len;
4d954c25 3272 return 0;
dddb3e26 3273 }
dd201662 3274 if (req->rw.len != 1)
4d954c25
JA
3275 return -EINVAL;
3276
3277#ifdef CONFIG_COMPAT
3278 if (req->ctx->compat)
3279 return io_compat_import(req, iov, needs_lock);
3280#endif
3281
3282 return __io_iov_buffer_select(req, iov, needs_lock);
3283}
3284
847595de
PB
3285static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3286 struct iov_iter *iter, bool needs_lock)
2b188cc1 3287{
9adbd45d
JA
3288 void __user *buf = u64_to_user_ptr(req->rw.addr);
3289 size_t sqe_len = req->rw.len;
847595de 3290 u8 opcode = req->opcode;
4d954c25 3291 ssize_t ret;
edafccee 3292
7d009165 3293 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 3294 *iovec = NULL;
9adbd45d 3295 return io_import_fixed(req, rw, iter);
edafccee 3296 }
2b188cc1 3297
bcda7baa 3298 /* buffer index only valid with fixed read/write, or buffer select */
4f4eeba8 3299 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
3300 return -EINVAL;
3301
3a6820f2 3302 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 3303 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25 3304 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
867a23ea 3305 if (IS_ERR(buf))
4d954c25 3306 return PTR_ERR(buf);
3f9d6441 3307 req->rw.len = sqe_len;
bcda7baa
JA
3308 }
3309
3a6820f2
JA
3310 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3311 *iovec = NULL;
10fc72e4 3312 return ret;
3a6820f2
JA
3313 }
3314
4d954c25
JA
3315 if (req->flags & REQ_F_BUFFER_SELECT) {
3316 ret = io_iov_buffer_select(req, *iovec, needs_lock);
847595de
PB
3317 if (!ret)
3318 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
4d954c25
JA
3319 *iovec = NULL;
3320 return ret;
3321 }
3322
89cd35c5
CH
3323 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3324 req->ctx->compat);
2b188cc1
JA
3325}
3326
0fef9483
JA
3327static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3328{
5b09e37e 3329 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
0fef9483
JA
3330}
3331
31b51510 3332/*
32960613
JA
3333 * For files that don't have ->read_iter() and ->write_iter(), handle them
3334 * by looping over ->read() or ->write() manually.
31b51510 3335 */
4017eb91 3336static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
32960613 3337{
4017eb91
JA
3338 struct kiocb *kiocb = &req->rw.kiocb;
3339 struct file *file = req->file;
32960613
JA
3340 ssize_t ret = 0;
3341
3342 /*
3343 * Don't support polled IO through this interface, and we can't
3344 * support non-blocking either. For the latter, this just causes
3345 * the kiocb to be handled from an async context.
3346 */
3347 if (kiocb->ki_flags & IOCB_HIPRI)
3348 return -EOPNOTSUPP;
3349 if (kiocb->ki_flags & IOCB_NOWAIT)
3350 return -EAGAIN;
3351
3352 while (iov_iter_count(iter)) {
311ae9e1 3353 struct iovec iovec;
32960613
JA
3354 ssize_t nr;
3355
311ae9e1
PB
3356 if (!iov_iter_is_bvec(iter)) {
3357 iovec = iov_iter_iovec(iter);
3358 } else {
4017eb91
JA
3359 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3360 iovec.iov_len = req->rw.len;
311ae9e1
PB
3361 }
3362
32960613
JA
3363 if (rw == READ) {
3364 nr = file->f_op->read(file, iovec.iov_base,
0fef9483 3365 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3366 } else {
3367 nr = file->f_op->write(file, iovec.iov_base,
0fef9483 3368 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3369 }
3370
3371 if (nr < 0) {
3372 if (!ret)
3373 ret = nr;
3374 break;
3375 }
3376 ret += nr;
3377 if (nr != iovec.iov_len)
3378 break;
4017eb91
JA
3379 req->rw.len -= nr;
3380 req->rw.addr += nr;
32960613
JA
3381 iov_iter_advance(iter, nr);
3382 }
3383
3384 return ret;
3385}
3386
ff6165b2
JA
3387static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3388 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 3389{
e8c2bc1f 3390 struct io_async_rw *rw = req->async_data;
b64e3444 3391
ff6165b2 3392 memcpy(&rw->iter, iter, sizeof(*iter));
afb87658 3393 rw->free_iovec = iovec;
227c0c96 3394 rw->bytes_done = 0;
ff6165b2 3395 /* can only be fixed buffers, no need to do anything */
9c3a205c 3396 if (iov_iter_is_bvec(iter))
ff6165b2 3397 return;
b64e3444 3398 if (!iovec) {
ff6165b2
JA
3399 unsigned iov_off = 0;
3400
3401 rw->iter.iov = rw->fast_iov;
3402 if (iter->iov != fast_iov) {
3403 iov_off = iter->iov - fast_iov;
3404 rw->iter.iov += iov_off;
3405 }
3406 if (rw->fast_iov != fast_iov)
3407 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
45097dae 3408 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
3409 } else {
3410 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
3411 }
3412}
3413
e8c2bc1f 3414static inline int __io_alloc_async_data(struct io_kiocb *req)
3d9932a8 3415{
e8c2bc1f
JA
3416 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3417 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3418 return req->async_data == NULL;
3d9932a8
XW
3419}
3420
e8c2bc1f 3421static int io_alloc_async_data(struct io_kiocb *req)
f67676d1 3422{
e8c2bc1f 3423 if (!io_op_defs[req->opcode].needs_async_data)
d3656344 3424 return 0;
3d9932a8 3425
e8c2bc1f 3426 return __io_alloc_async_data(req);
b7bb4f7d
JA
3427}
3428
ff6165b2
JA
3429static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3430 const struct iovec *fast_iov,
227c0c96 3431 struct iov_iter *iter, bool force)
b7bb4f7d 3432{
e8c2bc1f 3433 if (!force && !io_op_defs[req->opcode].needs_async_data)
74566df3 3434 return 0;
e8c2bc1f 3435 if (!req->async_data) {
6bf985dc
PB
3436 if (__io_alloc_async_data(req)) {
3437 kfree(iovec);
5d204bcf 3438 return -ENOMEM;
6bf985dc 3439 }
b7bb4f7d 3440
ff6165b2 3441 io_req_map_rw(req, iovec, fast_iov, iter);
5d204bcf 3442 }
b7bb4f7d 3443 return 0;
f67676d1
JA
3444}
3445
73debe68 3446static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
c3e330a4 3447{
e8c2bc1f 3448 struct io_async_rw *iorw = req->async_data;
f4bff104 3449 struct iovec *iov = iorw->fast_iov;
847595de 3450 int ret;
c3e330a4 3451
2846c481 3452 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
c3e330a4
PB
3453 if (unlikely(ret < 0))
3454 return ret;
3455
ab0b196c
PB
3456 iorw->bytes_done = 0;
3457 iorw->free_iovec = iov;
3458 if (iov)
3459 req->flags |= REQ_F_NEED_CLEANUP;
c3e330a4
PB
3460 return 0;
3461}
3462
73debe68 3463static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1
JA
3464{
3465 ssize_t ret;
3466
a88fc400 3467 ret = io_prep_rw(req, sqe);
3529d8c2
JA
3468 if (ret)
3469 return ret;
f67676d1 3470
3529d8c2
JA
3471 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3472 return -EBADF;
f67676d1 3473
5f798bea 3474 /* either don't need iovec imported or already have it */
2d199895 3475 if (!req->async_data)
3529d8c2 3476 return 0;
73debe68 3477 return io_rw_prep_async(req, READ);
f67676d1
JA
3478}
3479
c1dd91d1
JA
3480/*
3481 * This is our waitqueue callback handler, registered through lock_page_async()
3482 * when we initially tried to do the IO with the iocb armed our waitqueue.
3483 * This gets called when the page is unlocked, and we generally expect that to
3484 * happen when the page IO is completed and the page is now uptodate. This will
3485 * queue a task_work based retry of the operation, attempting to copy the data
3486 * again. If the latter fails because the page was NOT uptodate, then we will
3487 * do a thread based blocking retry of the operation. That's the unexpected
3488 * slow path.
3489 */
bcf5a063
JA
3490static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3491 int sync, void *arg)
3492{
3493 struct wait_page_queue *wpq;
3494 struct io_kiocb *req = wait->private;
bcf5a063 3495 struct wait_page_key *key = arg;
bcf5a063
JA
3496
3497 wpq = container_of(wait, struct wait_page_queue, wait);
3498
cdc8fcb4
LT
3499 if (!wake_page_match(wpq, key))
3500 return 0;
3501
c8d317aa 3502 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063
JA
3503 list_del_init(&wait->entry);
3504
bcf5a063
JA
3505 /* submit ref gets dropped, acquire a new one */
3506 refcount_inc(&req->refs);
921b9054 3507 io_req_task_queue(req);
bcf5a063
JA
3508 return 1;
3509}
3510
c1dd91d1
JA
3511/*
3512 * This controls whether a given IO request should be armed for async page
3513 * based retry. If we return false here, the request is handed to the async
3514 * worker threads for retry. If we're doing buffered reads on a regular file,
3515 * we prepare a private wait_page_queue entry and retry the operation. This
3516 * will either succeed because the page is now uptodate and unlocked, or it
3517 * will register a callback when the page is unlocked at IO completion. Through
3518 * that callback, io_uring uses task_work to setup a retry of the operation.
3519 * That retry will attempt the buffered read again. The retry will generally
3520 * succeed, or in rare cases where it fails, we then fall back to using the
3521 * async worker threads for a blocking retry.
3522 */
227c0c96 3523static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 3524{
e8c2bc1f
JA
3525 struct io_async_rw *rw = req->async_data;
3526 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 3527 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 3528
bcf5a063
JA
3529 /* never retry for NOWAIT, we just complete with -EAGAIN */
3530 if (req->flags & REQ_F_NOWAIT)
3531 return false;
f67676d1 3532
227c0c96 3533 /* Only for buffered IO */
3b2a4439 3534 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 3535 return false;
3b2a4439 3536
bcf5a063
JA
3537 /*
3538 * just use poll if we can, and don't attempt if the fs doesn't
3539 * support callback based unlocks
3540 */
3541 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3542 return false;
f67676d1 3543
3b2a4439
JA
3544 wait->wait.func = io_async_buf_func;
3545 wait->wait.private = req;
3546 wait->wait.flags = 0;
3547 INIT_LIST_HEAD(&wait->wait.entry);
3548 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 3549 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 3550 kiocb->ki_waitq = wait;
3b2a4439 3551 return true;
bcf5a063
JA
3552}
3553
3554static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3555{
3556 if (req->file->f_op->read_iter)
3557 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d 3558 else if (req->file->f_op->read)
4017eb91 3559 return loop_rw_iter(READ, req, iter);
2dd2111d
GH
3560 else
3561 return -EINVAL;
f67676d1
JA
3562}
3563
889fca73 3564static int io_read(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3565{
3566 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3567 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3568 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3569 struct io_async_rw *rw = req->async_data;
227c0c96 3570 ssize_t io_size, ret, ret2;
45d189c6 3571 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ff6165b2 3572
2846c481 3573 if (rw) {
e8c2bc1f 3574 iter = &rw->iter;
2846c481
PB
3575 iovec = NULL;
3576 } else {
3577 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3578 if (ret < 0)
3579 return ret;
3580 }
632546c4 3581 io_size = iov_iter_count(iter);
fa15bafb 3582 req->result = io_size;
2b188cc1 3583
fd6c2e4c
JA
3584 /* Ensure we clear previously set non-block flag */
3585 if (!force_nonblock)
29de5f6a 3586 kiocb->ki_flags &= ~IOCB_NOWAIT;
a88fc400
PB
3587 else
3588 kiocb->ki_flags |= IOCB_NOWAIT;
3589
24c74678 3590 /* If the file doesn't support async, just async punt */
6713e7a6
PB
3591 if (force_nonblock && !io_file_supports_async(req->file, READ)) {
3592 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
6bf985dc 3593 return ret ?: -EAGAIN;
6713e7a6 3594 }
9e645e11 3595
632546c4 3596 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
5ea5dd45
PB
3597 if (unlikely(ret)) {
3598 kfree(iovec);
3599 return ret;
3600 }
2b188cc1 3601
227c0c96 3602 ret = io_iter_do_read(req, iter);
32960613 3603
57cd657b 3604 if (ret == -EIOCBQUEUED) {
fe1cdd55 3605 goto out_free;
227c0c96 3606 } else if (ret == -EAGAIN) {
eefdf30f
JA
3607 /* IOPOLL retry should happen for io-wq threads */
3608 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
f91daf56 3609 goto done;
75c668cd
PB
3610 /* no retry on NONBLOCK nor RWF_NOWAIT */
3611 if (req->flags & REQ_F_NOWAIT)
355afaeb 3612 goto done;
84216315 3613 /* some cases will consume bytes even on error returns */
632546c4 3614 iov_iter_revert(iter, io_size - iov_iter_count(iter));
f38c7e3a 3615 ret = 0;
7335e3bf 3616 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
75c668cd 3617 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
7335e3bf 3618 /* read all, failed, already did sync or don't want to retry */
00d23d51 3619 goto done;
1a2cc0ce 3620 }
227c0c96 3621
227c0c96 3622 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
6bf985dc
PB
3623 if (ret2)
3624 return ret2;
3625
fe1cdd55 3626 iovec = NULL;
e8c2bc1f 3627 rw = req->async_data;
227c0c96 3628 /* now use our persistent iterator, if we aren't already */
e8c2bc1f 3629 iter = &rw->iter;
227c0c96 3630
b23df91b
PB
3631 do {
3632 io_size -= ret;
3633 rw->bytes_done += ret;
3634 /* if we can retry, do so with the callbacks armed */
3635 if (!io_rw_should_retry(req)) {
3636 kiocb->ki_flags &= ~IOCB_WAITQ;
3637 return -EAGAIN;
3638 }
3639
3640 /*
3641 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3642 * we get -EIOCBQUEUED, then we'll get a notification when the
3643 * desired page gets unlocked. We can also get a partial read
3644 * here, and if we do, then just retry at the new offset.
3645 */
3646 ret = io_iter_do_read(req, iter);
3647 if (ret == -EIOCBQUEUED)
3648 return 0;
3649 /* we got some bytes, but not all. retry. */
3650 } while (ret > 0 && ret < io_size);
227c0c96 3651done:
889fca73 3652 kiocb_done(kiocb, ret, issue_flags);
fe1cdd55
PB
3653out_free:
3654 /* it's faster to check here then delegate to kfree */
3655 if (iovec)
3656 kfree(iovec);
5ea5dd45 3657 return 0;
2b188cc1
JA
3658}
3659
73debe68 3660static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1
JA
3661{
3662 ssize_t ret;
3663
a88fc400 3664 ret = io_prep_rw(req, sqe);
3529d8c2
JA
3665 if (ret)
3666 return ret;
f67676d1 3667
3529d8c2
JA
3668 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3669 return -EBADF;
f67676d1 3670
5f798bea 3671 /* either don't need iovec imported or already have it */
2d199895 3672 if (!req->async_data)
3529d8c2 3673 return 0;
73debe68 3674 return io_rw_prep_async(req, WRITE);
f67676d1
JA
3675}
3676
889fca73 3677static int io_write(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
3678{
3679 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3680 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3681 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3682 struct io_async_rw *rw = req->async_data;
fa15bafb 3683 ssize_t ret, ret2, io_size;
45d189c6 3684 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
2b188cc1 3685
2846c481 3686 if (rw) {
e8c2bc1f 3687 iter = &rw->iter;
2846c481
PB
3688 iovec = NULL;
3689 } else {
3690 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3691 if (ret < 0)
3692 return ret;
3693 }
632546c4 3694 io_size = iov_iter_count(iter);
fa15bafb 3695 req->result = io_size;
2b188cc1 3696
fd6c2e4c
JA
3697 /* Ensure we clear previously set non-block flag */
3698 if (!force_nonblock)
a88fc400
PB
3699 kiocb->ki_flags &= ~IOCB_NOWAIT;
3700 else
3701 kiocb->ki_flags |= IOCB_NOWAIT;
fd6c2e4c 3702
24c74678 3703 /* If the file doesn't support async, just async punt */
af197f50 3704 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
f67676d1 3705 goto copy_iov;
31b51510 3706
10d59345
JA
3707 /* file path doesn't support NOWAIT for non-direct_IO */
3708 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3709 (req->flags & REQ_F_ISREG))
f67676d1 3710 goto copy_iov;
31b51510 3711
632546c4 3712 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
fa15bafb
PB
3713 if (unlikely(ret))
3714 goto out_free;
4ed734b0 3715
fa15bafb
PB
3716 /*
3717 * Open-code file_start_write here to grab freeze protection,
3718 * which will be released by another thread in
3719 * io_complete_rw(). Fool lockdep by telling it the lock got
3720 * released so that it doesn't complain about the held lock when
3721 * we return to userspace.
3722 */
3723 if (req->flags & REQ_F_ISREG) {
8a3c84b6 3724 sb_start_write(file_inode(req->file)->i_sb);
fa15bafb
PB
3725 __sb_writers_release(file_inode(req->file)->i_sb,
3726 SB_FREEZE_WRITE);
3727 }
3728 kiocb->ki_flags |= IOCB_WRITE;
4ed734b0 3729
fa15bafb 3730 if (req->file->f_op->write_iter)
ff6165b2 3731 ret2 = call_write_iter(req->file, kiocb, iter);
2dd2111d 3732 else if (req->file->f_op->write)
4017eb91 3733 ret2 = loop_rw_iter(WRITE, req, iter);
2dd2111d
GH
3734 else
3735 ret2 = -EINVAL;
4ed734b0 3736
fa15bafb
PB
3737 /*
3738 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3739 * retry them without IOCB_NOWAIT.
3740 */
3741 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3742 ret2 = -EAGAIN;
75c668cd
PB
3743 /* no retry on NONBLOCK nor RWF_NOWAIT */
3744 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
355afaeb 3745 goto done;
fa15bafb 3746 if (!force_nonblock || ret2 != -EAGAIN) {
eefdf30f
JA
3747 /* IOPOLL retry should happen for io-wq threads */
3748 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3749 goto copy_iov;
355afaeb 3750done:
889fca73 3751 kiocb_done(kiocb, ret2, issue_flags);
fa15bafb 3752 } else {
f67676d1 3753copy_iov:
84216315 3754 /* some cases will consume bytes even on error returns */
632546c4 3755 iov_iter_revert(iter, io_size - iov_iter_count(iter));
227c0c96 3756 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
6bf985dc 3757 return ret ?: -EAGAIN;
2b188cc1 3758 }
31b51510 3759out_free:
f261c168 3760 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 3761 if (iovec)
6f2cc166 3762 kfree(iovec);
2b188cc1
JA
3763 return ret;
3764}
3765
80a261fd
JA
3766static int io_renameat_prep(struct io_kiocb *req,
3767 const struct io_uring_sqe *sqe)
3768{
3769 struct io_rename *ren = &req->rename;
3770 const char __user *oldf, *newf;
3771
3772 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3773 return -EBADF;
3774
3775 ren->old_dfd = READ_ONCE(sqe->fd);
3776 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3777 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3778 ren->new_dfd = READ_ONCE(sqe->len);
3779 ren->flags = READ_ONCE(sqe->rename_flags);
3780
3781 ren->oldpath = getname(oldf);
3782 if (IS_ERR(ren->oldpath))
3783 return PTR_ERR(ren->oldpath);
3784
3785 ren->newpath = getname(newf);
3786 if (IS_ERR(ren->newpath)) {
3787 putname(ren->oldpath);
3788 return PTR_ERR(ren->newpath);
3789 }
3790
3791 req->flags |= REQ_F_NEED_CLEANUP;
3792 return 0;
3793}
3794
45d189c6 3795static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
80a261fd
JA
3796{
3797 struct io_rename *ren = &req->rename;
3798 int ret;
3799
45d189c6 3800 if (issue_flags & IO_URING_F_NONBLOCK)
80a261fd
JA
3801 return -EAGAIN;
3802
3803 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3804 ren->newpath, ren->flags);
3805
3806 req->flags &= ~REQ_F_NEED_CLEANUP;
3807 if (ret < 0)
3808 req_set_fail_links(req);
3809 io_req_complete(req, ret);
3810 return 0;
3811}
3812
14a1143b
JA
3813static int io_unlinkat_prep(struct io_kiocb *req,
3814 const struct io_uring_sqe *sqe)
3815{
3816 struct io_unlink *un = &req->unlink;
3817 const char __user *fname;
3818
3819 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3820 return -EBADF;
3821
3822 un->dfd = READ_ONCE(sqe->fd);
3823
3824 un->flags = READ_ONCE(sqe->unlink_flags);
3825 if (un->flags & ~AT_REMOVEDIR)
3826 return -EINVAL;
3827
3828 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3829 un->filename = getname(fname);
3830 if (IS_ERR(un->filename))
3831 return PTR_ERR(un->filename);
3832
3833 req->flags |= REQ_F_NEED_CLEANUP;
3834 return 0;
3835}
3836
45d189c6 3837static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
14a1143b
JA
3838{
3839 struct io_unlink *un = &req->unlink;
3840 int ret;
3841
45d189c6 3842 if (issue_flags & IO_URING_F_NONBLOCK)
14a1143b
JA
3843 return -EAGAIN;
3844
3845 if (un->flags & AT_REMOVEDIR)
3846 ret = do_rmdir(un->dfd, un->filename);
3847 else
3848 ret = do_unlinkat(un->dfd, un->filename);
3849
3850 req->flags &= ~REQ_F_NEED_CLEANUP;
3851 if (ret < 0)
3852 req_set_fail_links(req);
3853 io_req_complete(req, ret);
3854 return 0;
3855}
3856
36f4fa68
JA
3857static int io_shutdown_prep(struct io_kiocb *req,
3858 const struct io_uring_sqe *sqe)
3859{
3860#if defined(CONFIG_NET)
3861 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3862 return -EINVAL;
3863 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3864 sqe->buf_index)
3865 return -EINVAL;
3866
3867 req->shutdown.how = READ_ONCE(sqe->len);
3868 return 0;
3869#else
3870 return -EOPNOTSUPP;
3871#endif
3872}
3873
45d189c6 3874static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
36f4fa68
JA
3875{
3876#if defined(CONFIG_NET)
3877 struct socket *sock;
3878 int ret;
3879
45d189c6 3880 if (issue_flags & IO_URING_F_NONBLOCK)
36f4fa68
JA
3881 return -EAGAIN;
3882
48aba79b 3883 sock = sock_from_file(req->file);
36f4fa68 3884 if (unlikely(!sock))
48aba79b 3885 return -ENOTSOCK;
36f4fa68
JA
3886
3887 ret = __sys_shutdown_sock(sock, req->shutdown.how);
a146468d
JA
3888 if (ret < 0)
3889 req_set_fail_links(req);
36f4fa68
JA
3890 io_req_complete(req, ret);
3891 return 0;
3892#else
3893 return -EOPNOTSUPP;
3894#endif
3895}
3896
f2a8d5c7
PB
3897static int __io_splice_prep(struct io_kiocb *req,
3898 const struct io_uring_sqe *sqe)
7d67af2c
PB
3899{
3900 struct io_splice* sp = &req->splice;
3901 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
7d67af2c 3902
3232dd02
PB
3903 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3904 return -EINVAL;
7d67af2c
PB
3905
3906 sp->file_in = NULL;
7d67af2c
PB
3907 sp->len = READ_ONCE(sqe->len);
3908 sp->flags = READ_ONCE(sqe->splice_flags);
3909
3910 if (unlikely(sp->flags & ~valid_flags))
3911 return -EINVAL;
3912
8371adf5
PB
3913 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3914 (sp->flags & SPLICE_F_FD_IN_FIXED));
3915 if (!sp->file_in)
3916 return -EBADF;
7d67af2c
PB
3917 req->flags |= REQ_F_NEED_CLEANUP;
3918
7cdaf587
XW
3919 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3920 /*
3921 * Splice operation will be punted aync, and here need to
3922 * modify io_wq_work.flags, so initialize io_wq_work firstly.
3923 */
3924 io_req_init_async(req);
7d67af2c 3925 req->work.flags |= IO_WQ_WORK_UNBOUND;
7cdaf587 3926 }
7d67af2c
PB
3927
3928 return 0;
3929}
3930
f2a8d5c7
PB
3931static int io_tee_prep(struct io_kiocb *req,
3932 const struct io_uring_sqe *sqe)
3933{
3934 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3935 return -EINVAL;
3936 return __io_splice_prep(req, sqe);
3937}
3938
45d189c6 3939static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
f2a8d5c7
PB
3940{
3941 struct io_splice *sp = &req->splice;
3942 struct file *in = sp->file_in;
3943 struct file *out = sp->file_out;
3944 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3945 long ret = 0;
3946
45d189c6 3947 if (issue_flags & IO_URING_F_NONBLOCK)
f2a8d5c7
PB
3948 return -EAGAIN;
3949 if (sp->len)
3950 ret = do_tee(in, out, sp->len, flags);
3951
3952 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3953 req->flags &= ~REQ_F_NEED_CLEANUP;
3954
f2a8d5c7
PB
3955 if (ret != sp->len)
3956 req_set_fail_links(req);
e1e16097 3957 io_req_complete(req, ret);
f2a8d5c7
PB
3958 return 0;
3959}
3960
3961static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3962{
3963 struct io_splice* sp = &req->splice;
3964
3965 sp->off_in = READ_ONCE(sqe->splice_off_in);
3966 sp->off_out = READ_ONCE(sqe->off);
3967 return __io_splice_prep(req, sqe);
3968}
3969
45d189c6 3970static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
7d67af2c
PB
3971{
3972 struct io_splice *sp = &req->splice;
3973 struct file *in = sp->file_in;
3974 struct file *out = sp->file_out;
3975 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3976 loff_t *poff_in, *poff_out;
c9687426 3977 long ret = 0;
7d67af2c 3978
45d189c6 3979 if (issue_flags & IO_URING_F_NONBLOCK)
2fb3e822 3980 return -EAGAIN;
7d67af2c
PB
3981
3982 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3983 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 3984
948a7749 3985 if (sp->len)
c9687426 3986 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c
PB
3987
3988 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3989 req->flags &= ~REQ_F_NEED_CLEANUP;
3990
7d67af2c
PB
3991 if (ret != sp->len)
3992 req_set_fail_links(req);
e1e16097 3993 io_req_complete(req, ret);
7d67af2c
PB
3994 return 0;
3995}
3996
2b188cc1
JA
3997/*
3998 * IORING_OP_NOP just posts a completion event, nothing else.
3999 */
889fca73 4000static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1
JA
4001{
4002 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 4003
def596e9
JA
4004 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4005 return -EINVAL;
4006
889fca73 4007 __io_req_complete(req, issue_flags, 0, 0);
2b188cc1
JA
4008 return 0;
4009}
4010
3529d8c2 4011static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 4012{
6b06314c 4013 struct io_ring_ctx *ctx = req->ctx;
c992fe29 4014
09bb8394
JA
4015 if (!req->file)
4016 return -EBADF;
c992fe29 4017
6b06314c 4018 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 4019 return -EINVAL;
edafccee 4020 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
4021 return -EINVAL;
4022
8ed8d3c3
JA
4023 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4024 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4025 return -EINVAL;
4026
4027 req->sync.off = READ_ONCE(sqe->off);
4028 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
4029 return 0;
4030}
4031
45d189c6 4032static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 4033{
8ed8d3c3 4034 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
4035 int ret;
4036
ac45abc0 4037 /* fsync always requires a blocking context */
45d189c6 4038 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
4039 return -EAGAIN;
4040
9adbd45d 4041 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
4042 end > 0 ? end : LLONG_MAX,
4043 req->sync.flags & IORING_FSYNC_DATASYNC);
4044 if (ret < 0)
4045 req_set_fail_links(req);
e1e16097 4046 io_req_complete(req, ret);
c992fe29
CH
4047 return 0;
4048}
4049
d63d1b5e
JA
4050static int io_fallocate_prep(struct io_kiocb *req,
4051 const struct io_uring_sqe *sqe)
4052{
4053 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
4054 return -EINVAL;
3232dd02
PB
4055 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4056 return -EINVAL;
d63d1b5e
JA
4057
4058 req->sync.off = READ_ONCE(sqe->off);
4059 req->sync.len = READ_ONCE(sqe->addr);
4060 req->sync.mode = READ_ONCE(sqe->len);
4061 return 0;
4062}
4063
45d189c6 4064static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
5d17b4a4 4065{
ac45abc0
PB
4066 int ret;
4067
d63d1b5e 4068 /* fallocate always requiring blocking context */
45d189c6 4069 if (issue_flags & IO_URING_F_NONBLOCK)
5d17b4a4 4070 return -EAGAIN;
ac45abc0
PB
4071 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4072 req->sync.len);
ac45abc0
PB
4073 if (ret < 0)
4074 req_set_fail_links(req);
e1e16097 4075 io_req_complete(req, ret);
5d17b4a4
JA
4076 return 0;
4077}
4078
ec65fea5 4079static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 4080{
f8748881 4081 const char __user *fname;
15b71abe 4082 int ret;
b7bb4f7d 4083
ec65fea5 4084 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 4085 return -EINVAL;
ec65fea5 4086 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 4087 return -EBADF;
03b1230c 4088
ec65fea5
PB
4089 /* open.how should be already initialised */
4090 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 4091 req->open.how.flags |= O_LARGEFILE;
3529d8c2 4092
25e72d10
PB
4093 req->open.dfd = READ_ONCE(sqe->fd);
4094 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 4095 req->open.filename = getname(fname);
15b71abe
JA
4096 if (IS_ERR(req->open.filename)) {
4097 ret = PTR_ERR(req->open.filename);
4098 req->open.filename = NULL;
4099 return ret;
4100 }
4022e7af 4101 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 4102 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 4103 return 0;
03b1230c
JA
4104}
4105
ec65fea5
PB
4106static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4107{
4108 u64 flags, mode;
4109
14587a46 4110 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4eb8dded 4111 return -EINVAL;
ec65fea5
PB
4112 mode = READ_ONCE(sqe->len);
4113 flags = READ_ONCE(sqe->open_flags);
4114 req->open.how = build_open_how(flags, mode);
4115 return __io_openat_prep(req, sqe);
4116}
4117
cebdb986 4118static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 4119{
cebdb986 4120 struct open_how __user *how;
cebdb986 4121 size_t len;
0fa03c62
JA
4122 int ret;
4123
14587a46 4124 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4eb8dded 4125 return -EINVAL;
cebdb986
JA
4126 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4127 len = READ_ONCE(sqe->len);
cebdb986
JA
4128 if (len < OPEN_HOW_SIZE_VER0)
4129 return -EINVAL;
3529d8c2 4130
cebdb986
JA
4131 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4132 len);
4133 if (ret)
4134 return ret;
3529d8c2 4135
ec65fea5 4136 return __io_openat_prep(req, sqe);
cebdb986
JA
4137}
4138
45d189c6 4139static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
15b71abe
JA
4140{
4141 struct open_flags op;
15b71abe 4142 struct file *file;
3a81fd02
JA
4143 bool nonblock_set;
4144 bool resolve_nonblock;
15b71abe
JA
4145 int ret;
4146
cebdb986 4147 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
4148 if (ret)
4149 goto err;
3a81fd02
JA
4150 nonblock_set = op.open_flag & O_NONBLOCK;
4151 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
45d189c6 4152 if (issue_flags & IO_URING_F_NONBLOCK) {
3a81fd02
JA
4153 /*
4154 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
4155 * it'll always -EAGAIN
4156 */
4157 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4158 return -EAGAIN;
4159 op.lookup_flags |= LOOKUP_CACHED;
4160 op.open_flag |= O_NONBLOCK;
4161 }
15b71abe 4162
4022e7af 4163 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
15b71abe
JA
4164 if (ret < 0)
4165 goto err;
4166
4167 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3a81fd02 4168 /* only retry if RESOLVE_CACHED wasn't already set by application */
45d189c6
PB
4169 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
4170 file == ERR_PTR(-EAGAIN)) {
3a81fd02
JA
4171 /*
4172 * We could hang on to this 'fd', but seems like marginal
4173 * gain for something that is now known to be a slower path.
4174 * So just put it, and we'll get a new one when we retry.
4175 */
4176 put_unused_fd(ret);
4177 return -EAGAIN;
4178 }
4179
15b71abe
JA
4180 if (IS_ERR(file)) {
4181 put_unused_fd(ret);
4182 ret = PTR_ERR(file);
4183 } else {
45d189c6 4184 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
3a81fd02 4185 file->f_flags &= ~O_NONBLOCK;
15b71abe
JA
4186 fsnotify_open(file);
4187 fd_install(ret, file);
4188 }
4189err:
4190 putname(req->open.filename);
8fef80bf 4191 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe
JA
4192 if (ret < 0)
4193 req_set_fail_links(req);
e1e16097 4194 io_req_complete(req, ret);
15b71abe
JA
4195 return 0;
4196}
4197
45d189c6 4198static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
cebdb986 4199{
45d189c6 4200 return io_openat2(req, issue_flags & IO_URING_F_NONBLOCK);
cebdb986
JA
4201}
4202
067524e9
JA
4203static int io_remove_buffers_prep(struct io_kiocb *req,
4204 const struct io_uring_sqe *sqe)
4205{
4206 struct io_provide_buf *p = &req->pbuf;
4207 u64 tmp;
4208
4209 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
4210 return -EINVAL;
4211
4212 tmp = READ_ONCE(sqe->fd);
4213 if (!tmp || tmp > USHRT_MAX)
4214 return -EINVAL;
4215
4216 memset(p, 0, sizeof(*p));
4217 p->nbufs = tmp;
4218 p->bgid = READ_ONCE(sqe->buf_group);
4219 return 0;
4220}
4221
4222static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4223 int bgid, unsigned nbufs)
4224{
4225 unsigned i = 0;
4226
4227 /* shouldn't happen */
4228 if (!nbufs)
4229 return 0;
4230
4231 /* the head kbuf is the list itself */
4232 while (!list_empty(&buf->list)) {
4233 struct io_buffer *nxt;
4234
4235 nxt = list_first_entry(&buf->list, struct io_buffer, list);
4236 list_del(&nxt->list);
4237 kfree(nxt);
4238 if (++i == nbufs)
4239 return i;
4240 }
4241 i++;
4242 kfree(buf);
4243 idr_remove(&ctx->io_buffer_idr, bgid);
4244
4245 return i;
4246}
4247
889fca73 4248static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
067524e9
JA
4249{
4250 struct io_provide_buf *p = &req->pbuf;
4251 struct io_ring_ctx *ctx = req->ctx;
4252 struct io_buffer *head;
4253 int ret = 0;
45d189c6 4254 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
067524e9
JA
4255
4256 io_ring_submit_lock(ctx, !force_nonblock);
4257
4258 lockdep_assert_held(&ctx->uring_lock);
4259
4260 ret = -ENOENT;
4261 head = idr_find(&ctx->io_buffer_idr, p->bgid);
4262 if (head)
4263 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
067524e9
JA
4264 if (ret < 0)
4265 req_set_fail_links(req);
067524e9 4266
31bff9a5
PB
4267 /* need to hold the lock to complete IOPOLL requests */
4268 if (ctx->flags & IORING_SETUP_IOPOLL) {
889fca73 4269 __io_req_complete(req, issue_flags, ret, 0);
31bff9a5
PB
4270 io_ring_submit_unlock(ctx, !force_nonblock);
4271 } else {
4272 io_ring_submit_unlock(ctx, !force_nonblock);
889fca73 4273 __io_req_complete(req, issue_flags, ret, 0);
31bff9a5 4274 }
067524e9
JA
4275 return 0;
4276}
4277
ddf0322d
JA
4278static int io_provide_buffers_prep(struct io_kiocb *req,
4279 const struct io_uring_sqe *sqe)
4280{
4281 struct io_provide_buf *p = &req->pbuf;
4282 u64 tmp;
4283
4284 if (sqe->ioprio || sqe->rw_flags)
4285 return -EINVAL;
4286
4287 tmp = READ_ONCE(sqe->fd);
4288 if (!tmp || tmp > USHRT_MAX)
4289 return -E2BIG;
4290 p->nbufs = tmp;
4291 p->addr = READ_ONCE(sqe->addr);
4292 p->len = READ_ONCE(sqe->len);
4293
efe68c1c 4294 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
ddf0322d
JA
4295 return -EFAULT;
4296
4297 p->bgid = READ_ONCE(sqe->buf_group);
4298 tmp = READ_ONCE(sqe->off);
4299 if (tmp > USHRT_MAX)
4300 return -E2BIG;
4301 p->bid = tmp;
4302 return 0;
4303}
4304
4305static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4306{
4307 struct io_buffer *buf;
4308 u64 addr = pbuf->addr;
4309 int i, bid = pbuf->bid;
4310
4311 for (i = 0; i < pbuf->nbufs; i++) {
4312 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4313 if (!buf)
4314 break;
4315
4316 buf->addr = addr;
4317 buf->len = pbuf->len;
4318 buf->bid = bid;
4319 addr += pbuf->len;
4320 bid++;
4321 if (!*head) {
4322 INIT_LIST_HEAD(&buf->list);
4323 *head = buf;
4324 } else {
4325 list_add_tail(&buf->list, &(*head)->list);
4326 }
4327 }
4328
4329 return i ? i : -ENOMEM;
4330}
4331
889fca73 4332static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
ddf0322d
JA
4333{
4334 struct io_provide_buf *p = &req->pbuf;
4335 struct io_ring_ctx *ctx = req->ctx;
4336 struct io_buffer *head, *list;
4337 int ret = 0;
45d189c6 4338 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ddf0322d
JA
4339
4340 io_ring_submit_lock(ctx, !force_nonblock);
4341
4342 lockdep_assert_held(&ctx->uring_lock);
4343
4344 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
4345
4346 ret = io_add_buffers(p, &head);
4347 if (ret < 0)
4348 goto out;
4349
4350 if (!list) {
4351 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
4352 GFP_KERNEL);
4353 if (ret < 0) {
067524e9 4354 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d
JA
4355 goto out;
4356 }
4357 }
4358out:
ddf0322d
JA
4359 if (ret < 0)
4360 req_set_fail_links(req);
31bff9a5
PB
4361
4362 /* need to hold the lock to complete IOPOLL requests */
4363 if (ctx->flags & IORING_SETUP_IOPOLL) {
889fca73 4364 __io_req_complete(req, issue_flags, ret, 0);
31bff9a5
PB
4365 io_ring_submit_unlock(ctx, !force_nonblock);
4366 } else {
4367 io_ring_submit_unlock(ctx, !force_nonblock);
889fca73 4368 __io_req_complete(req, issue_flags, ret, 0);
31bff9a5 4369 }
ddf0322d 4370 return 0;
cebdb986
JA
4371}
4372
3e4827b0
JA
4373static int io_epoll_ctl_prep(struct io_kiocb *req,
4374 const struct io_uring_sqe *sqe)
4375{
4376#if defined(CONFIG_EPOLL)
4377 if (sqe->ioprio || sqe->buf_index)
4378 return -EINVAL;
6ca56f84 4379 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
3232dd02 4380 return -EINVAL;
3e4827b0
JA
4381
4382 req->epoll.epfd = READ_ONCE(sqe->fd);
4383 req->epoll.op = READ_ONCE(sqe->len);
4384 req->epoll.fd = READ_ONCE(sqe->off);
4385
4386 if (ep_op_has_event(req->epoll.op)) {
4387 struct epoll_event __user *ev;
4388
4389 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4390 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4391 return -EFAULT;
4392 }
4393
4394 return 0;
4395#else
4396 return -EOPNOTSUPP;
4397#endif
4398}
4399
889fca73 4400static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
3e4827b0
JA
4401{
4402#if defined(CONFIG_EPOLL)
4403 struct io_epoll *ie = &req->epoll;
4404 int ret;
45d189c6 4405 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3e4827b0
JA
4406
4407 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4408 if (force_nonblock && ret == -EAGAIN)
4409 return -EAGAIN;
4410
4411 if (ret < 0)
4412 req_set_fail_links(req);
889fca73 4413 __io_req_complete(req, issue_flags, ret, 0);
3e4827b0
JA
4414 return 0;
4415#else
4416 return -EOPNOTSUPP;
4417#endif
4418}
4419
c1ca757b
JA
4420static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4421{
4422#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4423 if (sqe->ioprio || sqe->buf_index || sqe->off)
4424 return -EINVAL;
3232dd02
PB
4425 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4426 return -EINVAL;
c1ca757b
JA
4427
4428 req->madvise.addr = READ_ONCE(sqe->addr);
4429 req->madvise.len = READ_ONCE(sqe->len);
4430 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4431 return 0;
4432#else
4433 return -EOPNOTSUPP;
4434#endif
4435}
4436
45d189c6 4437static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
c1ca757b
JA
4438{
4439#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4440 struct io_madvise *ma = &req->madvise;
4441 int ret;
4442
45d189c6 4443 if (issue_flags & IO_URING_F_NONBLOCK)
c1ca757b
JA
4444 return -EAGAIN;
4445
0726b01e 4446 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
c1ca757b
JA
4447 if (ret < 0)
4448 req_set_fail_links(req);
e1e16097 4449 io_req_complete(req, ret);
c1ca757b
JA
4450 return 0;
4451#else
4452 return -EOPNOTSUPP;
4453#endif
4454}
4455
4840e418
JA
4456static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4457{
4458 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4459 return -EINVAL;
3232dd02
PB
4460 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4461 return -EINVAL;
4840e418
JA
4462
4463 req->fadvise.offset = READ_ONCE(sqe->off);
4464 req->fadvise.len = READ_ONCE(sqe->len);
4465 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4466 return 0;
4467}
4468
45d189c6 4469static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4840e418
JA
4470{
4471 struct io_fadvise *fa = &req->fadvise;
4472 int ret;
4473
45d189c6 4474 if (issue_flags & IO_URING_F_NONBLOCK) {
3e69426d
JA
4475 switch (fa->advice) {
4476 case POSIX_FADV_NORMAL:
4477 case POSIX_FADV_RANDOM:
4478 case POSIX_FADV_SEQUENTIAL:
4479 break;
4480 default:
4481 return -EAGAIN;
4482 }
4483 }
4840e418
JA
4484
4485 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4486 if (ret < 0)
4487 req_set_fail_links(req);
e1e16097 4488 io_req_complete(req, ret);
4840e418
JA
4489 return 0;
4490}
4491
eddc7ef5
JA
4492static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4493{
6ca56f84 4494 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
3232dd02 4495 return -EINVAL;
eddc7ef5
JA
4496 if (sqe->ioprio || sqe->buf_index)
4497 return -EINVAL;
9c280f90 4498 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4499 return -EBADF;
eddc7ef5 4500
1d9e1288
BM
4501 req->statx.dfd = READ_ONCE(sqe->fd);
4502 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 4503 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
4504 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4505 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5
JA
4506
4507 return 0;
4508}
4509
45d189c6 4510static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
eddc7ef5 4511{
1d9e1288 4512 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
4513 int ret;
4514
45d189c6 4515 if (issue_flags & IO_URING_F_NONBLOCK) {
5b0bbee4
JA
4516 /* only need file table for an actual valid fd */
4517 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4518 req->flags |= REQ_F_NO_FILE_TABLE;
eddc7ef5 4519 return -EAGAIN;
5b0bbee4 4520 }
eddc7ef5 4521
e62753e4
BM
4522 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4523 ctx->buffer);
eddc7ef5 4524
eddc7ef5
JA
4525 if (ret < 0)
4526 req_set_fail_links(req);
e1e16097 4527 io_req_complete(req, ret);
eddc7ef5
JA
4528 return 0;
4529}
4530
b5dba59e
JA
4531static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4532{
14587a46 4533 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3232dd02 4534 return -EINVAL;
b5dba59e
JA
4535 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4536 sqe->rw_flags || sqe->buf_index)
4537 return -EINVAL;
9c280f90 4538 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4539 return -EBADF;
b5dba59e
JA
4540
4541 req->close.fd = READ_ONCE(sqe->fd);
b5dba59e 4542 return 0;
b5dba59e
JA
4543}
4544
889fca73 4545static int io_close(struct io_kiocb *req, unsigned int issue_flags)
b5dba59e 4546{
9eac1904 4547 struct files_struct *files = current->files;
3af73b28 4548 struct io_close *close = &req->close;
9eac1904
JA
4549 struct fdtable *fdt;
4550 struct file *file;
b5dba59e
JA
4551 int ret;
4552
9eac1904
JA
4553 file = NULL;
4554 ret = -EBADF;
4555 spin_lock(&files->file_lock);
4556 fdt = files_fdtable(files);
4557 if (close->fd >= fdt->max_fds) {
4558 spin_unlock(&files->file_lock);
4559 goto err;
4560 }
4561 file = fdt->fd[close->fd];
4562 if (!file) {
4563 spin_unlock(&files->file_lock);
4564 goto err;
4565 }
4566
4567 if (file->f_op == &io_uring_fops) {
4568 spin_unlock(&files->file_lock);
4569 file = NULL;
4570 goto err;
3af73b28 4571 }
b5dba59e
JA
4572
4573 /* if the file has a flush method, be safe and punt to async */
45d189c6 4574 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
9eac1904 4575 spin_unlock(&files->file_lock);
0bf0eefd 4576 return -EAGAIN;
a2100672 4577 }
b5dba59e 4578
9eac1904
JA
4579 ret = __close_fd_get_file(close->fd, &file);
4580 spin_unlock(&files->file_lock);
4581 if (ret < 0) {
4582 if (ret == -ENOENT)
4583 ret = -EBADF;
4584 goto err;
4585 }
4586
3af73b28 4587 /* No ->flush() or already async, safely close from here */
9eac1904
JA
4588 ret = filp_close(file, current->files);
4589err:
3af73b28
PB
4590 if (ret < 0)
4591 req_set_fail_links(req);
9eac1904
JA
4592 if (file)
4593 fput(file);
889fca73 4594 __io_req_complete(req, issue_flags, ret, 0);
1a417f4e 4595 return 0;
b5dba59e
JA
4596}
4597
3529d8c2 4598static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
4599{
4600 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4
JA
4601
4602 if (!req->file)
4603 return -EBADF;
5d17b4a4
JA
4604
4605 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4606 return -EINVAL;
4607 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4608 return -EINVAL;
4609
8ed8d3c3
JA
4610 req->sync.off = READ_ONCE(sqe->off);
4611 req->sync.len = READ_ONCE(sqe->len);
4612 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
4613 return 0;
4614}
4615
45d189c6 4616static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3 4617{
8ed8d3c3
JA
4618 int ret;
4619
ac45abc0 4620 /* sync_file_range always requires a blocking context */
45d189c6 4621 if (issue_flags & IO_URING_F_NONBLOCK)
ac45abc0
PB
4622 return -EAGAIN;
4623
9adbd45d 4624 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
4625 req->sync.flags);
4626 if (ret < 0)
4627 req_set_fail_links(req);
e1e16097 4628 io_req_complete(req, ret);
5d17b4a4
JA
4629 return 0;
4630}
4631
469956e8 4632#if defined(CONFIG_NET)
02d27d89
PB
4633static int io_setup_async_msg(struct io_kiocb *req,
4634 struct io_async_msghdr *kmsg)
4635{
e8c2bc1f
JA
4636 struct io_async_msghdr *async_msg = req->async_data;
4637
4638 if (async_msg)
02d27d89 4639 return -EAGAIN;
e8c2bc1f 4640 if (io_alloc_async_data(req)) {
257e84a5 4641 kfree(kmsg->free_iov);
02d27d89
PB
4642 return -ENOMEM;
4643 }
e8c2bc1f 4644 async_msg = req->async_data;
02d27d89 4645 req->flags |= REQ_F_NEED_CLEANUP;
e8c2bc1f 4646 memcpy(async_msg, kmsg, sizeof(*kmsg));
2a780802 4647 async_msg->msg.msg_name = &async_msg->addr;
257e84a5
PB
4648 /* if were using fast_iov, set it to the new one */
4649 if (!async_msg->free_iov)
4650 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4651
02d27d89
PB
4652 return -EAGAIN;
4653}
4654
2ae523ed
PB
4655static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4656 struct io_async_msghdr *iomsg)
4657{
2ae523ed 4658 iomsg->msg.msg_name = &iomsg->addr;
257e84a5 4659 iomsg->free_iov = iomsg->fast_iov;
2ae523ed 4660 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
257e84a5 4661 req->sr_msg.msg_flags, &iomsg->free_iov);
2ae523ed
PB
4662}
4663
3529d8c2 4664static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 4665{
e8c2bc1f 4666 struct io_async_msghdr *async_msg = req->async_data;
e47293fd 4667 struct io_sr_msg *sr = &req->sr_msg;
99bc4c38 4668 int ret;
03b1230c 4669
d2b6f48b
PB
4670 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4671 return -EINVAL;
4672
e47293fd 4673 sr->msg_flags = READ_ONCE(sqe->msg_flags);
270a5940 4674 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 4675 sr->len = READ_ONCE(sqe->len);
3529d8c2 4676
d8768362
JA
4677#ifdef CONFIG_COMPAT
4678 if (req->ctx->compat)
4679 sr->msg_flags |= MSG_CMSG_COMPAT;
4680#endif
4681
e8c2bc1f 4682 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
3529d8c2 4683 return 0;
e8c2bc1f 4684 ret = io_sendmsg_copy_hdr(req, async_msg);
99bc4c38
PB
4685 if (!ret)
4686 req->flags |= REQ_F_NEED_CLEANUP;
4687 return ret;
03b1230c
JA
4688}
4689
889fca73 4690static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 4691{
6b754c8b 4692 struct io_async_msghdr iomsg, *kmsg;
0fa03c62 4693 struct socket *sock;
7a7cacba 4694 unsigned flags;
0fa03c62
JA
4695 int ret;
4696
dba4a925 4697 sock = sock_from_file(req->file);
7a7cacba 4698 if (unlikely(!sock))
dba4a925 4699 return -ENOTSOCK;
3529d8c2 4700
257e84a5
PB
4701 kmsg = req->async_data;
4702 if (!kmsg) {
7a7cacba
PB
4703 ret = io_sendmsg_copy_hdr(req, &iomsg);
4704 if (ret)
4705 return ret;
4706 kmsg = &iomsg;
0fa03c62 4707 }
0fa03c62 4708
7a7cacba
PB
4709 flags = req->sr_msg.msg_flags;
4710 if (flags & MSG_DONTWAIT)
4711 req->flags |= REQ_F_NOWAIT;
45d189c6 4712 else if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 4713 flags |= MSG_DONTWAIT;
e47293fd 4714
7a7cacba 4715 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
45d189c6 4716 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
7a7cacba
PB
4717 return io_setup_async_msg(req, kmsg);
4718 if (ret == -ERESTARTSYS)
4719 ret = -EINTR;
0fa03c62 4720
257e84a5
PB
4721 /* fast path, check for non-NULL to avoid function call */
4722 if (kmsg->free_iov)
4723 kfree(kmsg->free_iov);
99bc4c38 4724 req->flags &= ~REQ_F_NEED_CLEANUP;
4e88d6e7
JA
4725 if (ret < 0)
4726 req_set_fail_links(req);
889fca73 4727 __io_req_complete(req, issue_flags, ret, 0);
5d17b4a4 4728 return 0;
03b1230c 4729}
aa1fa28f 4730
889fca73 4731static int io_send(struct io_kiocb *req, unsigned int issue_flags)
fddaface 4732{
7a7cacba
PB
4733 struct io_sr_msg *sr = &req->sr_msg;
4734 struct msghdr msg;
4735 struct iovec iov;
fddaface 4736 struct socket *sock;
7a7cacba 4737 unsigned flags;
fddaface
JA
4738 int ret;
4739
dba4a925 4740 sock = sock_from_file(req->file);
7a7cacba 4741 if (unlikely(!sock))
dba4a925 4742 return -ENOTSOCK;
fddaface 4743
7a7cacba
PB
4744 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4745 if (unlikely(ret))
14db8411 4746 return ret;
fddaface 4747
7a7cacba
PB
4748 msg.msg_name = NULL;
4749 msg.msg_control = NULL;
4750 msg.msg_controllen = 0;
4751 msg.msg_namelen = 0;
fddaface 4752
7a7cacba
PB
4753 flags = req->sr_msg.msg_flags;
4754 if (flags & MSG_DONTWAIT)
4755 req->flags |= REQ_F_NOWAIT;
45d189c6 4756 else if (issue_flags & IO_URING_F_NONBLOCK)
7a7cacba 4757 flags |= MSG_DONTWAIT;
fddaface 4758
7a7cacba
PB
4759 msg.msg_flags = flags;
4760 ret = sock_sendmsg(sock, &msg);
45d189c6 4761 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
7a7cacba
PB
4762 return -EAGAIN;
4763 if (ret == -ERESTARTSYS)
4764 ret = -EINTR;
fddaface 4765
fddaface
JA
4766 if (ret < 0)
4767 req_set_fail_links(req);
889fca73 4768 __io_req_complete(req, issue_flags, ret, 0);
fddaface 4769 return 0;
fddaface
JA
4770}
4771
1400e697
PB
4772static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4773 struct io_async_msghdr *iomsg)
52de1fe1
JA
4774{
4775 struct io_sr_msg *sr = &req->sr_msg;
4776 struct iovec __user *uiov;
4777 size_t iov_len;
4778 int ret;
4779
1400e697
PB
4780 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4781 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
4782 if (ret)
4783 return ret;
4784
4785 if (req->flags & REQ_F_BUFFER_SELECT) {
4786 if (iov_len > 1)
4787 return -EINVAL;
5476dfed 4788 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
52de1fe1 4789 return -EFAULT;
5476dfed 4790 sr->len = iomsg->fast_iov[0].iov_len;
257e84a5 4791 iomsg->free_iov = NULL;
52de1fe1 4792 } else {
257e84a5 4793 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 4794 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
257e84a5 4795 &iomsg->free_iov, &iomsg->msg.msg_iter,
89cd35c5 4796 false);
52de1fe1
JA
4797 if (ret > 0)
4798 ret = 0;
4799 }
4800
4801 return ret;
4802}
4803
4804#ifdef CONFIG_COMPAT
4805static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 4806 struct io_async_msghdr *iomsg)
52de1fe1
JA
4807{
4808 struct compat_msghdr __user *msg_compat;
4809 struct io_sr_msg *sr = &req->sr_msg;
4810 struct compat_iovec __user *uiov;
4811 compat_uptr_t ptr;
4812 compat_size_t len;
4813 int ret;
4814
270a5940 4815 msg_compat = (struct compat_msghdr __user *) sr->umsg;
1400e697 4816 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
52de1fe1
JA
4817 &ptr, &len);
4818 if (ret)
4819 return ret;
4820
4821 uiov = compat_ptr(ptr);
4822 if (req->flags & REQ_F_BUFFER_SELECT) {
4823 compat_ssize_t clen;
4824
4825 if (len > 1)
4826 return -EINVAL;
4827 if (!access_ok(uiov, sizeof(*uiov)))
4828 return -EFAULT;
4829 if (__get_user(clen, &uiov->iov_len))
4830 return -EFAULT;
4831 if (clen < 0)
4832 return -EINVAL;
2d280bc8 4833 sr->len = clen;
257e84a5 4834 iomsg->free_iov = NULL;
52de1fe1 4835 } else {
257e84a5 4836 iomsg->free_iov = iomsg->fast_iov;
89cd35c5 4837 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
257e84a5 4838 UIO_FASTIOV, &iomsg->free_iov,
89cd35c5 4839 &iomsg->msg.msg_iter, true);
52de1fe1
JA
4840 if (ret < 0)
4841 return ret;
4842 }
4843
4844 return 0;
4845}
4846#endif
4847
1400e697
PB
4848static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4849 struct io_async_msghdr *iomsg)
52de1fe1 4850{
1400e697 4851 iomsg->msg.msg_name = &iomsg->addr;
52de1fe1
JA
4852
4853#ifdef CONFIG_COMPAT
4854 if (req->ctx->compat)
1400e697 4855 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 4856#endif
52de1fe1 4857
1400e697 4858 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
4859}
4860
bcda7baa 4861static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
7fbb1b54 4862 bool needs_lock)
bcda7baa
JA
4863{
4864 struct io_sr_msg *sr = &req->sr_msg;
4865 struct io_buffer *kbuf;
4866
bcda7baa
JA
4867 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4868 if (IS_ERR(kbuf))
4869 return kbuf;
4870
4871 sr->kbuf = kbuf;
4872 req->flags |= REQ_F_BUFFER_SELECTED;
bcda7baa 4873 return kbuf;
fddaface
JA
4874}
4875
7fbb1b54
PB
4876static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4877{
4878 return io_put_kbuf(req, req->sr_msg.kbuf);
4879}
4880
3529d8c2
JA
4881static int io_recvmsg_prep(struct io_kiocb *req,
4882 const struct io_uring_sqe *sqe)
aa1fa28f 4883{
e8c2bc1f 4884 struct io_async_msghdr *async_msg = req->async_data;
e47293fd 4885 struct io_sr_msg *sr = &req->sr_msg;
99bc4c38 4886 int ret;
3529d8c2 4887
d2b6f48b
PB
4888 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4889 return -EINVAL;
4890
3529d8c2 4891 sr->msg_flags = READ_ONCE(sqe->msg_flags);
270a5940 4892 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 4893 sr->len = READ_ONCE(sqe->len);
bcda7baa 4894 sr->bgid = READ_ONCE(sqe->buf_group);
06b76d44 4895
d8768362
JA
4896#ifdef CONFIG_COMPAT
4897 if (req->ctx->compat)
4898 sr->msg_flags |= MSG_CMSG_COMPAT;
4899#endif
4900
e8c2bc1f 4901 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
5f798bea 4902 return 0;
e8c2bc1f 4903 ret = io_recvmsg_copy_hdr(req, async_msg);
99bc4c38
PB
4904 if (!ret)
4905 req->flags |= REQ_F_NEED_CLEANUP;
4906 return ret;
aa1fa28f
JA
4907}
4908
889fca73 4909static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
aa1fa28f 4910{
6b754c8b 4911 struct io_async_msghdr iomsg, *kmsg;
03b1230c 4912 struct socket *sock;
7fbb1b54 4913 struct io_buffer *kbuf;
7a7cacba 4914 unsigned flags;
52de1fe1 4915 int ret, cflags = 0;
45d189c6 4916 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
03b1230c 4917
dba4a925 4918 sock = sock_from_file(req->file);
7a7cacba 4919 if (unlikely(!sock))
dba4a925 4920 return -ENOTSOCK;
3529d8c2 4921
257e84a5
PB
4922 kmsg = req->async_data;
4923 if (!kmsg) {
7a7cacba
PB
4924 ret = io_recvmsg_copy_hdr(req, &iomsg);
4925 if (ret)
681fda8d 4926 return ret;
7a7cacba
PB
4927 kmsg = &iomsg;
4928 }
03b1230c 4929
bc02ef33 4930 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 4931 kbuf = io_recv_buffer_select(req, !force_nonblock);
bc02ef33 4932 if (IS_ERR(kbuf))
52de1fe1 4933 return PTR_ERR(kbuf);
7a7cacba 4934 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
5476dfed
PB
4935 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4936 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
7a7cacba
PB
4937 1, req->sr_msg.len);
4938 }
52de1fe1 4939
7a7cacba
PB
4940 flags = req->sr_msg.msg_flags;
4941 if (flags & MSG_DONTWAIT)
4942 req->flags |= REQ_F_NOWAIT;
4943 else if (force_nonblock)
4944 flags |= MSG_DONTWAIT;
e47293fd 4945
7a7cacba
PB
4946 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4947 kmsg->uaddr, flags);
0e1b6fe3
PB
4948 if (force_nonblock && ret == -EAGAIN)
4949 return io_setup_async_msg(req, kmsg);
7a7cacba
PB
4950 if (ret == -ERESTARTSYS)
4951 ret = -EINTR;
03b1230c 4952
7fbb1b54
PB
4953 if (req->flags & REQ_F_BUFFER_SELECTED)
4954 cflags = io_put_recv_kbuf(req);
257e84a5
PB
4955 /* fast path, check for non-NULL to avoid function call */
4956 if (kmsg->free_iov)
4957 kfree(kmsg->free_iov);
99bc4c38 4958 req->flags &= ~REQ_F_NEED_CLEANUP;
4e88d6e7
JA
4959 if (ret < 0)
4960 req_set_fail_links(req);
889fca73 4961 __io_req_complete(req, issue_flags, ret, cflags);
03b1230c 4962 return 0;
0fa03c62 4963}
5d17b4a4 4964
889fca73 4965static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
fddaface 4966{
6b754c8b 4967 struct io_buffer *kbuf;
7a7cacba
PB
4968 struct io_sr_msg *sr = &req->sr_msg;
4969 struct msghdr msg;
4970 void __user *buf = sr->buf;
fddaface 4971 struct socket *sock;
7a7cacba
PB
4972 struct iovec iov;
4973 unsigned flags;
bcda7baa 4974 int ret, cflags = 0;
45d189c6 4975 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
fddaface 4976
dba4a925 4977 sock = sock_from_file(req->file);
7a7cacba 4978 if (unlikely(!sock))
dba4a925 4979 return -ENOTSOCK;
fddaface 4980
bc02ef33 4981 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 4982 kbuf = io_recv_buffer_select(req, !force_nonblock);
bcda7baa
JA
4983 if (IS_ERR(kbuf))
4984 return PTR_ERR(kbuf);
7a7cacba 4985 buf = u64_to_user_ptr(kbuf->addr);
bc02ef33 4986 }
bcda7baa 4987
7a7cacba 4988 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
4989 if (unlikely(ret))
4990 goto out_free;
fddaface 4991
7a7cacba
PB
4992 msg.msg_name = NULL;
4993 msg.msg_control = NULL;
4994 msg.msg_controllen = 0;
4995 msg.msg_namelen = 0;
4996 msg.msg_iocb = NULL;
4997 msg.msg_flags = 0;
fddaface 4998
7a7cacba
PB
4999 flags = req->sr_msg.msg_flags;
5000 if (flags & MSG_DONTWAIT)
5001 req->flags |= REQ_F_NOWAIT;
5002 else if (force_nonblock)
5003 flags |= MSG_DONTWAIT;
5004
5005 ret = sock_recvmsg(sock, &msg, flags);
5006 if (force_nonblock && ret == -EAGAIN)
5007 return -EAGAIN;
5008 if (ret == -ERESTARTSYS)
5009 ret = -EINTR;
14c32eee 5010out_free:
7fbb1b54
PB
5011 if (req->flags & REQ_F_BUFFER_SELECTED)
5012 cflags = io_put_recv_kbuf(req);
fddaface
JA
5013 if (ret < 0)
5014 req_set_fail_links(req);
889fca73 5015 __io_req_complete(req, issue_flags, ret, cflags);
fddaface 5016 return 0;
fddaface
JA
5017}
5018
3529d8c2 5019static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 5020{
8ed8d3c3
JA
5021 struct io_accept *accept = &req->accept;
5022
14587a46 5023 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
17f2fe35 5024 return -EINVAL;
8042d6ce 5025 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
5026 return -EINVAL;
5027
d55e5f5b
JA
5028 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5029 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 5030 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 5031 accept->nofile = rlimit(RLIMIT_NOFILE);
8ed8d3c3 5032 return 0;
8ed8d3c3 5033}
17f2fe35 5034
889fca73 5035static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
8ed8d3c3
JA
5036{
5037 struct io_accept *accept = &req->accept;
45d189c6 5038 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ac45abc0 5039 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
8ed8d3c3
JA
5040 int ret;
5041
e697deed
JX
5042 if (req->file->f_flags & O_NONBLOCK)
5043 req->flags |= REQ_F_NOWAIT;
5044
8ed8d3c3 5045 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
09952e3e
JA
5046 accept->addr_len, accept->flags,
5047 accept->nofile);
8ed8d3c3 5048 if (ret == -EAGAIN && force_nonblock)
17f2fe35 5049 return -EAGAIN;
ac45abc0
PB
5050 if (ret < 0) {
5051 if (ret == -ERESTARTSYS)
5052 ret = -EINTR;
4e88d6e7 5053 req_set_fail_links(req);
ac45abc0 5054 }
889fca73 5055 __io_req_complete(req, issue_flags, ret, 0);
17f2fe35 5056 return 0;
8ed8d3c3
JA
5057}
5058
3529d8c2 5059static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 5060{
3529d8c2 5061 struct io_connect *conn = &req->connect;
e8c2bc1f 5062 struct io_async_connect *io = req->async_data;
f499a021 5063
14587a46 5064 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3fbb51c1
JA
5065 return -EINVAL;
5066 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
5067 return -EINVAL;
5068
3529d8c2
JA
5069 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5070 conn->addr_len = READ_ONCE(sqe->addr2);
5071
5072 if (!io)
5073 return 0;
5074
5075 return move_addr_to_kernel(conn->addr, conn->addr_len,
e8c2bc1f 5076 &io->address);
f499a021
JA
5077}
5078
889fca73 5079static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
f8e85cf2 5080{
e8c2bc1f 5081 struct io_async_connect __io, *io;
f8e85cf2 5082 unsigned file_flags;
3fbb51c1 5083 int ret;
45d189c6 5084 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
f8e85cf2 5085
e8c2bc1f
JA
5086 if (req->async_data) {
5087 io = req->async_data;
f499a021 5088 } else {
3529d8c2
JA
5089 ret = move_addr_to_kernel(req->connect.addr,
5090 req->connect.addr_len,
e8c2bc1f 5091 &__io.address);
f499a021
JA
5092 if (ret)
5093 goto out;
5094 io = &__io;
5095 }
5096
3fbb51c1
JA
5097 file_flags = force_nonblock ? O_NONBLOCK : 0;
5098
e8c2bc1f 5099 ret = __sys_connect_file(req->file, &io->address,
3fbb51c1 5100 req->connect.addr_len, file_flags);
87f80d62 5101 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
e8c2bc1f 5102 if (req->async_data)
b7bb4f7d 5103 return -EAGAIN;
e8c2bc1f 5104 if (io_alloc_async_data(req)) {
f499a021
JA
5105 ret = -ENOMEM;
5106 goto out;
5107 }
e8c2bc1f
JA
5108 io = req->async_data;
5109 memcpy(req->async_data, &__io, sizeof(__io));
f8e85cf2 5110 return -EAGAIN;
f499a021 5111 }
f8e85cf2
JA
5112 if (ret == -ERESTARTSYS)
5113 ret = -EINTR;
f499a021 5114out:
4e88d6e7
JA
5115 if (ret < 0)
5116 req_set_fail_links(req);
889fca73 5117 __io_req_complete(req, issue_flags, ret, 0);
f8e85cf2 5118 return 0;
469956e8
Y
5119}
5120#else /* !CONFIG_NET */
5121static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5122{
f8e85cf2 5123 return -EOPNOTSUPP;
f8e85cf2
JA
5124}
5125
889fca73 5126static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
469956e8
Y
5127{
5128 return -EOPNOTSUPP;
5129}
5130
889fca73 5131static int io_send(struct io_kiocb *req, unsigned int issue_flags)
469956e8
Y
5132{
5133 return -EOPNOTSUPP;
5134}
5135
5136static int io_recvmsg_prep(struct io_kiocb *req,
5137 const struct io_uring_sqe *sqe)
5138{
5139 return -EOPNOTSUPP;
5140}
5141
889fca73 5142static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
469956e8
Y
5143{
5144 return -EOPNOTSUPP;
5145}
5146
889fca73 5147static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
469956e8
Y
5148{
5149 return -EOPNOTSUPP;
5150}
5151
5152static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5153{
5154 return -EOPNOTSUPP;
5155}
5156
889fca73 5157static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
469956e8
Y
5158{
5159 return -EOPNOTSUPP;
5160}
ce593a6c 5161
469956e8
Y
5162static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5163{
5164 return -EOPNOTSUPP;
5165}
5166
889fca73 5167static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
469956e8 5168{
f8e85cf2 5169 return -EOPNOTSUPP;
ce593a6c 5170}
469956e8 5171#endif /* CONFIG_NET */
f8e85cf2 5172
d7718a9d
JA
5173struct io_poll_table {
5174 struct poll_table_struct pt;
5175 struct io_kiocb *req;
5176 int error;
5177};
ce593a6c 5178
d7718a9d
JA
5179static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
5180 __poll_t mask, task_work_func_t func)
5181{
aa96bf8a 5182 int ret;
d7718a9d
JA
5183
5184 /* for instances that support it check for an event match first: */
5185 if (mask && !(mask & poll->events))
5186 return 0;
5187
5188 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5189
5190 list_del_init(&poll->wait.entry);
5191
d7718a9d 5192 req->result = mask;
7cbf1722 5193 req->task_work.func = func;
6d816e08
JA
5194 percpu_ref_get(&req->ctx->refs);
5195
d7718a9d 5196 /*
e3aabf95
JA
5197 * If this fails, then the task is exiting. When a task exits, the
5198 * work gets canceled, so just cancel this request as well instead
5199 * of executing it. We can't safely execute it anyway, as we may not
5200 * have the needed state needed for it anyway.
d7718a9d 5201 */
355fb9e2 5202 ret = io_req_task_work_add(req);
aa96bf8a 5203 if (unlikely(ret)) {
e3aabf95 5204 WRITE_ONCE(poll->canceled, true);
eab30c4d 5205 io_req_task_work_add_fallback(req, func);
aa96bf8a 5206 }
d7718a9d
JA
5207 return 1;
5208}
5209
74ce6ce4
JA
5210static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
5211 __acquires(&req->ctx->completion_lock)
5212{
5213 struct io_ring_ctx *ctx = req->ctx;
5214
5215 if (!req->result && !READ_ONCE(poll->canceled)) {
5216 struct poll_table_struct pt = { ._key = poll->events };
5217
5218 req->result = vfs_poll(req->file, &pt) & poll->events;
5219 }
5220
5221 spin_lock_irq(&ctx->completion_lock);
5222 if (!req->result && !READ_ONCE(poll->canceled)) {
5223 add_wait_queue(poll->head, &poll->wait);
5224 return true;
5225 }
5226
5227 return false;
5228}
5229
d4e7cd36 5230static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
18bceab1 5231{
e8c2bc1f 5232 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
d4e7cd36 5233 if (req->opcode == IORING_OP_POLL_ADD)
e8c2bc1f 5234 return req->async_data;
d4e7cd36
JA
5235 return req->apoll->double_poll;
5236}
5237
5238static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5239{
5240 if (req->opcode == IORING_OP_POLL_ADD)
5241 return &req->poll;
5242 return &req->apoll->poll;
5243}
5244
5245static void io_poll_remove_double(struct io_kiocb *req)
5246{
5247 struct io_poll_iocb *poll = io_poll_get_double(req);
18bceab1
JA
5248
5249 lockdep_assert_held(&req->ctx->completion_lock);
5250
5251 if (poll && poll->head) {
5252 struct wait_queue_head *head = poll->head;
5253
5254 spin_lock(&head->lock);
5255 list_del_init(&poll->wait.entry);
5256 if (poll->wait.private)
5257 refcount_dec(&req->refs);
5258 poll->head = NULL;
5259 spin_unlock(&head->lock);
5260 }
5261}
5262
5263static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
5264{
5265 struct io_ring_ctx *ctx = req->ctx;
5266
d4e7cd36 5267 io_poll_remove_double(req);
18bceab1
JA
5268 req->poll.done = true;
5269 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
5270 io_commit_cqring(ctx);
5271}
5272
dd221f46 5273static void io_poll_task_func(struct callback_head *cb)
18bceab1 5274{
dd221f46 5275 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
18bceab1 5276 struct io_ring_ctx *ctx = req->ctx;
dd221f46 5277 struct io_kiocb *nxt;
18bceab1
JA
5278
5279 if (io_poll_rewait(req, &req->poll)) {
5280 spin_unlock_irq(&ctx->completion_lock);
dd221f46
PB
5281 } else {
5282 hash_del(&req->hash_node);
5283 io_poll_complete(req, req->result, 0);
5284 spin_unlock_irq(&ctx->completion_lock);
18bceab1 5285
dd221f46
PB
5286 nxt = io_put_req_find_next(req);
5287 io_cqring_ev_posted(ctx);
5288 if (nxt)
5289 __io_req_task_submit(nxt);
5290 }
18bceab1 5291
6d816e08 5292 percpu_ref_put(&ctx->refs);
18bceab1
JA
5293}
5294
5295static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
5296 int sync, void *key)
5297{
5298 struct io_kiocb *req = wait->private;
d4e7cd36 5299 struct io_poll_iocb *poll = io_poll_get_single(req);
18bceab1
JA
5300 __poll_t mask = key_to_poll(key);
5301
5302 /* for instances that support it check for an event match first: */
5303 if (mask && !(mask & poll->events))
5304 return 0;
5305
8706e04e
JA
5306 list_del_init(&wait->entry);
5307
807abcb0 5308 if (poll && poll->head) {
18bceab1
JA
5309 bool done;
5310
807abcb0
JA
5311 spin_lock(&poll->head->lock);
5312 done = list_empty(&poll->wait.entry);
18bceab1 5313 if (!done)
807abcb0 5314 list_del_init(&poll->wait.entry);
d4e7cd36
JA
5315 /* make sure double remove sees this as being gone */
5316 wait->private = NULL;
807abcb0 5317 spin_unlock(&poll->head->lock);
c8b5e260
JA
5318 if (!done) {
5319 /* use wait func handler, so it matches the rq type */
5320 poll->wait.func(&poll->wait, mode, sync, key);
5321 }
18bceab1
JA
5322 }
5323 refcount_dec(&req->refs);
5324 return 1;
5325}
5326
5327static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5328 wait_queue_func_t wake_func)
5329{
5330 poll->head = NULL;
5331 poll->done = false;
5332 poll->canceled = false;
5333 poll->events = events;
5334 INIT_LIST_HEAD(&poll->wait.entry);
5335 init_waitqueue_func_entry(&poll->wait, wake_func);
5336}
5337
5338static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
5339 struct wait_queue_head *head,
5340 struct io_poll_iocb **poll_ptr)
18bceab1
JA
5341{
5342 struct io_kiocb *req = pt->req;
5343
5344 /*
5345 * If poll->head is already set, it's because the file being polled
5346 * uses multiple waitqueues for poll handling (eg one for read, one
5347 * for write). Setup a separate io_poll_iocb if this happens.
5348 */
5349 if (unlikely(poll->head)) {
58852d4d
PB
5350 struct io_poll_iocb *poll_one = poll;
5351
18bceab1 5352 /* already have a 2nd entry, fail a third attempt */
807abcb0 5353 if (*poll_ptr) {
18bceab1
JA
5354 pt->error = -EINVAL;
5355 return;
5356 }
5357 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5358 if (!poll) {
5359 pt->error = -ENOMEM;
5360 return;
5361 }
58852d4d 5362 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
18bceab1
JA
5363 refcount_inc(&req->refs);
5364 poll->wait.private = req;
807abcb0 5365 *poll_ptr = poll;
18bceab1
JA
5366 }
5367
5368 pt->error = 0;
5369 poll->head = head;
a31eb4a2
JX
5370
5371 if (poll->events & EPOLLEXCLUSIVE)
5372 add_wait_queue_exclusive(head, &poll->wait);
5373 else
5374 add_wait_queue(head, &poll->wait);
18bceab1
JA
5375}
5376
5377static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5378 struct poll_table_struct *p)
5379{
5380 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
807abcb0 5381 struct async_poll *apoll = pt->req->apoll;
18bceab1 5382
807abcb0 5383 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
18bceab1
JA
5384}
5385
d7718a9d
JA
5386static void io_async_task_func(struct callback_head *cb)
5387{
5388 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5389 struct async_poll *apoll = req->apoll;
5390 struct io_ring_ctx *ctx = req->ctx;
5391
5392 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5393
74ce6ce4 5394 if (io_poll_rewait(req, &apoll->poll)) {
d7718a9d 5395 spin_unlock_irq(&ctx->completion_lock);
6d816e08 5396 percpu_ref_put(&ctx->refs);
74ce6ce4 5397 return;
d7718a9d
JA
5398 }
5399
31067255 5400 /* If req is still hashed, it cannot have been canceled. Don't check. */
0be0b0e3 5401 if (hash_hashed(&req->hash_node))
74ce6ce4 5402 hash_del(&req->hash_node);
2bae047e 5403
d4e7cd36 5404 io_poll_remove_double(req);
74ce6ce4
JA
5405 spin_unlock_irq(&ctx->completion_lock);
5406
0be0b0e3
PB
5407 if (!READ_ONCE(apoll->poll.canceled))
5408 __io_req_task_submit(req);
5409 else
5410 __io_req_task_cancel(req, -ECANCELED);
aa340845 5411
6d816e08 5412 percpu_ref_put(&ctx->refs);
807abcb0 5413 kfree(apoll->double_poll);
31067255 5414 kfree(apoll);
d7718a9d
JA
5415}
5416
5417static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5418 void *key)
5419{
5420 struct io_kiocb *req = wait->private;
5421 struct io_poll_iocb *poll = &req->apoll->poll;
5422
5423 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5424 key_to_poll(key));
5425
5426 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5427}
5428
5429static void io_poll_req_insert(struct io_kiocb *req)
5430{
5431 struct io_ring_ctx *ctx = req->ctx;
5432 struct hlist_head *list;
5433
5434 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5435 hlist_add_head(&req->hash_node, list);
5436}
5437
5438static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5439 struct io_poll_iocb *poll,
5440 struct io_poll_table *ipt, __poll_t mask,
5441 wait_queue_func_t wake_func)
5442 __acquires(&ctx->completion_lock)
5443{
5444 struct io_ring_ctx *ctx = req->ctx;
5445 bool cancel = false;
5446
4d52f338 5447 INIT_HLIST_NODE(&req->hash_node);
18bceab1 5448 io_init_poll_iocb(poll, mask, wake_func);
b90cd197 5449 poll->file = req->file;
18bceab1 5450 poll->wait.private = req;
d7718a9d
JA
5451
5452 ipt->pt._key = mask;
5453 ipt->req = req;
5454 ipt->error = -EINVAL;
5455
d7718a9d
JA
5456 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5457
5458 spin_lock_irq(&ctx->completion_lock);
5459 if (likely(poll->head)) {
5460 spin_lock(&poll->head->lock);
5461 if (unlikely(list_empty(&poll->wait.entry))) {
5462 if (ipt->error)
5463 cancel = true;
5464 ipt->error = 0;
5465 mask = 0;
5466 }
5467 if (mask || ipt->error)
5468 list_del_init(&poll->wait.entry);
5469 else if (cancel)
5470 WRITE_ONCE(poll->canceled, true);
5471 else if (!poll->done) /* actually waiting for an event */
5472 io_poll_req_insert(req);
5473 spin_unlock(&poll->head->lock);
5474 }
5475
5476 return mask;
5477}
5478
5479static bool io_arm_poll_handler(struct io_kiocb *req)
5480{
5481 const struct io_op_def *def = &io_op_defs[req->opcode];
5482 struct io_ring_ctx *ctx = req->ctx;
5483 struct async_poll *apoll;
5484 struct io_poll_table ipt;
5485 __poll_t mask, ret;
9dab14b8 5486 int rw;
d7718a9d
JA
5487
5488 if (!req->file || !file_can_poll(req->file))
5489 return false;
24c74678 5490 if (req->flags & REQ_F_POLLED)
d7718a9d 5491 return false;
9dab14b8
JA
5492 if (def->pollin)
5493 rw = READ;
5494 else if (def->pollout)
5495 rw = WRITE;
5496 else
5497 return false;
5498 /* if we can't nonblock try, then no point in arming a poll handler */
5499 if (!io_file_supports_async(req->file, rw))
d7718a9d
JA
5500 return false;
5501
5502 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5503 if (unlikely(!apoll))
5504 return false;
807abcb0 5505 apoll->double_poll = NULL;
d7718a9d
JA
5506
5507 req->flags |= REQ_F_POLLED;
d7718a9d 5508 req->apoll = apoll;
d7718a9d 5509
8755d97a 5510 mask = 0;
d7718a9d 5511 if (def->pollin)
8755d97a 5512 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
5513 if (def->pollout)
5514 mask |= POLLOUT | POLLWRNORM;
901341bb
LH
5515
5516 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5517 if ((req->opcode == IORING_OP_RECVMSG) &&
5518 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5519 mask &= ~POLLIN;
5520
d7718a9d
JA
5521 mask |= POLLERR | POLLPRI;
5522
5523 ipt.pt._qproc = io_async_queue_proc;
5524
5525 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5526 io_async_wake);
a36da65c 5527 if (ret || ipt.error) {
d4e7cd36 5528 io_poll_remove_double(req);
d7718a9d 5529 spin_unlock_irq(&ctx->completion_lock);
807abcb0 5530 kfree(apoll->double_poll);
d7718a9d
JA
5531 kfree(apoll);
5532 return false;
5533 }
5534 spin_unlock_irq(&ctx->completion_lock);
5535 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5536 apoll->poll.events);
5537 return true;
5538}
5539
5540static bool __io_poll_remove_one(struct io_kiocb *req,
5541 struct io_poll_iocb *poll)
221c5eb2 5542{
b41e9852 5543 bool do_complete = false;
221c5eb2
JA
5544
5545 spin_lock(&poll->head->lock);
5546 WRITE_ONCE(poll->canceled, true);
392edb45
JA
5547 if (!list_empty(&poll->wait.entry)) {
5548 list_del_init(&poll->wait.entry);
b41e9852 5549 do_complete = true;
221c5eb2
JA
5550 }
5551 spin_unlock(&poll->head->lock);
3bfa5bcb 5552 hash_del(&req->hash_node);
d7718a9d
JA
5553 return do_complete;
5554}
5555
5556static bool io_poll_remove_one(struct io_kiocb *req)
5557{
5558 bool do_complete;
5559
d4e7cd36
JA
5560 io_poll_remove_double(req);
5561
d7718a9d
JA
5562 if (req->opcode == IORING_OP_POLL_ADD) {
5563 do_complete = __io_poll_remove_one(req, &req->poll);
5564 } else {
3bfa5bcb
JA
5565 struct async_poll *apoll = req->apoll;
5566
d7718a9d 5567 /* non-poll requests have submit ref still */
3bfa5bcb
JA
5568 do_complete = __io_poll_remove_one(req, &apoll->poll);
5569 if (do_complete) {
d7718a9d 5570 io_put_req(req);
807abcb0 5571 kfree(apoll->double_poll);
3bfa5bcb
JA
5572 kfree(apoll);
5573 }
b1f573bd
XW
5574 }
5575
b41e9852
JA
5576 if (do_complete) {
5577 io_cqring_fill_event(req, -ECANCELED);
5578 io_commit_cqring(req->ctx);
f254ac04 5579 req_set_fail_links(req);
216578e5 5580 io_put_req_deferred(req, 1);
b41e9852
JA
5581 }
5582
5583 return do_complete;
221c5eb2
JA
5584}
5585
76e1b642
JA
5586/*
5587 * Returns true if we found and killed one or more poll requests
5588 */
6b81928d
PB
5589static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5590 struct files_struct *files)
221c5eb2 5591{
78076bb6 5592 struct hlist_node *tmp;
221c5eb2 5593 struct io_kiocb *req;
8e2e1faf 5594 int posted = 0, i;
221c5eb2
JA
5595
5596 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
5597 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5598 struct hlist_head *list;
5599
5600 list = &ctx->cancel_hash[i];
f3606e3a 5601 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
6b81928d 5602 if (io_match_task(req, tsk, files))
f3606e3a
JA
5603 posted += io_poll_remove_one(req);
5604 }
221c5eb2
JA
5605 }
5606 spin_unlock_irq(&ctx->completion_lock);
b41e9852 5607
8e2e1faf
JA
5608 if (posted)
5609 io_cqring_ev_posted(ctx);
76e1b642
JA
5610
5611 return posted != 0;
221c5eb2
JA
5612}
5613
47f46768
JA
5614static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5615{
78076bb6 5616 struct hlist_head *list;
47f46768
JA
5617 struct io_kiocb *req;
5618
78076bb6
JA
5619 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5620 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
5621 if (sqe_addr != req->user_data)
5622 continue;
5623 if (io_poll_remove_one(req))
eac406c6 5624 return 0;
b41e9852 5625 return -EALREADY;
47f46768
JA
5626 }
5627
5628 return -ENOENT;
5629}
5630
3529d8c2
JA
5631static int io_poll_remove_prep(struct io_kiocb *req,
5632 const struct io_uring_sqe *sqe)
0969e783 5633{
0969e783
JA
5634 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5635 return -EINVAL;
5636 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5637 sqe->poll_events)
5638 return -EINVAL;
5639
018043be 5640 req->poll_remove.addr = READ_ONCE(sqe->addr);
0969e783
JA
5641 return 0;
5642}
5643
221c5eb2
JA
5644/*
5645 * Find a running poll command that matches one specified in sqe->addr,
5646 * and remove it if found.
5647 */
61e98203 5648static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
221c5eb2
JA
5649{
5650 struct io_ring_ctx *ctx = req->ctx;
47f46768 5651 int ret;
221c5eb2 5652
221c5eb2 5653 spin_lock_irq(&ctx->completion_lock);
018043be 5654 ret = io_poll_cancel(ctx, req->poll_remove.addr);
221c5eb2
JA
5655 spin_unlock_irq(&ctx->completion_lock);
5656
4e88d6e7
JA
5657 if (ret < 0)
5658 req_set_fail_links(req);
e1e16097 5659 io_req_complete(req, ret);
221c5eb2
JA
5660 return 0;
5661}
5662
221c5eb2
JA
5663static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5664 void *key)
5665{
c2f2eb7d
JA
5666 struct io_kiocb *req = wait->private;
5667 struct io_poll_iocb *poll = &req->poll;
221c5eb2 5668
d7718a9d 5669 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
5670}
5671
221c5eb2
JA
5672static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5673 struct poll_table_struct *p)
5674{
5675 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5676
e8c2bc1f 5677 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
eac406c6
JA
5678}
5679
3529d8c2 5680static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
5681{
5682 struct io_poll_iocb *poll = &req->poll;
5769a351 5683 u32 events;
221c5eb2
JA
5684
5685 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5686 return -EINVAL;
5687 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5688 return -EINVAL;
5689
5769a351
JX
5690 events = READ_ONCE(sqe->poll32_events);
5691#ifdef __BIG_ENDIAN
5692 events = swahw32(events);
5693#endif
a31eb4a2
JX
5694 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5695 (events & EPOLLEXCLUSIVE);
0969e783
JA
5696 return 0;
5697}
5698
61e98203 5699static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
0969e783
JA
5700{
5701 struct io_poll_iocb *poll = &req->poll;
5702 struct io_ring_ctx *ctx = req->ctx;
5703 struct io_poll_table ipt;
0969e783 5704 __poll_t mask;
0969e783 5705
d7718a9d 5706 ipt.pt._qproc = io_poll_queue_proc;
36703247 5707
d7718a9d
JA
5708 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5709 io_poll_wake);
221c5eb2 5710
8c838788 5711 if (mask) { /* no async, we'd stolen it */
221c5eb2 5712 ipt.error = 0;
b0dd8a41 5713 io_poll_complete(req, mask, 0);
221c5eb2 5714 }
221c5eb2
JA
5715 spin_unlock_irq(&ctx->completion_lock);
5716
8c838788
JA
5717 if (mask) {
5718 io_cqring_ev_posted(ctx);
014db007 5719 io_put_req(req);
221c5eb2 5720 }
8c838788 5721 return ipt.error;
221c5eb2
JA
5722}
5723
5262f567
JA
5724static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5725{
ad8a48ac
JA
5726 struct io_timeout_data *data = container_of(timer,
5727 struct io_timeout_data, timer);
5728 struct io_kiocb *req = data->req;
5729 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
5730 unsigned long flags;
5731
5262f567 5732 spin_lock_irqsave(&ctx->completion_lock, flags);
a71976f3 5733 list_del_init(&req->timeout.list);
01cec8c1
PB
5734 atomic_set(&req->ctx->cq_timeouts,
5735 atomic_read(&req->ctx->cq_timeouts) + 1);
5736
78e19bbe 5737 io_cqring_fill_event(req, -ETIME);
5262f567
JA
5738 io_commit_cqring(ctx);
5739 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5740
5741 io_cqring_ev_posted(ctx);
4e88d6e7 5742 req_set_fail_links(req);
5262f567
JA
5743 io_put_req(req);
5744 return HRTIMER_NORESTART;
5745}
5746
fbd15848
PB
5747static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5748 __u64 user_data)
f254ac04 5749{
fbd15848 5750 struct io_timeout_data *io;
47f46768
JA
5751 struct io_kiocb *req;
5752 int ret = -ENOENT;
f254ac04 5753
135fcde8 5754 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
47f46768 5755 if (user_data == req->user_data) {
47f46768
JA
5756 ret = 0;
5757 break;
5758 }
5759 }
5760
5761 if (ret == -ENOENT)
fbd15848
PB
5762 return ERR_PTR(ret);
5763
5764 io = req->async_data;
e8c2bc1f 5765 ret = hrtimer_try_to_cancel(&io->timer);
f254ac04 5766 if (ret == -1)
fbd15848 5767 return ERR_PTR(-EALREADY);
a71976f3 5768 list_del_init(&req->timeout.list);
fbd15848
PB
5769 return req;
5770}
47f46768 5771
fbd15848
PB
5772static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5773{
5774 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5775
5776 if (IS_ERR(req))
5777 return PTR_ERR(req);
f254ac04
JA
5778
5779 req_set_fail_links(req);
f254ac04 5780 io_cqring_fill_event(req, -ECANCELED);
216578e5 5781 io_put_req_deferred(req, 1);
f254ac04
JA
5782 return 0;
5783}
5784
9c8e11b3
PB
5785static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5786 struct timespec64 *ts, enum hrtimer_mode mode)
47f46768 5787{
9c8e11b3
PB
5788 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5789 struct io_timeout_data *data;
47f46768 5790
9c8e11b3
PB
5791 if (IS_ERR(req))
5792 return PTR_ERR(req);
47f46768 5793
9c8e11b3
PB
5794 req->timeout.off = 0; /* noseq */
5795 data = req->async_data;
5796 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5797 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5798 data->timer.function = io_timeout_fn;
5799 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5800 return 0;
47f46768
JA
5801}
5802
3529d8c2
JA
5803static int io_timeout_remove_prep(struct io_kiocb *req,
5804 const struct io_uring_sqe *sqe)
b29472ee 5805{
9c8e11b3
PB
5806 struct io_timeout_rem *tr = &req->timeout_rem;
5807
b29472ee
JA
5808 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5809 return -EINVAL;
61710e43
DA
5810 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5811 return -EINVAL;
9c8e11b3 5812 if (sqe->ioprio || sqe->buf_index || sqe->len)
b29472ee
JA
5813 return -EINVAL;
5814
9c8e11b3
PB
5815 tr->addr = READ_ONCE(sqe->addr);
5816 tr->flags = READ_ONCE(sqe->timeout_flags);
5817 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5818 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5819 return -EINVAL;
5820 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5821 return -EFAULT;
5822 } else if (tr->flags) {
5823 /* timeout removal doesn't support flags */
b29472ee 5824 return -EINVAL;
9c8e11b3 5825 }
b29472ee 5826
b29472ee
JA
5827 return 0;
5828}
5829
8662daec
PB
5830static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5831{
5832 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5833 : HRTIMER_MODE_REL;
5834}
5835
11365043
JA
5836/*
5837 * Remove or update an existing timeout command
5838 */
61e98203 5839static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
11365043 5840{
9c8e11b3 5841 struct io_timeout_rem *tr = &req->timeout_rem;
11365043 5842 struct io_ring_ctx *ctx = req->ctx;
47f46768 5843 int ret;
11365043 5844
11365043 5845 spin_lock_irq(&ctx->completion_lock);
8662daec 5846 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
9c8e11b3 5847 ret = io_timeout_cancel(ctx, tr->addr);
8662daec
PB
5848 else
5849 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5850 io_translate_timeout_mode(tr->flags));
11365043 5851
47f46768 5852 io_cqring_fill_event(req, ret);
11365043
JA
5853 io_commit_cqring(ctx);
5854 spin_unlock_irq(&ctx->completion_lock);
5262f567 5855 io_cqring_ev_posted(ctx);
4e88d6e7
JA
5856 if (ret < 0)
5857 req_set_fail_links(req);
ec9c02ad 5858 io_put_req(req);
11365043 5859 return 0;
5262f567
JA
5860}
5861
3529d8c2 5862static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 5863 bool is_timeout_link)
5262f567 5864{
ad8a48ac 5865 struct io_timeout_data *data;
a41525ab 5866 unsigned flags;
56080b02 5867 u32 off = READ_ONCE(sqe->off);
5262f567 5868
ad8a48ac 5869 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 5870 return -EINVAL;
ad8a48ac 5871 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 5872 return -EINVAL;
56080b02 5873 if (off && is_timeout_link)
2d28390a 5874 return -EINVAL;
a41525ab
JA
5875 flags = READ_ONCE(sqe->timeout_flags);
5876 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 5877 return -EINVAL;
bdf20073 5878
bfe68a22 5879 req->timeout.off = off;
26a61679 5880
e8c2bc1f 5881 if (!req->async_data && io_alloc_async_data(req))
26a61679
JA
5882 return -ENOMEM;
5883
e8c2bc1f 5884 data = req->async_data;
ad8a48ac 5885 data->req = req;
ad8a48ac
JA
5886
5887 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
5888 return -EFAULT;
5889
8662daec 5890 data->mode = io_translate_timeout_mode(flags);
ad8a48ac
JA
5891 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5892 return 0;
5893}
5894
61e98203 5895static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
ad8a48ac 5896{
ad8a48ac 5897 struct io_ring_ctx *ctx = req->ctx;
e8c2bc1f 5898 struct io_timeout_data *data = req->async_data;
ad8a48ac 5899 struct list_head *entry;
bfe68a22 5900 u32 tail, off = req->timeout.off;
ad8a48ac 5901
733f5c95 5902 spin_lock_irq(&ctx->completion_lock);
93bd25bb 5903
5262f567
JA
5904 /*
5905 * sqe->off holds how many events that need to occur for this
93bd25bb
JA
5906 * timeout event to be satisfied. If it isn't set, then this is
5907 * a pure timeout request, sequence isn't used.
5262f567 5908 */
8eb7e2d0 5909 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
5910 entry = ctx->timeout_list.prev;
5911 goto add;
5912 }
5262f567 5913
bfe68a22
PB
5914 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5915 req->timeout.target_seq = tail + off;
5262f567 5916
f010505b
MDG
5917 /* Update the last seq here in case io_flush_timeouts() hasn't.
5918 * This is safe because ->completion_lock is held, and submissions
5919 * and completions are never mixed in the same ->completion_lock section.
5920 */
5921 ctx->cq_last_tm_flush = tail;
5922
5262f567
JA
5923 /*
5924 * Insertion sort, ensuring the first entry in the list is always
5925 * the one we need first.
5926 */
5262f567 5927 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
5928 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5929 timeout.list);
5262f567 5930
8eb7e2d0 5931 if (io_is_timeout_noseq(nxt))
93bd25bb 5932 continue;
bfe68a22
PB
5933 /* nxt.seq is behind @tail, otherwise would've been completed */
5934 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
5935 break;
5936 }
93bd25bb 5937add:
135fcde8 5938 list_add(&req->timeout.list, entry);
ad8a48ac
JA
5939 data->timer.function = io_timeout_fn;
5940 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 5941 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
5942 return 0;
5943}
5262f567 5944
62755e35
JA
5945static bool io_cancel_cb(struct io_wq_work *work, void *data)
5946{
5947 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5948
5949 return req->user_data == (unsigned long) data;
5950}
5951
e977d6d3 5952static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 5953{
62755e35 5954 enum io_wq_cancel cancel_ret;
62755e35
JA
5955 int ret = 0;
5956
4f26bda1 5957 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
62755e35
JA
5958 switch (cancel_ret) {
5959 case IO_WQ_CANCEL_OK:
5960 ret = 0;
5961 break;
5962 case IO_WQ_CANCEL_RUNNING:
5963 ret = -EALREADY;
5964 break;
5965 case IO_WQ_CANCEL_NOTFOUND:
5966 ret = -ENOENT;
5967 break;
5968 }
5969
e977d6d3
JA
5970 return ret;
5971}
5972
47f46768
JA
5973static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5974 struct io_kiocb *req, __u64 sqe_addr,
014db007 5975 int success_ret)
47f46768
JA
5976{
5977 unsigned long flags;
5978 int ret;
5979
5980 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5981 if (ret != -ENOENT) {
5982 spin_lock_irqsave(&ctx->completion_lock, flags);
5983 goto done;
5984 }
5985
5986 spin_lock_irqsave(&ctx->completion_lock, flags);
5987 ret = io_timeout_cancel(ctx, sqe_addr);
5988 if (ret != -ENOENT)
5989 goto done;
5990 ret = io_poll_cancel(ctx, sqe_addr);
5991done:
b0dd8a41
JA
5992 if (!ret)
5993 ret = success_ret;
47f46768
JA
5994 io_cqring_fill_event(req, ret);
5995 io_commit_cqring(ctx);
5996 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5997 io_cqring_ev_posted(ctx);
5998
4e88d6e7
JA
5999 if (ret < 0)
6000 req_set_fail_links(req);
014db007 6001 io_put_req(req);
47f46768
JA
6002}
6003
3529d8c2
JA
6004static int io_async_cancel_prep(struct io_kiocb *req,
6005 const struct io_uring_sqe *sqe)
e977d6d3 6006{
fbf23849 6007 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3 6008 return -EINVAL;
61710e43
DA
6009 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6010 return -EINVAL;
6011 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
e977d6d3
JA
6012 return -EINVAL;
6013
fbf23849
JA
6014 req->cancel.addr = READ_ONCE(sqe->addr);
6015 return 0;
6016}
6017
61e98203 6018static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
6019{
6020 struct io_ring_ctx *ctx = req->ctx;
fbf23849 6021
014db007 6022 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
6023 return 0;
6024}
6025
269bbe5f 6026static int io_rsrc_update_prep(struct io_kiocb *req,
05f3fb3c
JA
6027 const struct io_uring_sqe *sqe)
6028{
6ca56f84
JA
6029 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
6030 return -EINVAL;
61710e43
DA
6031 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6032 return -EINVAL;
6033 if (sqe->ioprio || sqe->rw_flags)
05f3fb3c
JA
6034 return -EINVAL;
6035
269bbe5f
BM
6036 req->rsrc_update.offset = READ_ONCE(sqe->off);
6037 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6038 if (!req->rsrc_update.nr_args)
05f3fb3c 6039 return -EINVAL;
269bbe5f 6040 req->rsrc_update.arg = READ_ONCE(sqe->addr);
05f3fb3c
JA
6041 return 0;
6042}
6043
889fca73 6044static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
fbf23849
JA
6045{
6046 struct io_ring_ctx *ctx = req->ctx;
269bbe5f 6047 struct io_uring_rsrc_update up;
05f3fb3c 6048 int ret;
fbf23849 6049
45d189c6 6050 if (issue_flags & IO_URING_F_NONBLOCK)
05f3fb3c 6051 return -EAGAIN;
05f3fb3c 6052
269bbe5f
BM
6053 up.offset = req->rsrc_update.offset;
6054 up.data = req->rsrc_update.arg;
05f3fb3c
JA
6055
6056 mutex_lock(&ctx->uring_lock);
269bbe5f 6057 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
05f3fb3c
JA
6058 mutex_unlock(&ctx->uring_lock);
6059
6060 if (ret < 0)
6061 req_set_fail_links(req);
889fca73 6062 __io_req_complete(req, issue_flags, ret, 0);
5262f567
JA
6063 return 0;
6064}
6065
bfe76559 6066static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f67676d1 6067{
d625c6ee 6068 switch (req->opcode) {
e781573e 6069 case IORING_OP_NOP:
bfe76559 6070 return 0;
f67676d1
JA
6071 case IORING_OP_READV:
6072 case IORING_OP_READ_FIXED:
3a6820f2 6073 case IORING_OP_READ:
bfe76559 6074 return io_read_prep(req, sqe);
f67676d1
JA
6075 case IORING_OP_WRITEV:
6076 case IORING_OP_WRITE_FIXED:
3a6820f2 6077 case IORING_OP_WRITE:
bfe76559 6078 return io_write_prep(req, sqe);
0969e783 6079 case IORING_OP_POLL_ADD:
bfe76559 6080 return io_poll_add_prep(req, sqe);
0969e783 6081 case IORING_OP_POLL_REMOVE:
bfe76559 6082 return io_poll_remove_prep(req, sqe);
8ed8d3c3 6083 case IORING_OP_FSYNC:
bfe76559 6084 return io_prep_fsync(req, sqe);
8ed8d3c3 6085 case IORING_OP_SYNC_FILE_RANGE:
bfe76559 6086 return io_prep_sfr(req, sqe);
03b1230c 6087 case IORING_OP_SENDMSG:
fddaface 6088 case IORING_OP_SEND:
bfe76559 6089 return io_sendmsg_prep(req, sqe);
03b1230c 6090 case IORING_OP_RECVMSG:
fddaface 6091 case IORING_OP_RECV:
bfe76559 6092 return io_recvmsg_prep(req, sqe);
f499a021 6093 case IORING_OP_CONNECT:
bfe76559 6094 return io_connect_prep(req, sqe);
2d28390a 6095 case IORING_OP_TIMEOUT:
bfe76559 6096 return io_timeout_prep(req, sqe, false);
b29472ee 6097 case IORING_OP_TIMEOUT_REMOVE:
bfe76559 6098 return io_timeout_remove_prep(req, sqe);
fbf23849 6099 case IORING_OP_ASYNC_CANCEL:
bfe76559 6100 return io_async_cancel_prep(req, sqe);
2d28390a 6101 case IORING_OP_LINK_TIMEOUT:
bfe76559 6102 return io_timeout_prep(req, sqe, true);
8ed8d3c3 6103 case IORING_OP_ACCEPT:
bfe76559 6104 return io_accept_prep(req, sqe);
d63d1b5e 6105 case IORING_OP_FALLOCATE:
bfe76559 6106 return io_fallocate_prep(req, sqe);
15b71abe 6107 case IORING_OP_OPENAT:
bfe76559 6108 return io_openat_prep(req, sqe);
b5dba59e 6109 case IORING_OP_CLOSE:
bfe76559 6110 return io_close_prep(req, sqe);
05f3fb3c 6111 case IORING_OP_FILES_UPDATE:
269bbe5f 6112 return io_rsrc_update_prep(req, sqe);
eddc7ef5 6113 case IORING_OP_STATX:
bfe76559 6114 return io_statx_prep(req, sqe);
4840e418 6115 case IORING_OP_FADVISE:
bfe76559 6116 return io_fadvise_prep(req, sqe);
c1ca757b 6117 case IORING_OP_MADVISE:
bfe76559 6118 return io_madvise_prep(req, sqe);
cebdb986 6119 case IORING_OP_OPENAT2:
bfe76559 6120 return io_openat2_prep(req, sqe);
3e4827b0 6121 case IORING_OP_EPOLL_CTL:
bfe76559 6122 return io_epoll_ctl_prep(req, sqe);
7d67af2c 6123 case IORING_OP_SPLICE:
bfe76559 6124 return io_splice_prep(req, sqe);
ddf0322d 6125 case IORING_OP_PROVIDE_BUFFERS:
bfe76559 6126 return io_provide_buffers_prep(req, sqe);
067524e9 6127 case IORING_OP_REMOVE_BUFFERS:
bfe76559 6128 return io_remove_buffers_prep(req, sqe);
f2a8d5c7 6129 case IORING_OP_TEE:
bfe76559 6130 return io_tee_prep(req, sqe);
36f4fa68
JA
6131 case IORING_OP_SHUTDOWN:
6132 return io_shutdown_prep(req, sqe);
80a261fd
JA
6133 case IORING_OP_RENAMEAT:
6134 return io_renameat_prep(req, sqe);
14a1143b
JA
6135 case IORING_OP_UNLINKAT:
6136 return io_unlinkat_prep(req, sqe);
f67676d1
JA
6137 }
6138
bfe76559
PB
6139 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6140 req->opcode);
6141 return-EINVAL;
6142}
6143
6144static int io_req_defer_prep(struct io_kiocb *req,
6145 const struct io_uring_sqe *sqe)
6146{
bfe76559
PB
6147 if (!sqe)
6148 return 0;
6149 if (io_alloc_async_data(req))
6150 return -EAGAIN;
bfe76559 6151 return io_req_prep(req, sqe);
f67676d1
JA
6152}
6153
9cf7c104
PB
6154static u32 io_get_sequence(struct io_kiocb *req)
6155{
6156 struct io_kiocb *pos;
6157 struct io_ring_ctx *ctx = req->ctx;
f2f87370 6158 u32 total_submitted, nr_reqs = 0;
9cf7c104 6159
f2f87370
PB
6160 io_for_each_link(pos, req)
6161 nr_reqs++;
9cf7c104
PB
6162
6163 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
6164 return total_submitted - nr_reqs;
6165}
6166
3529d8c2 6167static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
de0617e4 6168{
a197f664 6169 struct io_ring_ctx *ctx = req->ctx;
27dc8338 6170 struct io_defer_entry *de;
f67676d1 6171 int ret;
9cf7c104 6172 u32 seq;
de0617e4 6173
9d858b21 6174 /* Still need defer if there is pending req in defer list. */
9cf7c104
PB
6175 if (likely(list_empty_careful(&ctx->defer_list) &&
6176 !(req->flags & REQ_F_IO_DRAIN)))
6177 return 0;
6178
6179 seq = io_get_sequence(req);
6180 /* Still a chance to pass the sequence check */
6181 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
de0617e4
JA
6182 return 0;
6183
e8c2bc1f 6184 if (!req->async_data) {
650b5481 6185 ret = io_req_defer_prep(req, sqe);
327d6d96 6186 if (ret)
650b5481
PB
6187 return ret;
6188 }
cbdcb435 6189 io_prep_async_link(req);
27dc8338
PB
6190 de = kmalloc(sizeof(*de), GFP_KERNEL);
6191 if (!de)
6192 return -ENOMEM;
2d28390a 6193
de0617e4 6194 spin_lock_irq(&ctx->completion_lock);
9cf7c104 6195 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
de0617e4 6196 spin_unlock_irq(&ctx->completion_lock);
27dc8338 6197 kfree(de);
ae34817b
PB
6198 io_queue_async_work(req);
6199 return -EIOCBQUEUED;
de0617e4
JA
6200 }
6201
915967f6 6202 trace_io_uring_defer(ctx, req, req->user_data);
27dc8338 6203 de->req = req;
9cf7c104 6204 de->seq = seq;
27dc8338 6205 list_add_tail(&de->list, &ctx->defer_list);
de0617e4
JA
6206 spin_unlock_irq(&ctx->completion_lock);
6207 return -EIOCBQUEUED;
6208}
6209
3ca405eb 6210static void __io_clean_op(struct io_kiocb *req)
99bc4c38 6211{
0e1b6fe3
PB
6212 if (req->flags & REQ_F_BUFFER_SELECTED) {
6213 switch (req->opcode) {
6214 case IORING_OP_READV:
6215 case IORING_OP_READ_FIXED:
6216 case IORING_OP_READ:
bcda7baa 6217 kfree((void *)(unsigned long)req->rw.addr);
0e1b6fe3
PB
6218 break;
6219 case IORING_OP_RECVMSG:
6220 case IORING_OP_RECV:
bcda7baa 6221 kfree(req->sr_msg.kbuf);
0e1b6fe3
PB
6222 break;
6223 }
6224 req->flags &= ~REQ_F_BUFFER_SELECTED;
99bc4c38
PB
6225 }
6226
0e1b6fe3
PB
6227 if (req->flags & REQ_F_NEED_CLEANUP) {
6228 switch (req->opcode) {
6229 case IORING_OP_READV:
6230 case IORING_OP_READ_FIXED:
6231 case IORING_OP_READ:
6232 case IORING_OP_WRITEV:
6233 case IORING_OP_WRITE_FIXED:
e8c2bc1f
JA
6234 case IORING_OP_WRITE: {
6235 struct io_async_rw *io = req->async_data;
6236 if (io->free_iovec)
6237 kfree(io->free_iovec);
0e1b6fe3 6238 break;
e8c2bc1f 6239 }
0e1b6fe3 6240 case IORING_OP_RECVMSG:
e8c2bc1f
JA
6241 case IORING_OP_SENDMSG: {
6242 struct io_async_msghdr *io = req->async_data;
257e84a5
PB
6243
6244 kfree(io->free_iov);
0e1b6fe3 6245 break;
e8c2bc1f 6246 }
0e1b6fe3
PB
6247 case IORING_OP_SPLICE:
6248 case IORING_OP_TEE:
6249 io_put_file(req, req->splice.file_in,
6250 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
6251 break;
f3cd4850
JA
6252 case IORING_OP_OPENAT:
6253 case IORING_OP_OPENAT2:
6254 if (req->open.filename)
6255 putname(req->open.filename);
6256 break;
80a261fd
JA
6257 case IORING_OP_RENAMEAT:
6258 putname(req->rename.oldpath);
6259 putname(req->rename.newpath);
6260 break;
14a1143b
JA
6261 case IORING_OP_UNLINKAT:
6262 putname(req->unlink.filename);
6263 break;
0e1b6fe3
PB
6264 }
6265 req->flags &= ~REQ_F_NEED_CLEANUP;
99bc4c38 6266 }
99bc4c38
PB
6267}
6268
889fca73 6269static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
2b188cc1 6270{
a197f664 6271 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 6272 int ret;
2b188cc1 6273
d625c6ee 6274 switch (req->opcode) {
2b188cc1 6275 case IORING_OP_NOP:
889fca73 6276 ret = io_nop(req, issue_flags);
2b188cc1
JA
6277 break;
6278 case IORING_OP_READV:
edafccee 6279 case IORING_OP_READ_FIXED:
3a6820f2 6280 case IORING_OP_READ:
889fca73 6281 ret = io_read(req, issue_flags);
edafccee 6282 break;
3529d8c2 6283 case IORING_OP_WRITEV:
edafccee 6284 case IORING_OP_WRITE_FIXED:
3a6820f2 6285 case IORING_OP_WRITE:
889fca73 6286 ret = io_write(req, issue_flags);
2b188cc1 6287 break;
c992fe29 6288 case IORING_OP_FSYNC:
45d189c6 6289 ret = io_fsync(req, issue_flags);
c992fe29 6290 break;
221c5eb2 6291 case IORING_OP_POLL_ADD:
61e98203 6292 ret = io_poll_add(req, issue_flags);
221c5eb2
JA
6293 break;
6294 case IORING_OP_POLL_REMOVE:
61e98203 6295 ret = io_poll_remove(req, issue_flags);
221c5eb2 6296 break;
5d17b4a4 6297 case IORING_OP_SYNC_FILE_RANGE:
45d189c6 6298 ret = io_sync_file_range(req, issue_flags);
5d17b4a4 6299 break;
0fa03c62 6300 case IORING_OP_SENDMSG:
889fca73 6301 ret = io_sendmsg(req, issue_flags);
062d04d7 6302 break;
fddaface 6303 case IORING_OP_SEND:
889fca73 6304 ret = io_send(req, issue_flags);
0fa03c62 6305 break;
aa1fa28f 6306 case IORING_OP_RECVMSG:
889fca73 6307 ret = io_recvmsg(req, issue_flags);
062d04d7 6308 break;
fddaface 6309 case IORING_OP_RECV:
889fca73 6310 ret = io_recv(req, issue_flags);
aa1fa28f 6311 break;
5262f567 6312 case IORING_OP_TIMEOUT:
61e98203 6313 ret = io_timeout(req, issue_flags);
5262f567 6314 break;
11365043 6315 case IORING_OP_TIMEOUT_REMOVE:
61e98203 6316 ret = io_timeout_remove(req, issue_flags);
11365043 6317 break;
17f2fe35 6318 case IORING_OP_ACCEPT:
889fca73 6319 ret = io_accept(req, issue_flags);
17f2fe35 6320 break;
f8e85cf2 6321 case IORING_OP_CONNECT:
889fca73 6322 ret = io_connect(req, issue_flags);
f8e85cf2 6323 break;
62755e35 6324 case IORING_OP_ASYNC_CANCEL:
61e98203 6325 ret = io_async_cancel(req, issue_flags);
62755e35 6326 break;
d63d1b5e 6327 case IORING_OP_FALLOCATE:
45d189c6 6328 ret = io_fallocate(req, issue_flags);
d63d1b5e 6329 break;
15b71abe 6330 case IORING_OP_OPENAT:
45d189c6 6331 ret = io_openat(req, issue_flags);
15b71abe 6332 break;
b5dba59e 6333 case IORING_OP_CLOSE:
889fca73 6334 ret = io_close(req, issue_flags);
b5dba59e 6335 break;
05f3fb3c 6336 case IORING_OP_FILES_UPDATE:
889fca73 6337 ret = io_files_update(req, issue_flags);
05f3fb3c 6338 break;
eddc7ef5 6339 case IORING_OP_STATX:
45d189c6 6340 ret = io_statx(req, issue_flags);
eddc7ef5 6341 break;
4840e418 6342 case IORING_OP_FADVISE:
45d189c6 6343 ret = io_fadvise(req, issue_flags);
4840e418 6344 break;
c1ca757b 6345 case IORING_OP_MADVISE:
45d189c6 6346 ret = io_madvise(req, issue_flags);
c1ca757b 6347 break;
cebdb986 6348 case IORING_OP_OPENAT2:
45d189c6 6349 ret = io_openat2(req, issue_flags);
cebdb986 6350 break;
3e4827b0 6351 case IORING_OP_EPOLL_CTL:
889fca73 6352 ret = io_epoll_ctl(req, issue_flags);
3e4827b0 6353 break;
7d67af2c 6354 case IORING_OP_SPLICE:
45d189c6 6355 ret = io_splice(req, issue_flags);
7d67af2c 6356 break;
ddf0322d 6357 case IORING_OP_PROVIDE_BUFFERS:
889fca73 6358 ret = io_provide_buffers(req, issue_flags);
ddf0322d 6359 break;
067524e9 6360 case IORING_OP_REMOVE_BUFFERS:
889fca73 6361 ret = io_remove_buffers(req, issue_flags);
3e4827b0 6362 break;
f2a8d5c7 6363 case IORING_OP_TEE:
45d189c6 6364 ret = io_tee(req, issue_flags);
f2a8d5c7 6365 break;
36f4fa68 6366 case IORING_OP_SHUTDOWN:
45d189c6 6367 ret = io_shutdown(req, issue_flags);
36f4fa68 6368 break;
80a261fd 6369 case IORING_OP_RENAMEAT:
45d189c6 6370 ret = io_renameat(req, issue_flags);
80a261fd 6371 break;
14a1143b 6372 case IORING_OP_UNLINKAT:
45d189c6 6373 ret = io_unlinkat(req, issue_flags);
14a1143b 6374 break;
2b188cc1
JA
6375 default:
6376 ret = -EINVAL;
6377 break;
6378 }
6379
def596e9
JA
6380 if (ret)
6381 return ret;
6382
b532576e
JA
6383 /* If the op doesn't have a file, we're not polling for it */
6384 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
11ba820b
JA
6385 const bool in_async = io_wq_current_is_worker();
6386
11ba820b
JA
6387 /* workqueue context doesn't hold uring_lock, grab it now */
6388 if (in_async)
6389 mutex_lock(&ctx->uring_lock);
6390
2e9dbe90 6391 io_iopoll_req_issued(req, in_async);
11ba820b
JA
6392
6393 if (in_async)
6394 mutex_unlock(&ctx->uring_lock);
def596e9
JA
6395 }
6396
6397 return 0;
2b188cc1
JA
6398}
6399
5280f7e5 6400static void io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
6401{
6402 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6df1db6b 6403 struct io_kiocb *timeout;
561fb04a 6404 int ret = 0;
2b188cc1 6405
6df1db6b
PB
6406 timeout = io_prep_linked_timeout(req);
6407 if (timeout)
6408 io_queue_linked_timeout(timeout);
d4c81f38 6409
4014d943 6410 if (work->flags & IO_WQ_WORK_CANCEL)
561fb04a 6411 ret = -ECANCELED;
31b51510 6412
561fb04a 6413 if (!ret) {
561fb04a 6414 do {
889fca73 6415 ret = io_issue_sqe(req, 0);
561fb04a
JA
6416 /*
6417 * We can get EAGAIN for polled IO even though we're
6418 * forcing a sync submission from here, since we can't
6419 * wait for request slots on the block side.
6420 */
6421 if (ret != -EAGAIN)
6422 break;
6423 cond_resched();
6424 } while (1);
6425 }
31b51510 6426
561fb04a 6427 if (ret) {
c07e6719
XW
6428 struct io_ring_ctx *lock_ctx = NULL;
6429
6430 if (req->ctx->flags & IORING_SETUP_IOPOLL)
6431 lock_ctx = req->ctx;
6432
dad1b124 6433 /*
c07e6719
XW
6434 * io_iopoll_complete() does not hold completion_lock to
6435 * complete polled io, so here for polled io, we can not call
6436 * io_req_complete() directly, otherwise there maybe concurrent
6437 * access to cqring, defer_list, etc, which is not safe. Given
6438 * that io_iopoll_complete() is always called under uring_lock,
6439 * so here for polled io, we also get uring_lock to complete
6440 * it.
dad1b124 6441 */
c07e6719
XW
6442 if (lock_ctx)
6443 mutex_lock(&lock_ctx->uring_lock);
dad1b124 6444
c07e6719
XW
6445 req_set_fail_links(req);
6446 io_req_complete(req, ret);
6447
6448 if (lock_ctx)
6449 mutex_unlock(&lock_ctx->uring_lock);
edafccee 6450 }
2b188cc1
JA
6451}
6452
65e19f54
JA
6453static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6454 int index)
6455{
269bbe5f 6456 struct fixed_rsrc_table *table;
65e19f54 6457
05f3fb3c 6458 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
84695089 6459 return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
6460}
6461
8371adf5
PB
6462static struct file *io_file_get(struct io_submit_state *state,
6463 struct io_kiocb *req, int fd, bool fixed)
09bb8394 6464{
a197f664 6465 struct io_ring_ctx *ctx = req->ctx;
8da11c19 6466 struct file *file;
09bb8394 6467
8da11c19 6468 if (fixed) {
479f517b 6469 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
8371adf5 6470 return NULL;
b7620121 6471 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19 6472 file = io_file_from_index(ctx, fd);
36f72fe2 6473 io_set_resource_node(req);
09bb8394 6474 } else {
c826bd7a 6475 trace_io_uring_file_get(ctx, fd);
8da11c19 6476 file = __io_file_get(state, fd);
09bb8394
JA
6477 }
6478
ce3d5aae
PB
6479 if (file && unlikely(file->f_op == &io_uring_fops))
6480 io_req_track_inflight(req);
8371adf5 6481 return file;
09bb8394
JA
6482}
6483
2665abfd 6484static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 6485{
ad8a48ac
JA
6486 struct io_timeout_data *data = container_of(timer,
6487 struct io_timeout_data, timer);
90cd7e42 6488 struct io_kiocb *prev, *req = data->req;
2665abfd 6489 struct io_ring_ctx *ctx = req->ctx;
2665abfd 6490 unsigned long flags;
2665abfd
JA
6491
6492 spin_lock_irqsave(&ctx->completion_lock, flags);
90cd7e42
PB
6493 prev = req->timeout.head;
6494 req->timeout.head = NULL;
2665abfd
JA
6495
6496 /*
6497 * We don't expect the list to be empty, that will only happen if we
6498 * race with the completion of the linked work.
6499 */
90cd7e42 6500 if (prev && refcount_inc_not_zero(&prev->refs))
f2f87370 6501 io_remove_next_linked(prev);
90cd7e42
PB
6502 else
6503 prev = NULL;
2665abfd
JA
6504 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6505
6506 if (prev) {
4e88d6e7 6507 req_set_fail_links(prev);
014db007 6508 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
9ae1f8dd 6509 io_put_req_deferred(prev, 1);
47f46768 6510 } else {
9ae1f8dd
PB
6511 io_req_complete_post(req, -ETIME, 0);
6512 io_put_req_deferred(req, 1);
2665abfd 6513 }
2665abfd
JA
6514 return HRTIMER_NORESTART;
6515}
6516
7271ef3a 6517static void __io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 6518{
76a46e06 6519 /*
f2f87370
PB
6520 * If the back reference is NULL, then our linked request finished
6521 * before we got a chance to setup the timer
76a46e06 6522 */
90cd7e42 6523 if (req->timeout.head) {
e8c2bc1f 6524 struct io_timeout_data *data = req->async_data;
94ae5e77 6525
ad8a48ac
JA
6526 data->timer.function = io_link_timeout_fn;
6527 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6528 data->mode);
2665abfd 6529 }
7271ef3a
JA
6530}
6531
6532static void io_queue_linked_timeout(struct io_kiocb *req)
6533{
6534 struct io_ring_ctx *ctx = req->ctx;
6535
6536 spin_lock_irq(&ctx->completion_lock);
6537 __io_queue_linked_timeout(req);
76a46e06 6538 spin_unlock_irq(&ctx->completion_lock);
2665abfd 6539
2665abfd 6540 /* drop submission reference */
76a46e06
JA
6541 io_put_req(req);
6542}
2665abfd 6543
ad8a48ac 6544static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd 6545{
f2f87370 6546 struct io_kiocb *nxt = req->link;
2665abfd 6547
f2f87370
PB
6548 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6549 nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 6550 return NULL;
2665abfd 6551
90cd7e42 6552 nxt->timeout.head = req;
900fad45 6553 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
76a46e06 6554 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 6555 return nxt;
2665abfd
JA
6556}
6557
c5eef2b9 6558static void __io_queue_sqe(struct io_kiocb *req)
2b188cc1 6559{
d3d7298d 6560 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
193155c8 6561 const struct cred *old_creds = NULL;
c5eef2b9 6562 int ret;
2b188cc1 6563
2e5aa6cb
PB
6564 if ((req->flags & REQ_F_WORK_INITIALIZED) &&
6565 (req->work.flags & IO_WQ_WORK_CREDS) &&
d3d7298d
PB
6566 req->work.identity->creds != current_cred())
6567 old_creds = override_creds(req->work.identity->creds);
193155c8 6568
c5eef2b9 6569 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
491381ce 6570
d3d7298d
PB
6571 if (old_creds)
6572 revert_creds(old_creds);
6573
491381ce
JA
6574 /*
6575 * We async punt it if the file wasn't marked NOWAIT, or if the file
6576 * doesn't support non-blocking read/write attempts
6577 */
24c74678 6578 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
f063c547 6579 if (!io_arm_poll_handler(req)) {
f063c547
PB
6580 /*
6581 * Queued up for async execution, worker will release
6582 * submit reference when the iocb is actually submitted.
6583 */
6584 io_queue_async_work(req);
2b188cc1 6585 }
0d63c148
PB
6586 } else if (likely(!ret)) {
6587 /* drop submission reference */
e342c807 6588 if (req->flags & REQ_F_COMPLETE_INLINE) {
c5eef2b9
PB
6589 struct io_ring_ctx *ctx = req->ctx;
6590 struct io_comp_state *cs = &ctx->submit_state.comp;
6591
6dd0be1e 6592 cs->reqs[cs->nr++] = req;
d3d7298d 6593 if (cs->nr == ARRAY_SIZE(cs->reqs))
c5eef2b9 6594 io_submit_flush_completions(cs, ctx);
9affd664 6595 } else {
d3d7298d 6596 io_put_req(req);
0d63c148
PB
6597 }
6598 } else {
4e88d6e7 6599 req_set_fail_links(req);
e65ef56d 6600 io_put_req(req);
e1e16097 6601 io_req_complete(req, ret);
9e645e11 6602 }
d3d7298d
PB
6603 if (linked_timeout)
6604 io_queue_linked_timeout(linked_timeout);
2b188cc1
JA
6605}
6606
c5eef2b9 6607static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4fe2c963
JL
6608{
6609 int ret;
6610
3529d8c2 6611 ret = io_req_defer(req, sqe);
4fe2c963
JL
6612 if (ret) {
6613 if (ret != -EIOCBQUEUED) {
1118591a 6614fail_req:
4e88d6e7 6615 req_set_fail_links(req);
e1e16097
JA
6616 io_put_req(req);
6617 io_req_complete(req, ret);
4fe2c963 6618 }
2550878f 6619 } else if (req->flags & REQ_F_FORCE_ASYNC) {
e8c2bc1f 6620 if (!req->async_data) {
bd2ab18a 6621 ret = io_req_defer_prep(req, sqe);
327d6d96 6622 if (unlikely(ret))
bd2ab18a
PB
6623 goto fail_req;
6624 }
ce35a47a
JA
6625 io_queue_async_work(req);
6626 } else {
c1379e24
PB
6627 if (sqe) {
6628 ret = io_req_prep(req, sqe);
6629 if (unlikely(ret))
6630 goto fail_req;
6631 }
c5eef2b9 6632 __io_queue_sqe(req);
ce35a47a 6633 }
4fe2c963
JL
6634}
6635
c5eef2b9 6636static inline void io_queue_link_head(struct io_kiocb *req)
4fe2c963 6637{
94ae5e77 6638 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
e1e16097
JA
6639 io_put_req(req);
6640 io_req_complete(req, -ECANCELED);
1b4a51b6 6641 } else
c5eef2b9 6642 io_queue_sqe(req, NULL);
4fe2c963
JL
6643}
6644
863e0560
PB
6645struct io_submit_link {
6646 struct io_kiocb *head;
6647 struct io_kiocb *last;
6648};
6649
1d4240cc 6650static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
c5eef2b9 6651 struct io_submit_link *link)
9e645e11 6652{
a197f664 6653 struct io_ring_ctx *ctx = req->ctx;
ef4ff581 6654 int ret;
9e645e11 6655
9e645e11
JA
6656 /*
6657 * If we already have a head request, queue this one for async
6658 * submittal once the head completes. If we don't have a head but
6659 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6660 * submitted sync once the chain is complete. If none of those
6661 * conditions are true (normal request), then just queue it.
6662 */
863e0560
PB
6663 if (link->head) {
6664 struct io_kiocb *head = link->head;
4e88d6e7 6665
8cdf2193
PB
6666 /*
6667 * Taking sequential execution of a link, draining both sides
6668 * of the link also fullfils IOSQE_IO_DRAIN semantics for all
6669 * requests in the link. So, it drains the head and the
6670 * next after the link request. The last one is done via
6671 * drain_next flag to persist the effect across calls.
6672 */
ef4ff581 6673 if (req->flags & REQ_F_IO_DRAIN) {
711be031
PB
6674 head->flags |= REQ_F_IO_DRAIN;
6675 ctx->drain_next = 1;
6676 }
3529d8c2 6677 ret = io_req_defer_prep(req, sqe);
327d6d96 6678 if (unlikely(ret)) {
4e88d6e7 6679 /* fail even hard links since we don't submit */
9d76377f 6680 head->flags |= REQ_F_FAIL_LINK;
1d4240cc 6681 return ret;
2d28390a 6682 }
9d76377f 6683 trace_io_uring_link(ctx, req, head);
f2f87370 6684 link->last->link = req;
863e0560 6685 link->last = req;
32fe525b
PB
6686
6687 /* last request of a link, enqueue the link */
ef4ff581 6688 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
c5eef2b9 6689 io_queue_link_head(head);
863e0560 6690 link->head = NULL;
32fe525b 6691 }
9e645e11 6692 } else {
711be031
PB
6693 if (unlikely(ctx->drain_next)) {
6694 req->flags |= REQ_F_IO_DRAIN;
ef4ff581 6695 ctx->drain_next = 0;
711be031 6696 }
ef4ff581 6697 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
711be031 6698 ret = io_req_defer_prep(req, sqe);
327d6d96 6699 if (unlikely(ret))
711be031 6700 req->flags |= REQ_F_FAIL_LINK;
863e0560
PB
6701 link->head = req;
6702 link->last = req;
711be031 6703 } else {
c5eef2b9 6704 io_queue_sqe(req, sqe);
711be031 6705 }
9e645e11 6706 }
2e6e1fde 6707
1d4240cc 6708 return 0;
9e645e11
JA
6709}
6710
9a56a232
JA
6711/*
6712 * Batched submission is done, ensure local IO is flushed out.
6713 */
ba88ff11
PB
6714static void io_submit_state_end(struct io_submit_state *state,
6715 struct io_ring_ctx *ctx)
9a56a232 6716{
6dd0be1e 6717 if (state->comp.nr)
ba88ff11 6718 io_submit_flush_completions(&state->comp, ctx);
27926b68
JA
6719 if (state->plug_started)
6720 blk_finish_plug(&state->plug);
9f13c35b 6721 io_state_file_put(state);
9a56a232
JA
6722}
6723
6724/*
6725 * Start submission side cache.
6726 */
6727static void io_submit_state_start(struct io_submit_state *state,
ba88ff11 6728 unsigned int max_ios)
9a56a232 6729{
27926b68 6730 state->plug_started = false;
9a56a232
JA
6731 state->ios_left = max_ios;
6732}
6733
2b188cc1
JA
6734static void io_commit_sqring(struct io_ring_ctx *ctx)
6735{
75b28aff 6736 struct io_rings *rings = ctx->rings;
2b188cc1 6737
caf582c6
PB
6738 /*
6739 * Ensure any loads from the SQEs are done at this point,
6740 * since once we write the new head, the application could
6741 * write new data to them.
6742 */
6743 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
6744}
6745
2b188cc1 6746/*
3529d8c2 6747 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
2b188cc1
JA
6748 * that is mapped by userspace. This means that care needs to be taken to
6749 * ensure that reads are stable, as we cannot rely on userspace always
6750 * being a good citizen. If members of the sqe are validated and then later
6751 * used, it's important that those reads are done through READ_ONCE() to
6752 * prevent a re-load down the line.
6753 */
709b302f 6754static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 6755{
75b28aff 6756 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
6757 unsigned head;
6758
6759 /*
6760 * The cached sq head (or cq tail) serves two purposes:
6761 *
6762 * 1) allows us to batch the cost of updating the user visible
6763 * head updates.
6764 * 2) allows the kernel side to track the head on its own, even
6765 * though the application is the one updating it.
6766 */
4fccfcbb 6767 head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
709b302f
PB
6768 if (likely(head < ctx->sq_entries))
6769 return &ctx->sq_sqes[head];
2b188cc1
JA
6770
6771 /* drop invalid entries */
498ccd9e 6772 ctx->cached_sq_dropped++;
ee7d46d9 6773 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
709b302f
PB
6774 return NULL;
6775}
6776
21b55dbc
SG
6777/*
6778 * Check SQE restrictions (opcode and flags).
6779 *
6780 * Returns 'true' if SQE is allowed, 'false' otherwise.
6781 */
6782static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6783 struct io_kiocb *req,
6784 unsigned int sqe_flags)
6785{
6786 if (!ctx->restricted)
6787 return true;
6788
6789 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6790 return false;
6791
6792 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6793 ctx->restrictions.sqe_flags_required)
6794 return false;
6795
6796 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6797 ctx->restrictions.sqe_flags_required))
6798 return false;
6799
6800 return true;
6801}
6802
ef4ff581
PB
6803#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
6804 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
6805 IOSQE_BUFFER_SELECT)
6806
6807static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
258b29a9 6808 const struct io_uring_sqe *sqe)
0553b8bd 6809{
258b29a9 6810 struct io_submit_state *state;
ef4ff581 6811 unsigned int sqe_flags;
5be9ad1e 6812 int id, ret = 0;
ef4ff581 6813
0553b8bd 6814 req->opcode = READ_ONCE(sqe->opcode);
5be9ad1e
PB
6815 /* same numerical values with corresponding REQ_F_*, safe to copy */
6816 req->flags = sqe_flags = READ_ONCE(sqe->flags);
0553b8bd 6817 req->user_data = READ_ONCE(sqe->user_data);
e8c2bc1f 6818 req->async_data = NULL;
0553b8bd
PB
6819 req->file = NULL;
6820 req->ctx = ctx;
f2f87370 6821 req->link = NULL;
269bbe5f 6822 req->fixed_rsrc_refs = NULL;
0553b8bd
PB
6823 /* one is dropped after submission, the other at completion */
6824 refcount_set(&req->refs, 2);
4dd2824d 6825 req->task = current;
0553b8bd 6826 req->result = 0;
ef4ff581 6827
5be9ad1e
PB
6828 /* enforce forwards compatibility on users */
6829 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6830 return -EINVAL;
6831
ef4ff581
PB
6832 if (unlikely(req->opcode >= IORING_OP_LAST))
6833 return -EINVAL;
6834
28cea78a 6835 if (unlikely(io_sq_thread_acquire_mm_files(ctx, req)))
9d8426a0 6836 return -EFAULT;
ef4ff581 6837
21b55dbc
SG
6838 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6839 return -EACCES;
6840
ef4ff581
PB
6841 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6842 !io_op_defs[req->opcode].buffer_select)
6843 return -EOPNOTSUPP;
6844
6845 id = READ_ONCE(sqe->personality);
6846 if (id) {
1e6fa521
JA
6847 struct io_identity *iod;
6848
1e6fa521
JA
6849 iod = idr_find(&ctx->personality_idr, id);
6850 if (unlikely(!iod))
ef4ff581 6851 return -EINVAL;
1e6fa521 6852 refcount_inc(&iod->count);
ec99ca6c
PB
6853
6854 __io_req_init_async(req);
1e6fa521
JA
6855 get_cred(iod->creds);
6856 req->work.identity = iod;
dfead8a8 6857 req->work.flags |= IO_WQ_WORK_CREDS;
ef4ff581
PB
6858 }
6859
258b29a9 6860 state = &ctx->submit_state;
ef4ff581 6861
27926b68
JA
6862 /*
6863 * Plug now if we have more than 1 IO left after this, and the target
6864 * is potentially a read/write to block based storage.
6865 */
6866 if (!state->plug_started && state->ios_left > 1 &&
6867 io_op_defs[req->opcode].plug) {
6868 blk_start_plug(&state->plug);
6869 state->plug_started = true;
6870 }
6871
bd5bbda7
PB
6872 if (io_op_defs[req->opcode].needs_file) {
6873 bool fixed = req->flags & REQ_F_FIXED_FILE;
6874
6875 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
ba13e23f 6876 if (unlikely(!req->file))
bd5bbda7
PB
6877 ret = -EBADF;
6878 }
63ff8223 6879
71b547c0
PB
6880 state->ios_left--;
6881 return ret;
0553b8bd
PB
6882}
6883
0f212204 6884static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
6c271ce2 6885{
863e0560 6886 struct io_submit_link link;
46c4e16a 6887 int submitted = 0;
6c271ce2 6888
c4a2ed72 6889 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8 6890 if (test_bit(0, &ctx->sq_check_overflow)) {
6c503150 6891 if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
ad3eb2c8
JA
6892 return -EBUSY;
6893 }
6c271ce2 6894
ee7d46d9
PB
6895 /* make sure SQ entry isn't read before tail */
6896 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 6897
2b85edfc
PB
6898 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6899 return -EAGAIN;
6c271ce2 6900
d8a6df10 6901 percpu_counter_add(&current->io_uring->inflight, nr);
faf7b51c 6902 refcount_add(nr, &current->usage);
6c271ce2 6903
ba88ff11 6904 io_submit_state_start(&ctx->submit_state, nr);
863e0560 6905 link.head = NULL;
b14cca0c 6906
46c4e16a 6907 while (submitted < nr) {
3529d8c2 6908 const struct io_uring_sqe *sqe;
196be95c 6909 struct io_kiocb *req;
1cb1edb2 6910 int err;
fb5ccc98 6911
258b29a9 6912 req = io_alloc_req(ctx);
196be95c
PB
6913 if (unlikely(!req)) {
6914 if (!submitted)
6915 submitted = -EAGAIN;
fb5ccc98 6916 break;
196be95c 6917 }
4fccfcbb
PB
6918 sqe = io_get_sqe(ctx);
6919 if (unlikely(!sqe)) {
6920 kmem_cache_free(req_cachep, req);
6921 break;
6922 }
d3656344
JA
6923 /* will complete beyond this point, count as submitted */
6924 submitted++;
6925
258b29a9 6926 err = io_init_req(ctx, req, sqe);
ef4ff581 6927 if (unlikely(err)) {
1cb1edb2 6928fail_req:
e1e16097
JA
6929 io_put_req(req);
6930 io_req_complete(req, err);
196be95c
PB
6931 break;
6932 }
fb5ccc98 6933
354420f7 6934 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
2d7e9358 6935 true, ctx->flags & IORING_SETUP_SQPOLL);
c5eef2b9 6936 err = io_submit_sqe(req, sqe, &link);
1d4240cc
PB
6937 if (err)
6938 goto fail_req;
6c271ce2
JA
6939 }
6940
9466f437
PB
6941 if (unlikely(submitted != nr)) {
6942 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
d8a6df10
JA
6943 struct io_uring_task *tctx = current->io_uring;
6944 int unused = nr - ref_used;
9466f437 6945
d8a6df10
JA
6946 percpu_ref_put_many(&ctx->refs, unused);
6947 percpu_counter_sub(&tctx->inflight, unused);
6948 put_task_struct_many(current, unused);
9466f437 6949 }
863e0560 6950 if (link.head)
c5eef2b9 6951 io_queue_link_head(link.head);
ba88ff11 6952 io_submit_state_end(&ctx->submit_state, ctx);
6c271ce2 6953
ae9428ca
PB
6954 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6955 io_commit_sqring(ctx);
6956
6c271ce2
JA
6957 return submitted;
6958}
6959
23b3628e
XW
6960static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6961{
6962 /* Tell userspace we may need a wakeup call */
6963 spin_lock_irq(&ctx->completion_lock);
6964 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6965 spin_unlock_irq(&ctx->completion_lock);
6966}
6967
6968static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6969{
6970 spin_lock_irq(&ctx->completion_lock);
6971 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6972 spin_unlock_irq(&ctx->completion_lock);
6973}
6974
08369246 6975static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6c271ce2 6976{
c8d1ba58 6977 unsigned int to_submit;
bdcd3eab 6978 int ret = 0;
6c271ce2 6979
c8d1ba58 6980 to_submit = io_sqring_entries(ctx);
e95eee2d
JA
6981 /* if we're handling multiple rings, cap submit size for fairness */
6982 if (cap_entries && to_submit > 8)
6983 to_submit = 8;
6984
906a3c6f 6985 if (!list_empty(&ctx->iopoll_list) || to_submit) {
c8d1ba58 6986 unsigned nr_events = 0;
a4c0b3de 6987
c8d1ba58 6988 mutex_lock(&ctx->uring_lock);
906a3c6f 6989 if (!list_empty(&ctx->iopoll_list))
c8d1ba58 6990 io_do_iopoll(ctx, &nr_events, 0);
906a3c6f 6991
d9d05217
PB
6992 if (to_submit && !ctx->sqo_dead &&
6993 likely(!percpu_ref_is_dying(&ctx->refs)))
08369246 6994 ret = io_submit_sqes(ctx, to_submit);
c8d1ba58
JA
6995 mutex_unlock(&ctx->uring_lock);
6996 }
6c271ce2 6997
90554200
JA
6998 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6999 wake_up(&ctx->sqo_sq_wait);
6c271ce2 7000
08369246
XW
7001 return ret;
7002}
6c271ce2 7003
08369246
XW
7004static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
7005{
7006 struct io_ring_ctx *ctx;
7007 unsigned sq_thread_idle = 0;
6c271ce2 7008
08369246
XW
7009 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7010 if (sq_thread_idle < ctx->sq_thread_idle)
7011 sq_thread_idle = ctx->sq_thread_idle;
c8d1ba58 7012 }
c1edbf5f 7013
08369246 7014 sqd->sq_thread_idle = sq_thread_idle;
c8d1ba58 7015}
6c271ce2 7016
69fb2131
JA
7017static void io_sqd_init_new(struct io_sq_data *sqd)
7018{
7019 struct io_ring_ctx *ctx;
7020
7021 while (!list_empty(&sqd->ctx_new_list)) {
7022 ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
69fb2131
JA
7023 list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
7024 complete(&ctx->sq_thread_comp);
7025 }
08369246
XW
7026
7027 io_sqd_update_thread_idle(sqd);
69fb2131
JA
7028}
7029
c8d1ba58
JA
7030static int io_sq_thread(void *data)
7031{
91d8f519 7032 struct cgroup_subsys_state *cur_css = NULL;
28cea78a
JA
7033 struct files_struct *old_files = current->files;
7034 struct nsproxy *old_nsproxy = current->nsproxy;
69fb2131
JA
7035 const struct cred *old_cred = NULL;
7036 struct io_sq_data *sqd = data;
7037 struct io_ring_ctx *ctx;
a0d9205f 7038 unsigned long timeout = 0;
08369246 7039 DEFINE_WAIT(wait);
6c271ce2 7040
28cea78a
JA
7041 task_lock(current);
7042 current->files = NULL;
7043 current->nsproxy = NULL;
7044 task_unlock(current);
6c271ce2 7045
69fb2131 7046 while (!kthread_should_stop()) {
08369246
XW
7047 int ret;
7048 bool cap_entries, sqt_spin, needs_sched;
c1edbf5f
JA
7049
7050 /*
69fb2131
JA
7051 * Any changes to the sqd lists are synchronized through the
7052 * kthread parking. This synchronizes the thread vs users,
7053 * the users are synchronized on the sqd->ctx_lock.
c1edbf5f 7054 */
65b2b213 7055 if (kthread_should_park()) {
69fb2131 7056 kthread_parkme();
65b2b213
XW
7057 /*
7058 * When sq thread is unparked, in case the previous park operation
7059 * comes from io_put_sq_data(), which means that sq thread is going
7060 * to be stopped, so here needs to have a check.
7061 */
7062 if (kthread_should_stop())
7063 break;
7064 }
7143b5ac 7065
08369246 7066 if (unlikely(!list_empty(&sqd->ctx_new_list))) {
69fb2131 7067 io_sqd_init_new(sqd);
08369246
XW
7068 timeout = jiffies + sqd->sq_thread_idle;
7069 }
6c271ce2 7070
08369246 7071 sqt_spin = false;
e95eee2d 7072 cap_entries = !list_is_singular(&sqd->ctx_list);
69fb2131
JA
7073 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7074 if (current->cred != ctx->creds) {
7075 if (old_cred)
7076 revert_creds(old_cred);
7077 old_cred = override_creds(ctx->creds);
bdcd3eab 7078 }
91d8f519 7079 io_sq_thread_associate_blkcg(ctx, &cur_css);
4ea33a97
JA
7080#ifdef CONFIG_AUDIT
7081 current->loginuid = ctx->loginuid;
7082 current->sessionid = ctx->sessionid;
7083#endif
bdcd3eab 7084
08369246
XW
7085 ret = __io_sq_thread(ctx, cap_entries);
7086 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
7087 sqt_spin = true;
6c271ce2 7088
28cea78a 7089 io_sq_thread_drop_mm_files();
69fb2131 7090 }
6c271ce2 7091
08369246 7092 if (sqt_spin || !time_after(jiffies, timeout)) {
c8d1ba58 7093 io_run_task_work();
d434ab6d 7094 io_sq_thread_drop_mm_files();
c8d1ba58 7095 cond_resched();
08369246
XW
7096 if (sqt_spin)
7097 timeout = jiffies + sqd->sq_thread_idle;
7098 continue;
7099 }
7100
08369246
XW
7101 needs_sched = true;
7102 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
7103 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7104 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
7105 !list_empty_careful(&ctx->iopoll_list)) {
7106 needs_sched = false;
7107 break;
7108 }
7109 if (io_sqring_entries(ctx)) {
7110 needs_sched = false;
7111 break;
7112 }
7113 }
7114
8b28fdf2 7115 if (needs_sched && !kthread_should_park()) {
69fb2131
JA
7116 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7117 io_ring_set_wakeup_flag(ctx);
08369246 7118
69fb2131 7119 schedule();
69fb2131
JA
7120 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7121 io_ring_clear_wakeup_flag(ctx);
6c271ce2 7122 }
08369246
XW
7123
7124 finish_wait(&sqd->wait, &wait);
7125 timeout = jiffies + sqd->sq_thread_idle;
6c271ce2
JA
7126 }
7127
4c6e277c 7128 io_run_task_work();
d434ab6d 7129 io_sq_thread_drop_mm_files();
b41e9852 7130
91d8f519
DZ
7131 if (cur_css)
7132 io_sq_thread_unassociate_blkcg();
69fb2131
JA
7133 if (old_cred)
7134 revert_creds(old_cred);
06058632 7135
28cea78a
JA
7136 task_lock(current);
7137 current->files = old_files;
7138 current->nsproxy = old_nsproxy;
7139 task_unlock(current);
7140
2bbcd6d3 7141 kthread_parkme();
06058632 7142
6c271ce2
JA
7143 return 0;
7144}
7145
bda52162
JA
7146struct io_wait_queue {
7147 struct wait_queue_entry wq;
7148 struct io_ring_ctx *ctx;
7149 unsigned to_wait;
7150 unsigned nr_timeouts;
7151};
7152
6c503150 7153static inline bool io_should_wake(struct io_wait_queue *iowq)
bda52162
JA
7154{
7155 struct io_ring_ctx *ctx = iowq->ctx;
7156
7157 /*
d195a66e 7158 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
7159 * started waiting. For timeouts, we always want to return to userspace,
7160 * regardless of event count.
7161 */
6c503150 7162 return io_cqring_events(ctx) >= iowq->to_wait ||
bda52162
JA
7163 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
7164}
7165
7166static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7167 int wake_flags, void *key)
7168{
7169 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7170 wq);
7171
6c503150
PB
7172 /*
7173 * Cannot safely flush overflowed CQEs from here, ensure we wake up
7174 * the task, and the next invocation will do it.
7175 */
7176 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
7177 return autoremove_wake_function(curr, mode, wake_flags, key);
7178 return -1;
bda52162
JA
7179}
7180
af9c1a44
JA
7181static int io_run_task_work_sig(void)
7182{
7183 if (io_run_task_work())
7184 return 1;
7185 if (!signal_pending(current))
7186 return 0;
792ee0f6
JA
7187 if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
7188 return -ERESTARTSYS;
af9c1a44
JA
7189 return -EINTR;
7190}
7191
eeb60b9a
PB
7192/* when returns >0, the caller should retry */
7193static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7194 struct io_wait_queue *iowq,
7195 signed long *timeout)
7196{
7197 int ret;
7198
7199 /* make sure we run task_work before checking for signals */
7200 ret = io_run_task_work_sig();
7201 if (ret || io_should_wake(iowq))
7202 return ret;
7203 /* let the caller flush overflows, retry */
7204 if (test_bit(0, &ctx->cq_check_overflow))
7205 return 1;
7206
7207 *timeout = schedule_timeout(*timeout);
7208 return !*timeout ? -ETIME : 1;
7209}
7210
2b188cc1
JA
7211/*
7212 * Wait until events become available, if we don't already have some. The
7213 * application must reap them itself, as they reside on the shared cq ring.
7214 */
7215static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
c73ebb68
HX
7216 const sigset_t __user *sig, size_t sigsz,
7217 struct __kernel_timespec __user *uts)
2b188cc1 7218{
bda52162
JA
7219 struct io_wait_queue iowq = {
7220 .wq = {
7221 .private = current,
7222 .func = io_wake_function,
7223 .entry = LIST_HEAD_INIT(iowq.wq.entry),
7224 },
7225 .ctx = ctx,
7226 .to_wait = min_events,
7227 };
75b28aff 7228 struct io_rings *rings = ctx->rings;
c1d5a224
PB
7229 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7230 int ret;
2b188cc1 7231
b41e9852 7232 do {
6c503150
PB
7233 io_cqring_overflow_flush(ctx, false, NULL, NULL);
7234 if (io_cqring_events(ctx) >= min_events)
b41e9852 7235 return 0;
4c6e277c 7236 if (!io_run_task_work())
b41e9852 7237 break;
b41e9852 7238 } while (1);
2b188cc1
JA
7239
7240 if (sig) {
9e75ad5d
AB
7241#ifdef CONFIG_COMPAT
7242 if (in_compat_syscall())
7243 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 7244 sigsz);
9e75ad5d
AB
7245 else
7246#endif
b772434b 7247 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 7248
2b188cc1
JA
7249 if (ret)
7250 return ret;
7251 }
7252
c73ebb68 7253 if (uts) {
c1d5a224
PB
7254 struct timespec64 ts;
7255
c73ebb68
HX
7256 if (get_timespec64(&ts, uts))
7257 return -EFAULT;
7258 timeout = timespec64_to_jiffies(&ts);
7259 }
7260
bda52162 7261 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 7262 trace_io_uring_cqring_wait(ctx, min_events);
bda52162 7263 do {
6c503150 7264 io_cqring_overflow_flush(ctx, false, NULL, NULL);
bda52162
JA
7265 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7266 TASK_INTERRUPTIBLE);
eeb60b9a
PB
7267 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
7268 finish_wait(&ctx->wait, &iowq.wq);
7269 } while (ret > 0);
bda52162 7270
b7db41c9 7271 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 7272
75b28aff 7273 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
7274}
7275
6b06314c
JA
7276static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7277{
7278#if defined(CONFIG_UNIX)
7279 if (ctx->ring_sock) {
7280 struct sock *sock = ctx->ring_sock->sk;
7281 struct sk_buff *skb;
7282
7283 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7284 kfree_skb(skb);
7285 }
7286#else
7287 int i;
7288
65e19f54
JA
7289 for (i = 0; i < ctx->nr_user_files; i++) {
7290 struct file *file;
7291
7292 file = io_file_from_index(ctx, i);
7293 if (file)
7294 fput(file);
7295 }
6b06314c
JA
7296#endif
7297}
7298
00835dce 7299static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
05f3fb3c 7300{
269bbe5f 7301 struct fixed_rsrc_data *data;
05f3fb3c 7302
269bbe5f 7303 data = container_of(ref, struct fixed_rsrc_data, refs);
05f3fb3c
JA
7304 complete(&data->done);
7305}
7306
2a63b2d9
BM
7307static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
7308{
7309 spin_lock_bh(&ctx->rsrc_ref_lock);
7310}
7311
7312static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
7313{
7314 spin_unlock_bh(&ctx->rsrc_ref_lock);
7315}
7316
d67d2263
BM
7317static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
7318 struct fixed_rsrc_data *rsrc_data,
269bbe5f 7319 struct fixed_rsrc_ref_node *ref_node)
1642b445 7320{
2a63b2d9 7321 io_rsrc_ref_lock(ctx);
269bbe5f 7322 rsrc_data->node = ref_node;
d67d2263 7323 list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
2a63b2d9 7324 io_rsrc_ref_unlock(ctx);
269bbe5f 7325 percpu_ref_get(&rsrc_data->refs);
1642b445
PB
7326}
7327
d7954b2b
BM
7328static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
7329 struct io_ring_ctx *ctx,
7330 struct fixed_rsrc_ref_node *backup_node)
6b06314c 7331{
d7954b2b 7332 struct fixed_rsrc_ref_node *ref_node;
1ffc5422 7333 int ret;
65e19f54 7334
2a63b2d9 7335 io_rsrc_ref_lock(ctx);
1e5d770b 7336 ref_node = data->node;
2a63b2d9 7337 io_rsrc_ref_unlock(ctx);
05589553
XW
7338 if (ref_node)
7339 percpu_ref_kill(&ref_node->refs);
7340
7341 percpu_ref_kill(&data->refs);
7342
7343 /* wait for all refs nodes to complete */
269bbe5f 7344 flush_delayed_work(&ctx->rsrc_put_work);
1ffc5422
PB
7345 do {
7346 ret = wait_for_completion_interruptible(&data->done);
7347 if (!ret)
7348 break;
7349 ret = io_run_task_work_sig();
7350 if (ret < 0) {
7351 percpu_ref_resurrect(&data->refs);
7352 reinit_completion(&data->done);
d67d2263 7353 io_sqe_rsrc_set_node(ctx, data, backup_node);
1ffc5422
PB
7354 return ret;
7355 }
7356 } while (1);
05f3fb3c 7357
d7954b2b
BM
7358 destroy_fixed_rsrc_ref_node(backup_node);
7359 return 0;
7360}
7361
1ad555c6
BM
7362static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
7363{
7364 struct fixed_rsrc_data *data;
7365
7366 data = kzalloc(sizeof(*data), GFP_KERNEL);
7367 if (!data)
7368 return NULL;
7369
00835dce 7370 if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
1ad555c6
BM
7371 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
7372 kfree(data);
7373 return NULL;
7374 }
7375 data->ctx = ctx;
7376 init_completion(&data->done);
7377 return data;
7378}
7379
7380static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
7381{
7382 percpu_ref_exit(&data->refs);
7383 kfree(data->table);
7384 kfree(data);
7385}
7386
d7954b2b
BM
7387static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7388{
7389 struct fixed_rsrc_data *data = ctx->file_data;
7390 struct fixed_rsrc_ref_node *backup_node;
7391 unsigned nr_tables, i;
7392 int ret;
7393
7394 if (!data)
7395 return -ENXIO;
7396 backup_node = alloc_fixed_rsrc_ref_node(ctx);
7397 if (!backup_node)
7398 return -ENOMEM;
7399 init_fixed_file_ref_node(ctx, backup_node);
7400
7401 ret = io_rsrc_ref_quiesce(data, ctx, backup_node);
7402 if (ret)
7403 return ret;
7404
6b06314c 7405 __io_sqe_files_unregister(ctx);
65e19f54
JA
7406 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7407 for (i = 0; i < nr_tables; i++)
05f3fb3c 7408 kfree(data->table[i].files);
1ad555c6 7409 free_fixed_rsrc_data(data);
05f3fb3c 7410 ctx->file_data = NULL;
6b06314c
JA
7411 ctx->nr_user_files = 0;
7412 return 0;
7413}
7414
534ca6d6 7415static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 7416{
534ca6d6 7417 if (refcount_dec_and_test(&sqd->refs)) {
2bbcd6d3
RP
7418 /*
7419 * The park is a bit of a work-around, without it we get
7420 * warning spews on shutdown with SQPOLL set and affinity
7421 * set to a single CPU.
7422 */
534ca6d6
JA
7423 if (sqd->thread) {
7424 kthread_park(sqd->thread);
7425 kthread_stop(sqd->thread);
7426 }
7427
7428 kfree(sqd);
7429 }
7430}
7431
aa06165d
JA
7432static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7433{
7434 struct io_ring_ctx *ctx_attach;
7435 struct io_sq_data *sqd;
7436 struct fd f;
7437
7438 f = fdget(p->wq_fd);
7439 if (!f.file)
7440 return ERR_PTR(-ENXIO);
7441 if (f.file->f_op != &io_uring_fops) {
7442 fdput(f);
7443 return ERR_PTR(-EINVAL);
7444 }
7445
7446 ctx_attach = f.file->private_data;
7447 sqd = ctx_attach->sq_data;
7448 if (!sqd) {
7449 fdput(f);
7450 return ERR_PTR(-EINVAL);
7451 }
7452
7453 refcount_inc(&sqd->refs);
7454 fdput(f);
7455 return sqd;
7456}
7457
534ca6d6
JA
7458static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7459{
7460 struct io_sq_data *sqd;
7461
aa06165d
JA
7462 if (p->flags & IORING_SETUP_ATTACH_WQ)
7463 return io_attach_sq_data(p);
7464
534ca6d6
JA
7465 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7466 if (!sqd)
7467 return ERR_PTR(-ENOMEM);
7468
7469 refcount_set(&sqd->refs, 1);
69fb2131
JA
7470 INIT_LIST_HEAD(&sqd->ctx_list);
7471 INIT_LIST_HEAD(&sqd->ctx_new_list);
7472 mutex_init(&sqd->ctx_lock);
7473 mutex_init(&sqd->lock);
534ca6d6
JA
7474 init_waitqueue_head(&sqd->wait);
7475 return sqd;
7476}
7477
69fb2131
JA
7478static void io_sq_thread_unpark(struct io_sq_data *sqd)
7479 __releases(&sqd->lock)
7480{
7481 if (!sqd->thread)
7482 return;
7483 kthread_unpark(sqd->thread);
7484 mutex_unlock(&sqd->lock);
7485}
7486
7487static void io_sq_thread_park(struct io_sq_data *sqd)
7488 __acquires(&sqd->lock)
7489{
7490 if (!sqd->thread)
7491 return;
7492 mutex_lock(&sqd->lock);
7493 kthread_park(sqd->thread);
7494}
7495
534ca6d6
JA
7496static void io_sq_thread_stop(struct io_ring_ctx *ctx)
7497{
7498 struct io_sq_data *sqd = ctx->sq_data;
7499
7500 if (sqd) {
7501 if (sqd->thread) {
7502 /*
7503 * We may arrive here from the error branch in
7504 * io_sq_offload_create() where the kthread is created
7505 * without being waked up, thus wake it up now to make
7506 * sure the wait will complete.
7507 */
7508 wake_up_process(sqd->thread);
7509 wait_for_completion(&ctx->sq_thread_comp);
69fb2131
JA
7510
7511 io_sq_thread_park(sqd);
7512 }
7513
7514 mutex_lock(&sqd->ctx_lock);
7515 list_del(&ctx->sqd_list);
08369246 7516 io_sqd_update_thread_idle(sqd);
69fb2131
JA
7517 mutex_unlock(&sqd->ctx_lock);
7518
08369246 7519 if (sqd->thread)
69fb2131 7520 io_sq_thread_unpark(sqd);
534ca6d6
JA
7521
7522 io_put_sq_data(sqd);
7523 ctx->sq_data = NULL;
6c271ce2
JA
7524 }
7525}
7526
6b06314c
JA
7527static void io_finish_async(struct io_ring_ctx *ctx)
7528{
6c271ce2
JA
7529 io_sq_thread_stop(ctx);
7530
561fb04a
JA
7531 if (ctx->io_wq) {
7532 io_wq_destroy(ctx->io_wq);
7533 ctx->io_wq = NULL;
6b06314c
JA
7534 }
7535}
7536
7537#if defined(CONFIG_UNIX)
6b06314c
JA
7538/*
7539 * Ensure the UNIX gc is aware of our file set, so we are certain that
7540 * the io_uring can be safely unregistered on process exit, even if we have
7541 * loops in the file referencing.
7542 */
7543static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7544{
7545 struct sock *sk = ctx->ring_sock->sk;
7546 struct scm_fp_list *fpl;
7547 struct sk_buff *skb;
08a45173 7548 int i, nr_files;
6b06314c 7549
6b06314c
JA
7550 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7551 if (!fpl)
7552 return -ENOMEM;
7553
7554 skb = alloc_skb(0, GFP_KERNEL);
7555 if (!skb) {
7556 kfree(fpl);
7557 return -ENOMEM;
7558 }
7559
7560 skb->sk = sk;
6b06314c 7561
08a45173 7562 nr_files = 0;
6b06314c
JA
7563 fpl->user = get_uid(ctx->user);
7564 for (i = 0; i < nr; i++) {
65e19f54
JA
7565 struct file *file = io_file_from_index(ctx, i + offset);
7566
7567 if (!file)
08a45173 7568 continue;
65e19f54 7569 fpl->fp[nr_files] = get_file(file);
08a45173
JA
7570 unix_inflight(fpl->user, fpl->fp[nr_files]);
7571 nr_files++;
6b06314c
JA
7572 }
7573
08a45173
JA
7574 if (nr_files) {
7575 fpl->max = SCM_MAX_FD;
7576 fpl->count = nr_files;
7577 UNIXCB(skb).fp = fpl;
05f3fb3c 7578 skb->destructor = unix_destruct_scm;
08a45173
JA
7579 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7580 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 7581
08a45173
JA
7582 for (i = 0; i < nr_files; i++)
7583 fput(fpl->fp[i]);
7584 } else {
7585 kfree_skb(skb);
7586 kfree(fpl);
7587 }
6b06314c
JA
7588
7589 return 0;
7590}
7591
7592/*
7593 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7594 * causes regular reference counting to break down. We rely on the UNIX
7595 * garbage collection to take care of this problem for us.
7596 */
7597static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7598{
7599 unsigned left, total;
7600 int ret = 0;
7601
7602 total = 0;
7603 left = ctx->nr_user_files;
7604 while (left) {
7605 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
7606
7607 ret = __io_sqe_files_scm(ctx, this_files, total);
7608 if (ret)
7609 break;
7610 left -= this_files;
7611 total += this_files;
7612 }
7613
7614 if (!ret)
7615 return 0;
7616
7617 while (total < ctx->nr_user_files) {
65e19f54
JA
7618 struct file *file = io_file_from_index(ctx, total);
7619
7620 if (file)
7621 fput(file);
6b06314c
JA
7622 total++;
7623 }
7624
7625 return ret;
7626}
7627#else
7628static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7629{
7630 return 0;
7631}
7632#endif
7633
269bbe5f 7634static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
5398ae69 7635 unsigned nr_tables, unsigned nr_files)
65e19f54
JA
7636{
7637 int i;
7638
7639 for (i = 0; i < nr_tables; i++) {
269bbe5f 7640 struct fixed_rsrc_table *table = &file_data->table[i];
65e19f54
JA
7641 unsigned this_files;
7642
7643 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7644 table->files = kcalloc(this_files, sizeof(struct file *),
7645 GFP_KERNEL);
7646 if (!table->files)
7647 break;
7648 nr_files -= this_files;
7649 }
7650
7651 if (i == nr_tables)
7652 return 0;
7653
7654 for (i = 0; i < nr_tables; i++) {
269bbe5f 7655 struct fixed_rsrc_table *table = &file_data->table[i];
65e19f54
JA
7656 kfree(table->files);
7657 }
7658 return 1;
7659}
7660
50238531 7661static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
05f3fb3c 7662{
50238531 7663 struct file *file = prsrc->file;
05f3fb3c
JA
7664#if defined(CONFIG_UNIX)
7665 struct sock *sock = ctx->ring_sock->sk;
7666 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7667 struct sk_buff *skb;
7668 int i;
7669
7670 __skb_queue_head_init(&list);
7671
7672 /*
7673 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7674 * remove this entry and rearrange the file array.
7675 */
7676 skb = skb_dequeue(head);
7677 while (skb) {
7678 struct scm_fp_list *fp;
7679
7680 fp = UNIXCB(skb).fp;
7681 for (i = 0; i < fp->count; i++) {
7682 int left;
7683
7684 if (fp->fp[i] != file)
7685 continue;
7686
7687 unix_notinflight(fp->user, fp->fp[i]);
7688 left = fp->count - 1 - i;
7689 if (left) {
7690 memmove(&fp->fp[i], &fp->fp[i + 1],
7691 left * sizeof(struct file *));
7692 }
7693 fp->count--;
7694 if (!fp->count) {
7695 kfree_skb(skb);
7696 skb = NULL;
7697 } else {
7698 __skb_queue_tail(&list, skb);
7699 }
7700 fput(file);
7701 file = NULL;
7702 break;
7703 }
7704
7705 if (!file)
7706 break;
7707
7708 __skb_queue_tail(&list, skb);
7709
7710 skb = skb_dequeue(head);
7711 }
7712
7713 if (skb_peek(&list)) {
7714 spin_lock_irq(&head->lock);
7715 while ((skb = __skb_dequeue(&list)) != NULL)
7716 __skb_queue_tail(head, skb);
7717 spin_unlock_irq(&head->lock);
7718 }
7719#else
7720 fput(file);
7721#endif
7722}
7723
269bbe5f 7724static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
65e19f54 7725{
269bbe5f
BM
7726 struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
7727 struct io_ring_ctx *ctx = rsrc_data->ctx;
7728 struct io_rsrc_put *prsrc, *tmp;
05589553 7729
269bbe5f
BM
7730 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7731 list_del(&prsrc->list);
50238531 7732 ref_node->rsrc_put(ctx, prsrc);
269bbe5f 7733 kfree(prsrc);
65e19f54 7734 }
05589553 7735
05589553
XW
7736 percpu_ref_exit(&ref_node->refs);
7737 kfree(ref_node);
269bbe5f 7738 percpu_ref_put(&rsrc_data->refs);
2faf852d 7739}
65e19f54 7740
269bbe5f 7741static void io_rsrc_put_work(struct work_struct *work)
4a38aed2
JA
7742{
7743 struct io_ring_ctx *ctx;
7744 struct llist_node *node;
7745
269bbe5f
BM
7746 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7747 node = llist_del_all(&ctx->rsrc_put_llist);
4a38aed2
JA
7748
7749 while (node) {
269bbe5f 7750 struct fixed_rsrc_ref_node *ref_node;
4a38aed2
JA
7751 struct llist_node *next = node->next;
7752
269bbe5f
BM
7753 ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
7754 __io_rsrc_put_work(ref_node);
4a38aed2
JA
7755 node = next;
7756 }
7757}
7758
ea64ec02
PB
7759static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
7760 unsigned i)
7761{
7762 struct fixed_rsrc_table *table;
7763
7764 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7765 return &table->files[i & IORING_FILE_TABLE_MASK];
7766}
7767
00835dce 7768static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
2faf852d 7769{
269bbe5f
BM
7770 struct fixed_rsrc_ref_node *ref_node;
7771 struct fixed_rsrc_data *data;
4a38aed2 7772 struct io_ring_ctx *ctx;
e297822b 7773 bool first_add = false;
4a38aed2 7774 int delay = HZ;
65e19f54 7775
269bbe5f
BM
7776 ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
7777 data = ref_node->rsrc_data;
e297822b
PB
7778 ctx = data->ctx;
7779
2a63b2d9 7780 io_rsrc_ref_lock(ctx);
e297822b
PB
7781 ref_node->done = true;
7782
d67d2263
BM
7783 while (!list_empty(&ctx->rsrc_ref_list)) {
7784 ref_node = list_first_entry(&ctx->rsrc_ref_list,
269bbe5f 7785 struct fixed_rsrc_ref_node, node);
e297822b
PB
7786 /* recycle ref nodes in order */
7787 if (!ref_node->done)
7788 break;
7789 list_del(&ref_node->node);
269bbe5f 7790 first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
e297822b 7791 }
2a63b2d9 7792 io_rsrc_ref_unlock(ctx);
05589553 7793
e297822b 7794 if (percpu_ref_is_dying(&data->refs))
4a38aed2 7795 delay = 0;
05589553 7796
4a38aed2 7797 if (!delay)
269bbe5f 7798 mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
4a38aed2 7799 else if (first_add)
269bbe5f 7800 queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
05f3fb3c 7801}
65e19f54 7802
6802535d 7803static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
05589553 7804 struct io_ring_ctx *ctx)
05f3fb3c 7805{
269bbe5f 7806 struct fixed_rsrc_ref_node *ref_node;
05f3fb3c 7807
05589553
XW
7808 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7809 if (!ref_node)
3e2224c5 7810 return NULL;
05f3fb3c 7811
00835dce 7812 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
05589553
XW
7813 0, GFP_KERNEL)) {
7814 kfree(ref_node);
3e2224c5 7815 return NULL;
05589553
XW
7816 }
7817 INIT_LIST_HEAD(&ref_node->node);
269bbe5f 7818 INIT_LIST_HEAD(&ref_node->rsrc_list);
6802535d
BM
7819 ref_node->done = false;
7820 return ref_node;
7821}
7822
bc9744cd
PB
7823static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
7824 struct fixed_rsrc_ref_node *ref_node)
6802535d 7825{
269bbe5f 7826 ref_node->rsrc_data = ctx->file_data;
50238531 7827 ref_node->rsrc_put = io_ring_file_put;
05589553
XW
7828}
7829
269bbe5f 7830static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
05589553
XW
7831{
7832 percpu_ref_exit(&ref_node->refs);
7833 kfree(ref_node);
65e19f54
JA
7834}
7835
ea64ec02 7836
6b06314c
JA
7837static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7838 unsigned nr_args)
7839{
7840 __s32 __user *fds = (__s32 __user *) arg;
600cf3f8 7841 unsigned nr_tables, i;
05f3fb3c 7842 struct file *file;
600cf3f8 7843 int fd, ret = -ENOMEM;
269bbe5f
BM
7844 struct fixed_rsrc_ref_node *ref_node;
7845 struct fixed_rsrc_data *file_data;
6b06314c 7846
05f3fb3c 7847 if (ctx->file_data)
6b06314c
JA
7848 return -EBUSY;
7849 if (!nr_args)
7850 return -EINVAL;
7851 if (nr_args > IORING_MAX_FIXED_FILES)
7852 return -EMFILE;
7853
1ad555c6 7854 file_data = alloc_fixed_rsrc_data(ctx);
5398ae69 7855 if (!file_data)
05f3fb3c 7856 return -ENOMEM;
13770a71 7857 ctx->file_data = file_data;
05f3fb3c 7858
65e19f54 7859 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
035fbafc 7860 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
5398ae69 7861 GFP_KERNEL);
600cf3f8
PB
7862 if (!file_data->table)
7863 goto out_free;
05f3fb3c 7864
600cf3f8 7865 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
1ad555c6 7866 goto out_free;
65e19f54 7867
08a45173 7868 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
600cf3f8
PB
7869 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7870 ret = -EFAULT;
7871 goto out_fput;
7872 }
08a45173 7873 /* allow sparse sets */
600cf3f8 7874 if (fd == -1)
08a45173 7875 continue;
6b06314c 7876
05f3fb3c 7877 file = fget(fd);
6b06314c 7878 ret = -EBADF;
05f3fb3c 7879 if (!file)
600cf3f8 7880 goto out_fput;
05f3fb3c 7881
6b06314c
JA
7882 /*
7883 * Don't allow io_uring instances to be registered. If UNIX
7884 * isn't enabled, then this causes a reference cycle and this
7885 * instance can never get freed. If UNIX is enabled we'll
7886 * handle it just fine, but there's still no point in allowing
7887 * a ring fd as it doesn't support regular read/write anyway.
7888 */
05f3fb3c
JA
7889 if (file->f_op == &io_uring_fops) {
7890 fput(file);
600cf3f8 7891 goto out_fput;
6b06314c 7892 }
ea64ec02 7893 *io_fixed_file_slot(file_data, i) = file;
6b06314c
JA
7894 }
7895
6b06314c 7896 ret = io_sqe_files_scm(ctx);
05589553 7897 if (ret) {
6b06314c 7898 io_sqe_files_unregister(ctx);
05589553
XW
7899 return ret;
7900 }
6b06314c 7901
bc9744cd 7902 ref_node = alloc_fixed_rsrc_ref_node(ctx);
3e2224c5 7903 if (!ref_node) {
05589553 7904 io_sqe_files_unregister(ctx);
3e2224c5 7905 return -ENOMEM;
05589553 7906 }
bc9744cd 7907 init_fixed_file_ref_node(ctx, ref_node);
05589553 7908
d67d2263 7909 io_sqe_rsrc_set_node(ctx, file_data, ref_node);
6b06314c 7910 return ret;
600cf3f8
PB
7911out_fput:
7912 for (i = 0; i < ctx->nr_user_files; i++) {
7913 file = io_file_from_index(ctx, i);
7914 if (file)
7915 fput(file);
7916 }
7917 for (i = 0; i < nr_tables; i++)
7918 kfree(file_data->table[i].files);
7919 ctx->nr_user_files = 0;
600cf3f8 7920out_free:
1ad555c6 7921 free_fixed_rsrc_data(ctx->file_data);
55cbc256 7922 ctx->file_data = NULL;
6b06314c
JA
7923 return ret;
7924}
7925
c3a31e60
JA
7926static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7927 int index)
7928{
7929#if defined(CONFIG_UNIX)
7930 struct sock *sock = ctx->ring_sock->sk;
7931 struct sk_buff_head *head = &sock->sk_receive_queue;
7932 struct sk_buff *skb;
7933
7934 /*
7935 * See if we can merge this file into an existing skb SCM_RIGHTS
7936 * file set. If there's no room, fall back to allocating a new skb
7937 * and filling it in.
7938 */
7939 spin_lock_irq(&head->lock);
7940 skb = skb_peek(head);
7941 if (skb) {
7942 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7943
7944 if (fpl->count < SCM_MAX_FD) {
7945 __skb_unlink(skb, head);
7946 spin_unlock_irq(&head->lock);
7947 fpl->fp[fpl->count] = get_file(file);
7948 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7949 fpl->count++;
7950 spin_lock_irq(&head->lock);
7951 __skb_queue_head(head, skb);
7952 } else {
7953 skb = NULL;
7954 }
7955 }
7956 spin_unlock_irq(&head->lock);
7957
7958 if (skb) {
7959 fput(file);
7960 return 0;
7961 }
7962
7963 return __io_sqe_files_scm(ctx, 1, index);
7964#else
7965 return 0;
7966#endif
7967}
7968
50238531 7969static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
05f3fb3c 7970{
269bbe5f
BM
7971 struct io_rsrc_put *prsrc;
7972 struct fixed_rsrc_ref_node *ref_node = data->node;
05f3fb3c 7973
269bbe5f
BM
7974 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7975 if (!prsrc)
a5318d3c 7976 return -ENOMEM;
05f3fb3c 7977
50238531 7978 prsrc->rsrc = rsrc;
269bbe5f 7979 list_add(&prsrc->list, &ref_node->rsrc_list);
05589553 7980
a5318d3c 7981 return 0;
05f3fb3c
JA
7982}
7983
269bbe5f
BM
7984static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
7985 struct file *file)
7986{
50238531 7987 return io_queue_rsrc_removal(data, (void *)file);
269bbe5f
BM
7988}
7989
05f3fb3c 7990static int __io_sqe_files_update(struct io_ring_ctx *ctx,
269bbe5f 7991 struct io_uring_rsrc_update *up,
05f3fb3c
JA
7992 unsigned nr_args)
7993{
269bbe5f
BM
7994 struct fixed_rsrc_data *data = ctx->file_data;
7995 struct fixed_rsrc_ref_node *ref_node;
ea64ec02 7996 struct file *file, **file_slot;
c3a31e60
JA
7997 __s32 __user *fds;
7998 int fd, i, err;
7999 __u32 done;
05589553 8000 bool needs_switch = false;
c3a31e60 8001
05f3fb3c 8002 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
8003 return -EOVERFLOW;
8004 if (done > ctx->nr_user_files)
8005 return -EINVAL;
8006
bc9744cd 8007 ref_node = alloc_fixed_rsrc_ref_node(ctx);
3e2224c5
MWO
8008 if (!ref_node)
8009 return -ENOMEM;
bc9744cd 8010 init_fixed_file_ref_node(ctx, ref_node);
05589553 8011
269bbe5f 8012 fds = u64_to_user_ptr(up->data);
67973b93 8013 for (done = 0; done < nr_args; done++) {
c3a31e60
JA
8014 err = 0;
8015 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
8016 err = -EFAULT;
8017 break;
8018 }
4e0377a1 8019 if (fd == IORING_REGISTER_FILES_SKIP)
8020 continue;
8021
67973b93 8022 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
ea64ec02
PB
8023 file_slot = io_fixed_file_slot(ctx->file_data, i);
8024
8025 if (*file_slot) {
8026 err = io_queue_file_removal(data, *file_slot);
a5318d3c
HD
8027 if (err)
8028 break;
ea64ec02 8029 *file_slot = NULL;
05589553 8030 needs_switch = true;
c3a31e60
JA
8031 }
8032 if (fd != -1) {
c3a31e60
JA
8033 file = fget(fd);
8034 if (!file) {
8035 err = -EBADF;
8036 break;
8037 }
8038 /*
8039 * Don't allow io_uring instances to be registered. If
8040 * UNIX isn't enabled, then this causes a reference
8041 * cycle and this instance can never get freed. If UNIX
8042 * is enabled we'll handle it just fine, but there's
8043 * still no point in allowing a ring fd as it doesn't
8044 * support regular read/write anyway.
8045 */
8046 if (file->f_op == &io_uring_fops) {
8047 fput(file);
8048 err = -EBADF;
8049 break;
8050 }
e68a3ff8 8051 *file_slot = file;
c3a31e60 8052 err = io_sqe_file_register(ctx, file, i);
f3bd9dae 8053 if (err) {
e68a3ff8 8054 *file_slot = NULL;
f3bd9dae 8055 fput(file);
c3a31e60 8056 break;
f3bd9dae 8057 }
c3a31e60 8058 }
05f3fb3c
JA
8059 }
8060
05589553 8061 if (needs_switch) {
b2e96852 8062 percpu_ref_kill(&data->node->refs);
d67d2263 8063 io_sqe_rsrc_set_node(ctx, data, ref_node);
05589553 8064 } else
269bbe5f 8065 destroy_fixed_rsrc_ref_node(ref_node);
c3a31e60
JA
8066
8067 return done ? done : err;
8068}
05589553 8069
05f3fb3c
JA
8070static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
8071 unsigned nr_args)
8072{
269bbe5f 8073 struct io_uring_rsrc_update up;
05f3fb3c
JA
8074
8075 if (!ctx->file_data)
8076 return -ENXIO;
8077 if (!nr_args)
8078 return -EINVAL;
8079 if (copy_from_user(&up, arg, sizeof(up)))
8080 return -EFAULT;
8081 if (up.resv)
8082 return -EINVAL;
8083
8084 return __io_sqe_files_update(ctx, &up, nr_args);
8085}
c3a31e60 8086
5280f7e5 8087static struct io_wq_work *io_free_work(struct io_wq_work *work)
7d723065
JA
8088{
8089 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8090
5280f7e5
PB
8091 req = io_put_req_find_next(req);
8092 return req ? &req->work : NULL;
7d723065
JA
8093}
8094
24369c2e
PB
8095static int io_init_wq_offload(struct io_ring_ctx *ctx,
8096 struct io_uring_params *p)
8097{
8098 struct io_wq_data data;
8099 struct fd f;
8100 struct io_ring_ctx *ctx_attach;
8101 unsigned int concurrency;
8102 int ret = 0;
8103
8104 data.user = ctx->user;
e9fd9396 8105 data.free_work = io_free_work;
f5fa38c5 8106 data.do_work = io_wq_submit_work;
24369c2e
PB
8107
8108 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
8109 /* Do QD, or 4 * CPUS, whatever is smallest */
8110 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
8111
8112 ctx->io_wq = io_wq_create(concurrency, &data);
8113 if (IS_ERR(ctx->io_wq)) {
8114 ret = PTR_ERR(ctx->io_wq);
8115 ctx->io_wq = NULL;
8116 }
8117 return ret;
8118 }
8119
8120 f = fdget(p->wq_fd);
8121 if (!f.file)
8122 return -EBADF;
8123
8124 if (f.file->f_op != &io_uring_fops) {
8125 ret = -EINVAL;
8126 goto out_fput;
8127 }
8128
8129 ctx_attach = f.file->private_data;
8130 /* @io_wq is protected by holding the fd */
8131 if (!io_wq_get(ctx_attach->io_wq, &data)) {
8132 ret = -EINVAL;
8133 goto out_fput;
8134 }
8135
8136 ctx->io_wq = ctx_attach->io_wq;
8137out_fput:
8138 fdput(f);
8139 return ret;
8140}
8141
0f212204
JA
8142static int io_uring_alloc_task_context(struct task_struct *task)
8143{
8144 struct io_uring_task *tctx;
d8a6df10 8145 int ret;
0f212204
JA
8146
8147 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
8148 if (unlikely(!tctx))
8149 return -ENOMEM;
8150
d8a6df10
JA
8151 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
8152 if (unlikely(ret)) {
8153 kfree(tctx);
8154 return ret;
8155 }
8156
0f212204
JA
8157 xa_init(&tctx->xa);
8158 init_waitqueue_head(&tctx->wait);
8159 tctx->last = NULL;
fdaf083c
JA
8160 atomic_set(&tctx->in_idle, 0);
8161 tctx->sqpoll = false;
500a373d
JA
8162 io_init_identity(&tctx->__identity);
8163 tctx->identity = &tctx->__identity;
0f212204 8164 task->io_uring = tctx;
7cbf1722
JA
8165 spin_lock_init(&tctx->task_lock);
8166 INIT_WQ_LIST(&tctx->task_list);
8167 tctx->task_state = 0;
8168 init_task_work(&tctx->task_work, tctx_task_work);
0f212204
JA
8169 return 0;
8170}
8171
8172void __io_uring_free(struct task_struct *tsk)
8173{
8174 struct io_uring_task *tctx = tsk->io_uring;
8175
8176 WARN_ON_ONCE(!xa_empty(&tctx->xa));
500a373d
JA
8177 WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
8178 if (tctx->identity != &tctx->__identity)
8179 kfree(tctx->identity);
d8a6df10 8180 percpu_counter_destroy(&tctx->inflight);
0f212204
JA
8181 kfree(tctx);
8182 tsk->io_uring = NULL;
8183}
8184
7e84e1c7
SG
8185static int io_sq_offload_create(struct io_ring_ctx *ctx,
8186 struct io_uring_params *p)
2b188cc1
JA
8187{
8188 int ret;
8189
6c271ce2 8190 if (ctx->flags & IORING_SETUP_SQPOLL) {
534ca6d6
JA
8191 struct io_sq_data *sqd;
8192
3ec482d1 8193 ret = -EPERM;
ce59fc69 8194 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
3ec482d1
JA
8195 goto err;
8196
534ca6d6
JA
8197 sqd = io_get_sq_data(p);
8198 if (IS_ERR(sqd)) {
8199 ret = PTR_ERR(sqd);
8200 goto err;
8201 }
69fb2131 8202
534ca6d6 8203 ctx->sq_data = sqd;
69fb2131
JA
8204 io_sq_thread_park(sqd);
8205 mutex_lock(&sqd->ctx_lock);
8206 list_add(&ctx->sqd_list, &sqd->ctx_new_list);
8207 mutex_unlock(&sqd->ctx_lock);
8208 io_sq_thread_unpark(sqd);
534ca6d6 8209
917257da
JA
8210 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8211 if (!ctx->sq_thread_idle)
8212 ctx->sq_thread_idle = HZ;
8213
aa06165d
JA
8214 if (sqd->thread)
8215 goto done;
8216
6c271ce2 8217 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 8218 int cpu = p->sq_thread_cpu;
6c271ce2 8219
917257da 8220 ret = -EINVAL;
44a9bd18
JA
8221 if (cpu >= nr_cpu_ids)
8222 goto err;
7889f44d 8223 if (!cpu_online(cpu))
917257da
JA
8224 goto err;
8225
69fb2131 8226 sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
534ca6d6 8227 cpu, "io_uring-sq");
6c271ce2 8228 } else {
69fb2131 8229 sqd->thread = kthread_create(io_sq_thread, sqd,
6c271ce2
JA
8230 "io_uring-sq");
8231 }
534ca6d6
JA
8232 if (IS_ERR(sqd->thread)) {
8233 ret = PTR_ERR(sqd->thread);
8234 sqd->thread = NULL;
6c271ce2
JA
8235 goto err;
8236 }
534ca6d6 8237 ret = io_uring_alloc_task_context(sqd->thread);
0f212204
JA
8238 if (ret)
8239 goto err;
6c271ce2
JA
8240 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8241 /* Can't have SQ_AFF without SQPOLL */
8242 ret = -EINVAL;
8243 goto err;
8244 }
8245
aa06165d 8246done:
24369c2e
PB
8247 ret = io_init_wq_offload(ctx, p);
8248 if (ret)
2b188cc1 8249 goto err;
2b188cc1
JA
8250
8251 return 0;
8252err:
54a91f3b 8253 io_finish_async(ctx);
2b188cc1
JA
8254 return ret;
8255}
8256
7e84e1c7
SG
8257static void io_sq_offload_start(struct io_ring_ctx *ctx)
8258{
534ca6d6
JA
8259 struct io_sq_data *sqd = ctx->sq_data;
8260
8261 if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
8262 wake_up_process(sqd->thread);
7e84e1c7
SG
8263}
8264
a087e2b5
BM
8265static inline void __io_unaccount_mem(struct user_struct *user,
8266 unsigned long nr_pages)
2b188cc1
JA
8267{
8268 atomic_long_sub(nr_pages, &user->locked_vm);
8269}
8270
a087e2b5
BM
8271static inline int __io_account_mem(struct user_struct *user,
8272 unsigned long nr_pages)
2b188cc1
JA
8273{
8274 unsigned long page_limit, cur_pages, new_pages;
8275
8276 /* Don't allow more pages than we can safely lock */
8277 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8278
8279 do {
8280 cur_pages = atomic_long_read(&user->locked_vm);
8281 new_pages = cur_pages + nr_pages;
8282 if (new_pages > page_limit)
8283 return -ENOMEM;
8284 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8285 new_pages) != cur_pages);
8286
8287 return 0;
8288}
8289
26bfa89e 8290static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 8291{
aad5d8da 8292 if (ctx->limit_mem)
a087e2b5 8293 __io_unaccount_mem(ctx->user, nr_pages);
30975825 8294
26bfa89e
JA
8295 if (ctx->mm_account)
8296 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
8297}
8298
26bfa89e 8299static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
a087e2b5 8300{
30975825
BM
8301 int ret;
8302
8303 if (ctx->limit_mem) {
8304 ret = __io_account_mem(ctx->user, nr_pages);
8305 if (ret)
8306 return ret;
8307 }
8308
26bfa89e
JA
8309 if (ctx->mm_account)
8310 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
a087e2b5
BM
8311
8312 return 0;
8313}
8314
2b188cc1
JA
8315static void io_mem_free(void *ptr)
8316{
52e04ef4
MR
8317 struct page *page;
8318
8319 if (!ptr)
8320 return;
2b188cc1 8321
52e04ef4 8322 page = virt_to_head_page(ptr);
2b188cc1
JA
8323 if (put_page_testzero(page))
8324 free_compound_page(page);
8325}
8326
8327static void *io_mem_alloc(size_t size)
8328{
8329 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
26bfa89e 8330 __GFP_NORETRY | __GFP_ACCOUNT;
2b188cc1
JA
8331
8332 return (void *) __get_free_pages(gfp_flags, get_order(size));
8333}
8334
75b28aff
HV
8335static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8336 size_t *sq_offset)
8337{
8338 struct io_rings *rings;
8339 size_t off, sq_array_size;
8340
8341 off = struct_size(rings, cqes, cq_entries);
8342 if (off == SIZE_MAX)
8343 return SIZE_MAX;
8344
8345#ifdef CONFIG_SMP
8346 off = ALIGN(off, SMP_CACHE_BYTES);
8347 if (off == 0)
8348 return SIZE_MAX;
8349#endif
8350
b36200f5
DV
8351 if (sq_offset)
8352 *sq_offset = off;
8353
75b28aff
HV
8354 sq_array_size = array_size(sizeof(u32), sq_entries);
8355 if (sq_array_size == SIZE_MAX)
8356 return SIZE_MAX;
8357
8358 if (check_add_overflow(off, sq_array_size, &off))
8359 return SIZE_MAX;
8360
75b28aff
HV
8361 return off;
8362}
8363
0a96bbe4 8364static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
edafccee
JA
8365{
8366 int i, j;
8367
8368 if (!ctx->user_bufs)
8369 return -ENXIO;
8370
8371 for (i = 0; i < ctx->nr_user_bufs; i++) {
8372 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8373
8374 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 8375 unpin_user_page(imu->bvec[j].bv_page);
edafccee 8376
de293938 8377 if (imu->acct_pages)
26bfa89e 8378 io_unaccount_mem(ctx, imu->acct_pages);
d4ef6475 8379 kvfree(imu->bvec);
edafccee
JA
8380 imu->nr_bvecs = 0;
8381 }
8382
8383 kfree(ctx->user_bufs);
8384 ctx->user_bufs = NULL;
8385 ctx->nr_user_bufs = 0;
8386 return 0;
8387}
8388
8389static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8390 void __user *arg, unsigned index)
8391{
8392 struct iovec __user *src;
8393
8394#ifdef CONFIG_COMPAT
8395 if (ctx->compat) {
8396 struct compat_iovec __user *ciovs;
8397 struct compat_iovec ciov;
8398
8399 ciovs = (struct compat_iovec __user *) arg;
8400 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8401 return -EFAULT;
8402
d55e5f5b 8403 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
8404 dst->iov_len = ciov.iov_len;
8405 return 0;
8406 }
8407#endif
8408 src = (struct iovec __user *) arg;
8409 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8410 return -EFAULT;
8411 return 0;
8412}
8413
de293938
JA
8414/*
8415 * Not super efficient, but this is just a registration time. And we do cache
8416 * the last compound head, so generally we'll only do a full search if we don't
8417 * match that one.
8418 *
8419 * We check if the given compound head page has already been accounted, to
8420 * avoid double accounting it. This allows us to account the full size of the
8421 * page, not just the constituent pages of a huge page.
8422 */
8423static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8424 int nr_pages, struct page *hpage)
8425{
8426 int i, j;
8427
8428 /* check current page array */
8429 for (i = 0; i < nr_pages; i++) {
8430 if (!PageCompound(pages[i]))
8431 continue;
8432 if (compound_head(pages[i]) == hpage)
8433 return true;
8434 }
8435
8436 /* check previously registered pages */
8437 for (i = 0; i < ctx->nr_user_bufs; i++) {
8438 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8439
8440 for (j = 0; j < imu->nr_bvecs; j++) {
8441 if (!PageCompound(imu->bvec[j].bv_page))
8442 continue;
8443 if (compound_head(imu->bvec[j].bv_page) == hpage)
8444 return true;
8445 }
8446 }
8447
8448 return false;
8449}
8450
8451static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8452 int nr_pages, struct io_mapped_ubuf *imu,
8453 struct page **last_hpage)
8454{
8455 int i, ret;
8456
8457 for (i = 0; i < nr_pages; i++) {
8458 if (!PageCompound(pages[i])) {
8459 imu->acct_pages++;
8460 } else {
8461 struct page *hpage;
8462
8463 hpage = compound_head(pages[i]);
8464 if (hpage == *last_hpage)
8465 continue;
8466 *last_hpage = hpage;
8467 if (headpage_already_acct(ctx, pages, i, hpage))
8468 continue;
8469 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8470 }
8471 }
8472
8473 if (!imu->acct_pages)
8474 return 0;
8475
26bfa89e 8476 ret = io_account_mem(ctx, imu->acct_pages);
de293938
JA
8477 if (ret)
8478 imu->acct_pages = 0;
8479 return ret;
8480}
8481
0a96bbe4
BM
8482static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8483 struct io_mapped_ubuf *imu,
8484 struct page **last_hpage)
edafccee
JA
8485{
8486 struct vm_area_struct **vmas = NULL;
8487 struct page **pages = NULL;
0a96bbe4
BM
8488 unsigned long off, start, end, ubuf;
8489 size_t size;
8490 int ret, pret, nr_pages, i;
8491
8492 ubuf = (unsigned long) iov->iov_base;
8493 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8494 start = ubuf >> PAGE_SHIFT;
8495 nr_pages = end - start;
8496
8497 ret = -ENOMEM;
8498
8499 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8500 if (!pages)
8501 goto done;
8502
8503 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8504 GFP_KERNEL);
8505 if (!vmas)
8506 goto done;
8507
8508 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8509 GFP_KERNEL);
8510 if (!imu->bvec)
8511 goto done;
8512
8513 ret = 0;
8514 mmap_read_lock(current->mm);
8515 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8516 pages, vmas);
8517 if (pret == nr_pages) {
8518 /* don't support file backed memory */
8519 for (i = 0; i < nr_pages; i++) {
8520 struct vm_area_struct *vma = vmas[i];
8521
8522 if (vma->vm_file &&
8523 !is_file_hugepages(vma->vm_file)) {
8524 ret = -EOPNOTSUPP;
8525 break;
8526 }
8527 }
8528 } else {
8529 ret = pret < 0 ? pret : -EFAULT;
8530 }
8531 mmap_read_unlock(current->mm);
8532 if (ret) {
8533 /*
8534 * if we did partial map, or found file backed vmas,
8535 * release any pages we did get
8536 */
8537 if (pret > 0)
8538 unpin_user_pages(pages, pret);
8539 kvfree(imu->bvec);
8540 goto done;
8541 }
8542
8543 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8544 if (ret) {
8545 unpin_user_pages(pages, pret);
8546 kvfree(imu->bvec);
8547 goto done;
8548 }
8549
8550 off = ubuf & ~PAGE_MASK;
8551 size = iov->iov_len;
8552 for (i = 0; i < nr_pages; i++) {
8553 size_t vec_len;
8554
8555 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8556 imu->bvec[i].bv_page = pages[i];
8557 imu->bvec[i].bv_len = vec_len;
8558 imu->bvec[i].bv_offset = off;
8559 off = 0;
8560 size -= vec_len;
8561 }
8562 /* store original address for later verification */
8563 imu->ubuf = ubuf;
8564 imu->len = iov->iov_len;
8565 imu->nr_bvecs = nr_pages;
8566 ret = 0;
8567done:
8568 kvfree(pages);
8569 kvfree(vmas);
8570 return ret;
8571}
8572
2b358604 8573static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
0a96bbe4 8574{
edafccee
JA
8575 if (ctx->user_bufs)
8576 return -EBUSY;
8577 if (!nr_args || nr_args > UIO_MAXIOV)
8578 return -EINVAL;
8579
8580 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8581 GFP_KERNEL);
8582 if (!ctx->user_bufs)
8583 return -ENOMEM;
8584
2b358604
BM
8585 return 0;
8586}
8587
8588static int io_buffer_validate(struct iovec *iov)
8589{
8590 /*
8591 * Don't impose further limits on the size and buffer
8592 * constraints here, we'll -EINVAL later when IO is
8593 * submitted if they are wrong.
8594 */
8595 if (!iov->iov_base || !iov->iov_len)
8596 return -EFAULT;
8597
8598 /* arbitrary limit, but we need something */
8599 if (iov->iov_len > SZ_1G)
8600 return -EFAULT;
8601
8602 return 0;
8603}
8604
8605static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8606 unsigned int nr_args)
8607{
8608 int i, ret;
8609 struct iovec iov;
8610 struct page *last_hpage = NULL;
8611
8612 ret = io_buffers_map_alloc(ctx, nr_args);
8613 if (ret)
8614 return ret;
8615
edafccee
JA
8616 for (i = 0; i < nr_args; i++) {
8617 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
edafccee
JA
8618
8619 ret = io_copy_iov(ctx, &iov, arg, i);
8620 if (ret)
0a96bbe4 8621 break;
edafccee 8622
2b358604
BM
8623 ret = io_buffer_validate(&iov);
8624 if (ret)
0a96bbe4 8625 break;
edafccee 8626
0a96bbe4
BM
8627 ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
8628 if (ret)
8629 break;
edafccee
JA
8630
8631 ctx->nr_user_bufs++;
8632 }
0a96bbe4
BM
8633
8634 if (ret)
8635 io_sqe_buffers_unregister(ctx);
8636
edafccee
JA
8637 return ret;
8638}
8639
9b402849
JA
8640static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8641{
8642 __s32 __user *fds = arg;
8643 int fd;
8644
8645 if (ctx->cq_ev_fd)
8646 return -EBUSY;
8647
8648 if (copy_from_user(&fd, fds, sizeof(*fds)))
8649 return -EFAULT;
8650
8651 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8652 if (IS_ERR(ctx->cq_ev_fd)) {
8653 int ret = PTR_ERR(ctx->cq_ev_fd);
8654 ctx->cq_ev_fd = NULL;
8655 return ret;
8656 }
8657
8658 return 0;
8659}
8660
8661static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8662{
8663 if (ctx->cq_ev_fd) {
8664 eventfd_ctx_put(ctx->cq_ev_fd);
8665 ctx->cq_ev_fd = NULL;
8666 return 0;
8667 }
8668
8669 return -ENXIO;
8670}
8671
5a2e745d
JA
8672static int __io_destroy_buffers(int id, void *p, void *data)
8673{
8674 struct io_ring_ctx *ctx = data;
8675 struct io_buffer *buf = p;
8676
067524e9 8677 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
8678 return 0;
8679}
8680
8681static void io_destroy_buffers(struct io_ring_ctx *ctx)
8682{
8683 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
8684 idr_destroy(&ctx->io_buffer_idr);
8685}
8686
68e68ee6 8687static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
1b4c351f 8688{
68e68ee6 8689 struct io_kiocb *req, *nxt;
1b4c351f 8690
68e68ee6
JA
8691 list_for_each_entry_safe(req, nxt, list, compl.list) {
8692 if (tsk && req->task != tsk)
8693 continue;
1b4c351f
JA
8694 list_del(&req->compl.list);
8695 kmem_cache_free(req_cachep, req);
8696 }
8697}
8698
9a4fdbd8 8699static void io_req_caches_free(struct io_ring_ctx *ctx, struct task_struct *tsk)
2b188cc1 8700{
bf019da7
PB
8701 struct io_submit_state *submit_state = &ctx->submit_state;
8702
9a4fdbd8
JA
8703 mutex_lock(&ctx->uring_lock);
8704
8705 if (submit_state->free_reqs)
8706 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8707 submit_state->reqs);
8708
8709 io_req_cache_free(&submit_state->comp.free_list, NULL);
8710
8711 spin_lock_irq(&ctx->completion_lock);
8712 io_req_cache_free(&submit_state->comp.locked_free_list, NULL);
8713 spin_unlock_irq(&ctx->completion_lock);
8714
8715 mutex_unlock(&ctx->uring_lock);
8716}
8717
8718static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8719{
04fc6c80
PB
8720 /*
8721 * Some may use context even when all refs and requests have been put,
8722 * and they are free to do so while still holding uring_lock, see
8723 * __io_req_task_submit(). Wait for them to finish.
8724 */
8725 mutex_lock(&ctx->uring_lock);
8726 mutex_unlock(&ctx->uring_lock);
8727
6b06314c 8728 io_finish_async(ctx);
0a96bbe4 8729 io_sqe_buffers_unregister(ctx);
2aede0e4
JA
8730
8731 if (ctx->sqo_task) {
8732 put_task_struct(ctx->sqo_task);
8733 ctx->sqo_task = NULL;
8734 mmdrop(ctx->mm_account);
8735 ctx->mm_account = NULL;
30975825 8736 }
def596e9 8737
91d8f519
DZ
8738#ifdef CONFIG_BLK_CGROUP
8739 if (ctx->sqo_blkcg_css)
8740 css_put(ctx->sqo_blkcg_css);
8741#endif
8742
6b06314c 8743 io_sqe_files_unregister(ctx);
9b402849 8744 io_eventfd_unregister(ctx);
5a2e745d 8745 io_destroy_buffers(ctx);
41726c9a 8746 idr_destroy(&ctx->personality_idr);
def596e9 8747
2b188cc1 8748#if defined(CONFIG_UNIX)
355e8d26
EB
8749 if (ctx->ring_sock) {
8750 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 8751 sock_release(ctx->ring_sock);
355e8d26 8752 }
2b188cc1
JA
8753#endif
8754
75b28aff 8755 io_mem_free(ctx->rings);
2b188cc1 8756 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
8757
8758 percpu_ref_exit(&ctx->refs);
2b188cc1 8759 free_uid(ctx->user);
181e448d 8760 put_cred(ctx->creds);
9a4fdbd8 8761 io_req_caches_free(ctx, NULL);
78076bb6 8762 kfree(ctx->cancel_hash);
2b188cc1
JA
8763 kfree(ctx);
8764}
8765
8766static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8767{
8768 struct io_ring_ctx *ctx = file->private_data;
8769 __poll_t mask = 0;
8770
8771 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
8772 /*
8773 * synchronizes with barrier from wq_has_sleeper call in
8774 * io_commit_cqring
8775 */
2b188cc1 8776 smp_rmb();
90554200 8777 if (!io_sqring_full(ctx))
2b188cc1 8778 mask |= EPOLLOUT | EPOLLWRNORM;
ed670c3f
HX
8779
8780 /*
8781 * Don't flush cqring overflow list here, just do a simple check.
8782 * Otherwise there could possible be ABBA deadlock:
8783 * CPU0 CPU1
8784 * ---- ----
8785 * lock(&ctx->uring_lock);
8786 * lock(&ep->mtx);
8787 * lock(&ctx->uring_lock);
8788 * lock(&ep->mtx);
8789 *
8790 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
8791 * pushs them to do the flush.
8792 */
8793 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
2b188cc1
JA
8794 mask |= EPOLLIN | EPOLLRDNORM;
8795
8796 return mask;
8797}
8798
8799static int io_uring_fasync(int fd, struct file *file, int on)
8800{
8801 struct io_ring_ctx *ctx = file->private_data;
8802
8803 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8804}
8805
0bead8cd 8806static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
071698e1 8807{
1e6fa521 8808 struct io_identity *iod;
071698e1 8809
1e6fa521
JA
8810 iod = idr_remove(&ctx->personality_idr, id);
8811 if (iod) {
8812 put_cred(iod->creds);
8813 if (refcount_dec_and_test(&iod->count))
8814 kfree(iod);
0bead8cd 8815 return 0;
1e6fa521 8816 }
0bead8cd
YD
8817
8818 return -EINVAL;
8819}
8820
8821static int io_remove_personalities(int id, void *p, void *data)
8822{
8823 struct io_ring_ctx *ctx = data;
8824
8825 io_unregister_personality(ctx, id);
071698e1
JA
8826 return 0;
8827}
8828
85faa7b8
JA
8829static void io_ring_exit_work(struct work_struct *work)
8830{
b2edc0a7
PB
8831 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8832 exit_work);
85faa7b8 8833
56952e91
JA
8834 /*
8835 * If we're doing polled IO and end up having requests being
8836 * submitted async (out-of-line), then completions can come in while
8837 * we're waiting for refs to drop. We need to reap these manually,
8838 * as nobody else will be looking for them.
8839 */
b2edc0a7 8840 do {
9936c7c2 8841 io_uring_try_cancel_requests(ctx, NULL, NULL);
b2edc0a7 8842 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
85faa7b8
JA
8843 io_ring_ctx_free(ctx);
8844}
8845
00c18640
JA
8846static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8847{
8848 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8849
8850 return req->ctx == data;
8851}
8852
2b188cc1
JA
8853static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8854{
8855 mutex_lock(&ctx->uring_lock);
8856 percpu_ref_kill(&ctx->refs);
d9d05217
PB
8857
8858 if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
8859 ctx->sqo_dead = 1;
8860
cda286f0
PB
8861 /* if force is set, the ring is going away. always drop after that */
8862 ctx->cq_overflow_flushed = 1;
634578f8 8863 if (ctx->rings)
6c503150 8864 __io_cqring_overflow_flush(ctx, true, NULL, NULL);
5c766a90 8865 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
2b188cc1
JA
8866 mutex_unlock(&ctx->uring_lock);
8867
6b81928d
PB
8868 io_kill_timeouts(ctx, NULL, NULL);
8869 io_poll_remove_all(ctx, NULL, NULL);
561fb04a
JA
8870
8871 if (ctx->io_wq)
00c18640 8872 io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true);
561fb04a 8873
15dff286 8874 /* if we failed setting up the ctx, we might not have any rings */
b2edc0a7 8875 io_iopoll_try_reap_events(ctx);
309fc03a 8876
85faa7b8 8877 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
8878 /*
8879 * Use system_unbound_wq to avoid spawning tons of event kworkers
8880 * if we're exiting a ton of rings at the same time. It just adds
8881 * noise and overhead, there's no discernable change in runtime
8882 * over using system_wq.
8883 */
8884 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
8885}
8886
8887static int io_uring_release(struct inode *inode, struct file *file)
8888{
8889 struct io_ring_ctx *ctx = file->private_data;
8890
8891 file->private_data = NULL;
8892 io_ring_ctx_wait_and_kill(ctx);
8893 return 0;
8894}
8895
f6edbabb
PB
8896struct io_task_cancel {
8897 struct task_struct *task;
8898 struct files_struct *files;
8899};
f254ac04 8900
f6edbabb 8901static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
b711d4ea 8902{
9a472ef7 8903 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
f6edbabb 8904 struct io_task_cancel *cancel = data;
9a472ef7
PB
8905 bool ret;
8906
f6edbabb 8907 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
9a472ef7
PB
8908 unsigned long flags;
8909 struct io_ring_ctx *ctx = req->ctx;
8910
8911 /* protect against races with linked timeouts */
8912 spin_lock_irqsave(&ctx->completion_lock, flags);
f6edbabb 8913 ret = io_match_task(req, cancel->task, cancel->files);
9a472ef7
PB
8914 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8915 } else {
f6edbabb 8916 ret = io_match_task(req, cancel->task, cancel->files);
9a472ef7
PB
8917 }
8918 return ret;
b711d4ea
JA
8919}
8920
b7ddce3c 8921static void io_cancel_defer_files(struct io_ring_ctx *ctx,
ef9865a4 8922 struct task_struct *task,
b7ddce3c
PB
8923 struct files_struct *files)
8924{
8925 struct io_defer_entry *de = NULL;
8926 LIST_HEAD(list);
8927
8928 spin_lock_irq(&ctx->completion_lock);
8929 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
08d23634 8930 if (io_match_task(de->req, task, files)) {
b7ddce3c
PB
8931 list_cut_position(&list, &ctx->defer_list, &de->list);
8932 break;
8933 }
8934 }
8935 spin_unlock_irq(&ctx->completion_lock);
8936
8937 while (!list_empty(&list)) {
8938 de = list_first_entry(&list, struct io_defer_entry, list);
8939 list_del_init(&de->list);
8940 req_set_fail_links(de->req);
8941 io_put_req(de->req);
8942 io_req_complete(de->req, -ECANCELED);
8943 kfree(de);
8944 }
8945}
8946
9936c7c2
PB
8947static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8948 struct task_struct *task,
8949 struct files_struct *files)
8950{
8951 struct io_task_cancel cancel = { .task = task, .files = files, };
8952
8953 while (1) {
8954 enum io_wq_cancel cret;
8955 bool ret = false;
8956
8957 if (ctx->io_wq) {
8958 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
8959 &cancel, true);
8960 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8961 }
8962
8963 /* SQPOLL thread does its own polling */
8964 if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
8965 while (!list_empty_careful(&ctx->iopoll_list)) {
8966 io_iopoll_try_reap_events(ctx);
8967 ret = true;
8968 }
8969 }
8970
8971 ret |= io_poll_remove_all(ctx, task, files);
8972 ret |= io_kill_timeouts(ctx, task, files);
8973 ret |= io_run_task_work();
8974 io_cqring_overflow_flush(ctx, true, task, files);
8975 if (!ret)
8976 break;
8977 cond_resched();
8978 }
8979}
8980
ca70f00b
PB
8981static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8982 struct task_struct *task,
8983 struct files_struct *files)
8984{
8985 struct io_kiocb *req;
8986 int cnt = 0;
8987
8988 spin_lock_irq(&ctx->inflight_lock);
8989 list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8990 cnt += io_match_task(req, task, files);
8991 spin_unlock_irq(&ctx->inflight_lock);
8992 return cnt;
8993}
8994
b52fda00 8995static void io_uring_cancel_files(struct io_ring_ctx *ctx,
df9923f9 8996 struct task_struct *task,
fcb323cc
JA
8997 struct files_struct *files)
8998{
fcb323cc 8999 while (!list_empty_careful(&ctx->inflight_list)) {
d8f1b971 9000 DEFINE_WAIT(wait);
ca70f00b 9001 int inflight;
fcb323cc 9002
ca70f00b
PB
9003 inflight = io_uring_count_inflight(ctx, task, files);
9004 if (!inflight)
fcb323cc 9005 break;
f6edbabb 9006
9936c7c2 9007 io_uring_try_cancel_requests(ctx, task, files);
34343786
PB
9008
9009 if (ctx->sq_data)
9010 io_sq_thread_unpark(ctx->sq_data);
ca70f00b
PB
9011 prepare_to_wait(&task->io_uring->wait, &wait,
9012 TASK_UNINTERRUPTIBLE);
9013 if (inflight == io_uring_count_inflight(ctx, task, files))
9014 schedule();
c98de08c 9015 finish_wait(&task->io_uring->wait, &wait);
34343786
PB
9016 if (ctx->sq_data)
9017 io_sq_thread_park(ctx->sq_data);
fcb323cc 9018 }
44e728b8
PB
9019}
9020
d9d05217
PB
9021static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
9022{
d9d05217
PB
9023 mutex_lock(&ctx->uring_lock);
9024 ctx->sqo_dead = 1;
9025 mutex_unlock(&ctx->uring_lock);
9026
9027 /* make sure callers enter the ring to get error */
b4411616
PB
9028 if (ctx->rings)
9029 io_ring_set_wakeup_flag(ctx);
d9d05217
PB
9030}
9031
0f212204
JA
9032/*
9033 * We need to iteratively cancel requests, in case a request has dependent
9034 * hard links. These persist even for failure of cancelations, hence keep
9035 * looping until none are found.
9036 */
9037static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
9038 struct files_struct *files)
9039{
9040 struct task_struct *task = current;
9041
fdaf083c 9042 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
d9d05217 9043 io_disable_sqo_submit(ctx);
534ca6d6 9044 task = ctx->sq_data->thread;
fdaf083c
JA
9045 atomic_inc(&task->io_uring->in_idle);
9046 io_sq_thread_park(ctx->sq_data);
9047 }
0f212204 9048
df9923f9 9049 io_cancel_defer_files(ctx, task, files);
0f212204 9050
3a7efd1a 9051 io_uring_cancel_files(ctx, task, files);
b52fda00 9052 if (!files)
9936c7c2 9053 io_uring_try_cancel_requests(ctx, task, NULL);
fdaf083c
JA
9054
9055 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
9056 atomic_dec(&task->io_uring->in_idle);
9057 /*
9058 * If the files that are going away are the ones in the thread
9059 * identity, clear them out.
9060 */
9061 if (task->io_uring->identity->files == files)
9062 task->io_uring->identity->files = NULL;
9063 io_sq_thread_unpark(ctx->sq_data);
9064 }
0f212204
JA
9065}
9066
9067/*
9068 * Note that this task has used io_uring. We use it for cancelation purposes.
9069 */
fdaf083c 9070static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
0f212204 9071{
236434c3 9072 struct io_uring_task *tctx = current->io_uring;
a528b04e 9073 int ret;
236434c3
MWO
9074
9075 if (unlikely(!tctx)) {
0f212204
JA
9076 ret = io_uring_alloc_task_context(current);
9077 if (unlikely(ret))
9078 return ret;
236434c3 9079 tctx = current->io_uring;
0f212204 9080 }
236434c3
MWO
9081 if (tctx->last != file) {
9082 void *old = xa_load(&tctx->xa, (unsigned long)file);
0f212204 9083
236434c3 9084 if (!old) {
0f212204 9085 get_file(file);
a528b04e
PB
9086 ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
9087 file, GFP_KERNEL));
9088 if (ret) {
9089 fput(file);
9090 return ret;
9091 }
ecfc8492
PB
9092
9093 /* one and only SQPOLL file note, held by sqo_task */
9094 WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) &&
9095 current != ctx->sqo_task);
0f212204 9096 }
236434c3 9097 tctx->last = file;
0f212204
JA
9098 }
9099
fdaf083c
JA
9100 /*
9101 * This is race safe in that the task itself is doing this, hence it
9102 * cannot be going through the exit/cancel paths at the same time.
9103 * This cannot be modified while exit/cancel is running.
9104 */
9105 if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
9106 tctx->sqpoll = true;
9107
0f212204
JA
9108 return 0;
9109}
9110
9111/*
9112 * Remove this io_uring_file -> task mapping.
9113 */
9114static void io_uring_del_task_file(struct file *file)
9115{
9116 struct io_uring_task *tctx = current->io_uring;
0f212204
JA
9117
9118 if (tctx->last == file)
9119 tctx->last = NULL;
5e2ed8c4 9120 file = xa_erase(&tctx->xa, (unsigned long)file);
0f212204
JA
9121 if (file)
9122 fput(file);
9123}
9124
de7f1d9e
PB
9125static void io_uring_remove_task_files(struct io_uring_task *tctx)
9126{
9127 struct file *file;
9128 unsigned long index;
9129
9130 xa_for_each(&tctx->xa, index, file)
9131 io_uring_del_task_file(file);
9132}
9133
0f212204
JA
9134void __io_uring_files_cancel(struct files_struct *files)
9135{
9136 struct io_uring_task *tctx = current->io_uring;
ce765372
MWO
9137 struct file *file;
9138 unsigned long index;
0f212204
JA
9139
9140 /* make sure overflow events are dropped */
fdaf083c 9141 atomic_inc(&tctx->in_idle);
de7f1d9e
PB
9142 xa_for_each(&tctx->xa, index, file)
9143 io_uring_cancel_task_requests(file->private_data, files);
fdaf083c 9144 atomic_dec(&tctx->in_idle);
de7f1d9e
PB
9145
9146 if (files)
9147 io_uring_remove_task_files(tctx);
fdaf083c
JA
9148}
9149
9150static s64 tctx_inflight(struct io_uring_task *tctx)
9151{
0e9ddb39
PB
9152 return percpu_counter_sum(&tctx->inflight);
9153}
fdaf083c 9154
0e9ddb39
PB
9155static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
9156{
9157 struct io_uring_task *tctx;
9158 s64 inflight;
9159 DEFINE_WAIT(wait);
fdaf083c 9160
0e9ddb39
PB
9161 if (!ctx->sq_data)
9162 return;
9163 tctx = ctx->sq_data->thread->io_uring;
9164 io_disable_sqo_submit(ctx);
fdaf083c 9165
0e9ddb39
PB
9166 atomic_inc(&tctx->in_idle);
9167 do {
9168 /* read completions before cancelations */
9169 inflight = tctx_inflight(tctx);
9170 if (!inflight)
9171 break;
9172 io_uring_cancel_task_requests(ctx, NULL);
fdaf083c 9173
0e9ddb39
PB
9174 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9175 /*
9176 * If we've seen completions, retry without waiting. This
9177 * avoids a race where a completion comes in before we did
9178 * prepare_to_wait().
9179 */
9180 if (inflight == tctx_inflight(tctx))
9181 schedule();
9182 finish_wait(&tctx->wait, &wait);
9183 } while (1);
9184 atomic_dec(&tctx->in_idle);
0f212204
JA
9185}
9186
0f212204
JA
9187/*
9188 * Find any io_uring fd that this task has registered or done IO on, and cancel
9189 * requests.
9190 */
9191void __io_uring_task_cancel(void)
9192{
9193 struct io_uring_task *tctx = current->io_uring;
9194 DEFINE_WAIT(wait);
d8a6df10 9195 s64 inflight;
0f212204
JA
9196
9197 /* make sure overflow events are dropped */
fdaf083c 9198 atomic_inc(&tctx->in_idle);
0f212204 9199
0b5cd6c3 9200 /* trigger io_disable_sqo_submit() */
0e9ddb39
PB
9201 if (tctx->sqpoll) {
9202 struct file *file;
9203 unsigned long index;
9204
9205 xa_for_each(&tctx->xa, index, file)
9206 io_uring_cancel_sqpoll(file->private_data);
9207 }
0b5cd6c3 9208
d8a6df10 9209 do {
0f212204 9210 /* read completions before cancelations */
fdaf083c 9211 inflight = tctx_inflight(tctx);
d8a6df10
JA
9212 if (!inflight)
9213 break;
0f212204
JA
9214 __io_uring_files_cancel(NULL);
9215
9216 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9217
9218 /*
a1bb3cd5
PB
9219 * If we've seen completions, retry without waiting. This
9220 * avoids a race where a completion comes in before we did
9221 * prepare_to_wait().
0f212204 9222 */
a1bb3cd5
PB
9223 if (inflight == tctx_inflight(tctx))
9224 schedule();
f57555ed 9225 finish_wait(&tctx->wait, &wait);
d8a6df10 9226 } while (1);
0f212204 9227
fdaf083c 9228 atomic_dec(&tctx->in_idle);
de7f1d9e
PB
9229
9230 io_uring_remove_task_files(tctx);
44e728b8
PB
9231}
9232
fcb323cc
JA
9233static int io_uring_flush(struct file *file, void *data)
9234{
6b5733eb 9235 struct io_uring_task *tctx = current->io_uring;
d9d05217 9236 struct io_ring_ctx *ctx = file->private_data;
6b5733eb 9237
41be53e9 9238 if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
84965ff8 9239 io_uring_cancel_task_requests(ctx, NULL);
41be53e9
JA
9240 io_req_caches_free(ctx, current);
9241 }
84965ff8 9242
6b5733eb 9243 if (!tctx)
4f793dc4
PB
9244 return 0;
9245
6b5733eb
PB
9246 /* we should have cancelled and erased it before PF_EXITING */
9247 WARN_ON_ONCE((current->flags & PF_EXITING) &&
9248 xa_load(&tctx->xa, (unsigned long)file));
9249
4f793dc4
PB
9250 /*
9251 * fput() is pending, will be 2 if the only other ref is our potential
9252 * task file note. If the task is exiting, drop regardless of count.
9253 */
6b5733eb
PB
9254 if (atomic_long_read(&file->f_count) != 2)
9255 return 0;
4f793dc4 9256
d9d05217
PB
9257 if (ctx->flags & IORING_SETUP_SQPOLL) {
9258 /* there is only one file note, which is owned by sqo_task */
4325cb49
PB
9259 WARN_ON_ONCE(ctx->sqo_task != current &&
9260 xa_load(&tctx->xa, (unsigned long)file));
9261 /* sqo_dead check is for when this happens after cancellation */
9262 WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
d9d05217
PB
9263 !xa_load(&tctx->xa, (unsigned long)file));
9264
9265 io_disable_sqo_submit(ctx);
9266 }
9267
9268 if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
9269 io_uring_del_task_file(file);
fcb323cc
JA
9270 return 0;
9271}
9272
6c5c240e
RP
9273static void *io_uring_validate_mmap_request(struct file *file,
9274 loff_t pgoff, size_t sz)
2b188cc1 9275{
2b188cc1 9276 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 9277 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
9278 struct page *page;
9279 void *ptr;
9280
9281 switch (offset) {
9282 case IORING_OFF_SQ_RING:
75b28aff
HV
9283 case IORING_OFF_CQ_RING:
9284 ptr = ctx->rings;
2b188cc1
JA
9285 break;
9286 case IORING_OFF_SQES:
9287 ptr = ctx->sq_sqes;
9288 break;
2b188cc1 9289 default:
6c5c240e 9290 return ERR_PTR(-EINVAL);
2b188cc1
JA
9291 }
9292
9293 page = virt_to_head_page(ptr);
a50b854e 9294 if (sz > page_size(page))
6c5c240e
RP
9295 return ERR_PTR(-EINVAL);
9296
9297 return ptr;
9298}
9299
9300#ifdef CONFIG_MMU
9301
9302static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9303{
9304 size_t sz = vma->vm_end - vma->vm_start;
9305 unsigned long pfn;
9306 void *ptr;
9307
9308 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9309 if (IS_ERR(ptr))
9310 return PTR_ERR(ptr);
2b188cc1
JA
9311
9312 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9313 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9314}
9315
6c5c240e
RP
9316#else /* !CONFIG_MMU */
9317
9318static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9319{
9320 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9321}
9322
9323static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9324{
9325 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9326}
9327
9328static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9329 unsigned long addr, unsigned long len,
9330 unsigned long pgoff, unsigned long flags)
9331{
9332 void *ptr;
9333
9334 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9335 if (IS_ERR(ptr))
9336 return PTR_ERR(ptr);
9337
9338 return (unsigned long) ptr;
9339}
9340
9341#endif /* !CONFIG_MMU */
9342
d9d05217 9343static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
90554200 9344{
d9d05217 9345 int ret = 0;
90554200
JA
9346 DEFINE_WAIT(wait);
9347
9348 do {
9349 if (!io_sqring_full(ctx))
9350 break;
9351
9352 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9353
d9d05217
PB
9354 if (unlikely(ctx->sqo_dead)) {
9355 ret = -EOWNERDEAD;
9356 goto out;
9357 }
9358
90554200
JA
9359 if (!io_sqring_full(ctx))
9360 break;
9361
9362 schedule();
9363 } while (!signal_pending(current));
9364
9365 finish_wait(&ctx->sqo_sq_wait, &wait);
d9d05217
PB
9366out:
9367 return ret;
90554200
JA
9368}
9369
c73ebb68
HX
9370static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9371 struct __kernel_timespec __user **ts,
9372 const sigset_t __user **sig)
9373{
9374 struct io_uring_getevents_arg arg;
9375
9376 /*
9377 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9378 * is just a pointer to the sigset_t.
9379 */
9380 if (!(flags & IORING_ENTER_EXT_ARG)) {
9381 *sig = (const sigset_t __user *) argp;
9382 *ts = NULL;
9383 return 0;
9384 }
9385
9386 /*
9387 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9388 * timespec and sigset_t pointers if good.
9389 */
9390 if (*argsz != sizeof(arg))
9391 return -EINVAL;
9392 if (copy_from_user(&arg, argp, sizeof(arg)))
9393 return -EFAULT;
9394 *sig = u64_to_user_ptr(arg.sigmask);
9395 *argsz = arg.sigmask_sz;
9396 *ts = u64_to_user_ptr(arg.ts);
9397 return 0;
9398}
9399
2b188cc1 9400SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
c73ebb68
HX
9401 u32, min_complete, u32, flags, const void __user *, argp,
9402 size_t, argsz)
2b188cc1
JA
9403{
9404 struct io_ring_ctx *ctx;
9405 long ret = -EBADF;
9406 int submitted = 0;
9407 struct fd f;
9408
4c6e277c 9409 io_run_task_work();
b41e9852 9410
90554200 9411 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
c73ebb68 9412 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
2b188cc1
JA
9413 return -EINVAL;
9414
9415 f = fdget(fd);
9416 if (!f.file)
9417 return -EBADF;
9418
9419 ret = -EOPNOTSUPP;
9420 if (f.file->f_op != &io_uring_fops)
9421 goto out_fput;
9422
9423 ret = -ENXIO;
9424 ctx = f.file->private_data;
9425 if (!percpu_ref_tryget(&ctx->refs))
9426 goto out_fput;
9427
7e84e1c7
SG
9428 ret = -EBADFD;
9429 if (ctx->flags & IORING_SETUP_R_DISABLED)
9430 goto out;
9431
6c271ce2
JA
9432 /*
9433 * For SQ polling, the thread will do all submissions and completions.
9434 * Just return the requested submit count, and wake the thread if
9435 * we were asked to.
9436 */
b2a9eada 9437 ret = 0;
6c271ce2 9438 if (ctx->flags & IORING_SETUP_SQPOLL) {
6c503150 9439 io_cqring_overflow_flush(ctx, false, NULL, NULL);
89448c47 9440
d9d05217
PB
9441 ret = -EOWNERDEAD;
9442 if (unlikely(ctx->sqo_dead))
9443 goto out;
6c271ce2 9444 if (flags & IORING_ENTER_SQ_WAKEUP)
534ca6d6 9445 wake_up(&ctx->sq_data->wait);
d9d05217
PB
9446 if (flags & IORING_ENTER_SQ_WAIT) {
9447 ret = io_sqpoll_wait_sq(ctx);
9448 if (ret)
9449 goto out;
9450 }
6c271ce2 9451 submitted = to_submit;
b2a9eada 9452 } else if (to_submit) {
fdaf083c 9453 ret = io_uring_add_task_file(ctx, f.file);
0f212204
JA
9454 if (unlikely(ret))
9455 goto out;
2b188cc1 9456 mutex_lock(&ctx->uring_lock);
0f212204 9457 submitted = io_submit_sqes(ctx, to_submit);
2b188cc1 9458 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
9459
9460 if (submitted != to_submit)
9461 goto out;
2b188cc1
JA
9462 }
9463 if (flags & IORING_ENTER_GETEVENTS) {
c73ebb68
HX
9464 const sigset_t __user *sig;
9465 struct __kernel_timespec __user *ts;
9466
9467 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9468 if (unlikely(ret))
9469 goto out;
9470
2b188cc1
JA
9471 min_complete = min(min_complete, ctx->cq_entries);
9472
32b2244a
XW
9473 /*
9474 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9475 * space applications don't need to do io completion events
9476 * polling again, they can rely on io_sq_thread to do polling
9477 * work, which can reduce cpu usage and uring_lock contention.
9478 */
9479 if (ctx->flags & IORING_SETUP_IOPOLL &&
9480 !(ctx->flags & IORING_SETUP_SQPOLL)) {
7668b92a 9481 ret = io_iopoll_check(ctx, min_complete);
def596e9 9482 } else {
c73ebb68 9483 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
def596e9 9484 }
2b188cc1
JA
9485 }
9486
7c504e65 9487out:
6805b32e 9488 percpu_ref_put(&ctx->refs);
2b188cc1
JA
9489out_fput:
9490 fdput(f);
9491 return submitted ? submitted : ret;
9492}
9493
bebdb65e 9494#ifdef CONFIG_PROC_FS
87ce955b
JA
9495static int io_uring_show_cred(int id, void *p, void *data)
9496{
6b47ab81
JA
9497 struct io_identity *iod = p;
9498 const struct cred *cred = iod->creds;
87ce955b
JA
9499 struct seq_file *m = data;
9500 struct user_namespace *uns = seq_user_ns(m);
9501 struct group_info *gi;
9502 kernel_cap_t cap;
9503 unsigned __capi;
9504 int g;
9505
9506 seq_printf(m, "%5d\n", id);
9507 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9508 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9509 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9510 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9511 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9512 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9513 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9514 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9515 seq_puts(m, "\n\tGroups:\t");
9516 gi = cred->group_info;
9517 for (g = 0; g < gi->ngroups; g++) {
9518 seq_put_decimal_ull(m, g ? " " : "",
9519 from_kgid_munged(uns, gi->gid[g]));
9520 }
9521 seq_puts(m, "\n\tCapEff:\t");
9522 cap = cred->cap_effective;
9523 CAP_FOR_EACH_U32(__capi)
9524 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9525 seq_putc(m, '\n');
9526 return 0;
9527}
9528
9529static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9530{
dbbe9c64 9531 struct io_sq_data *sq = NULL;
fad8e0de 9532 bool has_lock;
87ce955b
JA
9533 int i;
9534
fad8e0de
JA
9535 /*
9536 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9537 * since fdinfo case grabs it in the opposite direction of normal use
9538 * cases. If we fail to get the lock, we just don't iterate any
9539 * structures that could be going away outside the io_uring mutex.
9540 */
9541 has_lock = mutex_trylock(&ctx->uring_lock);
9542
dbbe9c64
JQ
9543 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
9544 sq = ctx->sq_data;
9545
9546 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9547 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
87ce955b 9548 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
fad8e0de 9549 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
ea64ec02 9550 struct file *f = *io_fixed_file_slot(ctx->file_data, i);
87ce955b 9551
87ce955b
JA
9552 if (f)
9553 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9554 else
9555 seq_printf(m, "%5u: <none>\n", i);
9556 }
9557 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
fad8e0de 9558 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
87ce955b
JA
9559 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9560
9561 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9562 (unsigned int) buf->len);
9563 }
fad8e0de 9564 if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
87ce955b
JA
9565 seq_printf(m, "Personalities:\n");
9566 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
9567 }
d7718a9d
JA
9568 seq_printf(m, "PollList:\n");
9569 spin_lock_irq(&ctx->completion_lock);
9570 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9571 struct hlist_head *list = &ctx->cancel_hash[i];
9572 struct io_kiocb *req;
9573
9574 hlist_for_each_entry(req, list, hash_node)
9575 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9576 req->task->task_works != NULL);
9577 }
9578 spin_unlock_irq(&ctx->completion_lock);
fad8e0de
JA
9579 if (has_lock)
9580 mutex_unlock(&ctx->uring_lock);
87ce955b
JA
9581}
9582
9583static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9584{
9585 struct io_ring_ctx *ctx = f->private_data;
9586
9587 if (percpu_ref_tryget(&ctx->refs)) {
9588 __io_uring_show_fdinfo(ctx, m);
9589 percpu_ref_put(&ctx->refs);
9590 }
9591}
bebdb65e 9592#endif
87ce955b 9593
2b188cc1
JA
9594static const struct file_operations io_uring_fops = {
9595 .release = io_uring_release,
fcb323cc 9596 .flush = io_uring_flush,
2b188cc1 9597 .mmap = io_uring_mmap,
6c5c240e
RP
9598#ifndef CONFIG_MMU
9599 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9600 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9601#endif
2b188cc1
JA
9602 .poll = io_uring_poll,
9603 .fasync = io_uring_fasync,
bebdb65e 9604#ifdef CONFIG_PROC_FS
87ce955b 9605 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 9606#endif
2b188cc1
JA
9607};
9608
9609static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9610 struct io_uring_params *p)
9611{
75b28aff
HV
9612 struct io_rings *rings;
9613 size_t size, sq_array_offset;
2b188cc1 9614
bd740481
JA
9615 /* make sure these are sane, as we already accounted them */
9616 ctx->sq_entries = p->sq_entries;
9617 ctx->cq_entries = p->cq_entries;
9618
75b28aff
HV
9619 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9620 if (size == SIZE_MAX)
9621 return -EOVERFLOW;
9622
9623 rings = io_mem_alloc(size);
9624 if (!rings)
2b188cc1
JA
9625 return -ENOMEM;
9626
75b28aff
HV
9627 ctx->rings = rings;
9628 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9629 rings->sq_ring_mask = p->sq_entries - 1;
9630 rings->cq_ring_mask = p->cq_entries - 1;
9631 rings->sq_ring_entries = p->sq_entries;
9632 rings->cq_ring_entries = p->cq_entries;
9633 ctx->sq_mask = rings->sq_ring_mask;
9634 ctx->cq_mask = rings->cq_ring_mask;
2b188cc1
JA
9635
9636 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
9637 if (size == SIZE_MAX) {
9638 io_mem_free(ctx->rings);
9639 ctx->rings = NULL;
2b188cc1 9640 return -EOVERFLOW;
eb065d30 9641 }
2b188cc1
JA
9642
9643 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
9644 if (!ctx->sq_sqes) {
9645 io_mem_free(ctx->rings);
9646 ctx->rings = NULL;
2b188cc1 9647 return -ENOMEM;
eb065d30 9648 }
2b188cc1 9649
2b188cc1
JA
9650 return 0;
9651}
9652
9faadcc8
PB
9653static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9654{
9655 int ret, fd;
9656
9657 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9658 if (fd < 0)
9659 return fd;
9660
9661 ret = io_uring_add_task_file(ctx, file);
9662 if (ret) {
9663 put_unused_fd(fd);
9664 return ret;
9665 }
9666 fd_install(fd, file);
9667 return fd;
9668}
9669
2b188cc1
JA
9670/*
9671 * Allocate an anonymous fd, this is what constitutes the application
9672 * visible backing of an io_uring instance. The application mmaps this
9673 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9674 * we have to tie this fd to a socket for file garbage collection purposes.
9675 */
9faadcc8 9676static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
2b188cc1
JA
9677{
9678 struct file *file;
9faadcc8 9679#if defined(CONFIG_UNIX)
2b188cc1
JA
9680 int ret;
9681
2b188cc1
JA
9682 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9683 &ctx->ring_sock);
9684 if (ret)
9faadcc8 9685 return ERR_PTR(ret);
2b188cc1
JA
9686#endif
9687
2b188cc1
JA
9688 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9689 O_RDWR | O_CLOEXEC);
2b188cc1 9690#if defined(CONFIG_UNIX)
9faadcc8
PB
9691 if (IS_ERR(file)) {
9692 sock_release(ctx->ring_sock);
9693 ctx->ring_sock = NULL;
9694 } else {
9695 ctx->ring_sock->file = file;
0f212204 9696 }
2b188cc1 9697#endif
9faadcc8 9698 return file;
2b188cc1
JA
9699}
9700
7f13657d
XW
9701static int io_uring_create(unsigned entries, struct io_uring_params *p,
9702 struct io_uring_params __user *params)
2b188cc1
JA
9703{
9704 struct user_struct *user = NULL;
9705 struct io_ring_ctx *ctx;
9faadcc8 9706 struct file *file;
2b188cc1
JA
9707 int ret;
9708
8110c1a6 9709 if (!entries)
2b188cc1 9710 return -EINVAL;
8110c1a6
JA
9711 if (entries > IORING_MAX_ENTRIES) {
9712 if (!(p->flags & IORING_SETUP_CLAMP))
9713 return -EINVAL;
9714 entries = IORING_MAX_ENTRIES;
9715 }
2b188cc1
JA
9716
9717 /*
9718 * Use twice as many entries for the CQ ring. It's possible for the
9719 * application to drive a higher depth than the size of the SQ ring,
9720 * since the sqes are only used at submission time. This allows for
33a107f0
JA
9721 * some flexibility in overcommitting a bit. If the application has
9722 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9723 * of CQ ring entries manually.
2b188cc1
JA
9724 */
9725 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
9726 if (p->flags & IORING_SETUP_CQSIZE) {
9727 /*
9728 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9729 * to a power-of-two, if it isn't already. We do NOT impose
9730 * any cq vs sq ring sizing.
9731 */
eb2667b3 9732 if (!p->cq_entries)
33a107f0 9733 return -EINVAL;
8110c1a6
JA
9734 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9735 if (!(p->flags & IORING_SETUP_CLAMP))
9736 return -EINVAL;
9737 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9738 }
eb2667b3
JQ
9739 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9740 if (p->cq_entries < p->sq_entries)
9741 return -EINVAL;
33a107f0
JA
9742 } else {
9743 p->cq_entries = 2 * p->sq_entries;
9744 }
2b188cc1
JA
9745
9746 user = get_uid(current_user());
2b188cc1
JA
9747
9748 ctx = io_ring_ctx_alloc(p);
9749 if (!ctx) {
2b188cc1
JA
9750 free_uid(user);
9751 return -ENOMEM;
9752 }
9753 ctx->compat = in_compat_syscall();
26bfa89e 9754 ctx->limit_mem = !capable(CAP_IPC_LOCK);
2b188cc1 9755 ctx->user = user;
0b8c0ec7 9756 ctx->creds = get_current_cred();
4ea33a97
JA
9757#ifdef CONFIG_AUDIT
9758 ctx->loginuid = current->loginuid;
9759 ctx->sessionid = current->sessionid;
9760#endif
2aede0e4
JA
9761 ctx->sqo_task = get_task_struct(current);
9762
9763 /*
9764 * This is just grabbed for accounting purposes. When a process exits,
9765 * the mm is exited and dropped before the files, hence we need to hang
9766 * on to this mm purely for the purposes of being able to unaccount
9767 * memory (locked/pinned vm). It's not used for anything else.
9768 */
6b7898eb 9769 mmgrab(current->mm);
2aede0e4 9770 ctx->mm_account = current->mm;
6b7898eb 9771
91d8f519
DZ
9772#ifdef CONFIG_BLK_CGROUP
9773 /*
9774 * The sq thread will belong to the original cgroup it was inited in.
9775 * If the cgroup goes offline (e.g. disabling the io controller), then
9776 * issued bios will be associated with the closest cgroup later in the
9777 * block layer.
9778 */
9779 rcu_read_lock();
9780 ctx->sqo_blkcg_css = blkcg_css();
9781 ret = css_tryget_online(ctx->sqo_blkcg_css);
9782 rcu_read_unlock();
9783 if (!ret) {
9784 /* don't init against a dying cgroup, have the user try again */
9785 ctx->sqo_blkcg_css = NULL;
9786 ret = -ENODEV;
9787 goto err;
9788 }
9789#endif
2b188cc1
JA
9790 ret = io_allocate_scq_urings(ctx, p);
9791 if (ret)
9792 goto err;
9793
7e84e1c7 9794 ret = io_sq_offload_create(ctx, p);
2b188cc1
JA
9795 if (ret)
9796 goto err;
9797
7e84e1c7
SG
9798 if (!(p->flags & IORING_SETUP_R_DISABLED))
9799 io_sq_offload_start(ctx);
9800
2b188cc1 9801 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
9802 p->sq_off.head = offsetof(struct io_rings, sq.head);
9803 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9804 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9805 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9806 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9807 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9808 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
9809
9810 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
9811 p->cq_off.head = offsetof(struct io_rings, cq.head);
9812 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9813 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9814 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9815 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9816 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 9817 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 9818
7f13657d
XW
9819 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9820 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351 9821 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
c73ebb68
HX
9822 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9823 IORING_FEAT_EXT_ARG;
7f13657d
XW
9824
9825 if (copy_to_user(params, p, sizeof(*p))) {
9826 ret = -EFAULT;
9827 goto err;
9828 }
d1719f70 9829
9faadcc8
PB
9830 file = io_uring_get_file(ctx);
9831 if (IS_ERR(file)) {
9832 ret = PTR_ERR(file);
9833 goto err;
9834 }
9835
044c1ab3
JA
9836 /*
9837 * Install ring fd as the very last thing, so we don't risk someone
9838 * having closed it before we finish setup
9839 */
9faadcc8
PB
9840 ret = io_uring_install_fd(ctx, file);
9841 if (ret < 0) {
06585c49 9842 io_disable_sqo_submit(ctx);
9faadcc8
PB
9843 /* fput will clean it up */
9844 fput(file);
9845 return ret;
9846 }
044c1ab3 9847
c826bd7a 9848 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
9849 return ret;
9850err:
d9d05217 9851 io_disable_sqo_submit(ctx);
2b188cc1
JA
9852 io_ring_ctx_wait_and_kill(ctx);
9853 return ret;
9854}
9855
9856/*
9857 * Sets up an aio uring context, and returns the fd. Applications asks for a
9858 * ring size, we return the actual sq/cq ring sizes (among other things) in the
9859 * params structure passed in.
9860 */
9861static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9862{
9863 struct io_uring_params p;
2b188cc1
JA
9864 int i;
9865
9866 if (copy_from_user(&p, params, sizeof(p)))
9867 return -EFAULT;
9868 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9869 if (p.resv[i])
9870 return -EINVAL;
9871 }
9872
6c271ce2 9873 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 9874 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
7e84e1c7
SG
9875 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9876 IORING_SETUP_R_DISABLED))
2b188cc1
JA
9877 return -EINVAL;
9878
7f13657d 9879 return io_uring_create(entries, &p, params);
2b188cc1
JA
9880}
9881
9882SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9883 struct io_uring_params __user *, params)
9884{
9885 return io_uring_setup(entries, params);
9886}
9887
66f4af93
JA
9888static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9889{
9890 struct io_uring_probe *p;
9891 size_t size;
9892 int i, ret;
9893
9894 size = struct_size(p, ops, nr_args);
9895 if (size == SIZE_MAX)
9896 return -EOVERFLOW;
9897 p = kzalloc(size, GFP_KERNEL);
9898 if (!p)
9899 return -ENOMEM;
9900
9901 ret = -EFAULT;
9902 if (copy_from_user(p, arg, size))
9903 goto out;
9904 ret = -EINVAL;
9905 if (memchr_inv(p, 0, size))
9906 goto out;
9907
9908 p->last_op = IORING_OP_LAST - 1;
9909 if (nr_args > IORING_OP_LAST)
9910 nr_args = IORING_OP_LAST;
9911
9912 for (i = 0; i < nr_args; i++) {
9913 p->ops[i].op = i;
9914 if (!io_op_defs[i].not_supported)
9915 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9916 }
9917 p->ops_len = i;
9918
9919 ret = 0;
9920 if (copy_to_user(arg, p, size))
9921 ret = -EFAULT;
9922out:
9923 kfree(p);
9924 return ret;
9925}
9926
071698e1
JA
9927static int io_register_personality(struct io_ring_ctx *ctx)
9928{
1e6fa521
JA
9929 struct io_identity *id;
9930 int ret;
071698e1 9931
1e6fa521
JA
9932 id = kmalloc(sizeof(*id), GFP_KERNEL);
9933 if (unlikely(!id))
9934 return -ENOMEM;
9935
9936 io_init_identity(id);
9937 id->creds = get_current_cred();
9938
9939 ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
9940 if (ret < 0) {
9941 put_cred(id->creds);
9942 kfree(id);
9943 }
9944 return ret;
071698e1
JA
9945}
9946
21b55dbc
SG
9947static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9948 unsigned int nr_args)
9949{
9950 struct io_uring_restriction *res;
9951 size_t size;
9952 int i, ret;
9953
7e84e1c7
SG
9954 /* Restrictions allowed only if rings started disabled */
9955 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9956 return -EBADFD;
9957
21b55dbc 9958 /* We allow only a single restrictions registration */
7e84e1c7 9959 if (ctx->restrictions.registered)
21b55dbc
SG
9960 return -EBUSY;
9961
9962 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9963 return -EINVAL;
9964
9965 size = array_size(nr_args, sizeof(*res));
9966 if (size == SIZE_MAX)
9967 return -EOVERFLOW;
9968
9969 res = memdup_user(arg, size);
9970 if (IS_ERR(res))
9971 return PTR_ERR(res);
9972
9973 ret = 0;
9974
9975 for (i = 0; i < nr_args; i++) {
9976 switch (res[i].opcode) {
9977 case IORING_RESTRICTION_REGISTER_OP:
9978 if (res[i].register_op >= IORING_REGISTER_LAST) {
9979 ret = -EINVAL;
9980 goto out;
9981 }
9982
9983 __set_bit(res[i].register_op,
9984 ctx->restrictions.register_op);
9985 break;
9986 case IORING_RESTRICTION_SQE_OP:
9987 if (res[i].sqe_op >= IORING_OP_LAST) {
9988 ret = -EINVAL;
9989 goto out;
9990 }
9991
9992 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9993 break;
9994 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9995 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9996 break;
9997 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9998 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9999 break;
10000 default:
10001 ret = -EINVAL;
10002 goto out;
10003 }
10004 }
10005
10006out:
10007 /* Reset all restrictions if an error happened */
10008 if (ret != 0)
10009 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
10010 else
7e84e1c7 10011 ctx->restrictions.registered = true;
21b55dbc
SG
10012
10013 kfree(res);
10014 return ret;
10015}
10016
7e84e1c7
SG
10017static int io_register_enable_rings(struct io_ring_ctx *ctx)
10018{
10019 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
10020 return -EBADFD;
10021
10022 if (ctx->restrictions.registered)
10023 ctx->restricted = 1;
10024
10025 ctx->flags &= ~IORING_SETUP_R_DISABLED;
10026
10027 io_sq_offload_start(ctx);
10028
10029 return 0;
10030}
10031
071698e1
JA
10032static bool io_register_op_must_quiesce(int op)
10033{
10034 switch (op) {
10035 case IORING_UNREGISTER_FILES:
10036 case IORING_REGISTER_FILES_UPDATE:
10037 case IORING_REGISTER_PROBE:
10038 case IORING_REGISTER_PERSONALITY:
10039 case IORING_UNREGISTER_PERSONALITY:
10040 return false;
10041 default:
10042 return true;
10043 }
10044}
10045
edafccee
JA
10046static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10047 void __user *arg, unsigned nr_args)
b19062a5
JA
10048 __releases(ctx->uring_lock)
10049 __acquires(ctx->uring_lock)
edafccee
JA
10050{
10051 int ret;
10052
35fa71a0
JA
10053 /*
10054 * We're inside the ring mutex, if the ref is already dying, then
10055 * someone else killed the ctx or is already going through
10056 * io_uring_register().
10057 */
10058 if (percpu_ref_is_dying(&ctx->refs))
10059 return -ENXIO;
10060
071698e1 10061 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 10062 percpu_ref_kill(&ctx->refs);
b19062a5 10063
05f3fb3c
JA
10064 /*
10065 * Drop uring mutex before waiting for references to exit. If
10066 * another thread is currently inside io_uring_enter() it might
10067 * need to grab the uring_lock to make progress. If we hold it
10068 * here across the drain wait, then we can deadlock. It's safe
10069 * to drop the mutex here, since no new references will come in
10070 * after we've killed the percpu ref.
10071 */
10072 mutex_unlock(&ctx->uring_lock);
af9c1a44
JA
10073 do {
10074 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10075 if (!ret)
10076 break;
ed6930c9
JA
10077 ret = io_run_task_work_sig();
10078 if (ret < 0)
10079 break;
af9c1a44
JA
10080 } while (1);
10081
05f3fb3c 10082 mutex_lock(&ctx->uring_lock);
af9c1a44 10083
c150368b
JA
10084 if (ret) {
10085 percpu_ref_resurrect(&ctx->refs);
21b55dbc
SG
10086 goto out_quiesce;
10087 }
10088 }
10089
10090 if (ctx->restricted) {
10091 if (opcode >= IORING_REGISTER_LAST) {
10092 ret = -EINVAL;
10093 goto out;
10094 }
10095
10096 if (!test_bit(opcode, ctx->restrictions.register_op)) {
10097 ret = -EACCES;
c150368b
JA
10098 goto out;
10099 }
05f3fb3c 10100 }
edafccee
JA
10101
10102 switch (opcode) {
10103 case IORING_REGISTER_BUFFERS:
0a96bbe4 10104 ret = io_sqe_buffers_register(ctx, arg, nr_args);
edafccee
JA
10105 break;
10106 case IORING_UNREGISTER_BUFFERS:
10107 ret = -EINVAL;
10108 if (arg || nr_args)
10109 break;
0a96bbe4 10110 ret = io_sqe_buffers_unregister(ctx);
edafccee 10111 break;
6b06314c
JA
10112 case IORING_REGISTER_FILES:
10113 ret = io_sqe_files_register(ctx, arg, nr_args);
10114 break;
10115 case IORING_UNREGISTER_FILES:
10116 ret = -EINVAL;
10117 if (arg || nr_args)
10118 break;
10119 ret = io_sqe_files_unregister(ctx);
10120 break;
c3a31e60
JA
10121 case IORING_REGISTER_FILES_UPDATE:
10122 ret = io_sqe_files_update(ctx, arg, nr_args);
10123 break;
9b402849 10124 case IORING_REGISTER_EVENTFD:
f2842ab5 10125 case IORING_REGISTER_EVENTFD_ASYNC:
9b402849
JA
10126 ret = -EINVAL;
10127 if (nr_args != 1)
10128 break;
10129 ret = io_eventfd_register(ctx, arg);
f2842ab5
JA
10130 if (ret)
10131 break;
10132 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10133 ctx->eventfd_async = 1;
10134 else
10135 ctx->eventfd_async = 0;
9b402849
JA
10136 break;
10137 case IORING_UNREGISTER_EVENTFD:
10138 ret = -EINVAL;
10139 if (arg || nr_args)
10140 break;
10141 ret = io_eventfd_unregister(ctx);
10142 break;
66f4af93
JA
10143 case IORING_REGISTER_PROBE:
10144 ret = -EINVAL;
10145 if (!arg || nr_args > 256)
10146 break;
10147 ret = io_probe(ctx, arg, nr_args);
10148 break;
071698e1
JA
10149 case IORING_REGISTER_PERSONALITY:
10150 ret = -EINVAL;
10151 if (arg || nr_args)
10152 break;
10153 ret = io_register_personality(ctx);
10154 break;
10155 case IORING_UNREGISTER_PERSONALITY:
10156 ret = -EINVAL;
10157 if (arg)
10158 break;
10159 ret = io_unregister_personality(ctx, nr_args);
10160 break;
7e84e1c7
SG
10161 case IORING_REGISTER_ENABLE_RINGS:
10162 ret = -EINVAL;
10163 if (arg || nr_args)
10164 break;
10165 ret = io_register_enable_rings(ctx);
10166 break;
21b55dbc
SG
10167 case IORING_REGISTER_RESTRICTIONS:
10168 ret = io_register_restrictions(ctx, arg, nr_args);
10169 break;
edafccee
JA
10170 default:
10171 ret = -EINVAL;
10172 break;
10173 }
10174
21b55dbc 10175out:
071698e1 10176 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 10177 /* bring the ctx back to life */
05f3fb3c 10178 percpu_ref_reinit(&ctx->refs);
21b55dbc 10179out_quiesce:
0f158b4c 10180 reinit_completion(&ctx->ref_comp);
05f3fb3c 10181 }
edafccee
JA
10182 return ret;
10183}
10184
10185SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10186 void __user *, arg, unsigned int, nr_args)
10187{
10188 struct io_ring_ctx *ctx;
10189 long ret = -EBADF;
10190 struct fd f;
10191
10192 f = fdget(fd);
10193 if (!f.file)
10194 return -EBADF;
10195
10196 ret = -EOPNOTSUPP;
10197 if (f.file->f_op != &io_uring_fops)
10198 goto out_fput;
10199
10200 ctx = f.file->private_data;
10201
10202 mutex_lock(&ctx->uring_lock);
10203 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10204 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
10205 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10206 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
10207out_fput:
10208 fdput(f);
10209 return ret;
10210}
10211
2b188cc1
JA
10212static int __init io_uring_init(void)
10213{
d7f62e82
SM
10214#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10215 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10216 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10217} while (0)
10218
10219#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10220 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10221 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10222 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10223 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10224 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10225 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10226 BUILD_BUG_SQE_ELEM(8, __u64, off);
10227 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10228 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 10229 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
10230 BUILD_BUG_SQE_ELEM(24, __u32, len);
10231 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10232 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10233 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10234 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
5769a351
JX
10235 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10236 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
d7f62e82
SM
10237 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10238 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10239 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10240 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10241 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10242 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10243 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10244 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 10245 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
10246 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10247 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
10248 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 10249 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
d7f62e82 10250
d3656344 10251 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
84557871 10252 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
91f245d5
JA
10253 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
10254 SLAB_ACCOUNT);
2b188cc1
JA
10255 return 0;
10256};
10257__initcall(io_uring_init);