2b188cc1
JA
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
1e84b97b
SB
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
2b188cc1
JA
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
c992fe29 40 * Copyright (c) 2018-2019 Christoph Hellwig
2b188cc1
JA
41 */
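/*
 * A minimal sketch of the application-side CQ reap these rules imply: the
 * tail is read with an acquire load (pairing with the kernel's tail store),
 * entries are consumed, and the head is published with a release store so
 * the kernel may reuse the slots. The cq_head/cq_tail/cq_mask/cqes names
 * are stand-ins for pointers the application derives from the mmap'd CQ
 * ring via struct io_cqring_offsets, and handle() is a placeholder for
 * application logic; liburing wraps this same pattern in its CQE helpers.
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		handle(cqe->user_data, cqe->res, cqe->flags);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);
 */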
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
52de1fe1 47#include <net/compat.h>
2b188cc1
JA
48#include <linux/refcount.h>
49#include <linux/uio.h>
6b47ee6e 50#include <linux/bits.h>
2b188cc1
JA
51
52#include <linux/sched/signal.h>
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/fdtable.h>
56#include <linux/mm.h>
57#include <linux/mman.h>
2b188cc1
JA
58#include <linux/percpu.h>
59#include <linux/slab.h>
6c271ce2 60#include <linux/kthread.h>
2b188cc1 61#include <linux/blkdev.h>
edafccee 62#include <linux/bvec.h>
2b188cc1
JA
63#include <linux/net.h>
64#include <net/sock.h>
65#include <net/af_unix.h>
6b06314c 66#include <net/scm.h>
2b188cc1
JA
67#include <linux/anon_inodes.h>
68#include <linux/sched/mm.h>
69#include <linux/uaccess.h>
70#include <linux/nospec.h>
edafccee
JA
71#include <linux/sizes.h>
72#include <linux/hugetlb.h>
aa4c3967 73#include <linux/highmem.h>
15b71abe
JA
74#include <linux/namei.h>
75#include <linux/fsnotify.h>
4840e418 76#include <linux/fadvise.h>
3e4827b0 77#include <linux/eventpoll.h>
ff002b30 78#include <linux/fs_struct.h>
7d67af2c 79#include <linux/splice.h>
b41e9852 80#include <linux/task_work.h>
bcf5a063 81#include <linux/pagemap.h>
0f212204 82#include <linux/io_uring.h>
91d8f519 83#include <linux/blk-cgroup.h>
2b188cc1 84
c826bd7a
DD
85#define CREATE_TRACE_POINTS
86#include <trace/events/io_uring.h>
87
2b188cc1
JA
88#include <uapi/linux/io_uring.h>
89
90#include "internal.h"
561fb04a 91#include "io-wq.h"
2b188cc1 92
5277deaa 93#define IORING_MAX_ENTRIES 32768
33a107f0 94#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
65e19f54
JA
95
96/*
97 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
98 */
99#define IORING_FILE_TABLE_SHIFT 9
100#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
101#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
102#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
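/*
 * A sketch of how a registered (fixed) file index resolves through the
 * two-level table sized above; this mirrors the lookup helpers later in
 * this file, assuming an io_ring_ctx *ctx whose file_data has been set up.
 * 512 files per table at 8 bytes per pointer is the one-page unit the
 * comment above refers to.
 *
 *	struct fixed_file_table *table;
 *	struct file *file;
 *
 *	table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 *	file  = table->files[i & IORING_FILE_TABLE_MASK];
 */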
21b55dbc
SG
103#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
104 IORING_REGISTER_LAST + IORING_OP_LAST)
2b188cc1
JA
105
106struct io_uring {
107 u32 head ____cacheline_aligned_in_smp;
108 u32 tail ____cacheline_aligned_in_smp;
109};
110
1e84b97b 111/*
75b28aff
HV
112 * This data is shared with the application through the mmap at offsets
113 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
1e84b97b
SB
114 *
115 * The offsets to the member fields are published through struct
116 * io_sqring_offsets when calling io_uring_setup.
117 */
75b28aff 118struct io_rings {
1e84b97b
SB
119 /*
120 * Head and tail offsets into the ring; the offsets need to be
121 * masked to get valid indices.
122 *
75b28aff
HV
123 * The kernel controls head of the sq ring and the tail of the cq ring,
124 * and the application controls tail of the sq ring and the head of the
125 * cq ring.
1e84b97b 126 */
75b28aff 127 struct io_uring sq, cq;
1e84b97b 128 /*
75b28aff 129 * Bitmasks to apply to head and tail offsets (constant, equals
1e84b97b
SB
130 * ring_entries - 1)
131 */
75b28aff
HV
132 u32 sq_ring_mask, cq_ring_mask;
133 /* Ring sizes (constant, power of 2) */
134 u32 sq_ring_entries, cq_ring_entries;
1e84b97b
SB
135 /*
136 * Number of invalid entries dropped by the kernel due to
137 * invalid index stored in array
138 *
139 * Written by the kernel, shouldn't be modified by the
140 * application (i.e. get number of "new events" by comparing to
141 * cached value).
142 *
143 * After a new SQ head value was read by the application this
144 * counter includes all submissions that were dropped reaching
145 * the new SQ head (and possibly more).
146 */
75b28aff 147 u32 sq_dropped;
1e84b97b 148 /*
0d9b5b3a 149 * Runtime SQ flags
1e84b97b
SB
150 *
151 * Written by the kernel, shouldn't be modified by the
152 * application.
153 *
154 * The application needs a full memory barrier before checking
155 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
156 */
75b28aff 157 u32 sq_flags;
0d9b5b3a
SG
158 /*
159 * Runtime CQ flags
160 *
161 * Written by the application, shouldn't be modified by the
162 * kernel.
163 */
164 u32 cq_flags;
1e84b97b
SB
165 /*
166 * Number of completion events lost because the queue was full;
167 * this should be avoided by the application by making sure
0b4295b5 168 * there are not more requests pending than there is space in
1e84b97b
SB
169 * the completion queue.
170 *
171 * Written by the kernel, shouldn't be modified by the
172 * application (i.e. get number of "new events" by comparing to
173 * cached value).
174 *
175 * As completion events come in out of order this counter is not
176 * ordered with any other data.
177 */
75b28aff 178 u32 cq_overflow;
1e84b97b
SB
179 /*
180 * Ring buffer of completion events.
181 *
182 * The kernel writes completion events fresh every time they are
183 * produced, so the application is allowed to modify pending
184 * entries.
185 */
75b28aff 186 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
2b188cc1
JA
187};
188
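/*
 * A sketch of the masking rule described above: sq/cq head and tail are
 * free-running counters, so a slot is always addressed modulo the ring
 * size through the published masks (rings here is a struct io_rings *,
 * and sq_array is the index array the application mmaps separately).
 *
 *	struct io_uring_cqe *cqe = &rings->cqes[tail & rings->cq_ring_mask];
 *	unsigned sqe_index = sq_array[head & rings->sq_ring_mask];
 */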
edafccee
JA
189struct io_mapped_ubuf {
190 u64 ubuf;
191 size_t len;
192 struct bio_vec *bvec;
193 unsigned int nr_bvecs;
de293938 194 unsigned long acct_pages;
edafccee
JA
195};
196
65e19f54
JA
197struct fixed_file_table {
198 struct file **files;
31b51510
JA
199};
200
05589553
XW
201struct fixed_file_ref_node {
202 struct percpu_ref refs;
203 struct list_head node;
204 struct list_head file_list;
205 struct fixed_file_data *file_data;
4a38aed2 206 struct llist_node llist;
05589553
XW
207};
208
05f3fb3c
JA
209struct fixed_file_data {
210 struct fixed_file_table *table;
211 struct io_ring_ctx *ctx;
212
05589553 213 struct percpu_ref *cur_refs;
05f3fb3c 214 struct percpu_ref refs;
05f3fb3c 215 struct completion done;
05589553
XW
216 struct list_head ref_list;
217 spinlock_t lock;
05f3fb3c
JA
218};
219
5a2e745d
JA
220struct io_buffer {
221 struct list_head list;
222 __u64 addr;
223 __s32 len;
224 __u16 bid;
225};
226
21b55dbc
SG
227struct io_restriction {
228 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
229 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
230 u8 sqe_flags_allowed;
231 u8 sqe_flags_required;
7e84e1c7 232 bool registered;
21b55dbc
SG
233};
234
534ca6d6
JA
235struct io_sq_data {
236 refcount_t refs;
69fb2131
JA
237 struct mutex lock;
238
239 /* ctx's that are using this sqd */
240 struct list_head ctx_list;
241 struct list_head ctx_new_list;
242 struct mutex ctx_lock;
243
534ca6d6
JA
244 struct task_struct *thread;
245 struct wait_queue_head wait;
246};
247
2b188cc1
JA
248struct io_ring_ctx {
249 struct {
250 struct percpu_ref refs;
251 } ____cacheline_aligned_in_smp;
252
253 struct {
254 unsigned int flags;
e1d85334 255 unsigned int compat: 1;
aad5d8da 256 unsigned int limit_mem: 1;
e1d85334
RD
257 unsigned int cq_overflow_flushed: 1;
258 unsigned int drain_next: 1;
259 unsigned int eventfd_async: 1;
21b55dbc 260 unsigned int restricted: 1;
2b188cc1 261
75b28aff
HV
262 /*
263 * Ring buffer of indices into array of io_uring_sqe, which is
264 * mmapped by the application using the IORING_OFF_SQES offset.
265 *
266 * This indirection could e.g. be used to assign fixed
267 * io_uring_sqe entries to operations and only submit them to
268 * the queue when needed.
269 *
270 * The kernel modifies neither the indices array nor the entries
271 * array.
272 */
273 u32 *sq_array;
2b188cc1
JA
274 unsigned cached_sq_head;
275 unsigned sq_entries;
276 unsigned sq_mask;
6c271ce2 277 unsigned sq_thread_idle;
498ccd9e 278 unsigned cached_sq_dropped;
206aefde 279 atomic_t cached_cq_overflow;
ad3eb2c8 280 unsigned long sq_check_overflow;
de0617e4
JA
281
282 struct list_head defer_list;
5262f567 283 struct list_head timeout_list;
1d7bb1d5 284 struct list_head cq_overflow_list;
fcb323cc
JA
285
286 wait_queue_head_t inflight_wait;
ad3eb2c8 287 struct io_uring_sqe *sq_sqes;
2b188cc1
JA
288 } ____cacheline_aligned_in_smp;
289
206aefde
JA
290 struct io_rings *rings;
291
2b188cc1 292 /* IO offload */
561fb04a 293 struct io_wq *io_wq;
2aede0e4
JA
294
295 /*
296 * For SQPOLL usage - we hold a reference to the parent task, so we
297 * have access to the ->files
298 */
299 struct task_struct *sqo_task;
300
301 /* Only used for accounting purposes */
302 struct mm_struct *mm_account;
303
91d8f519
DZ
304#ifdef CONFIG_BLK_CGROUP
305 struct cgroup_subsys_state *sqo_blkcg_css;
306#endif
307
534ca6d6
JA
308 struct io_sq_data *sq_data; /* if using sq thread polling */
309
90554200 310 struct wait_queue_head sqo_sq_wait;
6a779382 311 struct wait_queue_entry sqo_wait_entry;
69fb2131 312 struct list_head sqd_list;
75b28aff 313
6b06314c
JA
314 /*
315 * If used, fixed file set. Writers must ensure that ->refs is dead,
316 * readers must ensure that ->refs is alive as long as the file* is
317 * used. Only updated through io_uring_register(2).
318 */
05f3fb3c 319 struct fixed_file_data *file_data;
6b06314c
JA
320 unsigned nr_user_files;
321
edafccee
JA
322 /* if used, fixed mapped user buffers */
323 unsigned nr_user_bufs;
324 struct io_mapped_ubuf *user_bufs;
325
2b188cc1
JA
326 struct user_struct *user;
327
0b8c0ec7 328 const struct cred *creds;
181e448d 329
0f158b4c
JA
330 struct completion ref_comp;
331 struct completion sq_thread_comp;
206aefde 332
0ddf92e8
JA
333 /* if all else fails... */
334 struct io_kiocb *fallback_req;
335
206aefde
JA
336#if defined(CONFIG_UNIX)
337 struct socket *ring_sock;
338#endif
339
5a2e745d
JA
340 struct idr io_buffer_idr;
341
071698e1
JA
342 struct idr personality_idr;
343
206aefde
JA
344 struct {
345 unsigned cached_cq_tail;
346 unsigned cq_entries;
347 unsigned cq_mask;
348 atomic_t cq_timeouts;
ad3eb2c8 349 unsigned long cq_check_overflow;
206aefde
JA
350 struct wait_queue_head cq_wait;
351 struct fasync_struct *cq_fasync;
352 struct eventfd_ctx *cq_ev_fd;
353 } ____cacheline_aligned_in_smp;
2b188cc1
JA
354
355 struct {
356 struct mutex uring_lock;
357 wait_queue_head_t wait;
358 } ____cacheline_aligned_in_smp;
359
360 struct {
361 spinlock_t completion_lock;
e94f141b 362
def596e9 363 /*
540e32a0 364 * ->iopoll_list is protected by the ctx->uring_lock for
def596e9
JA
365 * io_uring instances that don't use IORING_SETUP_SQPOLL.
366 * For SQPOLL, only the single threaded io_sq_thread() will
367 * manipulate the list, hence no extra locking is needed there.
368 */
540e32a0 369 struct list_head iopoll_list;
78076bb6
JA
370 struct hlist_head *cancel_hash;
371 unsigned cancel_hash_bits;
e94f141b 372 bool poll_multi_file;
31b51510 373
fcb323cc
JA
374 spinlock_t inflight_lock;
375 struct list_head inflight_list;
2b188cc1 376 } ____cacheline_aligned_in_smp;
85faa7b8 377
4a38aed2
JA
378 struct delayed_work file_put_work;
379 struct llist_head file_put_llist;
380
85faa7b8 381 struct work_struct exit_work;
21b55dbc 382 struct io_restriction restrictions;
2b188cc1
JA
383};
384
09bb8394
JA
385/*
386 * First field must be the file pointer in all the
387 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
388 */
221c5eb2
JA
389struct io_poll_iocb {
390 struct file *file;
0969e783
JA
391 union {
392 struct wait_queue_head *head;
393 u64 addr;
394 };
221c5eb2 395 __poll_t events;
8c838788 396 bool done;
221c5eb2 397 bool canceled;
392edb45 398 struct wait_queue_entry wait;
221c5eb2
JA
399};
400
b5dba59e
JA
401struct io_close {
402 struct file *file;
403 struct file *put_file;
404 int fd;
405};
406
ad8a48ac
JA
407struct io_timeout_data {
408 struct io_kiocb *req;
409 struct hrtimer timer;
410 struct timespec64 ts;
411 enum hrtimer_mode mode;
412};
413
8ed8d3c3
JA
414struct io_accept {
415 struct file *file;
416 struct sockaddr __user *addr;
417 int __user *addr_len;
418 int flags;
09952e3e 419 unsigned long nofile;
8ed8d3c3
JA
420};
421
422struct io_sync {
423 struct file *file;
424 loff_t len;
425 loff_t off;
426 int flags;
d63d1b5e 427 int mode;
8ed8d3c3
JA
428};
429
fbf23849
JA
430struct io_cancel {
431 struct file *file;
432 u64 addr;
433};
434
b29472ee
JA
435struct io_timeout {
436 struct file *file;
437 u64 addr;
438 int flags;
bfe68a22
PB
439 u32 off;
440 u32 target_seq;
135fcde8 441 struct list_head list;
b29472ee
JA
442};
443
9adbd45d
JA
444struct io_rw {
445 /* NOTE: kiocb has the file as the first member, so don't do it here */
446 struct kiocb kiocb;
447 u64 addr;
448 u64 len;
449};
450
3fbb51c1
JA
451struct io_connect {
452 struct file *file;
453 struct sockaddr __user *addr;
454 int addr_len;
455};
456
e47293fd
JA
457struct io_sr_msg {
458 struct file *file;
fddaface 459 union {
270a5940 460 struct user_msghdr __user *umsg;
fddaface
JA
461 void __user *buf;
462 };
e47293fd 463 int msg_flags;
bcda7baa 464 int bgid;
fddaface 465 size_t len;
bcda7baa 466 struct io_buffer *kbuf;
e47293fd
JA
467};
468
15b71abe
JA
469struct io_open {
470 struct file *file;
471 int dfd;
15b71abe 472 struct filename *filename;
c12cedf2 473 struct open_how how;
4022e7af 474 unsigned long nofile;
15b71abe
JA
475};
476
05f3fb3c
JA
477struct io_files_update {
478 struct file *file;
479 u64 arg;
480 u32 nr_args;
481 u32 offset;
482};
483
4840e418
JA
484struct io_fadvise {
485 struct file *file;
486 u64 offset;
487 u32 len;
488 u32 advice;
489};
490
c1ca757b
JA
491struct io_madvise {
492 struct file *file;
493 u64 addr;
494 u32 len;
495 u32 advice;
496};
497
3e4827b0
JA
498struct io_epoll {
499 struct file *file;
500 int epfd;
501 int op;
502 int fd;
503 struct epoll_event event;
e47293fd
JA
504};
505
7d67af2c
PB
506struct io_splice {
507 struct file *file_out;
508 struct file *file_in;
509 loff_t off_out;
510 loff_t off_in;
511 u64 len;
512 unsigned int flags;
513};
514
ddf0322d
JA
515struct io_provide_buf {
516 struct file *file;
517 __u64 addr;
518 __s32 len;
519 __u32 bgid;
520 __u16 nbufs;
521 __u16 bid;
522};
523
1d9e1288
BM
524struct io_statx {
525 struct file *file;
526 int dfd;
527 unsigned int mask;
528 unsigned int flags;
e62753e4 529 const char __user *filename;
1d9e1288
BM
530 struct statx __user *buffer;
531};
532
3ca405eb
PB
533struct io_completion {
534 struct file *file;
535 struct list_head list;
0f7e466b 536 int cflags;
3ca405eb
PB
537};
538
f499a021
JA
539struct io_async_connect {
540 struct sockaddr_storage address;
541};
542
03b1230c
JA
543struct io_async_msghdr {
544 struct iovec fast_iov[UIO_FASTIOV];
545 struct iovec *iov;
546 struct sockaddr __user *uaddr;
547 struct msghdr msg;
b537916c 548 struct sockaddr_storage addr;
03b1230c
JA
549};
550
f67676d1
JA
551struct io_async_rw {
552 struct iovec fast_iov[UIO_FASTIOV];
ff6165b2
JA
553 const struct iovec *free_iovec;
554 struct iov_iter iter;
227c0c96 555 size_t bytes_done;
bcf5a063 556 struct wait_page_queue wpq;
f67676d1
JA
557};
558
6b47ee6e
PB
559enum {
560 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
561 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
562 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
563 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
564 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
bcda7baa 565 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
6b47ee6e 566
dea3b49c 567 REQ_F_LINK_HEAD_BIT,
6b47ee6e
PB
568 REQ_F_FAIL_LINK_BIT,
569 REQ_F_INFLIGHT_BIT,
570 REQ_F_CUR_POS_BIT,
571 REQ_F_NOWAIT_BIT,
6b47ee6e 572 REQ_F_LINK_TIMEOUT_BIT,
6b47ee6e 573 REQ_F_ISREG_BIT,
6b47ee6e 574 REQ_F_COMP_LOCKED_BIT,
99bc4c38 575 REQ_F_NEED_CLEANUP_BIT,
d7718a9d 576 REQ_F_POLLED_BIT,
bcda7baa 577 REQ_F_BUFFER_SELECTED_BIT,
5b0bbee4 578 REQ_F_NO_FILE_TABLE_BIT,
7cdaf587 579 REQ_F_WORK_INITIALIZED_BIT,
84557871
JA
580
581 /* not a real bit, just to check we're not overflowing the space */
582 __REQ_F_LAST_BIT,
6b47ee6e
PB
583};
584
585enum {
586 /* ctx owns file */
587 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
588 /* drain existing IO first */
589 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
590 /* linked sqes */
591 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
592 /* doesn't sever on completion < 0 */
593 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
594 /* IOSQE_ASYNC */
595 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
bcda7baa
JA
596 /* IOSQE_BUFFER_SELECT */
597 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
6b47ee6e 598
dea3b49c
PB
599 /* head of a link */
600 REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
6b47ee6e
PB
601 /* fail rest of links */
602 REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
603 /* on inflight list */
604 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
605 /* read/write uses file position */
606 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
607 /* must not punt to workers */
608 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
6b47ee6e
PB
609 /* has linked timeout */
610 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
6b47ee6e
PB
611 /* regular file */
612 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
6b47ee6e
PB
613 /* completion under lock */
614 REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
99bc4c38
PB
615 /* needs cleanup */
616 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
d7718a9d
JA
617 /* already went through poll handler */
618 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
bcda7baa
JA
619 /* buffer already selected */
620 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
5b0bbee4
JA
621 /* doesn't need file table for this request */
622 REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
7cdaf587
XW
623 /* io_wq_work is initialized */
624 REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
d7718a9d
JA
625};
626
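/*
 * Because the first group of REQ_F_* bits above is defined from the
 * matching IOSQE_* bit numbers, the flags an application sets in the sqe
 * can, in principle, be carried over with a plain OR; a sketch of that
 * mapping (validation of unsupported bits omitted):
 *
 *	req->flags |= READ_ONCE(sqe->flags);
 */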
627struct async_poll {
628 struct io_poll_iocb poll;
807abcb0 629 struct io_poll_iocb *double_poll;
6b47ee6e
PB
630};
631
09bb8394
JA
632/*
633 * NOTE! Each of the iocb union members has the file pointer
634 * as the first entry in their struct definition. So you can
635 * access the file pointer through any of the sub-structs,
636 * or directly as just 'file' in this struct.
637 */
2b188cc1 638struct io_kiocb {
221c5eb2 639 union {
09bb8394 640 struct file *file;
9adbd45d 641 struct io_rw rw;
221c5eb2 642 struct io_poll_iocb poll;
8ed8d3c3
JA
643 struct io_accept accept;
644 struct io_sync sync;
fbf23849 645 struct io_cancel cancel;
b29472ee 646 struct io_timeout timeout;
3fbb51c1 647 struct io_connect connect;
e47293fd 648 struct io_sr_msg sr_msg;
15b71abe 649 struct io_open open;
b5dba59e 650 struct io_close close;
05f3fb3c 651 struct io_files_update files_update;
4840e418 652 struct io_fadvise fadvise;
c1ca757b 653 struct io_madvise madvise;
3e4827b0 654 struct io_epoll epoll;
7d67af2c 655 struct io_splice splice;
ddf0322d 656 struct io_provide_buf pbuf;
1d9e1288 657 struct io_statx statx;
3ca405eb
PB
658 /* use only after cleaning per-op data, see io_clean_op() */
659 struct io_completion compl;
221c5eb2 660 };
2b188cc1 661
e8c2bc1f
JA
662 /* opcode allocated if it needs to store data for async defer */
663 void *async_data;
d625c6ee 664 u8 opcode;
65a6543d
XW
665 /* polled IO has completed */
666 u8 iopoll_completed;
2b188cc1 667
4f4eeba8 668 u16 buf_index;
9cf7c104 669 u32 result;
4f4eeba8 670
010e8e6b
PB
671 struct io_ring_ctx *ctx;
672 unsigned int flags;
673 refcount_t refs;
674 struct task_struct *task;
675 u64 user_data;
d7718a9d 676
010e8e6b 677 struct list_head link_list;
fcb323cc 678
d21ffe7e
PB
679 /*
680 * 1. used with ctx->iopoll_list with reads/writes
681 * 2. to track reqs with ->files (see io_op_def::file_table)
682 */
010e8e6b
PB
683 struct list_head inflight_entry;
684
685 struct percpu_ref *fixed_file_refs;
686 struct callback_head task_work;
687 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
688 struct hlist_node hash_node;
689 struct async_poll *apoll;
690 struct io_wq_work work;
2b188cc1 691};
05589553 692
27dc8338
PB
693struct io_defer_entry {
694 struct list_head list;
695 struct io_kiocb *req;
9cf7c104 696 u32 seq;
2b188cc1
JA
697};
698
def596e9 699#define IO_IOPOLL_BATCH 8
2b188cc1 700
013538bd
JA
701struct io_comp_state {
702 unsigned int nr;
703 struct list_head list;
704 struct io_ring_ctx *ctx;
705};
706
9a56a232
JA
707struct io_submit_state {
708 struct blk_plug plug;
709
2579f913
JA
710 /*
711 * io_kiocb alloc cache
712 */
713 void *reqs[IO_IOPOLL_BATCH];
6c8a3134 714 unsigned int free_reqs;
2579f913 715
013538bd
JA
716 /*
717 * Batch completion logic
718 */
719 struct io_comp_state comp;
720
9a56a232
JA
721 /*
722 * File reference cache
723 */
724 struct file *file;
725 unsigned int fd;
726 unsigned int has_refs;
9a56a232
JA
727 unsigned int ios_left;
728};
729
d3656344 730struct io_op_def {
d3656344
JA
731 /* needs current->mm setup, does mm access */
732 unsigned needs_mm : 1;
733 /* needs req->file assigned */
734 unsigned needs_file : 1;
fd2206e4
JA
735 /* don't fail if file grab fails */
736 unsigned needs_file_no_error : 1;
d3656344
JA
737 /* hash wq insertion if file is a regular file */
738 unsigned hash_reg_file : 1;
739 /* unbound wq insertion if file is a non-regular file */
740 unsigned unbound_nonreg_file : 1;
66f4af93
JA
741 /* opcode is not supported by this kernel */
742 unsigned not_supported : 1;
f86cd20c
JA
743 /* needs file table */
744 unsigned file_table : 1;
ff002b30
JA
745 /* needs ->fs */
746 unsigned needs_fs : 1;
8a72758c
JA
747 /* set if opcode supports polled "wait" */
748 unsigned pollin : 1;
749 unsigned pollout : 1;
bcda7baa
JA
750 /* op supports buffer selection */
751 unsigned buffer_select : 1;
e8c2bc1f 752 /* needs rlimit(RLIMIT_FSIZE) assigned */
57f1a649 753 unsigned needs_fsize : 1;
e8c2bc1f
JA
754 /* must always have async data allocated */
755 unsigned needs_async_data : 1;
91d8f519
DZ
756 /* needs blkcg context, issues async io potentially */
757 unsigned needs_blkcg : 1;
e8c2bc1f
JA
758 /* size of async data needed, if any */
759 unsigned short async_size;
d3656344
JA
760};
761
738277ad 762static const struct io_op_def io_op_defs[] __read_mostly = {
0463b6c5
PB
763 [IORING_OP_NOP] = {},
764 [IORING_OP_READV] = {
d3656344
JA
765 .needs_mm = 1,
766 .needs_file = 1,
767 .unbound_nonreg_file = 1,
8a72758c 768 .pollin = 1,
4d954c25 769 .buffer_select = 1,
e8c2bc1f 770 .needs_async_data = 1,
91d8f519 771 .needs_blkcg = 1,
e8c2bc1f 772 .async_size = sizeof(struct io_async_rw),
d3656344 773 },
0463b6c5 774 [IORING_OP_WRITEV] = {
d3656344
JA
775 .needs_mm = 1,
776 .needs_file = 1,
777 .hash_reg_file = 1,
778 .unbound_nonreg_file = 1,
8a72758c 779 .pollout = 1,
57f1a649 780 .needs_fsize = 1,
e8c2bc1f 781 .needs_async_data = 1,
91d8f519 782 .needs_blkcg = 1,
e8c2bc1f 783 .async_size = sizeof(struct io_async_rw),
d3656344 784 },
0463b6c5 785 [IORING_OP_FSYNC] = {
d3656344 786 .needs_file = 1,
91d8f519 787 .needs_blkcg = 1,
d3656344 788 },
0463b6c5 789 [IORING_OP_READ_FIXED] = {
d3656344
JA
790 .needs_file = 1,
791 .unbound_nonreg_file = 1,
8a72758c 792 .pollin = 1,
91d8f519 793 .needs_blkcg = 1,
e8c2bc1f 794 .async_size = sizeof(struct io_async_rw),
d3656344 795 },
0463b6c5 796 [IORING_OP_WRITE_FIXED] = {
d3656344
JA
797 .needs_file = 1,
798 .hash_reg_file = 1,
799 .unbound_nonreg_file = 1,
8a72758c 800 .pollout = 1,
57f1a649 801 .needs_fsize = 1,
91d8f519 802 .needs_blkcg = 1,
e8c2bc1f 803 .async_size = sizeof(struct io_async_rw),
d3656344 804 },
0463b6c5 805 [IORING_OP_POLL_ADD] = {
d3656344
JA
806 .needs_file = 1,
807 .unbound_nonreg_file = 1,
808 },
0463b6c5
PB
809 [IORING_OP_POLL_REMOVE] = {},
810 [IORING_OP_SYNC_FILE_RANGE] = {
d3656344 811 .needs_file = 1,
91d8f519 812 .needs_blkcg = 1,
d3656344 813 },
0463b6c5 814 [IORING_OP_SENDMSG] = {
d3656344
JA
815 .needs_mm = 1,
816 .needs_file = 1,
817 .unbound_nonreg_file = 1,
ff002b30 818 .needs_fs = 1,
8a72758c 819 .pollout = 1,
e8c2bc1f 820 .needs_async_data = 1,
91d8f519 821 .needs_blkcg = 1,
e8c2bc1f 822 .async_size = sizeof(struct io_async_msghdr),
d3656344 823 },
0463b6c5 824 [IORING_OP_RECVMSG] = {
d3656344
JA
825 .needs_mm = 1,
826 .needs_file = 1,
827 .unbound_nonreg_file = 1,
ff002b30 828 .needs_fs = 1,
8a72758c 829 .pollin = 1,
52de1fe1 830 .buffer_select = 1,
e8c2bc1f 831 .needs_async_data = 1,
91d8f519 832 .needs_blkcg = 1,
e8c2bc1f 833 .async_size = sizeof(struct io_async_msghdr),
d3656344 834 },
0463b6c5 835 [IORING_OP_TIMEOUT] = {
d3656344 836 .needs_mm = 1,
e8c2bc1f
JA
837 .needs_async_data = 1,
838 .async_size = sizeof(struct io_timeout_data),
d3656344 839 },
0463b6c5
PB
840 [IORING_OP_TIMEOUT_REMOVE] = {},
841 [IORING_OP_ACCEPT] = {
d3656344
JA
842 .needs_mm = 1,
843 .needs_file = 1,
844 .unbound_nonreg_file = 1,
f86cd20c 845 .file_table = 1,
8a72758c 846 .pollin = 1,
d3656344 847 },
0463b6c5
PB
848 [IORING_OP_ASYNC_CANCEL] = {},
849 [IORING_OP_LINK_TIMEOUT] = {
d3656344 850 .needs_mm = 1,
e8c2bc1f
JA
851 .needs_async_data = 1,
852 .async_size = sizeof(struct io_timeout_data),
d3656344 853 },
0463b6c5 854 [IORING_OP_CONNECT] = {
d3656344
JA
855 .needs_mm = 1,
856 .needs_file = 1,
857 .unbound_nonreg_file = 1,
8a72758c 858 .pollout = 1,
e8c2bc1f
JA
859 .needs_async_data = 1,
860 .async_size = sizeof(struct io_async_connect),
d3656344 861 },
0463b6c5 862 [IORING_OP_FALLOCATE] = {
d3656344 863 .needs_file = 1,
57f1a649 864 .needs_fsize = 1,
91d8f519 865 .needs_blkcg = 1,
d3656344 866 },
0463b6c5 867 [IORING_OP_OPENAT] = {
f86cd20c 868 .file_table = 1,
ff002b30 869 .needs_fs = 1,
91d8f519 870 .needs_blkcg = 1,
d3656344 871 },
0463b6c5 872 [IORING_OP_CLOSE] = {
fd2206e4
JA
873 .needs_file = 1,
874 .needs_file_no_error = 1,
f86cd20c 875 .file_table = 1,
91d8f519 876 .needs_blkcg = 1,
d3656344 877 },
0463b6c5 878 [IORING_OP_FILES_UPDATE] = {
d3656344 879 .needs_mm = 1,
f86cd20c 880 .file_table = 1,
d3656344 881 },
0463b6c5 882 [IORING_OP_STATX] = {
d3656344 883 .needs_mm = 1,
ff002b30 884 .needs_fs = 1,
5b0bbee4 885 .file_table = 1,
91d8f519 886 .needs_blkcg = 1,
d3656344 887 },
0463b6c5 888 [IORING_OP_READ] = {
3a6820f2
JA
889 .needs_mm = 1,
890 .needs_file = 1,
891 .unbound_nonreg_file = 1,
8a72758c 892 .pollin = 1,
bcda7baa 893 .buffer_select = 1,
91d8f519 894 .needs_blkcg = 1,
e8c2bc1f 895 .async_size = sizeof(struct io_async_rw),
3a6820f2 896 },
0463b6c5 897 [IORING_OP_WRITE] = {
3a6820f2
JA
898 .needs_mm = 1,
899 .needs_file = 1,
900 .unbound_nonreg_file = 1,
8a72758c 901 .pollout = 1,
57f1a649 902 .needs_fsize = 1,
91d8f519 903 .needs_blkcg = 1,
e8c2bc1f 904 .async_size = sizeof(struct io_async_rw),
3a6820f2 905 },
0463b6c5 906 [IORING_OP_FADVISE] = {
4840e418 907 .needs_file = 1,
91d8f519 908 .needs_blkcg = 1,
4840e418 909 },
0463b6c5 910 [IORING_OP_MADVISE] = {
c1ca757b 911 .needs_mm = 1,
91d8f519 912 .needs_blkcg = 1,
c1ca757b 913 },
0463b6c5 914 [IORING_OP_SEND] = {
fddaface
JA
915 .needs_mm = 1,
916 .needs_file = 1,
917 .unbound_nonreg_file = 1,
8a72758c 918 .pollout = 1,
91d8f519 919 .needs_blkcg = 1,
fddaface 920 },
0463b6c5 921 [IORING_OP_RECV] = {
fddaface
JA
922 .needs_mm = 1,
923 .needs_file = 1,
924 .unbound_nonreg_file = 1,
8a72758c 925 .pollin = 1,
bcda7baa 926 .buffer_select = 1,
91d8f519 927 .needs_blkcg = 1,
fddaface 928 },
0463b6c5 929 [IORING_OP_OPENAT2] = {
f86cd20c 930 .file_table = 1,
ff002b30 931 .needs_fs = 1,
91d8f519 932 .needs_blkcg = 1,
cebdb986 933 },
3e4827b0
JA
934 [IORING_OP_EPOLL_CTL] = {
935 .unbound_nonreg_file = 1,
936 .file_table = 1,
937 },
7d67af2c
PB
938 [IORING_OP_SPLICE] = {
939 .needs_file = 1,
940 .hash_reg_file = 1,
941 .unbound_nonreg_file = 1,
91d8f519 942 .needs_blkcg = 1,
ddf0322d
JA
943 },
944 [IORING_OP_PROVIDE_BUFFERS] = {},
067524e9 945 [IORING_OP_REMOVE_BUFFERS] = {},
f2a8d5c7
PB
946 [IORING_OP_TEE] = {
947 .needs_file = 1,
948 .hash_reg_file = 1,
949 .unbound_nonreg_file = 1,
950 },
d3656344
JA
951};
952
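/*
 * A sketch of how this table is meant to be consulted at submission time;
 * the control flow is illustrative, but the fields correspond to checks
 * made later in this file (e.g. allocating ->async_data of ->async_size
 * bytes when ->needs_async_data is set for the opcode).
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_async_data && !req->async_data)
 *		req->async_data = kmalloc(def->async_size, GFP_KERNEL);
 */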
2e0464d4
BM
953enum io_mem_account {
954 ACCT_LOCKED,
955 ACCT_PINNED,
956};
957
81b68a5c
PB
958static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
959 struct io_comp_state *cs);
78e19bbe 960static void io_cqring_fill_event(struct io_kiocb *req, long res);
ec9c02ad 961static void io_put_req(struct io_kiocb *req);
c40f6379 962static void io_double_put_req(struct io_kiocb *req);
978db57e 963static void __io_double_put_req(struct io_kiocb *req);
94ae5e77 964static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
7271ef3a 965static void __io_queue_linked_timeout(struct io_kiocb *req);
94ae5e77 966static void io_queue_linked_timeout(struct io_kiocb *req);
05f3fb3c
JA
967static int __io_sqe_files_update(struct io_ring_ctx *ctx,
968 struct io_uring_files_update *ip,
969 unsigned nr_args);
f56040b8 970static int io_prep_work_files(struct io_kiocb *req);
3ca405eb 971static void __io_clean_op(struct io_kiocb *req);
b41e9852
JA
972static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
973 int fd, struct file **out_file, bool fixed);
974static void __io_queue_sqe(struct io_kiocb *req,
f13fad7b
JA
975 const struct io_uring_sqe *sqe,
976 struct io_comp_state *cs);
4349f30e 977static void io_file_put_work(struct work_struct *work);
de0617e4 978
b63534c4
JA
979static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
980 struct iovec **iovec, struct iov_iter *iter,
981 bool needs_lock);
ff6165b2
JA
982static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
983 const struct iovec *fast_iov,
227c0c96 984 struct iov_iter *iter, bool force);
de0617e4 985
2b188cc1
JA
986static struct kmem_cache *req_cachep;
987
738277ad 988static const struct file_operations io_uring_fops __read_mostly;
2b188cc1
JA
989
990struct sock *io_uring_get_socket(struct file *file)
991{
992#if defined(CONFIG_UNIX)
993 if (file->f_op == &io_uring_fops) {
994 struct io_ring_ctx *ctx = file->private_data;
995
996 return ctx->ring_sock->sk;
997 }
998#endif
999 return NULL;
1000}
1001EXPORT_SYMBOL(io_uring_get_socket);
1002
3ca405eb
PB
1003static inline void io_clean_op(struct io_kiocb *req)
1004{
bb175342
PB
1005 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
1006 REQ_F_INFLIGHT))
3ca405eb
PB
1007 __io_clean_op(req);
1008}
1009
4349f30e 1010static void io_sq_thread_drop_mm(void)
c40f6379
JA
1011{
1012 struct mm_struct *mm = current->mm;
1013
1014 if (mm) {
1015 kthread_unuse_mm(mm);
1016 mmput(mm);
1017 }
1018}
1019
1020static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
1021{
1022 if (!current->mm) {
cbcf7214 1023 if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
2aede0e4
JA
1024 !ctx->sqo_task->mm ||
1025 !mmget_not_zero(ctx->sqo_task->mm)))
c40f6379 1026 return -EFAULT;
2aede0e4 1027 kthread_use_mm(ctx->sqo_task->mm);
c40f6379
JA
1028 }
1029
1030 return 0;
1031}
1032
1033static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
1034 struct io_kiocb *req)
1035{
1036 if (!io_op_defs[req->opcode].needs_mm)
1037 return 0;
1038 return __io_sq_thread_acquire_mm(ctx);
1039}
1040
91d8f519
DZ
1041static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
1042 struct cgroup_subsys_state **cur_css)
1043
1044{
1045#ifdef CONFIG_BLK_CGROUP
1046 /* puts the old one when swapping */
1047 if (*cur_css != ctx->sqo_blkcg_css) {
1048 kthread_associate_blkcg(ctx->sqo_blkcg_css);
1049 *cur_css = ctx->sqo_blkcg_css;
1050 }
1051#endif
1052}
1053
1054static void io_sq_thread_unassociate_blkcg(void)
1055{
1056#ifdef CONFIG_BLK_CGROUP
1057 kthread_associate_blkcg(NULL);
1058#endif
1059}
1060
c40f6379
JA
1061static inline void req_set_fail_links(struct io_kiocb *req)
1062{
1063 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1064 req->flags |= REQ_F_FAIL_LINK;
1065}
4a38aed2 1066
7cdaf587
XW
1067/*
1068 * Note: must call io_req_init_async() for the first time you
1069 * touch any members of io_wq_work.
1070 */
1071static inline void io_req_init_async(struct io_kiocb *req)
1072{
1073 if (req->flags & REQ_F_WORK_INITIALIZED)
1074 return;
1075
1076 memset(&req->work, 0, sizeof(req->work));
1077 req->flags |= REQ_F_WORK_INITIALIZED;
1078}
1079
0cdaf760
PB
1080static inline bool io_async_submit(struct io_ring_ctx *ctx)
1081{
1082 return ctx->flags & IORING_SETUP_SQPOLL;
1083}
1084
2b188cc1
JA
1085static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1086{
1087 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1088
0f158b4c 1089 complete(&ctx->ref_comp);
2b188cc1
JA
1090}
1091
8eb7e2d0
PB
1092static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1093{
1094 return !req->timeout.off;
1095}
1096
2b188cc1
JA
1097static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1098{
1099 struct io_ring_ctx *ctx;
78076bb6 1100 int hash_bits;
2b188cc1
JA
1101
1102 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1103 if (!ctx)
1104 return NULL;
1105
0ddf92e8
JA
1106 ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
1107 if (!ctx->fallback_req)
1108 goto err;
1109
78076bb6
JA
1110 /*
1111 * Use 5 bits less than the max cq entries; that should give us around
1112 * 32 entries per hash list if totally full and uniformly spread.
1113 */
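	/*
	 * For example, with p->cq_entries == 4096: ilog2(4096) == 12, minus 5
	 * gives 7 hash bits, i.e. 128 buckets, and 4096 / 128 == 32 entries
	 * per bucket when completely full.
	 */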
1114 hash_bits = ilog2(p->cq_entries);
1115 hash_bits -= 5;
1116 if (hash_bits <= 0)
1117 hash_bits = 1;
1118 ctx->cancel_hash_bits = hash_bits;
1119 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1120 GFP_KERNEL);
1121 if (!ctx->cancel_hash)
1122 goto err;
1123 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1124
21482896 1125 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
206aefde
JA
1126 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1127 goto err;
2b188cc1
JA
1128
1129 ctx->flags = p->flags;
90554200 1130 init_waitqueue_head(&ctx->sqo_sq_wait);
69fb2131 1131 INIT_LIST_HEAD(&ctx->sqd_list);
2b188cc1 1132 init_waitqueue_head(&ctx->cq_wait);
1d7bb1d5 1133 INIT_LIST_HEAD(&ctx->cq_overflow_list);
0f158b4c
JA
1134 init_completion(&ctx->ref_comp);
1135 init_completion(&ctx->sq_thread_comp);
5a2e745d 1136 idr_init(&ctx->io_buffer_idr);
071698e1 1137 idr_init(&ctx->personality_idr);
2b188cc1
JA
1138 mutex_init(&ctx->uring_lock);
1139 init_waitqueue_head(&ctx->wait);
1140 spin_lock_init(&ctx->completion_lock);
540e32a0 1141 INIT_LIST_HEAD(&ctx->iopoll_list);
de0617e4 1142 INIT_LIST_HEAD(&ctx->defer_list);
5262f567 1143 INIT_LIST_HEAD(&ctx->timeout_list);
fcb323cc
JA
1144 init_waitqueue_head(&ctx->inflight_wait);
1145 spin_lock_init(&ctx->inflight_lock);
1146 INIT_LIST_HEAD(&ctx->inflight_list);
4a38aed2
JA
1147 INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
1148 init_llist_head(&ctx->file_put_llist);
2b188cc1 1149 return ctx;
206aefde 1150err:
0ddf92e8
JA
1151 if (ctx->fallback_req)
1152 kmem_cache_free(req_cachep, ctx->fallback_req);
78076bb6 1153 kfree(ctx->cancel_hash);
206aefde
JA
1154 kfree(ctx);
1155 return NULL;
2b188cc1
JA
1156}
1157
9cf7c104 1158static bool req_need_defer(struct io_kiocb *req, u32 seq)
7adf4eaf 1159{
2bc9930e
JA
1160 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1161 struct io_ring_ctx *ctx = req->ctx;
a197f664 1162
9cf7c104 1163 return seq != ctx->cached_cq_tail
31af27c7 1164 + atomic_read(&ctx->cached_cq_overflow);
2bc9930e 1165 }
de0617e4 1166
9d858b21 1167 return false;
de0617e4
JA
1168}
1169
de0617e4 1170static void __io_commit_cqring(struct io_ring_ctx *ctx)
2b188cc1 1171{
75b28aff 1172 struct io_rings *rings = ctx->rings;
2b188cc1 1173
07910158
PB
1174 /* order cqe stores with ring update */
1175 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
2b188cc1 1176
07910158
PB
1177 if (wq_has_sleeper(&ctx->cq_wait)) {
1178 wake_up_interruptible(&ctx->cq_wait);
1179 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
2b188cc1
JA
1180 }
1181}
1182
51a4cc11
JA
1183/*
1184 * Returns true if we need to defer file table putting. This can only happen
1185 * from the error path with REQ_F_COMP_LOCKED set.
1186 */
1187static bool io_req_clean_work(struct io_kiocb *req)
18d9be1a 1188{
7cdaf587 1189 if (!(req->flags & REQ_F_WORK_INITIALIZED))
51a4cc11
JA
1190 return false;
1191
1192 req->flags &= ~REQ_F_WORK_INITIALIZED;
7cdaf587 1193
cccf0ee8
JA
1194 if (req->work.mm) {
1195 mmdrop(req->work.mm);
1196 req->work.mm = NULL;
1197 }
91d8f519
DZ
1198#ifdef CONFIG_BLK_CGROUP
1199 if (req->work.blkcg_css)
1200 css_put(req->work.blkcg_css);
1201#endif
cccf0ee8
JA
1202 if (req->work.creds) {
1203 put_cred(req->work.creds);
1204 req->work.creds = NULL;
1205 }
ff002b30
JA
1206 if (req->work.fs) {
1207 struct fs_struct *fs = req->work.fs;
1208
51a4cc11
JA
1209 if (req->flags & REQ_F_COMP_LOCKED)
1210 return true;
1211
ff002b30
JA
1212 spin_lock(&req->work.fs->lock);
1213 if (--fs->users)
1214 fs = NULL;
1215 spin_unlock(&req->work.fs->lock);
1216 if (fs)
1217 free_fs_struct(fs);
b65e0dd6 1218 req->work.fs = NULL;
ff002b30 1219 }
51a4cc11
JA
1220
1221 return false;
561fb04a
JA
1222}
1223
cbdcb435 1224static void io_prep_async_work(struct io_kiocb *req)
18d9be1a 1225{
d3656344 1226 const struct io_op_def *def = &io_op_defs[req->opcode];
54a91f3b 1227
16d59803
PB
1228 io_req_init_async(req);
1229
d3656344 1230 if (req->flags & REQ_F_ISREG) {
eefdf30f 1231 if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
8766dd51 1232 io_wq_hash_work(&req->work, file_inode(req->file));
d3656344
JA
1233 } else {
1234 if (def->unbound_nonreg_file)
3529d8c2 1235 req->work.flags |= IO_WQ_WORK_UNBOUND;
54a91f3b 1236 }
dca9cf8b
PB
1237 if (!req->work.mm && def->needs_mm) {
1238 mmgrab(current->mm);
1239 req->work.mm = current->mm;
1240 }
91d8f519
DZ
1241#ifdef CONFIG_BLK_CGROUP
1242 if (!req->work.blkcg_css && def->needs_blkcg) {
1243 rcu_read_lock();
1244 req->work.blkcg_css = blkcg_css();
1245 /*
1246 * This should be rare, either the cgroup is dying or the task
1247 * is moving cgroups. Just punt to root for the handful of ios.
1248 */
1249 if (!css_tryget_online(req->work.blkcg_css))
1250 req->work.blkcg_css = NULL;
1251 rcu_read_unlock();
1252 }
1253#endif
dca9cf8b
PB
1254 if (!req->work.creds)
1255 req->work.creds = get_current_cred();
1256 if (!req->work.fs && def->needs_fs) {
1257 spin_lock(&current->fs->lock);
1258 if (!current->fs->in_exec) {
1259 req->work.fs = current->fs;
1260 req->work.fs->users++;
1261 } else {
1262 req->work.flags |= IO_WQ_WORK_CANCEL;
1263 }
1264 spin_unlock(&current->fs->lock);
1265 }
57f1a649
PB
1266 if (def->needs_fsize)
1267 req->work.fsize = rlimit(RLIMIT_FSIZE);
1268 else
1269 req->work.fsize = RLIM_INFINITY;
561fb04a 1270}
cccf0ee8 1271
cbdcb435 1272static void io_prep_async_link(struct io_kiocb *req)
561fb04a 1273{
cbdcb435 1274 struct io_kiocb *cur;
54a91f3b 1275
cbdcb435
PB
1276 io_prep_async_work(req);
1277 if (req->flags & REQ_F_LINK_HEAD)
1278 list_for_each_entry(cur, &req->link_list, link_list)
1279 io_prep_async_work(cur);
561fb04a
JA
1280}
1281
7271ef3a 1282static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
561fb04a 1283{
a197f664 1284 struct io_ring_ctx *ctx = req->ctx;
cbdcb435 1285 struct io_kiocb *link = io_prep_linked_timeout(req);
561fb04a 1286
8766dd51
PB
1287 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1288 &req->work, req->flags);
1289 io_wq_enqueue(ctx->io_wq, &req->work);
7271ef3a 1290 return link;
18d9be1a
JA
1291}
1292
cbdcb435
PB
1293static void io_queue_async_work(struct io_kiocb *req)
1294{
7271ef3a
JA
1295 struct io_kiocb *link;
1296
cbdcb435
PB
1297 /* init ->work of the whole link before punting */
1298 io_prep_async_link(req);
7271ef3a
JA
1299 link = __io_queue_async_work(req);
1300
1301 if (link)
1302 io_queue_linked_timeout(link);
cbdcb435
PB
1303}
1304
5262f567
JA
1305static void io_kill_timeout(struct io_kiocb *req)
1306{
e8c2bc1f 1307 struct io_timeout_data *io = req->async_data;
5262f567
JA
1308 int ret;
1309
e8c2bc1f 1310 ret = hrtimer_try_to_cancel(&io->timer);
5262f567 1311 if (ret != -1) {
01cec8c1
PB
1312 atomic_set(&req->ctx->cq_timeouts,
1313 atomic_read(&req->ctx->cq_timeouts) + 1);
135fcde8 1314 list_del_init(&req->timeout.list);
f0e20b89 1315 req->flags |= REQ_F_COMP_LOCKED;
78e19bbe 1316 io_cqring_fill_event(req, 0);
ec9c02ad 1317 io_put_req(req);
5262f567
JA
1318 }
1319}
1320
f3606e3a
JA
1321static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
1322{
1323 struct io_ring_ctx *ctx = req->ctx;
1324
1325 if (!tsk || req->task == tsk)
1326 return true;
534ca6d6
JA
1327 if (ctx->flags & IORING_SETUP_SQPOLL) {
1328 if (ctx->sq_data && req->task == ctx->sq_data->thread)
1329 return true;
1330 }
f3606e3a
JA
1331 return false;
1332}
1333
76e1b642
JA
1334/*
1335 * Returns true if we found and killed one or more timeouts
1336 */
1337static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
5262f567
JA
1338{
1339 struct io_kiocb *req, *tmp;
76e1b642 1340 int canceled = 0;
5262f567
JA
1341
1342 spin_lock_irq(&ctx->completion_lock);
f3606e3a 1343 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
76e1b642 1344 if (io_task_match(req, tsk)) {
f3606e3a 1345 io_kill_timeout(req);
76e1b642
JA
1346 canceled++;
1347 }
f3606e3a 1348 }
5262f567 1349 spin_unlock_irq(&ctx->completion_lock);
76e1b642 1350 return canceled != 0;
5262f567
JA
1351}
1352
04518945 1353static void __io_queue_deferred(struct io_ring_ctx *ctx)
de0617e4 1354{
04518945 1355 do {
27dc8338
PB
1356 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1357 struct io_defer_entry, list);
7271ef3a 1358 struct io_kiocb *link;
de0617e4 1359
9cf7c104 1360 if (req_need_defer(de->req, de->seq))
04518945 1361 break;
27dc8338 1362 list_del_init(&de->list);
cbdcb435 1363 /* punt-init is done before queueing for defer */
7271ef3a
JA
1364 link = __io_queue_async_work(de->req);
1365 if (link) {
1366 __io_queue_linked_timeout(link);
1367 /* drop submission reference */
1368 link->flags |= REQ_F_COMP_LOCKED;
1369 io_put_req(link);
1370 }
27dc8338 1371 kfree(de);
04518945
PB
1372 } while (!list_empty(&ctx->defer_list));
1373}
1374
360428f8 1375static void io_flush_timeouts(struct io_ring_ctx *ctx)
de0617e4 1376{
360428f8
PB
1377 while (!list_empty(&ctx->timeout_list)) {
1378 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
135fcde8 1379 struct io_kiocb, timeout.list);
de0617e4 1380
8eb7e2d0 1381 if (io_is_timeout_noseq(req))
360428f8 1382 break;
bfe68a22
PB
1383 if (req->timeout.target_seq != ctx->cached_cq_tail
1384 - atomic_read(&ctx->cq_timeouts))
360428f8 1385 break;
bfe68a22 1386
135fcde8 1387 list_del_init(&req->timeout.list);
5262f567 1388 io_kill_timeout(req);
360428f8
PB
1389 }
1390}
5262f567 1391
360428f8
PB
1392static void io_commit_cqring(struct io_ring_ctx *ctx)
1393{
1394 io_flush_timeouts(ctx);
de0617e4
JA
1395 __io_commit_cqring(ctx);
1396
04518945
PB
1397 if (unlikely(!list_empty(&ctx->defer_list)))
1398 __io_queue_deferred(ctx);
de0617e4
JA
1399}
1400
90554200
JA
1401static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1402{
1403 struct io_rings *r = ctx->rings;
1404
1405 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1406}
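/*
 * Worked example of the unsigned arithmetic above: with sq_ring_entries == 8,
 * tail == 258 and cached_sq_head == 250 the difference is 8, so the ring is
 * full; the subtraction stays correct even once the 32-bit counters wrap.
 */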
1407
2b188cc1
JA
1408static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1409{
75b28aff 1410 struct io_rings *rings = ctx->rings;
2b188cc1
JA
1411 unsigned tail;
1412
1413 tail = ctx->cached_cq_tail;
115e12e5
SB
1414 /*
1415 * writes to the cq entry need to come after reading head; the
1416 * control dependency is enough as we're using WRITE_ONCE to
1417 * fill the cq entry
1418 */
75b28aff 1419 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
2b188cc1
JA
1420 return NULL;
1421
1422 ctx->cached_cq_tail++;
75b28aff 1423 return &rings->cqes[tail & ctx->cq_mask];
2b188cc1
JA
1424}
1425
f2842ab5
JA
1426static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1427{
f0b493e6
JA
1428 if (!ctx->cq_ev_fd)
1429 return false;
7e55a19c
SG
1430 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1431 return false;
f2842ab5
JA
1432 if (!ctx->eventfd_async)
1433 return true;
b41e9852 1434 return io_wq_current_is_worker();
f2842ab5
JA
1435}
1436
b41e9852 1437static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1d7bb1d5
JA
1438{
1439 if (waitqueue_active(&ctx->wait))
1440 wake_up(&ctx->wait);
534ca6d6
JA
1441 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1442 wake_up(&ctx->sq_data->wait);
b41e9852 1443 if (io_should_trigger_evfd(ctx))
1d7bb1d5
JA
1444 eventfd_signal(ctx->cq_ev_fd, 1);
1445}
1446
46930143
PB
1447static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
1448{
1449 if (list_empty(&ctx->cq_overflow_list)) {
1450 clear_bit(0, &ctx->sq_check_overflow);
1451 clear_bit(0, &ctx->cq_check_overflow);
1452 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1453 }
1454}
1455
e6c8aa9a
JA
1456static inline bool io_match_files(struct io_kiocb *req,
1457 struct files_struct *files)
1458{
1459 if (!files)
1460 return true;
1461 if (req->flags & REQ_F_WORK_INITIALIZED)
1462 return req->work.files == files;
1463 return false;
1464}
1465
c4a2ed72 1466/* Returns true if there are no backlogged entries after the flush */
e6c8aa9a
JA
1467static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1468 struct task_struct *tsk,
1469 struct files_struct *files)
1d7bb1d5
JA
1470{
1471 struct io_rings *rings = ctx->rings;
e6c8aa9a 1472 struct io_kiocb *req, *tmp;
1d7bb1d5 1473 struct io_uring_cqe *cqe;
1d7bb1d5
JA
1474 unsigned long flags;
1475 LIST_HEAD(list);
1476
1477 if (!force) {
1478 if (list_empty_careful(&ctx->cq_overflow_list))
c4a2ed72 1479 return true;
1d7bb1d5
JA
1480 if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
1481 rings->cq_ring_entries))
c4a2ed72 1482 return false;
1d7bb1d5
JA
1483 }
1484
1485 spin_lock_irqsave(&ctx->completion_lock, flags);
1486
1487 /* if force is set, the ring is going away. always drop after that */
1488 if (force)
69b3e546 1489 ctx->cq_overflow_flushed = 1;
1d7bb1d5 1490
c4a2ed72 1491 cqe = NULL;
e6c8aa9a
JA
1492 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
1493 if (tsk && req->task != tsk)
1494 continue;
1495 if (!io_match_files(req, files))
1496 continue;
1497
1d7bb1d5
JA
1498 cqe = io_get_cqring(ctx);
1499 if (!cqe && !force)
1500 break;
1501
40d8ddd4 1502 list_move(&req->compl.list, &list);
1d7bb1d5
JA
1503 if (cqe) {
1504 WRITE_ONCE(cqe->user_data, req->user_data);
1505 WRITE_ONCE(cqe->res, req->result);
0f7e466b 1506 WRITE_ONCE(cqe->flags, req->compl.cflags);
1d7bb1d5
JA
1507 } else {
1508 WRITE_ONCE(ctx->rings->cq_overflow,
1509 atomic_inc_return(&ctx->cached_cq_overflow));
1510 }
1511 }
1512
1513 io_commit_cqring(ctx);
46930143
PB
1514 io_cqring_mark_overflow(ctx);
1515
1d7bb1d5
JA
1516 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1517 io_cqring_ev_posted(ctx);
1518
1519 while (!list_empty(&list)) {
40d8ddd4
PB
1520 req = list_first_entry(&list, struct io_kiocb, compl.list);
1521 list_del(&req->compl.list);
ec9c02ad 1522 io_put_req(req);
1d7bb1d5 1523 }
c4a2ed72
JA
1524
1525 return cqe != NULL;
1d7bb1d5
JA
1526}
1527
bcda7baa 1528static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
2b188cc1 1529{
78e19bbe 1530 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1531 struct io_uring_cqe *cqe;
1532
78e19bbe 1533 trace_io_uring_complete(ctx, req->user_data, res);
51c3ff62 1534
2b188cc1
JA
1535 /*
1536 * If we can't get a cq entry, userspace overflowed the
1537 * submission (by quite a lot). Increment the overflow count in
1538 * the ring.
1539 */
1540 cqe = io_get_cqring(ctx);
1d7bb1d5 1541 if (likely(cqe)) {
78e19bbe 1542 WRITE_ONCE(cqe->user_data, req->user_data);
2b188cc1 1543 WRITE_ONCE(cqe->res, res);
bcda7baa 1544 WRITE_ONCE(cqe->flags, cflags);
0f212204
JA
1545 } else if (ctx->cq_overflow_flushed || req->task->io_uring->in_idle) {
1546 /*
1547 * If we're in ring overflow flush mode, or in task cancel mode,
1548 * then we cannot store the request for later flushing; we need
1549 * to drop it on the floor.
1550 */
498ccd9e
JA
1551 WRITE_ONCE(ctx->rings->cq_overflow,
1552 atomic_inc_return(&ctx->cached_cq_overflow));
1d7bb1d5 1553 } else {
ad3eb2c8
JA
1554 if (list_empty(&ctx->cq_overflow_list)) {
1555 set_bit(0, &ctx->sq_check_overflow);
1556 set_bit(0, &ctx->cq_check_overflow);
6d5f9049 1557 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
ad3eb2c8 1558 }
40d8ddd4 1559 io_clean_op(req);
1d7bb1d5 1560 req->result = res;
0f7e466b 1561 req->compl.cflags = cflags;
40d8ddd4
PB
1562 refcount_inc(&req->refs);
1563 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
2b188cc1
JA
1564 }
1565}
1566
bcda7baa
JA
1567static void io_cqring_fill_event(struct io_kiocb *req, long res)
1568{
1569 __io_cqring_fill_event(req, res, 0);
1570}
1571
e1e16097 1572static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
2b188cc1 1573{
78e19bbe 1574 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1575 unsigned long flags;
1576
1577 spin_lock_irqsave(&ctx->completion_lock, flags);
bcda7baa 1578 __io_cqring_fill_event(req, res, cflags);
2b188cc1
JA
1579 io_commit_cqring(ctx);
1580 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1581
8c838788 1582 io_cqring_ev_posted(ctx);
2b188cc1
JA
1583}
1584
229a7b63 1585static void io_submit_flush_completions(struct io_comp_state *cs)
bcda7baa 1586{
229a7b63
JA
1587 struct io_ring_ctx *ctx = cs->ctx;
1588
1589 spin_lock_irq(&ctx->completion_lock);
1590 while (!list_empty(&cs->list)) {
1591 struct io_kiocb *req;
1592
3ca405eb
PB
1593 req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
1594 list_del(&req->compl.list);
0f7e466b 1595 __io_cqring_fill_event(req, req->result, req->compl.cflags);
229a7b63
JA
1596 if (!(req->flags & REQ_F_LINK_HEAD)) {
1597 req->flags |= REQ_F_COMP_LOCKED;
1598 io_put_req(req);
1599 } else {
1600 spin_unlock_irq(&ctx->completion_lock);
1601 io_put_req(req);
1602 spin_lock_irq(&ctx->completion_lock);
1603 }
1604 }
1605 io_commit_cqring(ctx);
1606 spin_unlock_irq(&ctx->completion_lock);
1607
1608 io_cqring_ev_posted(ctx);
1609 cs->nr = 0;
1610}
1611
1612static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
1613 struct io_comp_state *cs)
1614{
1615 if (!cs) {
1616 io_cqring_add_event(req, res, cflags);
1617 io_put_req(req);
1618 } else {
3ca405eb 1619 io_clean_op(req);
229a7b63 1620 req->result = res;
0f7e466b 1621 req->compl.cflags = cflags;
3ca405eb 1622 list_add_tail(&req->compl.list, &cs->list);
229a7b63
JA
1623 if (++cs->nr >= 32)
1624 io_submit_flush_completions(cs);
1625 }
e1e16097
JA
1626}
1627
1628static void io_req_complete(struct io_kiocb *req, long res)
bcda7baa 1629{
229a7b63 1630 __io_req_complete(req, res, 0, NULL);
bcda7baa
JA
1631}
1632
0ddf92e8
JA
1633static inline bool io_is_fallback_req(struct io_kiocb *req)
1634{
1635 return req == (struct io_kiocb *)
1636 ((unsigned long) req->ctx->fallback_req & ~1UL);
1637}
1638
1639static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
1640{
1641 struct io_kiocb *req;
1642
1643 req = ctx->fallback_req;
dd461af6 1644 if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
0ddf92e8
JA
1645 return req;
1646
1647 return NULL;
1648}
1649
0553b8bd
PB
1650static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
1651 struct io_submit_state *state)
2b188cc1 1652{
fd6fab2c 1653 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
2b188cc1
JA
1654 struct io_kiocb *req;
1655
f6b6c7d6 1656 if (!state->free_reqs) {
2579f913
JA
1657 size_t sz;
1658 int ret;
1659
1660 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
fd6fab2c
JA
1661 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
1662
1663 /*
1664 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1665 * retry single alloc to be on the safe side.
1666 */
1667 if (unlikely(ret <= 0)) {
1668 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1669 if (!state->reqs[0])
0ddf92e8 1670 goto fallback;
fd6fab2c
JA
1671 ret = 1;
1672 }
2579f913 1673 state->free_reqs = ret - 1;
6c8a3134 1674 req = state->reqs[ret - 1];
2579f913 1675 } else {
2579f913 1676 state->free_reqs--;
6c8a3134 1677 req = state->reqs[state->free_reqs];
2b188cc1
JA
1678 }
1679
2579f913 1680 return req;
0ddf92e8 1681fallback:
0553b8bd 1682 return io_get_fallback_req(ctx);
2b188cc1
JA
1683}
1684
8da11c19
PB
1685static inline void io_put_file(struct io_kiocb *req, struct file *file,
1686 bool fixed)
1687{
1688 if (fixed)
05589553 1689 percpu_ref_put(req->fixed_file_refs);
8da11c19
PB
1690 else
1691 fput(file);
1692}
1693
51a4cc11 1694static bool io_dismantle_req(struct io_kiocb *req)
2b188cc1 1695{
3ca405eb 1696 io_clean_op(req);
929a3af9 1697
e8c2bc1f
JA
1698 if (req->async_data)
1699 kfree(req->async_data);
8da11c19
PB
1700 if (req->file)
1701 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
fcb323cc 1702
51a4cc11 1703 return io_req_clean_work(req);
e65ef56d
JA
1704}
1705
51a4cc11 1706static void __io_free_req_finish(struct io_kiocb *req)
c6ca97b3 1707{
0f212204 1708 struct io_uring_task *tctx = req->task->io_uring;
51a4cc11 1709 struct io_ring_ctx *ctx = req->ctx;
c6ca97b3 1710
0f212204
JA
1711 atomic_long_inc(&tctx->req_complete);
1712 if (tctx->in_idle)
1713 wake_up(&tctx->wait);
e3bc8e9d
JA
1714 put_task_struct(req->task);
1715
b1e50e54
PB
1716 if (likely(!io_is_fallback_req(req)))
1717 kmem_cache_free(req_cachep, req);
1718 else
ecfc5177
PB
1719 clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
1720 percpu_ref_put(&ctx->refs);
e65ef56d
JA
1721}
1722
51a4cc11
JA
1723static void io_req_task_file_table_put(struct callback_head *cb)
1724{
1725 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
1726 struct fs_struct *fs = req->work.fs;
1727
1728 spin_lock(&req->work.fs->lock);
1729 if (--fs->users)
1730 fs = NULL;
1731 spin_unlock(&req->work.fs->lock);
1732 if (fs)
1733 free_fs_struct(fs);
1734 req->work.fs = NULL;
1735 __io_free_req_finish(req);
1736}
1737
1738static void __io_free_req(struct io_kiocb *req)
1739{
1740 if (!io_dismantle_req(req)) {
1741 __io_free_req_finish(req);
1742 } else {
1743 int ret;
1744
1745 init_task_work(&req->task_work, io_req_task_file_table_put);
1746 ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
1747 if (unlikely(ret)) {
1748 struct task_struct *tsk;
1749
1750 tsk = io_wq_get_task(req->ctx->io_wq);
1751 task_work_add(tsk, &req->task_work, 0);
1752 }
1753 }
1754}
1755
a197f664 1756static bool io_link_cancel_timeout(struct io_kiocb *req)
2665abfd 1757{
e8c2bc1f 1758 struct io_timeout_data *io = req->async_data;
a197f664 1759 struct io_ring_ctx *ctx = req->ctx;
2665abfd
JA
1760 int ret;
1761
e8c2bc1f 1762 ret = hrtimer_try_to_cancel(&io->timer);
2665abfd 1763 if (ret != -1) {
78e19bbe 1764 io_cqring_fill_event(req, -ECANCELED);
2665abfd 1765 io_commit_cqring(ctx);
dea3b49c 1766 req->flags &= ~REQ_F_LINK_HEAD;
ec9c02ad 1767 io_put_req(req);
2665abfd
JA
1768 return true;
1769 }
1770
1771 return false;
e65ef56d
JA
1772}
1773
ab0b6451 1774static bool __io_kill_linked_timeout(struct io_kiocb *req)
9e645e11 1775{
7c86ffee 1776 struct io_kiocb *link;
ab0b6451 1777 bool wake_ev;
7c86ffee
PB
1778
1779 if (list_empty(&req->link_list))
ab0b6451 1780 return false;
7c86ffee
PB
1781 link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
1782 if (link->opcode != IORING_OP_LINK_TIMEOUT)
ab0b6451 1783 return false;
7c86ffee
PB
1784
1785 list_del_init(&link->link_list);
9b7adba9 1786 link->flags |= REQ_F_COMP_LOCKED;
7c86ffee
PB
1787 wake_ev = io_link_cancel_timeout(link);
1788 req->flags &= ~REQ_F_LINK_TIMEOUT;
ab0b6451
JA
1789 return wake_ev;
1790}
1791
1792static void io_kill_linked_timeout(struct io_kiocb *req)
9e645e11 1793{
2665abfd 1794 struct io_ring_ctx *ctx = req->ctx;
ab0b6451 1795 bool wake_ev;
9e645e11 1796
ab0b6451
JA
1797 if (!(req->flags & REQ_F_COMP_LOCKED)) {
1798 unsigned long flags;
1799
1800 spin_lock_irqsave(&ctx->completion_lock, flags);
1801 wake_ev = __io_kill_linked_timeout(req);
7c86ffee 1802 spin_unlock_irqrestore(&ctx->completion_lock, flags);
ab0b6451
JA
1803 } else {
1804 wake_ev = __io_kill_linked_timeout(req);
1805 }
1806
7c86ffee
PB
1807 if (wake_ev)
1808 io_cqring_ev_posted(ctx);
1809}
1810
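/*
 * Illustrative note (editor's addition, not part of the original source):
 * with a chain such as IORING_OP_READV linked to IORING_OP_LINK_TIMEOUT,
 * the paths above run when the read completes first - the armed hrtimer is
 * cancelled and the timeout request is completed with -ECANCELED. If the
 * timer fires first instead, the timeout handler elsewhere in this file
 * cancels the read.
 */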
9b5f7bd9 1811static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
7c86ffee
PB
1812{
1813 struct io_kiocb *nxt;
4d7dd462 1814
9e645e11
JA
1815 /*
1816 * The list should never be empty when we are called here. But it could
1817 * potentially happen if the chain is messed up, so check to be on the
1818 * safe side.
1819 */
7c86ffee 1820 if (unlikely(list_empty(&req->link_list)))
9b5f7bd9 1821 return NULL;
2665abfd 1822
7c86ffee
PB
1823 nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
1824 list_del_init(&req->link_list);
1825 if (!list_empty(&nxt->link_list))
1826 nxt->flags |= REQ_F_LINK_HEAD;
9b5f7bd9 1827 return nxt;
9e645e11
JA
1828}
1829
1830/*
dea3b49c 1831 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
9e645e11 1832 */
7c86ffee 1833static void __io_fail_links(struct io_kiocb *req)
9e645e11 1834{
2665abfd 1835 struct io_ring_ctx *ctx = req->ctx;
9e645e11
JA
1836
1837 while (!list_empty(&req->link_list)) {
4493233e
PB
1838 struct io_kiocb *link = list_first_entry(&req->link_list,
1839 struct io_kiocb, link_list);
9e645e11 1840
4493233e 1841 list_del_init(&link->link_list);
c826bd7a 1842 trace_io_uring_fail_link(req, link);
2665abfd 1843
7c86ffee 1844 io_cqring_fill_event(link, -ECANCELED);
9b7adba9 1845 link->flags |= REQ_F_COMP_LOCKED;
7c86ffee 1846 __io_double_put_req(link);
5d960724 1847 req->flags &= ~REQ_F_LINK_TIMEOUT;
9e645e11 1848 }
2665abfd
JA
1849
1850 io_commit_cqring(ctx);
2665abfd 1851 io_cqring_ev_posted(ctx);
9e645e11
JA
1852}
1853
7c86ffee 1854static void io_fail_links(struct io_kiocb *req)
9e645e11 1855{
7c86ffee 1856 struct io_ring_ctx *ctx = req->ctx;
2665abfd 1857
7c86ffee 1858 if (!(req->flags & REQ_F_COMP_LOCKED)) {
2665abfd
JA
1859 unsigned long flags;
1860
2665abfd 1861 spin_lock_irqsave(&ctx->completion_lock, flags);
7c86ffee 1862 __io_fail_links(req);
2665abfd
JA
1863 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1864 } else {
7c86ffee 1865 __io_fail_links(req);
9e645e11
JA
1866 }
1867
2665abfd 1868 io_cqring_ev_posted(ctx);
9e645e11
JA
1869}
1870
3fa5e0f3 1871static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
c69f8dbe 1872{
9b0d911a 1873 req->flags &= ~REQ_F_LINK_HEAD;
7c86ffee
PB
1874 if (req->flags & REQ_F_LINK_TIMEOUT)
1875 io_kill_linked_timeout(req);
944e58bf 1876
9e645e11
JA
1877 /*
1878 * If LINK is set, we have dependent requests in this chain. If we
1879 * didn't fail this request, queue the first one up, moving any other
1880 * dependencies to the next request. In case of failure, fail the rest
1881 * of the chain.
1882 */
9b5f7bd9
PB
1883 if (likely(!(req->flags & REQ_F_FAIL_LINK)))
1884 return io_req_link_next(req);
1885 io_fail_links(req);
1886 return NULL;
4d7dd462 1887}
9e645e11 1888
3fa5e0f3
PB
1889static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1890{
1891 if (likely(!(req->flags & REQ_F_LINK_HEAD)))
1892 return NULL;
1893 return __io_req_find_next(req);
1894}
1895
fd7d6de2
JA
1896static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
1897 bool twa_signal_ok)
c2c4c83c
JA
1898{
1899 struct task_struct *tsk = req->task;
1900 struct io_ring_ctx *ctx = req->ctx;
0ba9c9ed 1901 int ret, notify;
c2c4c83c 1902
6200b0ae
JA
1903 if (tsk->flags & PF_EXITING)
1904 return -ESRCH;
1905
c2c4c83c 1906 /*
0ba9c9ed
JA
1907 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
1908 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
1909 * processing task_work. There's no reliable way to tell if TWA_RESUME
1910 * will do the job.
c2c4c83c 1911 */
0ba9c9ed 1912 notify = 0;
fd7d6de2 1913 if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
c2c4c83c
JA
1914 notify = TWA_SIGNAL;
1915
1916 ret = task_work_add(tsk, cb, notify);
1917 if (!ret)
1918 wake_up_process(tsk);
0ba9c9ed 1919
c2c4c83c
JA
1920 return ret;
1921}
1922
c40f6379
JA
1923static void __io_req_task_cancel(struct io_kiocb *req, int error)
1924{
1925 struct io_ring_ctx *ctx = req->ctx;
1926
1927 spin_lock_irq(&ctx->completion_lock);
1928 io_cqring_fill_event(req, error);
1929 io_commit_cqring(ctx);
1930 spin_unlock_irq(&ctx->completion_lock);
1931
1932 io_cqring_ev_posted(ctx);
1933 req_set_fail_links(req);
1934 io_double_put_req(req);
1935}
1936
1937static void io_req_task_cancel(struct callback_head *cb)
1938{
1939 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
87ceb6a6 1940 struct io_ring_ctx *ctx = req->ctx;
c40f6379
JA
1941
1942 __io_req_task_cancel(req, -ECANCELED);
87ceb6a6 1943 percpu_ref_put(&ctx->refs);
c40f6379
JA
1944}
1945
1946static void __io_req_task_submit(struct io_kiocb *req)
1947{
1948 struct io_ring_ctx *ctx = req->ctx;
1949
c40f6379
JA
1950 if (!__io_sq_thread_acquire_mm(ctx)) {
1951 mutex_lock(&ctx->uring_lock);
1952 __io_queue_sqe(req, NULL, NULL);
1953 mutex_unlock(&ctx->uring_lock);
1954 } else {
1955 __io_req_task_cancel(req, -EFAULT);
1956 }
1957}
1958
1959static void io_req_task_submit(struct callback_head *cb)
1960{
1961 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
6d816e08 1962 struct io_ring_ctx *ctx = req->ctx;
c40f6379
JA
1963
1964 __io_req_task_submit(req);
6d816e08 1965 percpu_ref_put(&ctx->refs);
c40f6379
JA
1966}
1967
1968static void io_req_task_queue(struct io_kiocb *req)
1969{
c40f6379
JA
1970 int ret;
1971
1972 init_task_work(&req->task_work, io_req_task_submit);
6d816e08 1973 percpu_ref_get(&req->ctx->refs);
c40f6379 1974
fd7d6de2 1975 ret = io_req_task_work_add(req, &req->task_work, true);
c40f6379 1976 if (unlikely(ret)) {
c2c4c83c
JA
1977 struct task_struct *tsk;
1978
c40f6379
JA
1979 init_task_work(&req->task_work, io_req_task_cancel);
1980 tsk = io_wq_get_task(req->ctx->io_wq);
c2c4c83c
JA
1981 task_work_add(tsk, &req->task_work, 0);
1982 wake_up_process(tsk);
c40f6379 1983 }
c40f6379
JA
1984}
1985
c3524383 1986static void io_queue_next(struct io_kiocb *req)
c69f8dbe 1987{
9b5f7bd9 1988 struct io_kiocb *nxt = io_req_find_next(req);
944e58bf
PB
1989
1990 if (nxt)
906a8c3f 1991 io_req_task_queue(nxt);
c69f8dbe
JL
1992}
1993
c3524383 1994static void io_free_req(struct io_kiocb *req)
7a743e22 1995{
c3524383
PB
1996 io_queue_next(req);
1997 __io_free_req(req);
1998}
8766dd51 1999
2d6500d4
PB
2000struct req_batch {
2001 void *reqs[IO_IOPOLL_BATCH];
2002 int to_free;
7a743e22 2003
5af1d13e
PB
2004 struct task_struct *task;
2005 int task_refs;
2d6500d4
PB
2006};
2007
5af1d13e
PB
2008static inline void io_init_req_batch(struct req_batch *rb)
2009{
2010 rb->to_free = 0;
2011 rb->task_refs = 0;
2012 rb->task = NULL;
2013}
2014
2d6500d4
PB
2015static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
2016 struct req_batch *rb)
2017{
2018 kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
2019 percpu_ref_put_many(&ctx->refs, rb->to_free);
2020 rb->to_free = 0;
2021}
2022
2023static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2024 struct req_batch *rb)
2025{
2026 if (rb->to_free)
2027 __io_req_free_batch_flush(ctx, rb);
5af1d13e 2028 if (rb->task) {
0f212204 2029 atomic_long_add(rb->task_refs, &rb->task->io_uring->req_complete);
5af1d13e
PB
2030 put_task_struct_many(rb->task, rb->task_refs);
2031 rb->task = NULL;
2032 }
2d6500d4
PB
2033}
2034
2035static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
2036{
2037 if (unlikely(io_is_fallback_req(req))) {
2038 io_free_req(req);
2039 return;
2040 }
2041 if (req->flags & REQ_F_LINK_HEAD)
2042 io_queue_next(req);
2043
e3bc8e9d 2044 if (req->task != rb->task) {
0f212204
JA
2045 if (rb->task) {
2046 atomic_long_add(rb->task_refs, &rb->task->io_uring->req_complete);
e3bc8e9d 2047 put_task_struct_many(rb->task, rb->task_refs);
0f212204 2048 }
e3bc8e9d
JA
2049 rb->task = req->task;
2050 rb->task_refs = 0;
5af1d13e 2051 }
e3bc8e9d 2052 rb->task_refs++;
5af1d13e 2053
51a4cc11 2054 WARN_ON_ONCE(io_dismantle_req(req));
2d6500d4
PB
2055 rb->reqs[rb->to_free++] = req;
2056 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
2057 __io_req_free_batch_flush(req->ctx, rb);
7a743e22
PB
2058}
2059
ba816ad6
JA
2060/*
2061 * Drop reference to request, return next in chain (if there is one) if this
2062 * was the last reference to this request.
2063 */
9b5f7bd9 2064static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 2065{
9b5f7bd9
PB
2066 struct io_kiocb *nxt = NULL;
2067
2a44f467 2068 if (refcount_dec_and_test(&req->refs)) {
9b5f7bd9 2069 nxt = io_req_find_next(req);
4d7dd462 2070 __io_free_req(req);
2a44f467 2071 }
9b5f7bd9 2072 return nxt;
2b188cc1
JA
2073}
2074
e65ef56d
JA
2075static void io_put_req(struct io_kiocb *req)
2076{
2077 if (refcount_dec_and_test(&req->refs))
2078 io_free_req(req);
2b188cc1
JA
2079}
2080
f4db7182 2081static struct io_wq_work *io_steal_work(struct io_kiocb *req)
7a743e22 2082{
6df1db6b 2083 struct io_kiocb *nxt;
f4db7182 2084
7a743e22 2085 /*
f4db7182
PB
2086 * A ref is owned by io-wq, in whose context we are running. So if that's
2087 * the last one, it's safe to steal the next work. False negatives are OK;
2088 * the work will just be re-punted async in io_put_work().
7a743e22 2089 */
f4db7182
PB
2090 if (refcount_read(&req->refs) != 1)
2091 return NULL;
7a743e22 2092
9b5f7bd9 2093 nxt = io_req_find_next(req);
6df1db6b 2094 return nxt ? &nxt->work : NULL;
7a743e22
PB
2095}
2096
978db57e
JA
2097/*
2098 * Must only be used if we don't need to care about links, usually from
2099 * within the completion handling itself.
2100 */
2101static void __io_double_put_req(struct io_kiocb *req)
78e19bbe
JA
2102{
2103 /* drop both submit and complete references */
2104 if (refcount_sub_and_test(2, &req->refs))
2105 __io_free_req(req);
2106}
2107
978db57e
JA
2108static void io_double_put_req(struct io_kiocb *req)
2109{
2110 /* drop both submit and complete references */
2111 if (refcount_sub_and_test(2, &req->refs))
2112 io_free_req(req);
2113}
2114
1d7bb1d5 2115static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
a3a0e43f 2116{
84f97dc2
JA
2117 struct io_rings *rings = ctx->rings;
2118
ad3eb2c8
JA
2119 if (test_bit(0, &ctx->cq_check_overflow)) {
2120 /*
2121 * noflush == true is from the waitqueue handler, just ensure
2122 * we wake up the task, and the next invocation will flush the
2123 * entries. We cannot safely do it from here.
2124 */
2125 if (noflush && !list_empty(&ctx->cq_overflow_list))
2126 return -1U;
1d7bb1d5 2127
e6c8aa9a 2128 io_cqring_overflow_flush(ctx, false, NULL, NULL);
ad3eb2c8 2129 }
1d7bb1d5 2130
a3a0e43f
JA
2131 /* See comment at the top of this file */
2132 smp_rmb();
ad3eb2c8 2133 return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
a3a0e43f
JA
2134}
2135
fb5ccc98
PB
2136static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2137{
2138 struct io_rings *rings = ctx->rings;
2139
2140 /* make sure SQ entry isn't read before tail */
2141 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2142}
2143
8ff069bf 2144static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
e94f141b 2145{
8ff069bf 2146 unsigned int cflags;
e94f141b 2147
bcda7baa
JA
2148 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2149 cflags |= IORING_CQE_F_BUFFER;
0e1b6fe3 2150 req->flags &= ~REQ_F_BUFFER_SELECTED;
bcda7baa
JA
2151 kfree(kbuf);
2152 return cflags;
e94f141b
JA
2153}
2154
8ff069bf 2155static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
bcda7baa 2156{
4d954c25 2157 struct io_buffer *kbuf;
bcda7baa 2158
4d954c25 2159 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
8ff069bf
PB
2160 return io_put_kbuf(req, kbuf);
2161}
2162
4c6e277c
JA
2163static inline bool io_run_task_work(void)
2164{
6200b0ae
JA
2165 /*
2166 * Not safe to run on exiting task, and the task_work handling will
2167 * not add work to such a task.
2168 */
2169 if (unlikely(current->flags & PF_EXITING))
2170 return false;
4c6e277c
JA
2171 if (current->task_works) {
2172 __set_current_state(TASK_RUNNING);
2173 task_work_run();
2174 return true;
2175 }
2176
2177 return false;
bcda7baa
JA
2178}
2179
bbde017a
XW
2180static void io_iopoll_queue(struct list_head *again)
2181{
2182 struct io_kiocb *req;
2183
2184 do {
d21ffe7e
PB
2185 req = list_first_entry(again, struct io_kiocb, inflight_entry);
2186 list_del(&req->inflight_entry);
81b68a5c 2187 __io_complete_rw(req, -EAGAIN, 0, NULL);
bbde017a
XW
2188 } while (!list_empty(again));
2189}
2190
def596e9
JA
2191/*
2192 * Find and free completed poll iocbs
2193 */
2194static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2195 struct list_head *done)
2196{
8237e045 2197 struct req_batch rb;
def596e9 2198 struct io_kiocb *req;
bbde017a
XW
2199 LIST_HEAD(again);
2200
2201 /* order with ->result store in io_complete_rw_iopoll() */
2202 smp_rmb();
def596e9 2203
5af1d13e 2204 io_init_req_batch(&rb);
def596e9 2205 while (!list_empty(done)) {
bcda7baa
JA
2206 int cflags = 0;
2207
d21ffe7e 2208 req = list_first_entry(done, struct io_kiocb, inflight_entry);
bbde017a 2209 if (READ_ONCE(req->result) == -EAGAIN) {
56450c20 2210 req->result = 0;
bbde017a 2211 req->iopoll_completed = 0;
d21ffe7e 2212 list_move_tail(&req->inflight_entry, &again);
bbde017a
XW
2213 continue;
2214 }
d21ffe7e 2215 list_del(&req->inflight_entry);
def596e9 2216
bcda7baa 2217 if (req->flags & REQ_F_BUFFER_SELECTED)
8ff069bf 2218 cflags = io_put_rw_kbuf(req);
bcda7baa
JA
2219
2220 __io_cqring_fill_event(req, req->result, cflags);
def596e9
JA
2221 (*nr_events)++;
2222
c3524383 2223 if (refcount_dec_and_test(&req->refs))
2d6500d4 2224 io_req_free_batch(&rb, req);
def596e9 2225 }
def596e9 2226
09bb8394 2227 io_commit_cqring(ctx);
32b2244a
XW
2228 if (ctx->flags & IORING_SETUP_SQPOLL)
2229 io_cqring_ev_posted(ctx);
2d6500d4 2230 io_req_free_batch_finish(ctx, &rb);
581f9810 2231
bbde017a
XW
2232 if (!list_empty(&again))
2233 io_iopoll_queue(&again);
581f9810
BM
2234}
2235
def596e9
JA
2236static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2237 long min)
2238{
2239 struct io_kiocb *req, *tmp;
2240 LIST_HEAD(done);
2241 bool spin;
2242 int ret;
2243
2244 /*
2245 * Only spin for completions if we don't have multiple devices hanging
2246 * off our complete list, and we're under the requested amount.
2247 */
2248 spin = !ctx->poll_multi_file && *nr_events < min;
2249
2250 ret = 0;
d21ffe7e 2251 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
9adbd45d 2252 struct kiocb *kiocb = &req->rw.kiocb;
def596e9
JA
2253
2254 /*
581f9810
BM
2255 * Move completed and retryable entries to our local lists.
2256 * If we find a request that requires polling, break out
2257 * and complete those lists first, if we have entries there.
def596e9 2258 */
65a6543d 2259 if (READ_ONCE(req->iopoll_completed)) {
d21ffe7e 2260 list_move_tail(&req->inflight_entry, &done);
def596e9
JA
2261 continue;
2262 }
2263 if (!list_empty(&done))
2264 break;
2265
2266 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2267 if (ret < 0)
2268 break;
2269
3aadc23e
PB
2270 /* iopoll may have completed current req */
2271 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2272 list_move_tail(&req->inflight_entry, &done);
3aadc23e 2273
def596e9
JA
2274 if (ret && spin)
2275 spin = false;
2276 ret = 0;
2277 }
2278
2279 if (!list_empty(&done))
2280 io_iopoll_complete(ctx, nr_events, &done);
2281
2282 return ret;
2283}
2284
2285/*
d195a66e 2286 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
2287 * non-spinning poll check - we'll still enter the driver poll loop, but only
2288 * as a non-spinning completion check.
2289 */
2290static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2291 long min)
2292{
540e32a0 2293 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
def596e9
JA
2294 int ret;
2295
2296 ret = io_do_iopoll(ctx, nr_events, min);
2297 if (ret < 0)
2298 return ret;
eba0a4dd 2299 if (*nr_events >= min)
def596e9
JA
2300 return 0;
2301 }
2302
2303 return 1;
2304}
2305
2306/*
2307 * We can't just wait for polled events to come to us, we have to actively
2308 * find and complete them.
2309 */
b2edc0a7 2310static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
def596e9
JA
2311{
2312 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2313 return;
2314
2315 mutex_lock(&ctx->uring_lock);
540e32a0 2316 while (!list_empty(&ctx->iopoll_list)) {
def596e9
JA
2317 unsigned int nr_events = 0;
2318
b2edc0a7 2319 io_do_iopoll(ctx, &nr_events, 0);
08f5439f 2320
b2edc0a7
PB
2321 /* let it sleep and repeat later if can't complete a request */
2322 if (nr_events == 0)
2323 break;
08f5439f
JA
2324 /*
2325 * Ensure we allow local-to-the-cpu processing to take place,
2326 * in this case we need to ensure that we reap all events.
3fcee5a6 2327 * Also let task_work, etc. progress by releasing the mutex
08f5439f 2328 */
3fcee5a6
PB
2329 if (need_resched()) {
2330 mutex_unlock(&ctx->uring_lock);
2331 cond_resched();
2332 mutex_lock(&ctx->uring_lock);
2333 }
def596e9
JA
2334 }
2335 mutex_unlock(&ctx->uring_lock);
2336}
2337
7668b92a 2338static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
def596e9 2339{
7668b92a 2340 unsigned int nr_events = 0;
2b2ed975 2341 int iters = 0, ret = 0;
500f9fba 2342
c7849be9
XW
2343 /*
2344 * We disallow the app entering submit/complete with polling, but we
2345 * still need to lock the ring to prevent racing with polled issue
2346 * that got punted to a workqueue.
2347 */
2348 mutex_lock(&ctx->uring_lock);
def596e9 2349 do {
a3a0e43f
JA
2350 /*
2351 * Don't enter poll loop if we already have events pending.
2352 * If we do, we can potentially be spinning for commands that
2353 * already triggered a CQE (eg in error).
2354 */
1d7bb1d5 2355 if (io_cqring_events(ctx, false))
a3a0e43f
JA
2356 break;
2357
500f9fba
JA
2358 /*
2359 * If a submit got punted to a workqueue, we can have the
2360 * application entering polling for a command before it gets
2361 * issued. That app will hold the uring_lock for the duration
2362 * of the poll right here, so we need to take a breather every
2363 * now and then to ensure that the issue has a chance to add
2364 * the poll to the issued list. Otherwise we can spin here
2365 * forever, while the workqueue is stuck trying to acquire the
2366 * very same mutex.
2367 */
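/* Editor's note: (++iters & 7) == 0 on every 8th pass, i.e. the mutex is released once per eight iterations. */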
2368 if (!(++iters & 7)) {
2369 mutex_unlock(&ctx->uring_lock);
4c6e277c 2370 io_run_task_work();
500f9fba
JA
2371 mutex_lock(&ctx->uring_lock);
2372 }
2373
7668b92a 2374 ret = io_iopoll_getevents(ctx, &nr_events, min);
def596e9
JA
2375 if (ret <= 0)
2376 break;
2377 ret = 0;
7668b92a 2378 } while (min && !nr_events && !need_resched());
def596e9 2379
500f9fba 2380 mutex_unlock(&ctx->uring_lock);
def596e9
JA
2381 return ret;
2382}
2383
491381ce 2384static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 2385{
491381ce
JA
2386 /*
2387 * Tell lockdep we inherited freeze protection from submission
2388 * thread.
2389 */
2390 if (req->flags & REQ_F_ISREG) {
2391 struct inode *inode = file_inode(req->file);
2b188cc1 2392
491381ce 2393 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 2394 }
491381ce 2395 file_end_write(req->file);
2b188cc1
JA
2396}
2397
a1d7c393
JA
2398static void io_complete_rw_common(struct kiocb *kiocb, long res,
2399 struct io_comp_state *cs)
2b188cc1 2400{
9adbd45d 2401 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
bcda7baa 2402 int cflags = 0;
2b188cc1 2403
491381ce
JA
2404 if (kiocb->ki_flags & IOCB_WRITE)
2405 kiocb_end_write(req);
2b188cc1 2406
4e88d6e7
JA
2407 if (res != req->result)
2408 req_set_fail_links(req);
bcda7baa 2409 if (req->flags & REQ_F_BUFFER_SELECTED)
8ff069bf 2410 cflags = io_put_rw_kbuf(req);
a1d7c393 2411 __io_req_complete(req, res, cflags, cs);
ba816ad6
JA
2412}
2413
b63534c4
JA
2414#ifdef CONFIG_BLOCK
2415static bool io_resubmit_prep(struct io_kiocb *req, int error)
2416{
2417 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
2418 ssize_t ret = -ECANCELED;
2419 struct iov_iter iter;
2420 int rw;
2421
2422 if (error) {
2423 ret = error;
2424 goto end_req;
2425 }
2426
2427 switch (req->opcode) {
2428 case IORING_OP_READV:
2429 case IORING_OP_READ_FIXED:
2430 case IORING_OP_READ:
2431 rw = READ;
2432 break;
2433 case IORING_OP_WRITEV:
2434 case IORING_OP_WRITE_FIXED:
2435 case IORING_OP_WRITE:
2436 rw = WRITE;
2437 break;
2438 default:
2439 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2440 req->opcode);
2441 goto end_req;
2442 }
2443
e8c2bc1f 2444 if (!req->async_data) {
8f3d7496
JA
2445 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2446 if (ret < 0)
2447 goto end_req;
2448 ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
2449 if (!ret)
2450 return true;
2451 kfree(iovec);
2452 } else {
b63534c4 2453 return true;
8f3d7496 2454 }
b63534c4 2455end_req:
b63534c4 2456 req_set_fail_links(req);
e1e16097 2457 io_req_complete(req, ret);
b63534c4
JA
2458 return false;
2459}
b63534c4
JA
2460#endif
2461
2462static bool io_rw_reissue(struct io_kiocb *req, long res)
2463{
2464#ifdef CONFIG_BLOCK
355afaeb 2465 umode_t mode = file_inode(req->file)->i_mode;
b63534c4
JA
2466 int ret;
2467
355afaeb
JA
2468 if (!S_ISBLK(mode) && !S_ISREG(mode))
2469 return false;
b63534c4
JA
2470 if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
2471 return false;
2472
fdee946d 2473 ret = io_sq_thread_acquire_mm(req->ctx, req);
6d816e08 2474
fdee946d
JA
2475 if (io_resubmit_prep(req, ret)) {
2476 refcount_inc(&req->refs);
2477 io_queue_async_work(req);
b63534c4 2478 return true;
fdee946d
JA
2479 }
2480
b63534c4
JA
2481#endif
2482 return false;
2483}
2484
a1d7c393
JA
2485static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2486 struct io_comp_state *cs)
2487{
2488 if (!io_rw_reissue(req, res))
2489 io_complete_rw_common(&req->rw.kiocb, res, cs);
ba816ad6
JA
2490}
2491
2492static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2493{
9adbd45d 2494 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6 2495
a1d7c393 2496 __io_complete_rw(req, res, res2, NULL);
2b188cc1
JA
2497}
2498
def596e9
JA
2499static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2500{
9adbd45d 2501 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 2502
491381ce
JA
2503 if (kiocb->ki_flags & IOCB_WRITE)
2504 kiocb_end_write(req);
def596e9 2505
2d7d6792 2506 if (res != -EAGAIN && res != req->result)
4e88d6e7 2507 req_set_fail_links(req);
bbde017a
XW
2508
2509 WRITE_ONCE(req->result, res);
2510 /* order with io_poll_complete() checking ->result */
cd664b0e
PB
2511 smp_wmb();
2512 WRITE_ONCE(req->iopoll_completed, 1);
def596e9
JA
2513}
2514
2515/*
2516 * After the iocb has been issued, it's safe to be found on the poll list.
2517 * Adding the kiocb to the list AFTER submission ensures that we don't
2518 * find it from a io_iopoll_getevents() thread before the issuer is done
2519 * accessing the kiocb cookie.
2520 */
2521static void io_iopoll_req_issued(struct io_kiocb *req)
2522{
2523 struct io_ring_ctx *ctx = req->ctx;
2524
2525 /*
2526 * Track whether we have multiple files in our lists. This will impact
2527 * how we do polling eventually, not spinning if we're on potentially
2528 * different devices.
2529 */
540e32a0 2530 if (list_empty(&ctx->iopoll_list)) {
def596e9
JA
2531 ctx->poll_multi_file = false;
2532 } else if (!ctx->poll_multi_file) {
2533 struct io_kiocb *list_req;
2534
540e32a0 2535 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
d21ffe7e 2536 inflight_entry);
9adbd45d 2537 if (list_req->file != req->file)
def596e9
JA
2538 ctx->poll_multi_file = true;
2539 }
2540
2541 /*
2542 * For fast devices, IO may have already completed. If it has, add
2543 * it to the front so we find it first.
2544 */
65a6543d 2545 if (READ_ONCE(req->iopoll_completed))
d21ffe7e 2546 list_add(&req->inflight_entry, &ctx->iopoll_list);
def596e9 2547 else
d21ffe7e 2548 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
bdcd3eab 2549
534ca6d6
JA
2550 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2551 wq_has_sleeper(&ctx->sq_data->wait))
2552 wake_up(&ctx->sq_data->wait);
def596e9
JA
2553}
2554
9f13c35b 2555static void __io_state_file_put(struct io_submit_state *state)
9a56a232 2556{
06ef3608
PB
2557 if (state->has_refs)
2558 fput_many(state->file, state->has_refs);
9f13c35b
PB
2559 state->file = NULL;
2560}
2561
2562static inline void io_state_file_put(struct io_submit_state *state)
2563{
2564 if (state->file)
2565 __io_state_file_put(state);
9a56a232
JA
2566}
2567
2568/*
2569 * Get as many references to a file as we have IOs left in this submission,
2570 * assuming most submissions are for one file, or at least that each file
2571 * has more than one submission.
2572 */
8da11c19 2573static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
2574{
2575 if (!state)
2576 return fget(fd);
2577
2578 if (state->file) {
2579 if (state->fd == fd) {
06ef3608 2580 state->has_refs--;
9a56a232
JA
2581 state->ios_left--;
2582 return state->file;
2583 }
9f13c35b 2584 __io_state_file_put(state);
9a56a232
JA
2585 }
2586 state->file = fget_many(fd, state->ios_left);
2587 if (!state->file)
2588 return NULL;
2589
2590 state->fd = fd;
9a56a232 2591 state->ios_left--;
06ef3608 2592 state->has_refs = state->ios_left;
9a56a232
JA
2593 return state->file;
2594}
2595
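/*
 * Illustrative worked example (editor's addition, not in the original
 * source): with 8 SQEs left in the submission, the first request against
 * a given fd grabs 8 references via fget_many(fd, 8) and consumes one,
 * leaving has_refs == 7. Each later request on the same fd hands out one
 * of the cached references, and whatever remains is released in a single
 * fput_many() call by __io_state_file_put().
 */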
4503b767
JA
2596static bool io_bdev_nowait(struct block_device *bdev)
2597{
2598#ifdef CONFIG_BLOCK
2599 return !bdev || queue_is_mq(bdev_get_queue(bdev));
2600#else
2601 return true;
2602#endif
2603}
2604
2b188cc1
JA
2605/*
2606 * If we tracked the file through the SCM inflight mechanism, we could support
2607 * any file. For now, just ensure that anything potentially problematic is done
2608 * inline.
2609 */
af197f50 2610static bool io_file_supports_async(struct file *file, int rw)
2b188cc1
JA
2611{
2612 umode_t mode = file_inode(file)->i_mode;
2613
4503b767
JA
2614 if (S_ISBLK(mode)) {
2615 if (io_bdev_nowait(file->f_inode->i_bdev))
2616 return true;
2617 return false;
2618 }
2619 if (S_ISCHR(mode) || S_ISSOCK(mode))
2b188cc1 2620 return true;
4503b767
JA
2621 if (S_ISREG(mode)) {
2622 if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2623 file->f_op != &io_uring_fops)
2624 return true;
2625 return false;
2626 }
2b188cc1 2627
c5b85625
JA
2628 /* any ->read/write should understand O_NONBLOCK */
2629 if (file->f_flags & O_NONBLOCK)
2630 return true;
2631
af197f50
JA
2632 if (!(file->f_mode & FMODE_NOWAIT))
2633 return false;
2634
2635 if (rw == READ)
2636 return file->f_op->read_iter != NULL;
2637
2638 return file->f_op->write_iter != NULL;
2b188cc1
JA
2639}
2640
3529d8c2
JA
2641static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2642 bool force_nonblock)
2b188cc1 2643{
def596e9 2644 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2645 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
2646 unsigned ioprio;
2647 int ret;
2b188cc1 2648
491381ce
JA
2649 if (S_ISREG(file_inode(req->file)->i_mode))
2650 req->flags |= REQ_F_ISREG;
2651
2b188cc1 2652 kiocb->ki_pos = READ_ONCE(sqe->off);
ba04291e
JA
2653 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2654 req->flags |= REQ_F_CUR_POS;
2655 kiocb->ki_pos = req->file->f_pos;
2656 }
2b188cc1 2657 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2658 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2659 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2660 if (unlikely(ret))
2661 return ret;
2b188cc1
JA
2662
2663 ioprio = READ_ONCE(sqe->ioprio);
2664 if (ioprio) {
2665 ret = ioprio_check_cap(ioprio);
2666 if (ret)
09bb8394 2667 return ret;
2b188cc1
JA
2668
2669 kiocb->ki_ioprio = ioprio;
2670 } else
2671 kiocb->ki_ioprio = get_current_ioprio();
2672
8449eeda 2673 /* don't allow async punt if RWF_NOWAIT was requested */
c5b85625 2674 if (kiocb->ki_flags & IOCB_NOWAIT)
8449eeda
SB
2675 req->flags |= REQ_F_NOWAIT;
2676
2677 if (force_nonblock)
2b188cc1 2678 kiocb->ki_flags |= IOCB_NOWAIT;
8449eeda 2679
def596e9 2680 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2681 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2682 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2683 return -EOPNOTSUPP;
2b188cc1 2684
def596e9
JA
2685 kiocb->ki_flags |= IOCB_HIPRI;
2686 kiocb->ki_complete = io_complete_rw_iopoll;
65a6543d 2687 req->iopoll_completed = 0;
def596e9 2688 } else {
09bb8394
JA
2689 if (kiocb->ki_flags & IOCB_HIPRI)
2690 return -EINVAL;
def596e9
JA
2691 kiocb->ki_complete = io_complete_rw;
2692 }
9adbd45d 2693
3529d8c2
JA
2694 req->rw.addr = READ_ONCE(sqe->addr);
2695 req->rw.len = READ_ONCE(sqe->len);
4f4eeba8 2696 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 2697 return 0;
2b188cc1
JA
2698}
2699
2700static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2701{
2702 switch (ret) {
2703 case -EIOCBQUEUED:
2704 break;
2705 case -ERESTARTSYS:
2706 case -ERESTARTNOINTR:
2707 case -ERESTARTNOHAND:
2708 case -ERESTART_RESTARTBLOCK:
2709 /*
2710 * We can't just restart the syscall, since previously
2711 * submitted sqes may already be in progress. Just fail this
2712 * IO with EINTR.
2713 */
2714 ret = -EINTR;
df561f66 2715 fallthrough;
2b188cc1
JA
2716 default:
2717 kiocb->ki_complete(kiocb, ret, 0);
2718 }
2719}
2720
a1d7c393
JA
2721static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2722 struct io_comp_state *cs)
ba816ad6 2723{
ba04291e 2724 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
e8c2bc1f 2725 struct io_async_rw *io = req->async_data;
ba04291e 2726
227c0c96 2727 /* add previously done IO, if any */
e8c2bc1f 2728 if (io && io->bytes_done > 0) {
227c0c96 2729 if (ret < 0)
e8c2bc1f 2730 ret = io->bytes_done;
227c0c96 2731 else
e8c2bc1f 2732 ret += io->bytes_done;
227c0c96
JA
2733 }
2734
ba04291e
JA
2735 if (req->flags & REQ_F_CUR_POS)
2736 req->file->f_pos = kiocb->ki_pos;
bcaec089 2737 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
a1d7c393 2738 __io_complete_rw(req, ret, 0, cs);
ba816ad6
JA
2739 else
2740 io_rw_done(kiocb, ret);
2741}
2742
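/*
 * Illustrative worked example (editor's addition, not in the original
 * source): if a buffered read copies 4096 bytes and then hits -EAGAIN,
 * io_read() records bytes_done = 4096 before retrying; when the retry
 * returns the remaining 4096 bytes, kiocb_done() reports 8192 in the CQE.
 * If the retry fails outright, the partial 4096 is reported instead.
 */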
9adbd45d 2743static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
7d009165 2744 struct iov_iter *iter)
edafccee 2745{
9adbd45d
JA
2746 struct io_ring_ctx *ctx = req->ctx;
2747 size_t len = req->rw.len;
edafccee 2748 struct io_mapped_ubuf *imu;
4be1c615 2749 u16 index, buf_index = req->buf_index;
edafccee
JA
2750 size_t offset;
2751 u64 buf_addr;
2752
edafccee
JA
2753 if (unlikely(buf_index >= ctx->nr_user_bufs))
2754 return -EFAULT;
edafccee
JA
2755 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2756 imu = &ctx->user_bufs[index];
9adbd45d 2757 buf_addr = req->rw.addr;
edafccee
JA
2758
2759 /* overflow */
2760 if (buf_addr + len < buf_addr)
2761 return -EFAULT;
2762 /* not inside the mapped region */
2763 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2764 return -EFAULT;
2765
2766 /*
2767 * May not be the start of the buffer; set the size appropriately
2768 * and advance to the beginning.
2769 */
2770 offset = buf_addr - imu->ubuf;
2771 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
2772
2773 if (offset) {
2774 /*
2775 * Don't use iov_iter_advance() here, as it's really slow for
2776 * using the latter parts of a big fixed buffer - it iterates
2777 * over each segment manually. We can cheat a bit here, because
2778 * we know that:
2779 *
2780 * 1) it's a BVEC iter, we set it up
2781 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2782 * first and last bvec
2783 *
2784 * So just find our index, and adjust the iterator afterwards.
2785 * If the offset is within the first bvec (or is the whole first
2786 * bvec), just use iov_iter_advance(). This makes it easier
2787 * since we can just skip the first segment, which may not
2788 * be PAGE_SIZE aligned.
2789 */
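/*
 * Illustrative worked example (editor's addition, not in the original
 * source): with 4K pages, a 4096-byte first bvec and offset == 10000,
 * the first segment is skipped (offset becomes 5904), seg_skip is
 * 1 + (5904 >> PAGE_SHIFT) == 2, and iov_offset ends up as
 * 5904 & ~PAGE_MASK == 1808 into the third bvec.
 */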
2790 const struct bio_vec *bvec = imu->bvec;
2791
2792 if (offset <= bvec->bv_len) {
2793 iov_iter_advance(iter, offset);
2794 } else {
2795 unsigned long seg_skip;
2796
2797 /* skip first vec */
2798 offset -= bvec->bv_len;
2799 seg_skip = 1 + (offset >> PAGE_SHIFT);
2800
2801 iter->bvec = bvec + seg_skip;
2802 iter->nr_segs -= seg_skip;
99c79f66 2803 iter->count -= bvec->bv_len + offset;
bd11b3a3 2804 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
2805 }
2806 }
2807
5e559561 2808 return len;
edafccee
JA
2809}
2810
bcda7baa
JA
2811static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2812{
2813 if (needs_lock)
2814 mutex_unlock(&ctx->uring_lock);
2815}
2816
2817static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2818{
2819 /*
2820 * "Normal" inline submissions always hold the uring_lock, since we
2821 * grab it from the system call. Same is true for the SQPOLL offload.
2822 * The only exception is when we've detached the request and issue it
2823 * from an async worker thread, grab the lock for that case.
2824 */
2825 if (needs_lock)
2826 mutex_lock(&ctx->uring_lock);
2827}
2828
2829static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2830 int bgid, struct io_buffer *kbuf,
2831 bool needs_lock)
2832{
2833 struct io_buffer *head;
2834
2835 if (req->flags & REQ_F_BUFFER_SELECTED)
2836 return kbuf;
2837
2838 io_ring_submit_lock(req->ctx, needs_lock);
2839
2840 lockdep_assert_held(&req->ctx->uring_lock);
2841
2842 head = idr_find(&req->ctx->io_buffer_idr, bgid);
2843 if (head) {
2844 if (!list_empty(&head->list)) {
2845 kbuf = list_last_entry(&head->list, struct io_buffer,
2846 list);
2847 list_del(&kbuf->list);
2848 } else {
2849 kbuf = head;
2850 idr_remove(&req->ctx->io_buffer_idr, bgid);
2851 }
2852 if (*len > kbuf->len)
2853 *len = kbuf->len;
2854 } else {
2855 kbuf = ERR_PTR(-ENOBUFS);
2856 }
2857
2858 io_ring_submit_unlock(req->ctx, needs_lock);
2859
2860 return kbuf;
2861}
2862
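/*
 * Illustrative userspace counterpart (editor's sketch, not part of this
 * file; names in capitals are placeholders and liburing is assumed):
 *
 *	io_uring_prep_provide_buffers(sqe, bufs, BUF_LEN, NR_BUFS, BGID, 0);
 *	...
 *	io_uring_prep_read(sqe, fd, NULL, BUF_LEN, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 *	...
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *
 * The kernel-side selection of the buffer happens in io_buffer_select()
 * above; the chosen buffer ID is reported back in cqe->flags.
 */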
4d954c25
JA
2863static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2864 bool needs_lock)
2865{
2866 struct io_buffer *kbuf;
4f4eeba8 2867 u16 bgid;
4d954c25
JA
2868
2869 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
4f4eeba8 2870 bgid = req->buf_index;
4d954c25
JA
2871 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2872 if (IS_ERR(kbuf))
2873 return kbuf;
2874 req->rw.addr = (u64) (unsigned long) kbuf;
2875 req->flags |= REQ_F_BUFFER_SELECTED;
2876 return u64_to_user_ptr(kbuf->addr);
2877}
2878
2879#ifdef CONFIG_COMPAT
2880static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2881 bool needs_lock)
2882{
2883 struct compat_iovec __user *uiov;
2884 compat_ssize_t clen;
2885 void __user *buf;
2886 ssize_t len;
2887
2888 uiov = u64_to_user_ptr(req->rw.addr);
2889 if (!access_ok(uiov, sizeof(*uiov)))
2890 return -EFAULT;
2891 if (__get_user(clen, &uiov->iov_len))
2892 return -EFAULT;
2893 if (clen < 0)
2894 return -EINVAL;
2895
2896 len = clen;
2897 buf = io_rw_buffer_select(req, &len, needs_lock);
2898 if (IS_ERR(buf))
2899 return PTR_ERR(buf);
2900 iov[0].iov_base = buf;
2901 iov[0].iov_len = (compat_size_t) len;
2902 return 0;
2903}
2904#endif
2905
2906static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2907 bool needs_lock)
2908{
2909 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2910 void __user *buf;
2911 ssize_t len;
2912
2913 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2914 return -EFAULT;
2915
2916 len = iov[0].iov_len;
2917 if (len < 0)
2918 return -EINVAL;
2919 buf = io_rw_buffer_select(req, &len, needs_lock);
2920 if (IS_ERR(buf))
2921 return PTR_ERR(buf);
2922 iov[0].iov_base = buf;
2923 iov[0].iov_len = len;
2924 return 0;
2925}
2926
2927static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2928 bool needs_lock)
2929{
dddb3e26
JA
2930 if (req->flags & REQ_F_BUFFER_SELECTED) {
2931 struct io_buffer *kbuf;
2932
2933 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2934 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2935 iov[0].iov_len = kbuf->len;
4d954c25 2936 return 0;
dddb3e26 2937 }
4d954c25
JA
2938 if (!req->rw.len)
2939 return 0;
2940 else if (req->rw.len > 1)
2941 return -EINVAL;
2942
2943#ifdef CONFIG_COMPAT
2944 if (req->ctx->compat)
2945 return io_compat_import(req, iov, needs_lock);
2946#endif
2947
2948 return __io_iov_buffer_select(req, iov, needs_lock);
2949}
2950
8452fd0c
JA
2951static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
2952 struct iovec **iovec, struct iov_iter *iter,
2953 bool needs_lock)
2b188cc1 2954{
9adbd45d
JA
2955 void __user *buf = u64_to_user_ptr(req->rw.addr);
2956 size_t sqe_len = req->rw.len;
4d954c25 2957 ssize_t ret;
edafccee
JA
2958 u8 opcode;
2959
d625c6ee 2960 opcode = req->opcode;
7d009165 2961 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 2962 *iovec = NULL;
9adbd45d 2963 return io_import_fixed(req, rw, iter);
edafccee 2964 }
2b188cc1 2965
bcda7baa 2966 /* buffer index only valid with fixed read/write, or buffer select */
4f4eeba8 2967 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
2968 return -EINVAL;
2969
3a6820f2 2970 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 2971 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25 2972 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
867a23ea 2973 if (IS_ERR(buf))
4d954c25 2974 return PTR_ERR(buf);
3f9d6441 2975 req->rw.len = sqe_len;
bcda7baa
JA
2976 }
2977
3a6820f2
JA
2978 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2979 *iovec = NULL;
3a901598 2980 return ret < 0 ? ret : sqe_len;
3a6820f2
JA
2981 }
2982
4d954c25
JA
2983 if (req->flags & REQ_F_BUFFER_SELECT) {
2984 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3f9d6441
JA
2985 if (!ret) {
2986 ret = (*iovec)->iov_len;
2987 iov_iter_init(iter, rw, *iovec, 1, ret);
2988 }
4d954c25
JA
2989 *iovec = NULL;
2990 return ret;
2991 }
2992
2b188cc1 2993#ifdef CONFIG_COMPAT
cf6fd4bd 2994 if (req->ctx->compat)
2b188cc1
JA
2995 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
2996 iovec, iter);
2997#endif
2998
2999 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
3000}
3001
8452fd0c
JA
3002static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
3003 struct iovec **iovec, struct iov_iter *iter,
3004 bool needs_lock)
3005{
e8c2bc1f
JA
3006 struct io_async_rw *iorw = req->async_data;
3007
3008 if (!iorw)
8452fd0c
JA
3009 return __io_import_iovec(rw, req, iovec, iter, needs_lock);
3010 *iovec = NULL;
e8c2bc1f 3011 return iov_iter_count(&iorw->iter);
8452fd0c
JA
3012}
3013
0fef9483
JA
3014static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3015{
3016 return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos;
3017}
3018
31b51510 3019/*
32960613
JA
3020 * For files that don't have ->read_iter() and ->write_iter(), handle them
3021 * by looping over ->read() or ->write() manually.
31b51510 3022 */
32960613
JA
3023static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
3024 struct iov_iter *iter)
3025{
3026 ssize_t ret = 0;
3027
3028 /*
3029 * Don't support polled IO through this interface, and we can't
3030 * support non-blocking either. For the latter, this just causes
3031 * the kiocb to be handled from an async context.
3032 */
3033 if (kiocb->ki_flags & IOCB_HIPRI)
3034 return -EOPNOTSUPP;
3035 if (kiocb->ki_flags & IOCB_NOWAIT)
3036 return -EAGAIN;
3037
3038 while (iov_iter_count(iter)) {
311ae9e1 3039 struct iovec iovec;
32960613
JA
3040 ssize_t nr;
3041
311ae9e1
PB
3042 if (!iov_iter_is_bvec(iter)) {
3043 iovec = iov_iter_iovec(iter);
3044 } else {
3045 /* fixed buffers import bvec */
3046 iovec.iov_base = kmap(iter->bvec->bv_page)
3047 + iter->iov_offset;
3048 iovec.iov_len = min(iter->count,
3049 iter->bvec->bv_len - iter->iov_offset);
3050 }
3051
32960613
JA
3052 if (rw == READ) {
3053 nr = file->f_op->read(file, iovec.iov_base,
0fef9483 3054 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3055 } else {
3056 nr = file->f_op->write(file, iovec.iov_base,
0fef9483 3057 iovec.iov_len, io_kiocb_ppos(kiocb));
32960613
JA
3058 }
3059
311ae9e1
PB
3060 if (iov_iter_is_bvec(iter))
3061 kunmap(iter->bvec->bv_page);
3062
32960613
JA
3063 if (nr < 0) {
3064 if (!ret)
3065 ret = nr;
3066 break;
3067 }
3068 ret += nr;
3069 if (nr != iovec.iov_len)
3070 break;
3071 iov_iter_advance(iter, nr);
3072 }
3073
3074 return ret;
3075}
3076
ff6165b2
JA
3077static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3078 const struct iovec *fast_iov, struct iov_iter *iter)
f67676d1 3079{
e8c2bc1f 3080 struct io_async_rw *rw = req->async_data;
b64e3444 3081
ff6165b2 3082 memcpy(&rw->iter, iter, sizeof(*iter));
afb87658 3083 rw->free_iovec = iovec;
227c0c96 3084 rw->bytes_done = 0;
ff6165b2
JA
3085 /* can only be fixed buffers, no need to do anything */
3086 if (iter->type == ITER_BVEC)
3087 return;
b64e3444 3088 if (!iovec) {
ff6165b2
JA
3089 unsigned iov_off = 0;
3090
3091 rw->iter.iov = rw->fast_iov;
3092 if (iter->iov != fast_iov) {
3093 iov_off = iter->iov - fast_iov;
3094 rw->iter.iov += iov_off;
3095 }
3096 if (rw->fast_iov != fast_iov)
3097 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
45097dae 3098 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
3099 } else {
3100 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
3101 }
3102}
3103
e8c2bc1f 3104static inline int __io_alloc_async_data(struct io_kiocb *req)
3d9932a8 3105{
e8c2bc1f
JA
3106 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3107 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3108 return req->async_data == NULL;
3d9932a8
XW
3109}
3110
e8c2bc1f 3111static int io_alloc_async_data(struct io_kiocb *req)
f67676d1 3112{
e8c2bc1f 3113 if (!io_op_defs[req->opcode].needs_async_data)
d3656344 3114 return 0;
3d9932a8 3115
e8c2bc1f 3116 return __io_alloc_async_data(req);
b7bb4f7d
JA
3117}
3118
ff6165b2
JA
3119static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3120 const struct iovec *fast_iov,
227c0c96 3121 struct iov_iter *iter, bool force)
b7bb4f7d 3122{
e8c2bc1f 3123 if (!force && !io_op_defs[req->opcode].needs_async_data)
74566df3 3124 return 0;
e8c2bc1f
JA
3125 if (!req->async_data) {
3126 if (__io_alloc_async_data(req))
5d204bcf 3127 return -ENOMEM;
b7bb4f7d 3128
ff6165b2 3129 io_req_map_rw(req, iovec, fast_iov, iter);
5d204bcf 3130 }
b7bb4f7d 3131 return 0;
f67676d1
JA
3132}
3133
c3e330a4
PB
3134static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
3135 bool force_nonblock)
3136{
e8c2bc1f 3137 struct io_async_rw *iorw = req->async_data;
f4bff104 3138 struct iovec *iov = iorw->fast_iov;
c3e330a4
PB
3139 ssize_t ret;
3140
c183edff 3141 ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
c3e330a4
PB
3142 if (unlikely(ret < 0))
3143 return ret;
3144
ab0b196c
PB
3145 iorw->bytes_done = 0;
3146 iorw->free_iovec = iov;
3147 if (iov)
3148 req->flags |= REQ_F_NEED_CLEANUP;
c3e330a4
PB
3149 return 0;
3150}
3151
3529d8c2
JA
3152static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
3153 bool force_nonblock)
f67676d1
JA
3154{
3155 ssize_t ret;
3156
3529d8c2
JA
3157 ret = io_prep_rw(req, sqe, force_nonblock);
3158 if (ret)
3159 return ret;
f67676d1 3160
3529d8c2
JA
3161 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3162 return -EBADF;
f67676d1 3163
5f798bea 3164 /* either don't need iovec imported or already have it */
e8c2bc1f 3165 if (!req->async_data || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2 3166 return 0;
c3e330a4 3167 return io_rw_prep_async(req, READ, force_nonblock);
f67676d1
JA
3168}
3169
c1dd91d1
JA
3170/*
3171 * This is our waitqueue callback handler, registered through lock_page_async()
3172 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3173 * This gets called when the page is unlocked, and we generally expect that to
3174 * happen when the page IO is completed and the page is now uptodate. This will
3175 * queue a task_work based retry of the operation, attempting to copy the data
3176 * again. If the latter fails because the page was NOT uptodate, then we will
3177 * do a thread based blocking retry of the operation. That's the unexpected
3178 * slow path.
3179 */
bcf5a063
JA
3180static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3181 int sync, void *arg)
3182{
3183 struct wait_page_queue *wpq;
3184 struct io_kiocb *req = wait->private;
bcf5a063 3185 struct wait_page_key *key = arg;
bcf5a063
JA
3186 int ret;
3187
3188 wpq = container_of(wait, struct wait_page_queue, wait);
3189
cdc8fcb4
LT
3190 if (!wake_page_match(wpq, key))
3191 return 0;
3192
c8d317aa 3193 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
bcf5a063
JA
3194 list_del_init(&wait->entry);
3195
e7375122 3196 init_task_work(&req->task_work, io_req_task_submit);
6d816e08
JA
3197 percpu_ref_get(&req->ctx->refs);
3198
bcf5a063
JA
3199 /* submit ref gets dropped, acquire a new one */
3200 refcount_inc(&req->refs);
fd7d6de2 3201 ret = io_req_task_work_add(req, &req->task_work, true);
bcf5a063 3202 if (unlikely(ret)) {
c2c4c83c
JA
3203 struct task_struct *tsk;
3204
bcf5a063 3205 /* queue just for cancelation */
e7375122 3206 init_task_work(&req->task_work, io_req_task_cancel);
bcf5a063 3207 tsk = io_wq_get_task(req->ctx->io_wq);
e7375122 3208 task_work_add(tsk, &req->task_work, 0);
c2c4c83c 3209 wake_up_process(tsk);
bcf5a063 3210 }
bcf5a063
JA
3211 return 1;
3212}
3213
c1dd91d1
JA
3214/*
3215 * This controls whether a given IO request should be armed for async page
3216 * based retry. If we return false here, the request is handed to the async
3217 * worker threads for retry. If we're doing buffered reads on a regular file,
3218 * we prepare a private wait_page_queue entry and retry the operation. This
3219 * will either succeed because the page is now uptodate and unlocked, or it
3220 * will register a callback when the page is unlocked at IO completion. Through
3221 * that callback, io_uring uses task_work to setup a retry of the operation.
3222 * That retry will attempt the buffered read again. The retry will generally
3223 * succeed, or in rare cases where it fails, we then fall back to using the
3224 * async worker threads for a blocking retry.
3225 */
227c0c96 3226static bool io_rw_should_retry(struct io_kiocb *req)
f67676d1 3227{
e8c2bc1f
JA
3228 struct io_async_rw *rw = req->async_data;
3229 struct wait_page_queue *wait = &rw->wpq;
bcf5a063 3230 struct kiocb *kiocb = &req->rw.kiocb;
f67676d1 3231
bcf5a063
JA
3232 /* never retry for NOWAIT, we just complete with -EAGAIN */
3233 if (req->flags & REQ_F_NOWAIT)
3234 return false;
f67676d1 3235
227c0c96 3236 /* Only for buffered IO */
3b2a4439 3237 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
bcf5a063 3238 return false;
3b2a4439 3239
bcf5a063
JA
3240 /*
3241 * just use poll if we can, and don't attempt if the fs doesn't
3242 * support callback based unlocks
3243 */
3244 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3245 return false;
f67676d1 3246
3b2a4439
JA
3247 wait->wait.func = io_async_buf_func;
3248 wait->wait.private = req;
3249 wait->wait.flags = 0;
3250 INIT_LIST_HEAD(&wait->wait.entry);
3251 kiocb->ki_flags |= IOCB_WAITQ;
c8d317aa 3252 kiocb->ki_flags &= ~IOCB_NOWAIT;
3b2a4439 3253 kiocb->ki_waitq = wait;
3b2a4439 3254 return true;
bcf5a063
JA
3255}
3256
3257static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3258{
3259 if (req->file->f_op->read_iter)
3260 return call_read_iter(req->file, &req->rw.kiocb, iter);
2dd2111d
GH
3261 else if (req->file->f_op->read)
3262 return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
3263 else
3264 return -EINVAL;
f67676d1
JA
3265}
3266
a1d7c393
JA
3267static int io_read(struct io_kiocb *req, bool force_nonblock,
3268 struct io_comp_state *cs)
2b188cc1
JA
3269{
3270 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3271 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3272 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3273 struct io_async_rw *rw = req->async_data;
227c0c96 3274 ssize_t io_size, ret, ret2;
31b51510 3275 size_t iov_count;
f5cac8b1 3276 bool no_async;
ff6165b2 3277
e8c2bc1f
JA
3278 if (rw)
3279 iter = &rw->iter;
2b188cc1 3280
ff6165b2 3281 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
06b76d44
JA
3282 if (ret < 0)
3283 return ret;
eefdf30f 3284 iov_count = iov_iter_count(iter);
fa15bafb
PB
3285 io_size = ret;
3286 req->result = io_size;
227c0c96 3287 ret = 0;
2b188cc1 3288
fd6c2e4c
JA
3289 /* Ensure we clear previously set non-block flag */
3290 if (!force_nonblock)
29de5f6a 3291 kiocb->ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 3292
24c74678 3293 /* If the file doesn't support async, just async punt */
f5cac8b1
JA
3294 no_async = force_nonblock && !io_file_supports_async(req->file, READ);
3295 if (no_async)
f67676d1 3296 goto copy_iov;
9e645e11 3297
0fef9483 3298 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
fa15bafb
PB
3299 if (unlikely(ret))
3300 goto out_free;
2b188cc1 3301
227c0c96 3302 ret = io_iter_do_read(req, iter);
32960613 3303
227c0c96
JA
3304 if (!ret) {
3305 goto done;
3306 } else if (ret == -EIOCBQUEUED) {
3307 ret = 0;
3308 goto out_free;
3309 } else if (ret == -EAGAIN) {
eefdf30f
JA
3310 /* IOPOLL retry should happen for io-wq threads */
3311 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
f91daf56 3312 goto done;
355afaeb
JA
3313 /* no retry on NONBLOCK marked file */
3314 if (req->file->f_flags & O_NONBLOCK)
3315 goto done;
84216315
JA
3316 /* some cases will consume bytes even on error returns */
3317 iov_iter_revert(iter, iov_count - iov_iter_count(iter));
f38c7e3a
JA
3318 ret = 0;
3319 goto copy_iov;
227c0c96 3320 } else if (ret < 0) {
00d23d51
JA
3321 /* make sure -ERESTARTSYS -> -EINTR is done */
3322 goto done;
227c0c96
JA
3323 }
3324
3325 /* read it all, or we did blocking attempt. no retry. */
f91daf56
JA
3326 if (!iov_iter_count(iter) || !force_nonblock ||
3327 (req->file->f_flags & O_NONBLOCK))
227c0c96
JA
3328 goto done;
3329
3330 io_size -= ret;
3331copy_iov:
3332 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3333 if (ret2) {
3334 ret = ret2;
3335 goto out_free;
3336 }
f5cac8b1
JA
3337 if (no_async)
3338 return -EAGAIN;
e8c2bc1f 3339 rw = req->async_data;
227c0c96
JA
3340 /* it's copied and will be cleaned with ->io */
3341 iovec = NULL;
3342 /* now use our persistent iterator, if we aren't already */
e8c2bc1f 3343 iter = &rw->iter;
227c0c96 3344retry:
e8c2bc1f 3345 rw->bytes_done += ret;
227c0c96
JA
3346 /* if we can retry, do so with the callbacks armed */
3347 if (!io_rw_should_retry(req)) {
fa15bafb
PB
3348 kiocb->ki_flags &= ~IOCB_WAITQ;
3349 return -EAGAIN;
2b188cc1 3350 }
227c0c96
JA
3351
3352 /*
3353 * Now retry read with the IOCB_WAITQ parts set in the iocb. If we
3354 * get -EIOCBQUEUED, then we'll get a notification when the desired
3355 * page gets unlocked. We can also get a partial read here, and if we
3356 * do, then just retry at the new offset.
3357 */
3358 ret = io_iter_do_read(req, iter);
3359 if (ret == -EIOCBQUEUED) {
3360 ret = 0;
3361 goto out_free;
3362 } else if (ret > 0 && ret < io_size) {
3363 /* we got some bytes, but not all. retry. */
3364 goto retry;
3365 }
3366done:
3367 kiocb_done(kiocb, ret, cs);
3368 ret = 0;
f67676d1 3369out_free:
f261c168 3370 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 3371 if (iovec)
6f2cc166 3372 kfree(iovec);
2b188cc1
JA
3373 return ret;
3374}
3375
3529d8c2
JA
3376static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
3377 bool force_nonblock)
f67676d1
JA
3378{
3379 ssize_t ret;
3380
3529d8c2
JA
3381 ret = io_prep_rw(req, sqe, force_nonblock);
3382 if (ret)
3383 return ret;
f67676d1 3384
3529d8c2
JA
3385 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3386 return -EBADF;
f67676d1 3387
5f798bea 3388 /* either don't need iovec imported or already have it */
e8c2bc1f 3389 if (!req->async_data || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2 3390 return 0;
c3e330a4 3391 return io_rw_prep_async(req, WRITE, force_nonblock);
f67676d1
JA
3392}
3393
a1d7c393
JA
3394static int io_write(struct io_kiocb *req, bool force_nonblock,
3395 struct io_comp_state *cs)
2b188cc1
JA
3396{
3397 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 3398 struct kiocb *kiocb = &req->rw.kiocb;
ff6165b2 3399 struct iov_iter __iter, *iter = &__iter;
e8c2bc1f 3400 struct io_async_rw *rw = req->async_data;
31b51510 3401 size_t iov_count;
fa15bafb 3402 ssize_t ret, ret2, io_size;
2b188cc1 3403
e8c2bc1f
JA
3404 if (rw)
3405 iter = &rw->iter;
ff6165b2
JA
3406
3407 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
06b76d44
JA
3408 if (ret < 0)
3409 return ret;
eefdf30f 3410 iov_count = iov_iter_count(iter);
fa15bafb
PB
3411 io_size = ret;
3412 req->result = io_size;
2b188cc1 3413
fd6c2e4c
JA
3414 /* Ensure we clear previously set non-block flag */
3415 if (!force_nonblock)
9adbd45d 3416 req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 3417
24c74678 3418 /* If the file doesn't support async, just async punt */
af197f50 3419 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
f67676d1 3420 goto copy_iov;
31b51510 3421
10d59345
JA
3422 /* file path doesn't support NOWAIT for non-direct_IO */
3423 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3424 (req->flags & REQ_F_ISREG))
f67676d1 3425 goto copy_iov;
31b51510 3426
0fef9483 3427 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
fa15bafb
PB
3428 if (unlikely(ret))
3429 goto out_free;
4ed734b0 3430
fa15bafb
PB
3431 /*
3432 * Open-code file_start_write here to grab freeze protection,
3433 * which will be released by another thread in
3434 * io_complete_rw(). Fool lockdep by telling it the lock got
3435 * released so that it doesn't complain about the held lock when
3436 * we return to userspace.
3437 */
3438 if (req->flags & REQ_F_ISREG) {
3439 __sb_start_write(file_inode(req->file)->i_sb,
3440 SB_FREEZE_WRITE, true);
3441 __sb_writers_release(file_inode(req->file)->i_sb,
3442 SB_FREEZE_WRITE);
3443 }
3444 kiocb->ki_flags |= IOCB_WRITE;
4ed734b0 3445
fa15bafb 3446 if (req->file->f_op->write_iter)
ff6165b2 3447 ret2 = call_write_iter(req->file, kiocb, iter);
2dd2111d 3448 else if (req->file->f_op->write)
ff6165b2 3449 ret2 = loop_rw_iter(WRITE, req->file, kiocb, iter);
2dd2111d
GH
3450 else
3451 ret2 = -EINVAL;
4ed734b0 3452
fa15bafb
PB
3453 /*
3454 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3455 * retry them without IOCB_NOWAIT.
3456 */
3457 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3458 ret2 = -EAGAIN;
355afaeb
JA
3459 /* no retry on NONBLOCK marked file */
3460 if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
3461 goto done;
fa15bafb 3462 if (!force_nonblock || ret2 != -EAGAIN) {
eefdf30f
JA
3463 /* IOPOLL retry should happen for io-wq threads */
3464 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3465 goto copy_iov;
355afaeb 3466done:
fa15bafb
PB
3467 kiocb_done(kiocb, ret2, cs);
3468 } else {
f67676d1 3469copy_iov:
84216315
JA
3470 /* some cases will consume bytes even on error returns */
3471 iov_iter_revert(iter, iov_count - iov_iter_count(iter));
227c0c96 3472 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
ff6165b2
JA
3473 if (!ret)
3474 return -EAGAIN;
2b188cc1 3475 }
31b51510 3476out_free:
f261c168 3477 /* it's reportedly faster than delegating the null check to kfree() */
252917c3 3478 if (iovec)
6f2cc166 3479 kfree(iovec);
2b188cc1
JA
3480 return ret;
3481}
3482
f2a8d5c7
PB
3483static int __io_splice_prep(struct io_kiocb *req,
3484 const struct io_uring_sqe *sqe)
7d67af2c
PB
3485{
3486 struct io_splice* sp = &req->splice;
3487 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
3488 int ret;
3489
3490 if (req->flags & REQ_F_NEED_CLEANUP)
3491 return 0;
3232dd02
PB
3492 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3493 return -EINVAL;
7d67af2c
PB
3494
3495 sp->file_in = NULL;
7d67af2c
PB
3496 sp->len = READ_ONCE(sqe->len);
3497 sp->flags = READ_ONCE(sqe->splice_flags);
3498
3499 if (unlikely(sp->flags & ~valid_flags))
3500 return -EINVAL;
3501
3502 ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
3503 (sp->flags & SPLICE_F_FD_IN_FIXED));
3504 if (ret)
3505 return ret;
3506 req->flags |= REQ_F_NEED_CLEANUP;
3507
7cdaf587
XW
3508 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3509 /*
3510 * The splice operation will be punted async, and we need to modify
3511 * io_wq_work.flags here, so initialize io_wq_work first.
3512 */
3513 io_req_init_async(req);
7d67af2c 3514 req->work.flags |= IO_WQ_WORK_UNBOUND;
7cdaf587 3515 }
7d67af2c
PB
3516
3517 return 0;
3518}
3519
f2a8d5c7
PB
3520static int io_tee_prep(struct io_kiocb *req,
3521 const struct io_uring_sqe *sqe)
3522{
3523 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3524 return -EINVAL;
3525 return __io_splice_prep(req, sqe);
3526}
3527
3528static int io_tee(struct io_kiocb *req, bool force_nonblock)
3529{
3530 struct io_splice *sp = &req->splice;
3531 struct file *in = sp->file_in;
3532 struct file *out = sp->file_out;
3533 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3534 long ret = 0;
3535
3536 if (force_nonblock)
3537 return -EAGAIN;
3538 if (sp->len)
3539 ret = do_tee(in, out, sp->len, flags);
3540
3541 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3542 req->flags &= ~REQ_F_NEED_CLEANUP;
3543
f2a8d5c7
PB
3544 if (ret != sp->len)
3545 req_set_fail_links(req);
e1e16097 3546 io_req_complete(req, ret);
f2a8d5c7
PB
3547 return 0;
3548}
3549
3550static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3551{
 3552	struct io_splice *sp = &req->splice;
3553
3554 sp->off_in = READ_ONCE(sqe->splice_off_in);
3555 sp->off_out = READ_ONCE(sqe->off);
3556 return __io_splice_prep(req, sqe);
3557}
3558
014db007 3559static int io_splice(struct io_kiocb *req, bool force_nonblock)
7d67af2c
PB
3560{
3561 struct io_splice *sp = &req->splice;
3562 struct file *in = sp->file_in;
3563 struct file *out = sp->file_out;
3564 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3565 loff_t *poff_in, *poff_out;
c9687426 3566 long ret = 0;
7d67af2c 3567
2fb3e822
PB
3568 if (force_nonblock)
3569 return -EAGAIN;
7d67af2c
PB
3570
3571 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3572 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 3573
948a7749 3574 if (sp->len)
c9687426 3575 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c
PB
3576
3577 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3578 req->flags &= ~REQ_F_NEED_CLEANUP;
3579
7d67af2c
PB
3580 if (ret != sp->len)
3581 req_set_fail_links(req);
e1e16097 3582 io_req_complete(req, ret);
7d67af2c
PB
3583 return 0;
3584}
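/*
 * A small liburing-side sketch (assumed userspace helper, not part of the
 * kernel) for the IORING_OP_SPLICE request serviced by io_splice() above.
 * Passing -1 for either offset maps to a NULL poff_in/poff_out in the
 * handler, i.e. the current file position is used. Submission and completion
 * follow the same pattern as the write sketch earlier.
 */
#include <liburing.h>

static void prep_splice_sqe(struct io_uring_sqe *sqe, int fd_in, int fd_out,
			    unsigned int nbytes)
{
	/* flags = 0; SPLICE_F_FD_IN_FIXED would treat fd_in as a fixed file */
	io_uring_prep_splice(sqe, fd_in, -1, fd_out, -1, nbytes, 0);
}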
3585
2b188cc1
JA
3586/*
3587 * IORING_OP_NOP just posts a completion event, nothing else.
3588 */
229a7b63 3589static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
2b188cc1
JA
3590{
3591 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 3592
def596e9
JA
3593 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3594 return -EINVAL;
3595
229a7b63 3596 __io_req_complete(req, 0, 0, cs);
2b188cc1
JA
3597 return 0;
3598}
3599
3529d8c2 3600static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 3601{
6b06314c 3602 struct io_ring_ctx *ctx = req->ctx;
c992fe29 3603
09bb8394
JA
3604 if (!req->file)
3605 return -EBADF;
c992fe29 3606
6b06314c 3607 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 3608 return -EINVAL;
edafccee 3609 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
3610 return -EINVAL;
3611
8ed8d3c3
JA
3612 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3613 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3614 return -EINVAL;
3615
3616 req->sync.off = READ_ONCE(sqe->off);
3617 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
3618 return 0;
3619}
3620
ac45abc0 3621static int io_fsync(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 3622{
8ed8d3c3 3623 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
3624 int ret;
3625
ac45abc0
PB
3626 /* fsync always requires a blocking context */
3627 if (force_nonblock)
3628 return -EAGAIN;
3629
9adbd45d 3630 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
3631 end > 0 ? end : LLONG_MAX,
3632 req->sync.flags & IORING_FSYNC_DATASYNC);
3633 if (ret < 0)
3634 req_set_fail_links(req);
e1e16097 3635 io_req_complete(req, ret);
c992fe29
CH
3636 return 0;
3637}
3638
d63d1b5e
JA
3639static int io_fallocate_prep(struct io_kiocb *req,
3640 const struct io_uring_sqe *sqe)
3641{
3642 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3643 return -EINVAL;
3232dd02
PB
3644 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3645 return -EINVAL;
d63d1b5e
JA
3646
3647 req->sync.off = READ_ONCE(sqe->off);
3648 req->sync.len = READ_ONCE(sqe->addr);
3649 req->sync.mode = READ_ONCE(sqe->len);
3650 return 0;
3651}
3652
014db007 3653static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
5d17b4a4 3654{
ac45abc0
PB
3655 int ret;
3656
d63d1b5e 3657	/* fallocate always requires a blocking context */
ac45abc0 3658 if (force_nonblock)
5d17b4a4 3659 return -EAGAIN;
ac45abc0
PB
3660 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3661 req->sync.len);
ac45abc0
PB
3662 if (ret < 0)
3663 req_set_fail_links(req);
e1e16097 3664 io_req_complete(req, ret);
5d17b4a4
JA
3665 return 0;
3666}
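/*
 * Hedged userspace sketch (liburing): preparing the IORING_OP_FALLOCATE
 * request handled above. Since vfs_fallocate() always needs a blocking
 * context, the kernel punts this to io-wq; the submitter just sees cqe->res.
 * Parameter names are illustrative.
 */
#include <sys/types.h>
#include <liburing.h>

static void prep_fallocate_sqe(struct io_uring_sqe *sqe, int fd, off_t size)
{
	/* mode 0 == allocate and extend the file to 'size' bytes */
	io_uring_prep_fallocate(sqe, fd, 0, 0, size);
}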
3667
ec65fea5 3668static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 3669{
f8748881 3670 const char __user *fname;
15b71abe 3671 int ret;
b7bb4f7d 3672
ec65fea5 3673 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 3674 return -EINVAL;
ec65fea5 3675 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 3676 return -EBADF;
03b1230c 3677
ec65fea5
PB
3678 /* open.how should be already initialised */
3679 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
08a1d26e 3680 req->open.how.flags |= O_LARGEFILE;
3529d8c2 3681
25e72d10
PB
3682 req->open.dfd = READ_ONCE(sqe->fd);
3683 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 3684 req->open.filename = getname(fname);
15b71abe
JA
3685 if (IS_ERR(req->open.filename)) {
3686 ret = PTR_ERR(req->open.filename);
3687 req->open.filename = NULL;
3688 return ret;
3689 }
4022e7af 3690 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 3691 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 3692 return 0;
03b1230c
JA
3693}
3694
ec65fea5
PB
3695static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3696{
3697 u64 flags, mode;
3698
4eb8dded
JA
3699 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3700 return -EINVAL;
ec65fea5
PB
3701 if (req->flags & REQ_F_NEED_CLEANUP)
3702 return 0;
3703 mode = READ_ONCE(sqe->len);
3704 flags = READ_ONCE(sqe->open_flags);
3705 req->open.how = build_open_how(flags, mode);
3706 return __io_openat_prep(req, sqe);
3707}
3708
cebdb986 3709static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 3710{
cebdb986 3711 struct open_how __user *how;
cebdb986 3712 size_t len;
0fa03c62
JA
3713 int ret;
3714
4eb8dded
JA
3715 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3716 return -EINVAL;
0bdbdd08
PB
3717 if (req->flags & REQ_F_NEED_CLEANUP)
3718 return 0;
cebdb986
JA
3719 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3720 len = READ_ONCE(sqe->len);
cebdb986
JA
3721 if (len < OPEN_HOW_SIZE_VER0)
3722 return -EINVAL;
3529d8c2 3723
cebdb986
JA
3724 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3725 len);
3726 if (ret)
3727 return ret;
3529d8c2 3728
ec65fea5 3729 return __io_openat_prep(req, sqe);
cebdb986
JA
3730}
3731
014db007 3732static int io_openat2(struct io_kiocb *req, bool force_nonblock)
15b71abe
JA
3733{
3734 struct open_flags op;
15b71abe
JA
3735 struct file *file;
3736 int ret;
3737
f86cd20c 3738 if (force_nonblock)
15b71abe 3739 return -EAGAIN;
15b71abe 3740
cebdb986 3741 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
3742 if (ret)
3743 goto err;
3744
4022e7af 3745 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
15b71abe
JA
3746 if (ret < 0)
3747 goto err;
3748
3749 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3750 if (IS_ERR(file)) {
3751 put_unused_fd(ret);
3752 ret = PTR_ERR(file);
3753 } else {
3754 fsnotify_open(file);
3755 fd_install(ret, file);
3756 }
3757err:
3758 putname(req->open.filename);
8fef80bf 3759 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe
JA
3760 if (ret < 0)
3761 req_set_fail_links(req);
e1e16097 3762 io_req_complete(req, ret);
15b71abe
JA
3763 return 0;
3764}
3765
014db007 3766static int io_openat(struct io_kiocb *req, bool force_nonblock)
cebdb986 3767{
014db007 3768 return io_openat2(req, force_nonblock);
cebdb986
JA
3769}
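/*
 * Userspace sketch (liburing, hedged): issuing the IORING_OP_OPENAT2 request
 * implemented above. io_openat2() always returns -EAGAIN under
 * force_nonblock, so the open itself runs from io-wq; cqe->res carries the
 * new fd or a negative error. 'path' is an assumed caller-supplied string,
 * and the open_how struct must stay valid until submission.
 */
#include <fcntl.h>
#include <string.h>
#include <linux/openat2.h>
#include <liburing.h>

static void prep_openat2_sqe(struct io_uring_sqe *sqe, const char *path,
			     struct open_how *how)
{
	memset(how, 0, sizeof(*how));
	how->flags = O_RDONLY | O_CLOEXEC;
	io_uring_prep_openat2(sqe, AT_FDCWD, path, how);
}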
3770
067524e9
JA
3771static int io_remove_buffers_prep(struct io_kiocb *req,
3772 const struct io_uring_sqe *sqe)
3773{
3774 struct io_provide_buf *p = &req->pbuf;
3775 u64 tmp;
3776
3777 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3778 return -EINVAL;
3779
3780 tmp = READ_ONCE(sqe->fd);
3781 if (!tmp || tmp > USHRT_MAX)
3782 return -EINVAL;
3783
3784 memset(p, 0, sizeof(*p));
3785 p->nbufs = tmp;
3786 p->bgid = READ_ONCE(sqe->buf_group);
3787 return 0;
3788}
3789
3790static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3791 int bgid, unsigned nbufs)
3792{
3793 unsigned i = 0;
3794
3795 /* shouldn't happen */
3796 if (!nbufs)
3797 return 0;
3798
3799 /* the head kbuf is the list itself */
3800 while (!list_empty(&buf->list)) {
3801 struct io_buffer *nxt;
3802
3803 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3804 list_del(&nxt->list);
3805 kfree(nxt);
3806 if (++i == nbufs)
3807 return i;
3808 }
3809 i++;
3810 kfree(buf);
3811 idr_remove(&ctx->io_buffer_idr, bgid);
3812
3813 return i;
3814}
3815
229a7b63
JA
3816static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
3817 struct io_comp_state *cs)
067524e9
JA
3818{
3819 struct io_provide_buf *p = &req->pbuf;
3820 struct io_ring_ctx *ctx = req->ctx;
3821 struct io_buffer *head;
3822 int ret = 0;
3823
3824 io_ring_submit_lock(ctx, !force_nonblock);
3825
3826 lockdep_assert_held(&ctx->uring_lock);
3827
3828 ret = -ENOENT;
3829 head = idr_find(&ctx->io_buffer_idr, p->bgid);
3830 if (head)
3831 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3832
3833 io_ring_submit_lock(ctx, !force_nonblock);
3834 if (ret < 0)
3835 req_set_fail_links(req);
229a7b63 3836 __io_req_complete(req, ret, 0, cs);
067524e9
JA
3837 return 0;
3838}
3839
ddf0322d
JA
3840static int io_provide_buffers_prep(struct io_kiocb *req,
3841 const struct io_uring_sqe *sqe)
3842{
3843 struct io_provide_buf *p = &req->pbuf;
3844 u64 tmp;
3845
3846 if (sqe->ioprio || sqe->rw_flags)
3847 return -EINVAL;
3848
3849 tmp = READ_ONCE(sqe->fd);
3850 if (!tmp || tmp > USHRT_MAX)
3851 return -E2BIG;
3852 p->nbufs = tmp;
3853 p->addr = READ_ONCE(sqe->addr);
3854 p->len = READ_ONCE(sqe->len);
3855
efe68c1c 3856 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
ddf0322d
JA
3857 return -EFAULT;
3858
3859 p->bgid = READ_ONCE(sqe->buf_group);
3860 tmp = READ_ONCE(sqe->off);
3861 if (tmp > USHRT_MAX)
3862 return -E2BIG;
3863 p->bid = tmp;
3864 return 0;
3865}
3866
3867static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3868{
3869 struct io_buffer *buf;
3870 u64 addr = pbuf->addr;
3871 int i, bid = pbuf->bid;
3872
3873 for (i = 0; i < pbuf->nbufs; i++) {
3874 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3875 if (!buf)
3876 break;
3877
3878 buf->addr = addr;
3879 buf->len = pbuf->len;
3880 buf->bid = bid;
3881 addr += pbuf->len;
3882 bid++;
3883 if (!*head) {
3884 INIT_LIST_HEAD(&buf->list);
3885 *head = buf;
3886 } else {
3887 list_add_tail(&buf->list, &(*head)->list);
3888 }
3889 }
3890
3891 return i ? i : -ENOMEM;
3892}
3893
229a7b63
JA
3894static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
3895 struct io_comp_state *cs)
ddf0322d
JA
3896{
3897 struct io_provide_buf *p = &req->pbuf;
3898 struct io_ring_ctx *ctx = req->ctx;
3899 struct io_buffer *head, *list;
3900 int ret = 0;
3901
3902 io_ring_submit_lock(ctx, !force_nonblock);
3903
3904 lockdep_assert_held(&ctx->uring_lock);
3905
3906 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
3907
3908 ret = io_add_buffers(p, &head);
3909 if (ret < 0)
3910 goto out;
3911
3912 if (!list) {
3913 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
3914 GFP_KERNEL);
3915 if (ret < 0) {
067524e9 3916 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d
JA
3917 goto out;
3918 }
3919 }
3920out:
3921 io_ring_submit_unlock(ctx, !force_nonblock);
3922 if (ret < 0)
3923 req_set_fail_links(req);
229a7b63 3924 __io_req_complete(req, ret, 0, cs);
ddf0322d 3925 return 0;
cebdb986
JA
3926}
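/*
 * Userspace sketch (liburing, assumptions noted): registering a group of
 * provided buffers that io_provide_buffers() above links into
 * ctx->io_buffer_idr. The prep helper fills the same sqe fields the kernel
 * reads back in io_provide_buffers_prep(): addr, len, fd (= number of
 * buffers), buf_group and off (= starting buffer id).
 */
#include <liburing.h>

static void prep_provide_buffers_sqe(struct io_uring_sqe *sqe, void *base,
				     int buf_len, int nbufs, int bgid)
{
	/* nbufs buffers of buf_len bytes each, in group bgid, ids 0..nbufs-1 */
	io_uring_prep_provide_buffers(sqe, base, buf_len, nbufs, bgid, 0);
}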
3927
3e4827b0
JA
3928static int io_epoll_ctl_prep(struct io_kiocb *req,
3929 const struct io_uring_sqe *sqe)
3930{
3931#if defined(CONFIG_EPOLL)
3932 if (sqe->ioprio || sqe->buf_index)
3933 return -EINVAL;
6ca56f84 3934 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
3232dd02 3935 return -EINVAL;
3e4827b0
JA
3936
3937 req->epoll.epfd = READ_ONCE(sqe->fd);
3938 req->epoll.op = READ_ONCE(sqe->len);
3939 req->epoll.fd = READ_ONCE(sqe->off);
3940
3941 if (ep_op_has_event(req->epoll.op)) {
3942 struct epoll_event __user *ev;
3943
3944 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
3945 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
3946 return -EFAULT;
3947 }
3948
3949 return 0;
3950#else
3951 return -EOPNOTSUPP;
3952#endif
3953}
3954
229a7b63
JA
3955static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
3956 struct io_comp_state *cs)
3e4827b0
JA
3957{
3958#if defined(CONFIG_EPOLL)
3959 struct io_epoll *ie = &req->epoll;
3960 int ret;
3961
3962 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
3963 if (force_nonblock && ret == -EAGAIN)
3964 return -EAGAIN;
3965
3966 if (ret < 0)
3967 req_set_fail_links(req);
229a7b63 3968 __io_req_complete(req, ret, 0, cs);
3e4827b0
JA
3969 return 0;
3970#else
3971 return -EOPNOTSUPP;
3972#endif
3973}
3974
c1ca757b
JA
3975static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3976{
3977#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3978 if (sqe->ioprio || sqe->buf_index || sqe->off)
3979 return -EINVAL;
3232dd02
PB
3980 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3981 return -EINVAL;
c1ca757b
JA
3982
3983 req->madvise.addr = READ_ONCE(sqe->addr);
3984 req->madvise.len = READ_ONCE(sqe->len);
3985 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
3986 return 0;
3987#else
3988 return -EOPNOTSUPP;
3989#endif
3990}
3991
014db007 3992static int io_madvise(struct io_kiocb *req, bool force_nonblock)
c1ca757b
JA
3993{
3994#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3995 struct io_madvise *ma = &req->madvise;
3996 int ret;
3997
3998 if (force_nonblock)
3999 return -EAGAIN;
4000
4001 ret = do_madvise(ma->addr, ma->len, ma->advice);
4002 if (ret < 0)
4003 req_set_fail_links(req);
e1e16097 4004 io_req_complete(req, ret);
c1ca757b
JA
4005 return 0;
4006#else
4007 return -EOPNOTSUPP;
4008#endif
4009}
4010
4840e418
JA
4011static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4012{
4013 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4014 return -EINVAL;
3232dd02
PB
4015 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4016 return -EINVAL;
4840e418
JA
4017
4018 req->fadvise.offset = READ_ONCE(sqe->off);
4019 req->fadvise.len = READ_ONCE(sqe->len);
4020 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4021 return 0;
4022}
4023
014db007 4024static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
4840e418
JA
4025{
4026 struct io_fadvise *fa = &req->fadvise;
4027 int ret;
4028
3e69426d
JA
4029 if (force_nonblock) {
4030 switch (fa->advice) {
4031 case POSIX_FADV_NORMAL:
4032 case POSIX_FADV_RANDOM:
4033 case POSIX_FADV_SEQUENTIAL:
4034 break;
4035 default:
4036 return -EAGAIN;
4037 }
4038 }
4840e418
JA
4039
4040 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4041 if (ret < 0)
4042 req_set_fail_links(req);
e1e16097 4043 io_req_complete(req, ret);
4840e418
JA
4044 return 0;
4045}
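/*
 * Userspace sketch (liburing): queueing IORING_OP_FADVISE. As io_fadvise()
 * above shows, POSIX_FADV_NORMAL/RANDOM/SEQUENTIAL are cheap enough to run
 * inline during nonblocking submission, while other advice values are punted
 * to io-wq. Parameter names are illustrative.
 */
#include <fcntl.h>
#include <sys/types.h>
#include <liburing.h>

static void prep_fadvise_sqe(struct io_uring_sqe *sqe, int fd, __u64 offset,
			     off_t len, int advice)
{
	io_uring_prep_fadvise(sqe, fd, offset, len, advice);
}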
4046
eddc7ef5
JA
4047static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4048{
6ca56f84 4049 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
3232dd02 4050 return -EINVAL;
eddc7ef5
JA
4051 if (sqe->ioprio || sqe->buf_index)
4052 return -EINVAL;
9c280f90 4053 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4054 return -EBADF;
eddc7ef5 4055
1d9e1288
BM
4056 req->statx.dfd = READ_ONCE(sqe->fd);
4057 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 4058 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
4059 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4060 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5
JA
4061
4062 return 0;
4063}
4064
014db007 4065static int io_statx(struct io_kiocb *req, bool force_nonblock)
eddc7ef5 4066{
1d9e1288 4067 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
4068 int ret;
4069
5b0bbee4
JA
4070 if (force_nonblock) {
4071 /* only need file table for an actual valid fd */
4072 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4073 req->flags |= REQ_F_NO_FILE_TABLE;
eddc7ef5 4074 return -EAGAIN;
5b0bbee4 4075 }
eddc7ef5 4076
e62753e4
BM
4077 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4078 ctx->buffer);
eddc7ef5 4079
eddc7ef5
JA
4080 if (ret < 0)
4081 req_set_fail_links(req);
e1e16097 4082 io_req_complete(req, ret);
eddc7ef5
JA
4083 return 0;
4084}
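/*
 * Userspace sketch (liburing, hedged): an IORING_OP_STATX request matching
 * io_statx() above. The statx buffer and path must stay valid until the
 * request completes, since do_statx() runs from io-wq.
 */
#include <fcntl.h>
#include <sys/stat.h>
#include <liburing.h>

static void prep_statx_sqe(struct io_uring_sqe *sqe, const char *path,
			   struct statx *stx)
{
	io_uring_prep_statx(sqe, AT_FDCWD, path, 0, STATX_BASIC_STATS, stx);
}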
4085
b5dba59e
JA
4086static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4087{
4088 /*
4089 * If we queue this for async, it must not be cancellable. That would
7cdaf587
XW
 4090	 * leave the 'file' in an indeterminate state, and we need to modify
 4091	 * io_wq_work.flags here, so initialize io_wq_work first.
b5dba59e 4092 */
7cdaf587 4093 io_req_init_async(req);
b5dba59e
JA
4094 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
4095
3232dd02
PB
4096 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4097 return -EINVAL;
b5dba59e
JA
4098 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4099 sqe->rw_flags || sqe->buf_index)
4100 return -EINVAL;
9c280f90 4101 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 4102 return -EBADF;
b5dba59e
JA
4103
4104 req->close.fd = READ_ONCE(sqe->fd);
0f212204 4105 if ((req->file && req->file->f_op == &io_uring_fops))
fd2206e4 4106 return -EBADF;
b5dba59e 4107
3af73b28 4108 req->close.put_file = NULL;
b5dba59e 4109 return 0;
b5dba59e
JA
4110}
4111
229a7b63
JA
4112static int io_close(struct io_kiocb *req, bool force_nonblock,
4113 struct io_comp_state *cs)
b5dba59e 4114{
3af73b28 4115 struct io_close *close = &req->close;
b5dba59e
JA
4116 int ret;
4117
3af73b28
PB
4118 /* might be already done during nonblock submission */
4119 if (!close->put_file) {
4120 ret = __close_fd_get_file(close->fd, &close->put_file);
4121 if (ret < 0)
4122 return (ret == -ENOENT) ? -EBADF : ret;
4123 }
b5dba59e
JA
4124
4125 /* if the file has a flush method, be safe and punt to async */
3af73b28 4126 if (close->put_file->f_op->flush && force_nonblock) {
24c74678
PB
4127 /* was never set, but play safe */
4128 req->flags &= ~REQ_F_NOWAIT;
0bf0eefd 4129 /* avoid grabbing files - we don't need the files */
24c74678 4130 req->flags |= REQ_F_NO_FILE_TABLE;
0bf0eefd 4131 return -EAGAIN;
a2100672 4132 }
b5dba59e 4133
3af73b28
PB
4134 /* No ->flush() or already async, safely close from here */
4135 ret = filp_close(close->put_file, req->work.files);
4136 if (ret < 0)
4137 req_set_fail_links(req);
3af73b28
PB
4138 fput(close->put_file);
4139 close->put_file = NULL;
229a7b63 4140 __io_req_complete(req, ret, 0, cs);
1a417f4e 4141 return 0;
b5dba59e
JA
4142}
4143
3529d8c2 4144static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
4145{
4146 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4
JA
4147
4148 if (!req->file)
4149 return -EBADF;
5d17b4a4
JA
4150
4151 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4152 return -EINVAL;
4153 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4154 return -EINVAL;
4155
8ed8d3c3
JA
4156 req->sync.off = READ_ONCE(sqe->off);
4157 req->sync.len = READ_ONCE(sqe->len);
4158 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
4159 return 0;
4160}
4161
ac45abc0 4162static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 4163{
8ed8d3c3
JA
4164 int ret;
4165
ac45abc0
PB
4166 /* sync_file_range always requires a blocking context */
4167 if (force_nonblock)
4168 return -EAGAIN;
4169
9adbd45d 4170 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
4171 req->sync.flags);
4172 if (ret < 0)
4173 req_set_fail_links(req);
e1e16097 4174 io_req_complete(req, ret);
5d17b4a4
JA
4175 return 0;
4176}
4177
469956e8 4178#if defined(CONFIG_NET)
02d27d89
PB
4179static int io_setup_async_msg(struct io_kiocb *req,
4180 struct io_async_msghdr *kmsg)
4181{
e8c2bc1f
JA
4182 struct io_async_msghdr *async_msg = req->async_data;
4183
4184 if (async_msg)
02d27d89 4185 return -EAGAIN;
e8c2bc1f 4186 if (io_alloc_async_data(req)) {
02d27d89
PB
4187 if (kmsg->iov != kmsg->fast_iov)
4188 kfree(kmsg->iov);
4189 return -ENOMEM;
4190 }
e8c2bc1f 4191 async_msg = req->async_data;
02d27d89 4192 req->flags |= REQ_F_NEED_CLEANUP;
e8c2bc1f 4193 memcpy(async_msg, kmsg, sizeof(*kmsg));
02d27d89
PB
4194 return -EAGAIN;
4195}
4196
2ae523ed
PB
4197static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4198 struct io_async_msghdr *iomsg)
4199{
4200 iomsg->iov = iomsg->fast_iov;
4201 iomsg->msg.msg_name = &iomsg->addr;
4202 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4203 req->sr_msg.msg_flags, &iomsg->iov);
4204}
4205
3529d8c2 4206static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 4207{
e8c2bc1f 4208 struct io_async_msghdr *async_msg = req->async_data;
e47293fd 4209 struct io_sr_msg *sr = &req->sr_msg;
99bc4c38 4210 int ret;
03b1230c 4211
d2b6f48b
PB
4212 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4213 return -EINVAL;
4214
e47293fd 4215 sr->msg_flags = READ_ONCE(sqe->msg_flags);
270a5940 4216 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 4217 sr->len = READ_ONCE(sqe->len);
3529d8c2 4218
d8768362
JA
4219#ifdef CONFIG_COMPAT
4220 if (req->ctx->compat)
4221 sr->msg_flags |= MSG_CMSG_COMPAT;
4222#endif
4223
e8c2bc1f 4224 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
3529d8c2 4225 return 0;
5f798bea
PB
4226 /* iovec is already imported */
4227 if (req->flags & REQ_F_NEED_CLEANUP)
4228 return 0;
3529d8c2 4229
e8c2bc1f 4230 ret = io_sendmsg_copy_hdr(req, async_msg);
99bc4c38
PB
4231 if (!ret)
4232 req->flags |= REQ_F_NEED_CLEANUP;
4233 return ret;
03b1230c
JA
4234}
4235
229a7b63
JA
4236static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4237 struct io_comp_state *cs)
aa1fa28f 4238{
6b754c8b 4239 struct io_async_msghdr iomsg, *kmsg;
0fa03c62 4240 struct socket *sock;
7a7cacba 4241 unsigned flags;
0fa03c62
JA
4242 int ret;
4243
0fa03c62 4244 sock = sock_from_file(req->file, &ret);
7a7cacba
PB
4245 if (unlikely(!sock))
4246 return ret;
3529d8c2 4247
e8c2bc1f
JA
4248 if (req->async_data) {
4249 kmsg = req->async_data;
4250 kmsg->msg.msg_name = &kmsg->addr;
7a7cacba
PB
4251 /* if iov is set, it's allocated already */
4252 if (!kmsg->iov)
4253 kmsg->iov = kmsg->fast_iov;
4254 kmsg->msg.msg_iter.iov = kmsg->iov;
4255 } else {
4256 ret = io_sendmsg_copy_hdr(req, &iomsg);
4257 if (ret)
4258 return ret;
4259 kmsg = &iomsg;
0fa03c62 4260 }
0fa03c62 4261
7a7cacba
PB
4262 flags = req->sr_msg.msg_flags;
4263 if (flags & MSG_DONTWAIT)
4264 req->flags |= REQ_F_NOWAIT;
4265 else if (force_nonblock)
4266 flags |= MSG_DONTWAIT;
e47293fd 4267
7a7cacba
PB
4268 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4269 if (force_nonblock && ret == -EAGAIN)
4270 return io_setup_async_msg(req, kmsg);
4271 if (ret == -ERESTARTSYS)
4272 ret = -EINTR;
0fa03c62 4273
6b754c8b 4274 if (kmsg->iov != kmsg->fast_iov)
0b416c3e 4275 kfree(kmsg->iov);
99bc4c38 4276 req->flags &= ~REQ_F_NEED_CLEANUP;
4e88d6e7
JA
4277 if (ret < 0)
4278 req_set_fail_links(req);
229a7b63 4279 __io_req_complete(req, ret, 0, cs);
5d17b4a4 4280 return 0;
03b1230c 4281}
aa1fa28f 4282
229a7b63
JA
4283static int io_send(struct io_kiocb *req, bool force_nonblock,
4284 struct io_comp_state *cs)
fddaface 4285{
7a7cacba
PB
4286 struct io_sr_msg *sr = &req->sr_msg;
4287 struct msghdr msg;
4288 struct iovec iov;
fddaface 4289 struct socket *sock;
7a7cacba 4290 unsigned flags;
fddaface
JA
4291 int ret;
4292
fddaface 4293 sock = sock_from_file(req->file, &ret);
7a7cacba
PB
4294 if (unlikely(!sock))
4295 return ret;
fddaface 4296
7a7cacba
PB
4297 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4298 if (unlikely(ret))
14db8411 4299 return ret;
fddaface 4300
7a7cacba
PB
4301 msg.msg_name = NULL;
4302 msg.msg_control = NULL;
4303 msg.msg_controllen = 0;
4304 msg.msg_namelen = 0;
fddaface 4305
7a7cacba
PB
4306 flags = req->sr_msg.msg_flags;
4307 if (flags & MSG_DONTWAIT)
4308 req->flags |= REQ_F_NOWAIT;
4309 else if (force_nonblock)
4310 flags |= MSG_DONTWAIT;
fddaface 4311
7a7cacba
PB
4312 msg.msg_flags = flags;
4313 ret = sock_sendmsg(sock, &msg);
4314 if (force_nonblock && ret == -EAGAIN)
4315 return -EAGAIN;
4316 if (ret == -ERESTARTSYS)
4317 ret = -EINTR;
fddaface 4318
fddaface
JA
4319 if (ret < 0)
4320 req_set_fail_links(req);
229a7b63 4321 __io_req_complete(req, ret, 0, cs);
fddaface 4322 return 0;
fddaface
JA
4323}
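/*
 * Userspace sketch (liburing): the IORING_OP_SEND path handled by io_send()
 * above. Setting MSG_DONTWAIT in msg_flags marks the request REQ_F_NOWAIT, so
 * a would-block result is surfaced to the application instead of being
 * retried from io-wq. Assumes a connected socket fd.
 */
#include <sys/socket.h>
#include <liburing.h>

static void prep_send_sqe(struct io_uring_sqe *sqe, int sockfd,
			  const void *buf, size_t len)
{
	io_uring_prep_send(sqe, sockfd, buf, len, MSG_NOSIGNAL);
}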
4324
1400e697
PB
4325static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4326 struct io_async_msghdr *iomsg)
52de1fe1
JA
4327{
4328 struct io_sr_msg *sr = &req->sr_msg;
4329 struct iovec __user *uiov;
4330 size_t iov_len;
4331 int ret;
4332
1400e697
PB
4333 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4334 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
4335 if (ret)
4336 return ret;
4337
4338 if (req->flags & REQ_F_BUFFER_SELECT) {
4339 if (iov_len > 1)
4340 return -EINVAL;
1400e697 4341 if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
52de1fe1 4342 return -EFAULT;
1400e697
PB
4343 sr->len = iomsg->iov[0].iov_len;
4344 iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
52de1fe1 4345 sr->len);
1400e697 4346 iomsg->iov = NULL;
52de1fe1
JA
4347 } else {
4348 ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
1400e697 4349 &iomsg->iov, &iomsg->msg.msg_iter);
52de1fe1
JA
4350 if (ret > 0)
4351 ret = 0;
4352 }
4353
4354 return ret;
4355}
4356
4357#ifdef CONFIG_COMPAT
4358static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 4359 struct io_async_msghdr *iomsg)
52de1fe1
JA
4360{
4361 struct compat_msghdr __user *msg_compat;
4362 struct io_sr_msg *sr = &req->sr_msg;
4363 struct compat_iovec __user *uiov;
4364 compat_uptr_t ptr;
4365 compat_size_t len;
4366 int ret;
4367
270a5940 4368 msg_compat = (struct compat_msghdr __user *) sr->umsg;
1400e697 4369 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
52de1fe1
JA
4370 &ptr, &len);
4371 if (ret)
4372 return ret;
4373
4374 uiov = compat_ptr(ptr);
4375 if (req->flags & REQ_F_BUFFER_SELECT) {
4376 compat_ssize_t clen;
4377
4378 if (len > 1)
4379 return -EINVAL;
4380 if (!access_ok(uiov, sizeof(*uiov)))
4381 return -EFAULT;
4382 if (__get_user(clen, &uiov->iov_len))
4383 return -EFAULT;
4384 if (clen < 0)
4385 return -EINVAL;
1400e697
PB
4386 sr->len = iomsg->iov[0].iov_len;
4387 iomsg->iov = NULL;
52de1fe1
JA
4388 } else {
4389 ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
1400e697
PB
4390 &iomsg->iov,
4391 &iomsg->msg.msg_iter);
52de1fe1
JA
4392 if (ret < 0)
4393 return ret;
4394 }
4395
4396 return 0;
4397}
4398#endif
4399
1400e697
PB
4400static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4401 struct io_async_msghdr *iomsg)
52de1fe1 4402{
1400e697
PB
4403 iomsg->msg.msg_name = &iomsg->addr;
4404 iomsg->iov = iomsg->fast_iov;
52de1fe1
JA
4405
4406#ifdef CONFIG_COMPAT
4407 if (req->ctx->compat)
1400e697 4408 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 4409#endif
52de1fe1 4410
1400e697 4411 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
4412}
4413
bcda7baa 4414static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
7fbb1b54 4415 bool needs_lock)
bcda7baa
JA
4416{
4417 struct io_sr_msg *sr = &req->sr_msg;
4418 struct io_buffer *kbuf;
4419
bcda7baa
JA
4420 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4421 if (IS_ERR(kbuf))
4422 return kbuf;
4423
4424 sr->kbuf = kbuf;
4425 req->flags |= REQ_F_BUFFER_SELECTED;
bcda7baa 4426 return kbuf;
fddaface
JA
4427}
4428
7fbb1b54
PB
4429static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4430{
4431 return io_put_kbuf(req, req->sr_msg.kbuf);
4432}
4433
3529d8c2
JA
4434static int io_recvmsg_prep(struct io_kiocb *req,
4435 const struct io_uring_sqe *sqe)
aa1fa28f 4436{
e8c2bc1f 4437 struct io_async_msghdr *async_msg = req->async_data;
e47293fd 4438 struct io_sr_msg *sr = &req->sr_msg;
99bc4c38 4439 int ret;
3529d8c2 4440
d2b6f48b
PB
4441 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4442 return -EINVAL;
4443
3529d8c2 4444 sr->msg_flags = READ_ONCE(sqe->msg_flags);
270a5940 4445 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 4446 sr->len = READ_ONCE(sqe->len);
bcda7baa 4447 sr->bgid = READ_ONCE(sqe->buf_group);
06b76d44 4448
d8768362
JA
4449#ifdef CONFIG_COMPAT
4450 if (req->ctx->compat)
4451 sr->msg_flags |= MSG_CMSG_COMPAT;
4452#endif
4453
e8c2bc1f 4454 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
06b76d44 4455 return 0;
5f798bea
PB
4456 /* iovec is already imported */
4457 if (req->flags & REQ_F_NEED_CLEANUP)
4458 return 0;
03b1230c 4459
e8c2bc1f 4460 ret = io_recvmsg_copy_hdr(req, async_msg);
99bc4c38
PB
4461 if (!ret)
4462 req->flags |= REQ_F_NEED_CLEANUP;
4463 return ret;
aa1fa28f
JA
4464}
4465
229a7b63
JA
4466static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4467 struct io_comp_state *cs)
aa1fa28f 4468{
6b754c8b 4469 struct io_async_msghdr iomsg, *kmsg;
03b1230c 4470 struct socket *sock;
7fbb1b54 4471 struct io_buffer *kbuf;
7a7cacba 4472 unsigned flags;
52de1fe1 4473 int ret, cflags = 0;
03b1230c 4474
03b1230c 4475 sock = sock_from_file(req->file, &ret);
7a7cacba
PB
4476 if (unlikely(!sock))
4477 return ret;
3529d8c2 4478
e8c2bc1f
JA
4479 if (req->async_data) {
4480 kmsg = req->async_data;
4481 kmsg->msg.msg_name = &kmsg->addr;
7a7cacba
PB
4482 /* if iov is set, it's allocated already */
4483 if (!kmsg->iov)
4484 kmsg->iov = kmsg->fast_iov;
4485 kmsg->msg.msg_iter.iov = kmsg->iov;
4486 } else {
4487 ret = io_recvmsg_copy_hdr(req, &iomsg);
4488 if (ret)
681fda8d 4489 return ret;
7a7cacba
PB
4490 kmsg = &iomsg;
4491 }
03b1230c 4492
bc02ef33 4493 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 4494 kbuf = io_recv_buffer_select(req, !force_nonblock);
bc02ef33 4495 if (IS_ERR(kbuf))
52de1fe1 4496 return PTR_ERR(kbuf);
7a7cacba
PB
4497 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4498 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
4499 1, req->sr_msg.len);
4500 }
52de1fe1 4501
7a7cacba
PB
4502 flags = req->sr_msg.msg_flags;
4503 if (flags & MSG_DONTWAIT)
4504 req->flags |= REQ_F_NOWAIT;
4505 else if (force_nonblock)
4506 flags |= MSG_DONTWAIT;
e47293fd 4507
7a7cacba
PB
4508 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4509 kmsg->uaddr, flags);
0e1b6fe3
PB
4510 if (force_nonblock && ret == -EAGAIN)
4511 return io_setup_async_msg(req, kmsg);
7a7cacba
PB
4512 if (ret == -ERESTARTSYS)
4513 ret = -EINTR;
03b1230c 4514
7fbb1b54
PB
4515 if (req->flags & REQ_F_BUFFER_SELECTED)
4516 cflags = io_put_recv_kbuf(req);
6b754c8b 4517 if (kmsg->iov != kmsg->fast_iov)
0b416c3e 4518 kfree(kmsg->iov);
99bc4c38 4519 req->flags &= ~REQ_F_NEED_CLEANUP;
4e88d6e7
JA
4520 if (ret < 0)
4521 req_set_fail_links(req);
229a7b63 4522 __io_req_complete(req, ret, cflags, cs);
03b1230c 4523 return 0;
0fa03c62 4524}
5d17b4a4 4525
229a7b63
JA
4526static int io_recv(struct io_kiocb *req, bool force_nonblock,
4527 struct io_comp_state *cs)
fddaface 4528{
6b754c8b 4529 struct io_buffer *kbuf;
7a7cacba
PB
4530 struct io_sr_msg *sr = &req->sr_msg;
4531 struct msghdr msg;
4532 void __user *buf = sr->buf;
fddaface 4533 struct socket *sock;
7a7cacba
PB
4534 struct iovec iov;
4535 unsigned flags;
bcda7baa 4536 int ret, cflags = 0;
fddaface 4537
fddaface 4538 sock = sock_from_file(req->file, &ret);
7a7cacba
PB
4539 if (unlikely(!sock))
4540 return ret;
fddaface 4541
bc02ef33 4542 if (req->flags & REQ_F_BUFFER_SELECT) {
7fbb1b54 4543 kbuf = io_recv_buffer_select(req, !force_nonblock);
bcda7baa
JA
4544 if (IS_ERR(kbuf))
4545 return PTR_ERR(kbuf);
7a7cacba 4546 buf = u64_to_user_ptr(kbuf->addr);
bc02ef33 4547 }
bcda7baa 4548
7a7cacba 4549 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
4550 if (unlikely(ret))
4551 goto out_free;
fddaface 4552
7a7cacba
PB
4553 msg.msg_name = NULL;
4554 msg.msg_control = NULL;
4555 msg.msg_controllen = 0;
4556 msg.msg_namelen = 0;
4557 msg.msg_iocb = NULL;
4558 msg.msg_flags = 0;
fddaface 4559
7a7cacba
PB
4560 flags = req->sr_msg.msg_flags;
4561 if (flags & MSG_DONTWAIT)
4562 req->flags |= REQ_F_NOWAIT;
4563 else if (force_nonblock)
4564 flags |= MSG_DONTWAIT;
4565
4566 ret = sock_recvmsg(sock, &msg, flags);
4567 if (force_nonblock && ret == -EAGAIN)
4568 return -EAGAIN;
4569 if (ret == -ERESTARTSYS)
4570 ret = -EINTR;
14c32eee 4571out_free:
7fbb1b54
PB
4572 if (req->flags & REQ_F_BUFFER_SELECTED)
4573 cflags = io_put_recv_kbuf(req);
fddaface
JA
4574 if (ret < 0)
4575 req_set_fail_links(req);
229a7b63 4576 __io_req_complete(req, ret, cflags, cs);
fddaface 4577 return 0;
fddaface
JA
4578}
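/*
 * Userspace sketch (liburing, hedged): an IORING_OP_RECV using the provided
 * buffers registered earlier. With IOSQE_BUFFER_SELECT the kernel picks a
 * buffer from group bgid via io_recv_buffer_select() above and reports the
 * chosen buffer id in the upper bits of cqe->flags.
 */
#include <liburing.h>

static void prep_recv_select_sqe(struct io_uring_sqe *sqe, int sockfd,
				 size_t max_len, int bgid)
{
	io_uring_prep_recv(sqe, sockfd, NULL, max_len, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = bgid;
}

static int recv_buffer_id(const struct io_uring_cqe *cqe)
{
	/* valid only when IORING_CQE_F_BUFFER is set in cqe->flags */
	return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}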
4579
3529d8c2 4580static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 4581{
8ed8d3c3
JA
4582 struct io_accept *accept = &req->accept;
4583
17f2fe35
JA
4584 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4585 return -EINVAL;
8042d6ce 4586 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
4587 return -EINVAL;
4588
d55e5f5b
JA
4589 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4590 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 4591 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 4592 accept->nofile = rlimit(RLIMIT_NOFILE);
8ed8d3c3 4593 return 0;
8ed8d3c3 4594}
17f2fe35 4595
229a7b63
JA
4596static int io_accept(struct io_kiocb *req, bool force_nonblock,
4597 struct io_comp_state *cs)
8ed8d3c3
JA
4598{
4599 struct io_accept *accept = &req->accept;
ac45abc0 4600 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
8ed8d3c3
JA
4601 int ret;
4602
e697deed
JX
4603 if (req->file->f_flags & O_NONBLOCK)
4604 req->flags |= REQ_F_NOWAIT;
4605
8ed8d3c3 4606 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
09952e3e
JA
4607 accept->addr_len, accept->flags,
4608 accept->nofile);
8ed8d3c3 4609 if (ret == -EAGAIN && force_nonblock)
17f2fe35 4610 return -EAGAIN;
ac45abc0
PB
4611 if (ret < 0) {
4612 if (ret == -ERESTARTSYS)
4613 ret = -EINTR;
4e88d6e7 4614 req_set_fail_links(req);
ac45abc0 4615 }
229a7b63 4616 __io_req_complete(req, ret, 0, cs);
17f2fe35 4617 return 0;
8ed8d3c3
JA
4618}
4619
3529d8c2 4620static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 4621{
3529d8c2 4622 struct io_connect *conn = &req->connect;
e8c2bc1f 4623 struct io_async_connect *io = req->async_data;
f499a021 4624
3fbb51c1
JA
4625 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4626 return -EINVAL;
4627 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4628 return -EINVAL;
4629
3529d8c2
JA
4630 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4631 conn->addr_len = READ_ONCE(sqe->addr2);
4632
4633 if (!io)
4634 return 0;
4635
4636 return move_addr_to_kernel(conn->addr, conn->addr_len,
e8c2bc1f 4637 &io->address);
f499a021
JA
4638}
4639
229a7b63
JA
4640static int io_connect(struct io_kiocb *req, bool force_nonblock,
4641 struct io_comp_state *cs)
f8e85cf2 4642{
e8c2bc1f 4643 struct io_async_connect __io, *io;
f8e85cf2 4644 unsigned file_flags;
3fbb51c1 4645 int ret;
f8e85cf2 4646
e8c2bc1f
JA
4647 if (req->async_data) {
4648 io = req->async_data;
f499a021 4649 } else {
3529d8c2
JA
4650 ret = move_addr_to_kernel(req->connect.addr,
4651 req->connect.addr_len,
e8c2bc1f 4652 &__io.address);
f499a021
JA
4653 if (ret)
4654 goto out;
4655 io = &__io;
4656 }
4657
3fbb51c1
JA
4658 file_flags = force_nonblock ? O_NONBLOCK : 0;
4659
e8c2bc1f 4660 ret = __sys_connect_file(req->file, &io->address,
3fbb51c1 4661 req->connect.addr_len, file_flags);
87f80d62 4662 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
e8c2bc1f 4663 if (req->async_data)
b7bb4f7d 4664 return -EAGAIN;
e8c2bc1f 4665 if (io_alloc_async_data(req)) {
f499a021
JA
4666 ret = -ENOMEM;
4667 goto out;
4668 }
e8c2bc1f
JA
4669 io = req->async_data;
4670 memcpy(req->async_data, &__io, sizeof(__io));
f8e85cf2 4671 return -EAGAIN;
f499a021 4672 }
f8e85cf2
JA
4673 if (ret == -ERESTARTSYS)
4674 ret = -EINTR;
f499a021 4675out:
4e88d6e7
JA
4676 if (ret < 0)
4677 req_set_fail_links(req);
229a7b63 4678 __io_req_complete(req, ret, 0, cs);
f8e85cf2 4679 return 0;
469956e8
Y
4680}
4681#else /* !CONFIG_NET */
4682static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4683{
f8e85cf2 4684 return -EOPNOTSUPP;
f8e85cf2
JA
4685}
4686
1e16c2f9
RD
4687static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4688 struct io_comp_state *cs)
469956e8
Y
4689{
4690 return -EOPNOTSUPP;
4691}
4692
1e16c2f9
RD
4693static int io_send(struct io_kiocb *req, bool force_nonblock,
4694 struct io_comp_state *cs)
469956e8
Y
4695{
4696 return -EOPNOTSUPP;
4697}
4698
4699static int io_recvmsg_prep(struct io_kiocb *req,
4700 const struct io_uring_sqe *sqe)
4701{
4702 return -EOPNOTSUPP;
4703}
4704
1e16c2f9
RD
4705static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4706 struct io_comp_state *cs)
469956e8
Y
4707{
4708 return -EOPNOTSUPP;
4709}
4710
1e16c2f9
RD
4711static int io_recv(struct io_kiocb *req, bool force_nonblock,
4712 struct io_comp_state *cs)
469956e8
Y
4713{
4714 return -EOPNOTSUPP;
4715}
4716
4717static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4718{
4719 return -EOPNOTSUPP;
4720}
4721
1e16c2f9
RD
4722static int io_accept(struct io_kiocb *req, bool force_nonblock,
4723 struct io_comp_state *cs)
469956e8
Y
4724{
4725 return -EOPNOTSUPP;
4726}
ce593a6c 4727
469956e8
Y
4728static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4729{
4730 return -EOPNOTSUPP;
4731}
4732
1e16c2f9
RD
4733static int io_connect(struct io_kiocb *req, bool force_nonblock,
4734 struct io_comp_state *cs)
469956e8 4735{
f8e85cf2 4736 return -EOPNOTSUPP;
ce593a6c 4737}
469956e8 4738#endif /* CONFIG_NET */
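/*
 * Userspace sketch (liburing) for the accept path above, assuming a listening
 * socket: io_accept() attempts a non-blocking accept during submission and
 * punts to io-wq only if it would block. cqe->res is the accepted fd or a
 * negative error.
 */
#include <sys/socket.h>
#include <liburing.h>

static void prep_accept_sqe(struct io_uring_sqe *sqe, int listen_fd,
			    struct sockaddr_storage *addr, socklen_t *addrlen)
{
	*addrlen = sizeof(*addr);
	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)addr, addrlen,
			     SOCK_CLOEXEC);
}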
f8e85cf2 4739
d7718a9d
JA
4740struct io_poll_table {
4741 struct poll_table_struct pt;
4742 struct io_kiocb *req;
4743 int error;
4744};
ce593a6c 4745
d7718a9d
JA
4746static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4747 __poll_t mask, task_work_func_t func)
4748{
fd7d6de2 4749 bool twa_signal_ok;
aa96bf8a 4750 int ret;
d7718a9d
JA
4751
4752 /* for instances that support it check for an event match first: */
4753 if (mask && !(mask & poll->events))
4754 return 0;
4755
4756 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4757
4758 list_del_init(&poll->wait.entry);
4759
d7718a9d
JA
4760 req->result = mask;
4761 init_task_work(&req->task_work, func);
6d816e08
JA
4762 percpu_ref_get(&req->ctx->refs);
4763
fd7d6de2
JA
4764 /*
 4765	 * If we are using the signalfd wait_queue_head for this wakeup, then
4766 * it's not safe to use TWA_SIGNAL as we could be recursing on the
4767 * tsk->sighand->siglock on doing the wakeup. Should not be needed
4768 * either, as the normal wakeup will suffice.
4769 */
4770 twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
4771
d7718a9d 4772 /*
e3aabf95
JA
4773 * If this fails, then the task is exiting. When a task exits, the
4774 * work gets canceled, so just cancel this request as well instead
4775 * of executing it. We can't safely execute it anyway, as we may not
 4776	 * have the state needed for it.
d7718a9d 4777 */
fd7d6de2 4778 ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok);
aa96bf8a 4779 if (unlikely(ret)) {
c2c4c83c
JA
4780 struct task_struct *tsk;
4781
e3aabf95 4782 WRITE_ONCE(poll->canceled, true);
aa96bf8a 4783 tsk = io_wq_get_task(req->ctx->io_wq);
ce593a6c
JA
4784 task_work_add(tsk, &req->task_work, 0);
4785 wake_up_process(tsk);
aa96bf8a 4786 }
d7718a9d
JA
4787 return 1;
4788}
4789
74ce6ce4
JA
4790static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4791 __acquires(&req->ctx->completion_lock)
4792{
4793 struct io_ring_ctx *ctx = req->ctx;
4794
4795 if (!req->result && !READ_ONCE(poll->canceled)) {
4796 struct poll_table_struct pt = { ._key = poll->events };
4797
4798 req->result = vfs_poll(req->file, &pt) & poll->events;
4799 }
4800
4801 spin_lock_irq(&ctx->completion_lock);
4802 if (!req->result && !READ_ONCE(poll->canceled)) {
4803 add_wait_queue(poll->head, &poll->wait);
4804 return true;
4805 }
4806
4807 return false;
4808}
4809
d4e7cd36 4810static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
18bceab1 4811{
e8c2bc1f 4812 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
d4e7cd36 4813 if (req->opcode == IORING_OP_POLL_ADD)
e8c2bc1f 4814 return req->async_data;
d4e7cd36
JA
4815 return req->apoll->double_poll;
4816}
4817
4818static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4819{
4820 if (req->opcode == IORING_OP_POLL_ADD)
4821 return &req->poll;
4822 return &req->apoll->poll;
4823}
4824
4825static void io_poll_remove_double(struct io_kiocb *req)
4826{
4827 struct io_poll_iocb *poll = io_poll_get_double(req);
18bceab1
JA
4828
4829 lockdep_assert_held(&req->ctx->completion_lock);
4830
4831 if (poll && poll->head) {
4832 struct wait_queue_head *head = poll->head;
4833
4834 spin_lock(&head->lock);
4835 list_del_init(&poll->wait.entry);
4836 if (poll->wait.private)
4837 refcount_dec(&req->refs);
4838 poll->head = NULL;
4839 spin_unlock(&head->lock);
4840 }
4841}
4842
4843static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4844{
4845 struct io_ring_ctx *ctx = req->ctx;
4846
d4e7cd36 4847 io_poll_remove_double(req);
18bceab1
JA
4848 req->poll.done = true;
4849 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4850 io_commit_cqring(ctx);
4851}
4852
4853static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4854{
4855 struct io_ring_ctx *ctx = req->ctx;
4856
4857 if (io_poll_rewait(req, &req->poll)) {
4858 spin_unlock_irq(&ctx->completion_lock);
4859 return;
4860 }
4861
4862 hash_del(&req->hash_node);
4863 io_poll_complete(req, req->result, 0);
4864 req->flags |= REQ_F_COMP_LOCKED;
9b5f7bd9 4865 *nxt = io_put_req_find_next(req);
18bceab1
JA
4866 spin_unlock_irq(&ctx->completion_lock);
4867
4868 io_cqring_ev_posted(ctx);
4869}
4870
4871static void io_poll_task_func(struct callback_head *cb)
4872{
4873 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
6d816e08 4874 struct io_ring_ctx *ctx = req->ctx;
18bceab1
JA
4875 struct io_kiocb *nxt = NULL;
4876
4877 io_poll_task_handler(req, &nxt);
ea1164e5
PB
4878 if (nxt)
4879 __io_req_task_submit(nxt);
6d816e08 4880 percpu_ref_put(&ctx->refs);
18bceab1
JA
4881}
4882
4883static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4884 int sync, void *key)
4885{
4886 struct io_kiocb *req = wait->private;
d4e7cd36 4887 struct io_poll_iocb *poll = io_poll_get_single(req);
18bceab1
JA
4888 __poll_t mask = key_to_poll(key);
4889
4890 /* for instances that support it check for an event match first: */
4891 if (mask && !(mask & poll->events))
4892 return 0;
4893
8706e04e
JA
4894 list_del_init(&wait->entry);
4895
807abcb0 4896 if (poll && poll->head) {
18bceab1
JA
4897 bool done;
4898
807abcb0
JA
4899 spin_lock(&poll->head->lock);
4900 done = list_empty(&poll->wait.entry);
18bceab1 4901 if (!done)
807abcb0 4902 list_del_init(&poll->wait.entry);
d4e7cd36
JA
4903 /* make sure double remove sees this as being gone */
4904 wait->private = NULL;
807abcb0 4905 spin_unlock(&poll->head->lock);
18bceab1
JA
4906 if (!done)
4907 __io_async_wake(req, poll, mask, io_poll_task_func);
4908 }
4909 refcount_dec(&req->refs);
4910 return 1;
4911}
4912
4913static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4914 wait_queue_func_t wake_func)
4915{
4916 poll->head = NULL;
4917 poll->done = false;
4918 poll->canceled = false;
4919 poll->events = events;
4920 INIT_LIST_HEAD(&poll->wait.entry);
4921 init_waitqueue_func_entry(&poll->wait, wake_func);
4922}
4923
4924static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
4925 struct wait_queue_head *head,
4926 struct io_poll_iocb **poll_ptr)
18bceab1
JA
4927{
4928 struct io_kiocb *req = pt->req;
4929
4930 /*
4931 * If poll->head is already set, it's because the file being polled
4932 * uses multiple waitqueues for poll handling (eg one for read, one
4933 * for write). Setup a separate io_poll_iocb if this happens.
4934 */
4935 if (unlikely(poll->head)) {
4936 /* already have a 2nd entry, fail a third attempt */
807abcb0 4937 if (*poll_ptr) {
18bceab1
JA
4938 pt->error = -EINVAL;
4939 return;
4940 }
4941 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4942 if (!poll) {
4943 pt->error = -ENOMEM;
4944 return;
4945 }
4946 io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
4947 refcount_inc(&req->refs);
4948 poll->wait.private = req;
807abcb0 4949 *poll_ptr = poll;
18bceab1
JA
4950 }
4951
4952 pt->error = 0;
4953 poll->head = head;
a31eb4a2
JX
4954
4955 if (poll->events & EPOLLEXCLUSIVE)
4956 add_wait_queue_exclusive(head, &poll->wait);
4957 else
4958 add_wait_queue(head, &poll->wait);
18bceab1
JA
4959}
4960
4961static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4962 struct poll_table_struct *p)
4963{
4964 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
807abcb0 4965 struct async_poll *apoll = pt->req->apoll;
18bceab1 4966
807abcb0 4967 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
18bceab1
JA
4968}
4969
d7718a9d
JA
4970static void io_async_task_func(struct callback_head *cb)
4971{
4972 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4973 struct async_poll *apoll = req->apoll;
4974 struct io_ring_ctx *ctx = req->ctx;
4975
4976 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4977
74ce6ce4 4978 if (io_poll_rewait(req, &apoll->poll)) {
d7718a9d 4979 spin_unlock_irq(&ctx->completion_lock);
6d816e08 4980 percpu_ref_put(&ctx->refs);
74ce6ce4 4981 return;
d7718a9d
JA
4982 }
4983
31067255 4984 /* If req is still hashed, it cannot have been canceled. Don't check. */
0be0b0e3 4985 if (hash_hashed(&req->hash_node))
74ce6ce4 4986 hash_del(&req->hash_node);
2bae047e 4987
d4e7cd36 4988 io_poll_remove_double(req);
74ce6ce4
JA
4989 spin_unlock_irq(&ctx->completion_lock);
4990
0be0b0e3
PB
4991 if (!READ_ONCE(apoll->poll.canceled))
4992 __io_req_task_submit(req);
4993 else
4994 __io_req_task_cancel(req, -ECANCELED);
aa340845 4995
6d816e08 4996 percpu_ref_put(&ctx->refs);
807abcb0 4997 kfree(apoll->double_poll);
31067255 4998 kfree(apoll);
d7718a9d
JA
4999}
5000
5001static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5002 void *key)
5003{
5004 struct io_kiocb *req = wait->private;
5005 struct io_poll_iocb *poll = &req->apoll->poll;
5006
5007 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5008 key_to_poll(key));
5009
5010 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5011}
5012
5013static void io_poll_req_insert(struct io_kiocb *req)
5014{
5015 struct io_ring_ctx *ctx = req->ctx;
5016 struct hlist_head *list;
5017
5018 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5019 hlist_add_head(&req->hash_node, list);
5020}
5021
5022static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5023 struct io_poll_iocb *poll,
5024 struct io_poll_table *ipt, __poll_t mask,
5025 wait_queue_func_t wake_func)
5026 __acquires(&ctx->completion_lock)
5027{
5028 struct io_ring_ctx *ctx = req->ctx;
5029 bool cancel = false;
5030
18bceab1 5031 io_init_poll_iocb(poll, mask, wake_func);
b90cd197 5032 poll->file = req->file;
18bceab1 5033 poll->wait.private = req;
d7718a9d
JA
5034
5035 ipt->pt._key = mask;
5036 ipt->req = req;
5037 ipt->error = -EINVAL;
5038
d7718a9d
JA
5039 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5040
5041 spin_lock_irq(&ctx->completion_lock);
5042 if (likely(poll->head)) {
5043 spin_lock(&poll->head->lock);
5044 if (unlikely(list_empty(&poll->wait.entry))) {
5045 if (ipt->error)
5046 cancel = true;
5047 ipt->error = 0;
5048 mask = 0;
5049 }
5050 if (mask || ipt->error)
5051 list_del_init(&poll->wait.entry);
5052 else if (cancel)
5053 WRITE_ONCE(poll->canceled, true);
5054 else if (!poll->done) /* actually waiting for an event */
5055 io_poll_req_insert(req);
5056 spin_unlock(&poll->head->lock);
5057 }
5058
5059 return mask;
5060}
5061
5062static bool io_arm_poll_handler(struct io_kiocb *req)
5063{
5064 const struct io_op_def *def = &io_op_defs[req->opcode];
5065 struct io_ring_ctx *ctx = req->ctx;
5066 struct async_poll *apoll;
5067 struct io_poll_table ipt;
5068 __poll_t mask, ret;
9dab14b8 5069 int rw;
d7718a9d
JA
5070
5071 if (!req->file || !file_can_poll(req->file))
5072 return false;
24c74678 5073 if (req->flags & REQ_F_POLLED)
d7718a9d 5074 return false;
9dab14b8
JA
5075 if (def->pollin)
5076 rw = READ;
5077 else if (def->pollout)
5078 rw = WRITE;
5079 else
5080 return false;
5081 /* if we can't nonblock try, then no point in arming a poll handler */
5082 if (!io_file_supports_async(req->file, rw))
d7718a9d
JA
5083 return false;
5084
5085 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5086 if (unlikely(!apoll))
5087 return false;
807abcb0 5088 apoll->double_poll = NULL;
d7718a9d
JA
5089
5090 req->flags |= REQ_F_POLLED;
d7718a9d
JA
5091 req->apoll = apoll;
5092 INIT_HLIST_NODE(&req->hash_node);
5093
8755d97a 5094 mask = 0;
d7718a9d 5095 if (def->pollin)
8755d97a 5096 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
5097 if (def->pollout)
5098 mask |= POLLOUT | POLLWRNORM;
5099 mask |= POLLERR | POLLPRI;
5100
5101 ipt.pt._qproc = io_async_queue_proc;
5102
5103 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5104 io_async_wake);
a36da65c 5105 if (ret || ipt.error) {
d4e7cd36 5106 io_poll_remove_double(req);
d7718a9d 5107 spin_unlock_irq(&ctx->completion_lock);
807abcb0 5108 kfree(apoll->double_poll);
d7718a9d
JA
5109 kfree(apoll);
5110 return false;
5111 }
5112 spin_unlock_irq(&ctx->completion_lock);
5113 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5114 apoll->poll.events);
5115 return true;
5116}
5117
5118static bool __io_poll_remove_one(struct io_kiocb *req,
5119 struct io_poll_iocb *poll)
221c5eb2 5120{
b41e9852 5121 bool do_complete = false;
221c5eb2
JA
5122
5123 spin_lock(&poll->head->lock);
5124 WRITE_ONCE(poll->canceled, true);
392edb45
JA
5125 if (!list_empty(&poll->wait.entry)) {
5126 list_del_init(&poll->wait.entry);
b41e9852 5127 do_complete = true;
221c5eb2
JA
5128 }
5129 spin_unlock(&poll->head->lock);
3bfa5bcb 5130 hash_del(&req->hash_node);
d7718a9d
JA
5131 return do_complete;
5132}
5133
5134static bool io_poll_remove_one(struct io_kiocb *req)
5135{
5136 bool do_complete;
5137
d4e7cd36
JA
5138 io_poll_remove_double(req);
5139
d7718a9d
JA
5140 if (req->opcode == IORING_OP_POLL_ADD) {
5141 do_complete = __io_poll_remove_one(req, &req->poll);
5142 } else {
3bfa5bcb
JA
5143 struct async_poll *apoll = req->apoll;
5144
d7718a9d 5145 /* non-poll requests have submit ref still */
3bfa5bcb
JA
5146 do_complete = __io_poll_remove_one(req, &apoll->poll);
5147 if (do_complete) {
d7718a9d 5148 io_put_req(req);
807abcb0 5149 kfree(apoll->double_poll);
3bfa5bcb
JA
5150 kfree(apoll);
5151 }
b1f573bd
XW
5152 }
5153
b41e9852
JA
5154 if (do_complete) {
5155 io_cqring_fill_event(req, -ECANCELED);
5156 io_commit_cqring(req->ctx);
5157 req->flags |= REQ_F_COMP_LOCKED;
f254ac04 5158 req_set_fail_links(req);
b41e9852
JA
5159 io_put_req(req);
5160 }
5161
5162 return do_complete;
221c5eb2
JA
5163}
5164
76e1b642
JA
5165/*
5166 * Returns true if we found and killed one or more poll requests
5167 */
5168static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
221c5eb2 5169{
78076bb6 5170 struct hlist_node *tmp;
221c5eb2 5171 struct io_kiocb *req;
8e2e1faf 5172 int posted = 0, i;
221c5eb2
JA
5173
5174 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
5175 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5176 struct hlist_head *list;
5177
5178 list = &ctx->cancel_hash[i];
f3606e3a
JA
5179 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5180 if (io_task_match(req, tsk))
5181 posted += io_poll_remove_one(req);
5182 }
221c5eb2
JA
5183 }
5184 spin_unlock_irq(&ctx->completion_lock);
b41e9852 5185
8e2e1faf
JA
5186 if (posted)
5187 io_cqring_ev_posted(ctx);
76e1b642
JA
5188
5189 return posted != 0;
221c5eb2
JA
5190}
5191
47f46768
JA
5192static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5193{
78076bb6 5194 struct hlist_head *list;
47f46768
JA
5195 struct io_kiocb *req;
5196
78076bb6
JA
5197 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5198 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
5199 if (sqe_addr != req->user_data)
5200 continue;
5201 if (io_poll_remove_one(req))
eac406c6 5202 return 0;
b41e9852 5203 return -EALREADY;
47f46768
JA
5204 }
5205
5206 return -ENOENT;
5207}
5208
3529d8c2
JA
5209static int io_poll_remove_prep(struct io_kiocb *req,
5210 const struct io_uring_sqe *sqe)
0969e783 5211{
0969e783
JA
5212 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5213 return -EINVAL;
5214 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5215 sqe->poll_events)
5216 return -EINVAL;
5217
5218 req->poll.addr = READ_ONCE(sqe->addr);
0969e783
JA
5219 return 0;
5220}
5221
221c5eb2
JA
5222/*
5223 * Find a running poll command that matches one specified in sqe->addr,
5224 * and remove it if found.
5225 */
fc4df999 5226static int io_poll_remove(struct io_kiocb *req)
221c5eb2
JA
5227{
5228 struct io_ring_ctx *ctx = req->ctx;
0969e783 5229 u64 addr;
47f46768 5230 int ret;
221c5eb2 5231
0969e783 5232 addr = req->poll.addr;
221c5eb2 5233 spin_lock_irq(&ctx->completion_lock);
0969e783 5234 ret = io_poll_cancel(ctx, addr);
221c5eb2
JA
5235 spin_unlock_irq(&ctx->completion_lock);
5236
4e88d6e7
JA
5237 if (ret < 0)
5238 req_set_fail_links(req);
e1e16097 5239 io_req_complete(req, ret);
221c5eb2
JA
5240 return 0;
5241}
5242
221c5eb2
JA
5243static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5244 void *key)
5245{
c2f2eb7d
JA
5246 struct io_kiocb *req = wait->private;
5247 struct io_poll_iocb *poll = &req->poll;
221c5eb2 5248
d7718a9d 5249 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
5250}
5251
221c5eb2
JA
5252static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5253 struct poll_table_struct *p)
5254{
5255 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5256
e8c2bc1f 5257 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
eac406c6
JA
5258}
5259
3529d8c2 5260static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
5261{
5262 struct io_poll_iocb *poll = &req->poll;
5769a351 5263 u32 events;
221c5eb2
JA
5264
5265 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5266 return -EINVAL;
5267 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5268 return -EINVAL;
09bb8394
JA
5269 if (!poll->file)
5270 return -EBADF;
221c5eb2 5271
5769a351
JX
5272 events = READ_ONCE(sqe->poll32_events);
5273#ifdef __BIG_ENDIAN
5274 events = swahw32(events);
5275#endif
a31eb4a2
JX
5276 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5277 (events & EPOLLEXCLUSIVE);
0969e783
JA
5278 return 0;
5279}
5280
014db007 5281static int io_poll_add(struct io_kiocb *req)
0969e783
JA
5282{
5283 struct io_poll_iocb *poll = &req->poll;
5284 struct io_ring_ctx *ctx = req->ctx;
5285 struct io_poll_table ipt;
0969e783 5286 __poll_t mask;
0969e783 5287
78076bb6 5288 INIT_HLIST_NODE(&req->hash_node);
d7718a9d 5289 ipt.pt._qproc = io_poll_queue_proc;
36703247 5290
d7718a9d
JA
5291 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5292 io_poll_wake);
221c5eb2 5293
8c838788 5294 if (mask) { /* no async, we'd stolen it */
221c5eb2 5295 ipt.error = 0;
b0dd8a41 5296 io_poll_complete(req, mask, 0);
221c5eb2 5297 }
221c5eb2
JA
5298 spin_unlock_irq(&ctx->completion_lock);
5299
8c838788
JA
5300 if (mask) {
5301 io_cqring_ev_posted(ctx);
014db007 5302 io_put_req(req);
221c5eb2 5303 }
8c838788 5304 return ipt.error;
221c5eb2
JA
5305}
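/*
 * Userspace sketch (liburing): a single-shot IORING_OP_POLL_ADD matching
 * io_poll_add() above. If the file is already ready, the completion is posted
 * inline ("no async, we'd stolen it"); otherwise the wakeup arrives through
 * io_poll_wake(). cqe->res holds the signalled poll mask.
 */
#include <poll.h>
#include <liburing.h>

static void prep_poll_sqe(struct io_uring_sqe *sqe, int fd)
{
	io_uring_prep_poll_add(sqe, fd, POLLIN);
}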
5306
5262f567
JA
5307static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5308{
ad8a48ac
JA
5309 struct io_timeout_data *data = container_of(timer,
5310 struct io_timeout_data, timer);
5311 struct io_kiocb *req = data->req;
5312 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
5313 unsigned long flags;
5314
5262f567 5315 spin_lock_irqsave(&ctx->completion_lock, flags);
01cec8c1
PB
5316 atomic_set(&req->ctx->cq_timeouts,
5317 atomic_read(&req->ctx->cq_timeouts) + 1);
5318
ef03681a 5319 /*
11365043
JA
5320 * We could be racing with timeout deletion. If the list is empty,
5321 * then timeout lookup already found it and will be handling it.
ef03681a 5322 */
135fcde8
PB
5323 if (!list_empty(&req->timeout.list))
5324 list_del_init(&req->timeout.list);
5262f567 5325
78e19bbe 5326 io_cqring_fill_event(req, -ETIME);
5262f567
JA
5327 io_commit_cqring(ctx);
5328 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5329
5330 io_cqring_ev_posted(ctx);
4e88d6e7 5331 req_set_fail_links(req);
5262f567
JA
5332 io_put_req(req);
5333 return HRTIMER_NORESTART;
5334}
5335
f254ac04
JA
5336static int __io_timeout_cancel(struct io_kiocb *req)
5337{
e8c2bc1f 5338 struct io_timeout_data *io = req->async_data;
f254ac04
JA
5339 int ret;
5340
5341 list_del_init(&req->timeout.list);
5342
e8c2bc1f 5343 ret = hrtimer_try_to_cancel(&io->timer);
f254ac04
JA
5344 if (ret == -1)
5345 return -EALREADY;
5346
5347 req_set_fail_links(req);
5348 req->flags |= REQ_F_COMP_LOCKED;
5349 io_cqring_fill_event(req, -ECANCELED);
5350 io_put_req(req);
5351 return 0;
5352}
5353
47f46768
JA
5354static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5355{
5356 struct io_kiocb *req;
5357 int ret = -ENOENT;
5358
135fcde8 5359 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
47f46768 5360 if (user_data == req->user_data) {
47f46768
JA
5361 ret = 0;
5362 break;
5363 }
5364 }
5365
5366 if (ret == -ENOENT)
5367 return ret;
5368
f254ac04 5369 return __io_timeout_cancel(req);
47f46768
JA
5370}
5371
3529d8c2
JA
5372static int io_timeout_remove_prep(struct io_kiocb *req,
5373 const struct io_uring_sqe *sqe)
b29472ee 5374{
b29472ee
JA
5375 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5376 return -EINVAL;
61710e43
DA
5377 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5378 return -EINVAL;
5379 if (sqe->ioprio || sqe->buf_index || sqe->len)
b29472ee
JA
5380 return -EINVAL;
5381
5382 req->timeout.addr = READ_ONCE(sqe->addr);
5383 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
5384 if (req->timeout.flags)
5385 return -EINVAL;
5386
b29472ee
JA
5387 return 0;
5388}
5389
11365043
JA
5390/*
5391 * Remove or update an existing timeout command
5392 */
fc4df999 5393static int io_timeout_remove(struct io_kiocb *req)
11365043
JA
5394{
5395 struct io_ring_ctx *ctx = req->ctx;
47f46768 5396 int ret;
11365043 5397
11365043 5398 spin_lock_irq(&ctx->completion_lock);
b29472ee 5399 ret = io_timeout_cancel(ctx, req->timeout.addr);
11365043 5400
47f46768 5401 io_cqring_fill_event(req, ret);
11365043
JA
5402 io_commit_cqring(ctx);
5403 spin_unlock_irq(&ctx->completion_lock);
5262f567 5404 io_cqring_ev_posted(ctx);
4e88d6e7
JA
5405 if (ret < 0)
5406 req_set_fail_links(req);
ec9c02ad 5407 io_put_req(req);
11365043 5408 return 0;
5262f567
JA
5409}
5410
3529d8c2 5411static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 5412 bool is_timeout_link)
5262f567 5413{
ad8a48ac 5414 struct io_timeout_data *data;
a41525ab 5415 unsigned flags;
56080b02 5416 u32 off = READ_ONCE(sqe->off);
5262f567 5417
ad8a48ac 5418 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 5419 return -EINVAL;
ad8a48ac 5420 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 5421 return -EINVAL;
56080b02 5422 if (off && is_timeout_link)
2d28390a 5423 return -EINVAL;
a41525ab
JA
5424 flags = READ_ONCE(sqe->timeout_flags);
5425 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 5426 return -EINVAL;
bdf20073 5427
bfe68a22 5428 req->timeout.off = off;
26a61679 5429
e8c2bc1f 5430 if (!req->async_data && io_alloc_async_data(req))
26a61679
JA
5431 return -ENOMEM;
5432
e8c2bc1f 5433 data = req->async_data;
ad8a48ac 5434 data->req = req;
ad8a48ac
JA
5435
5436 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
5437 return -EFAULT;
5438
11365043 5439 if (flags & IORING_TIMEOUT_ABS)
ad8a48ac 5440 data->mode = HRTIMER_MODE_ABS;
11365043 5441 else
ad8a48ac 5442 data->mode = HRTIMER_MODE_REL;
11365043 5443
ad8a48ac
JA
5444 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5445 return 0;
5446}
5447
fc4df999 5448static int io_timeout(struct io_kiocb *req)
ad8a48ac 5449{
ad8a48ac 5450 struct io_ring_ctx *ctx = req->ctx;
e8c2bc1f 5451 struct io_timeout_data *data = req->async_data;
ad8a48ac 5452 struct list_head *entry;
bfe68a22 5453 u32 tail, off = req->timeout.off;
ad8a48ac 5454
733f5c95 5455 spin_lock_irq(&ctx->completion_lock);
93bd25bb 5456
5262f567
JA
5457 /*
 5458 * sqe->off holds how many events need to occur for this

93bd25bb
JA
5459 * timeout event to be satisfied. If it isn't set, then this is
 5460 * a pure timeout request; the sequence isn't used.
5262f567 5461 */
8eb7e2d0 5462 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
5463 entry = ctx->timeout_list.prev;
5464 goto add;
5465 }
5262f567 5466
bfe68a22
PB
5467 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5468 req->timeout.target_seq = tail + off;
5262f567
JA
5469
5470 /*
5471 * Insertion sort, ensuring the first entry in the list is always
5472 * the one we need first.
5473 */
5262f567 5474 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
5475 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5476 timeout.list);
5262f567 5477
8eb7e2d0 5478 if (io_is_timeout_noseq(nxt))
93bd25bb 5479 continue;
bfe68a22
PB
5480 /* nxt.seq is behind @tail, otherwise would've been completed */
5481 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
5482 break;
5483 }
93bd25bb 5484add:
135fcde8 5485 list_add(&req->timeout.list, entry);
ad8a48ac
JA
5486 data->timer.function = io_timeout_fn;
5487 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 5488 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
5489 return 0;
5490}
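
As the comment in io_timeout() notes, sqe->off selects between a pure timeout and one that is also satisfied once a number of other completions have occurred. A hedged userspace sketch of filling such an SQE by hand, per io_timeout_prep() above (addr points at a timespec, len must be 1); keeping 'ts' alive until the request completes is the caller's responsibility.

#include <string.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

/*
 * Relative 2s timeout that is also satisfied once 8 other requests have
 * completed, whichever happens first; on expiry the CQE res is -ETIME,
 * as filled in by io_timeout_fn() above.
 */
static void prep_timeout(struct io_uring_sqe *sqe, struct __kernel_timespec *ts)
{
	ts->tv_sec = 2;
	ts->tv_nsec = 0;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_TIMEOUT;
	sqe->addr = (unsigned long) ts;	/* read via get_timespec64() */
	sqe->len = 1;			/* io_timeout_prep() requires len == 1 */
	sqe->off = 8;			/* 0 would mean a pure timeout */
	sqe->timeout_flags = 0;		/* relative; IORING_TIMEOUT_ABS for absolute */
}
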
5262f567 5491
62755e35
JA
5492static bool io_cancel_cb(struct io_wq_work *work, void *data)
5493{
5494 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5495
5496 return req->user_data == (unsigned long) data;
5497}
5498
e977d6d3 5499static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 5500{
62755e35 5501 enum io_wq_cancel cancel_ret;
62755e35
JA
5502 int ret = 0;
5503
4f26bda1 5504 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
62755e35
JA
5505 switch (cancel_ret) {
5506 case IO_WQ_CANCEL_OK:
5507 ret = 0;
5508 break;
5509 case IO_WQ_CANCEL_RUNNING:
5510 ret = -EALREADY;
5511 break;
5512 case IO_WQ_CANCEL_NOTFOUND:
5513 ret = -ENOENT;
5514 break;
5515 }
5516
e977d6d3
JA
5517 return ret;
5518}
5519
47f46768
JA
5520static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5521 struct io_kiocb *req, __u64 sqe_addr,
014db007 5522 int success_ret)
47f46768
JA
5523{
5524 unsigned long flags;
5525 int ret;
5526
5527 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5528 if (ret != -ENOENT) {
5529 spin_lock_irqsave(&ctx->completion_lock, flags);
5530 goto done;
5531 }
5532
5533 spin_lock_irqsave(&ctx->completion_lock, flags);
5534 ret = io_timeout_cancel(ctx, sqe_addr);
5535 if (ret != -ENOENT)
5536 goto done;
5537 ret = io_poll_cancel(ctx, sqe_addr);
5538done:
b0dd8a41
JA
5539 if (!ret)
5540 ret = success_ret;
47f46768
JA
5541 io_cqring_fill_event(req, ret);
5542 io_commit_cqring(ctx);
5543 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5544 io_cqring_ev_posted(ctx);
5545
4e88d6e7
JA
5546 if (ret < 0)
5547 req_set_fail_links(req);
014db007 5548 io_put_req(req);
47f46768
JA
5549}
5550
3529d8c2
JA
5551static int io_async_cancel_prep(struct io_kiocb *req,
5552 const struct io_uring_sqe *sqe)
e977d6d3 5553{
fbf23849 5554 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3 5555 return -EINVAL;
61710e43
DA
5556 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5557 return -EINVAL;
5558 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
e977d6d3
JA
5559 return -EINVAL;
5560
fbf23849
JA
5561 req->cancel.addr = READ_ONCE(sqe->addr);
5562 return 0;
5563}
5564
014db007 5565static int io_async_cancel(struct io_kiocb *req)
fbf23849
JA
5566{
5567 struct io_ring_ctx *ctx = req->ctx;
fbf23849 5568
014db007 5569 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
5570 return 0;
5571}
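
io_async_cancel_one() above maps the io-wq result onto the small set of errors the submitter sees in the cancel request's own CQE: 0 if the target was cancelled, -EALREADY if it was already running, -ENOENT if nothing matched. A hedged raw-SQE sketch; the target is identified purely by the user_data it was originally submitted with.

#include <string.h>
#include <linux/io_uring.h>

/* Ask the kernel to cancel the still-pending request tagged 'target'. */
static void prep_cancel(struct io_uring_sqe *sqe, __u64 target)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr = target;		/* io_async_cancel_prep() reads sqe->addr */
	sqe->user_data = 0xdead;	/* tag of the cancel request itself */
	/* CQE res: 0 cancelled, -EALREADY running, -ENOENT not found */
}
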
5572
05f3fb3c
JA
5573static int io_files_update_prep(struct io_kiocb *req,
5574 const struct io_uring_sqe *sqe)
5575{
6ca56f84
JA
5576 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5577 return -EINVAL;
61710e43
DA
5578 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5579 return -EINVAL;
5580 if (sqe->ioprio || sqe->rw_flags)
05f3fb3c
JA
5581 return -EINVAL;
5582
5583 req->files_update.offset = READ_ONCE(sqe->off);
5584 req->files_update.nr_args = READ_ONCE(sqe->len);
5585 if (!req->files_update.nr_args)
5586 return -EINVAL;
5587 req->files_update.arg = READ_ONCE(sqe->addr);
5588 return 0;
5589}
5590
229a7b63
JA
5591static int io_files_update(struct io_kiocb *req, bool force_nonblock,
5592 struct io_comp_state *cs)
fbf23849
JA
5593{
5594 struct io_ring_ctx *ctx = req->ctx;
05f3fb3c
JA
5595 struct io_uring_files_update up;
5596 int ret;
fbf23849 5597
f86cd20c 5598 if (force_nonblock)
05f3fb3c 5599 return -EAGAIN;
05f3fb3c
JA
5600
5601 up.offset = req->files_update.offset;
5602 up.fds = req->files_update.arg;
5603
5604 mutex_lock(&ctx->uring_lock);
5605 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
5606 mutex_unlock(&ctx->uring_lock);
5607
5608 if (ret < 0)
5609 req_set_fail_links(req);
229a7b63 5610 __io_req_complete(req, ret, 0, cs);
5262f567
JA
5611 return 0;
5612}
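
io_files_update_prep() reads three SQE fields: off is the first slot of the registered file table to touch, len is the number of descriptors, and addr points at an array of __s32 fds handed to __io_sqe_files_update(). A hedged sketch of the userspace side of that op:

#include <string.h>
#include <linux/io_uring.h>

/* Replace two slots of a previously registered file table, starting at slot 4. */
static void prep_files_update(struct io_uring_sqe *sqe, __s32 *fds)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FILES_UPDATE;
	sqe->off = 4;				/* first table slot to update */
	sqe->len = 2;				/* number of entries in 'fds' */
	sqe->addr = (unsigned long) fds;	/* __s32 array consumed by the update */
}
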
5613
3529d8c2
JA
5614static int io_req_defer_prep(struct io_kiocb *req,
5615 const struct io_uring_sqe *sqe)
f67676d1 5616{
e781573e 5617 ssize_t ret = 0;
f67676d1 5618
f1d96a8f
PB
5619 if (!sqe)
5620 return 0;
5621
e8c2bc1f 5622 if (io_alloc_async_data(req))
327d6d96 5623 return -EAGAIN;
f56040b8
PB
5624 ret = io_prep_work_files(req);
5625 if (unlikely(ret))
5626 return ret;
cccf0ee8 5627
202700e1
JA
5628 io_prep_async_work(req);
5629
d625c6ee 5630 switch (req->opcode) {
e781573e
JA
5631 case IORING_OP_NOP:
5632 break;
f67676d1
JA
5633 case IORING_OP_READV:
5634 case IORING_OP_READ_FIXED:
3a6820f2 5635 case IORING_OP_READ:
3529d8c2 5636 ret = io_read_prep(req, sqe, true);
f67676d1
JA
5637 break;
5638 case IORING_OP_WRITEV:
5639 case IORING_OP_WRITE_FIXED:
3a6820f2 5640 case IORING_OP_WRITE:
3529d8c2 5641 ret = io_write_prep(req, sqe, true);
f67676d1 5642 break;
0969e783 5643 case IORING_OP_POLL_ADD:
3529d8c2 5644 ret = io_poll_add_prep(req, sqe);
0969e783
JA
5645 break;
5646 case IORING_OP_POLL_REMOVE:
3529d8c2 5647 ret = io_poll_remove_prep(req, sqe);
0969e783 5648 break;
8ed8d3c3 5649 case IORING_OP_FSYNC:
3529d8c2 5650 ret = io_prep_fsync(req, sqe);
8ed8d3c3
JA
5651 break;
5652 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2 5653 ret = io_prep_sfr(req, sqe);
8ed8d3c3 5654 break;
03b1230c 5655 case IORING_OP_SENDMSG:
fddaface 5656 case IORING_OP_SEND:
3529d8c2 5657 ret = io_sendmsg_prep(req, sqe);
03b1230c
JA
5658 break;
5659 case IORING_OP_RECVMSG:
fddaface 5660 case IORING_OP_RECV:
3529d8c2 5661 ret = io_recvmsg_prep(req, sqe);
03b1230c 5662 break;
f499a021 5663 case IORING_OP_CONNECT:
3529d8c2 5664 ret = io_connect_prep(req, sqe);
f499a021 5665 break;
2d28390a 5666 case IORING_OP_TIMEOUT:
3529d8c2 5667 ret = io_timeout_prep(req, sqe, false);
b7bb4f7d 5668 break;
b29472ee 5669 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2 5670 ret = io_timeout_remove_prep(req, sqe);
b29472ee 5671 break;
fbf23849 5672 case IORING_OP_ASYNC_CANCEL:
3529d8c2 5673 ret = io_async_cancel_prep(req, sqe);
fbf23849 5674 break;
2d28390a 5675 case IORING_OP_LINK_TIMEOUT:
3529d8c2 5676 ret = io_timeout_prep(req, sqe, true);
b7bb4f7d 5677 break;
8ed8d3c3 5678 case IORING_OP_ACCEPT:
3529d8c2 5679 ret = io_accept_prep(req, sqe);
8ed8d3c3 5680 break;
d63d1b5e
JA
5681 case IORING_OP_FALLOCATE:
5682 ret = io_fallocate_prep(req, sqe);
5683 break;
15b71abe
JA
5684 case IORING_OP_OPENAT:
5685 ret = io_openat_prep(req, sqe);
5686 break;
b5dba59e
JA
5687 case IORING_OP_CLOSE:
5688 ret = io_close_prep(req, sqe);
5689 break;
05f3fb3c
JA
5690 case IORING_OP_FILES_UPDATE:
5691 ret = io_files_update_prep(req, sqe);
5692 break;
eddc7ef5
JA
5693 case IORING_OP_STATX:
5694 ret = io_statx_prep(req, sqe);
5695 break;
4840e418
JA
5696 case IORING_OP_FADVISE:
5697 ret = io_fadvise_prep(req, sqe);
5698 break;
c1ca757b
JA
5699 case IORING_OP_MADVISE:
5700 ret = io_madvise_prep(req, sqe);
5701 break;
cebdb986
JA
5702 case IORING_OP_OPENAT2:
5703 ret = io_openat2_prep(req, sqe);
5704 break;
3e4827b0
JA
5705 case IORING_OP_EPOLL_CTL:
5706 ret = io_epoll_ctl_prep(req, sqe);
5707 break;
7d67af2c
PB
5708 case IORING_OP_SPLICE:
5709 ret = io_splice_prep(req, sqe);
5710 break;
ddf0322d
JA
5711 case IORING_OP_PROVIDE_BUFFERS:
5712 ret = io_provide_buffers_prep(req, sqe);
5713 break;
067524e9
JA
5714 case IORING_OP_REMOVE_BUFFERS:
5715 ret = io_remove_buffers_prep(req, sqe);
5716 break;
f2a8d5c7
PB
5717 case IORING_OP_TEE:
5718 ret = io_tee_prep(req, sqe);
5719 break;
f67676d1 5720 default:
e781573e
JA
5721 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5722 req->opcode);
5723 ret = -EINVAL;
b7bb4f7d 5724 break;
f67676d1
JA
5725 }
5726
b7bb4f7d 5727 return ret;
f67676d1
JA
5728}
5729
9cf7c104
PB
5730static u32 io_get_sequence(struct io_kiocb *req)
5731{
5732 struct io_kiocb *pos;
5733 struct io_ring_ctx *ctx = req->ctx;
5734 u32 total_submitted, nr_reqs = 1;
5735
5736 if (req->flags & REQ_F_LINK_HEAD)
5737 list_for_each_entry(pos, &req->link_list, link_list)
5738 nr_reqs++;
5739
5740 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5741 return total_submitted - nr_reqs;
5742}
5743
3529d8c2 5744static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
de0617e4 5745{
a197f664 5746 struct io_ring_ctx *ctx = req->ctx;
27dc8338 5747 struct io_defer_entry *de;
f67676d1 5748 int ret;
9cf7c104 5749 u32 seq;
de0617e4 5750
9d858b21 5751 /* Still need to defer if there is a pending req in the defer list. */
9cf7c104
PB
5752 if (likely(list_empty_careful(&ctx->defer_list) &&
5753 !(req->flags & REQ_F_IO_DRAIN)))
5754 return 0;
5755
5756 seq = io_get_sequence(req);
5757 /* Still a chance to pass the sequence check */
5758 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
de0617e4
JA
5759 return 0;
5760
e8c2bc1f 5761 if (!req->async_data) {
650b5481 5762 ret = io_req_defer_prep(req, sqe);
327d6d96 5763 if (ret)
650b5481
PB
5764 return ret;
5765 }
cbdcb435 5766 io_prep_async_link(req);
27dc8338
PB
5767 de = kmalloc(sizeof(*de), GFP_KERNEL);
5768 if (!de)
5769 return -ENOMEM;
2d28390a 5770
de0617e4 5771 spin_lock_irq(&ctx->completion_lock);
9cf7c104 5772 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
de0617e4 5773 spin_unlock_irq(&ctx->completion_lock);
27dc8338 5774 kfree(de);
ae34817b
PB
5775 io_queue_async_work(req);
5776 return -EIOCBQUEUED;
de0617e4
JA
5777 }
5778
915967f6 5779 trace_io_uring_defer(ctx, req, req->user_data);
27dc8338 5780 de->req = req;
9cf7c104 5781 de->seq = seq;
27dc8338 5782 list_add_tail(&de->list, &ctx->defer_list);
de0617e4
JA
5783 spin_unlock_irq(&ctx->completion_lock);
5784 return -EIOCBQUEUED;
5785}
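
io_req_defer() only kicks in when a request carries REQ_F_IO_DRAIN (or an earlier drain is still pending): the request is parked on ctx->defer_list until io_get_sequence() indicates that everything submitted before it has completed. From userspace that is a single SQE flag. A minimal sketch, assuming an ordinary write stream followed by an fsync that must not start early:

#include <string.h>
#include <linux/io_uring.h>

/* The fsync is held back until every previously submitted request completes. */
static void prep_drained_fsync(struct io_uring_sqe *sqe, int fd)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->flags = IOSQE_IO_DRAIN;	/* copied into REQ_F_IO_DRAIN by io_init_req() */
	sqe->user_data = 42;
}
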
5786
f573d384
JA
5787static void io_req_drop_files(struct io_kiocb *req)
5788{
5789 struct io_ring_ctx *ctx = req->ctx;
5790 unsigned long flags;
5791
5792 spin_lock_irqsave(&ctx->inflight_lock, flags);
5793 list_del(&req->inflight_entry);
5794 if (waitqueue_active(&ctx->inflight_wait))
5795 wake_up(&ctx->inflight_wait);
5796 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
5797 req->flags &= ~REQ_F_INFLIGHT;
0f212204 5798 put_files_struct(req->work.files);
9b828492 5799 put_nsproxy(req->work.nsproxy);
f573d384
JA
5800 req->work.files = NULL;
5801}
5802
3ca405eb 5803static void __io_clean_op(struct io_kiocb *req)
99bc4c38 5804{
0e1b6fe3
PB
5805 if (req->flags & REQ_F_BUFFER_SELECTED) {
5806 switch (req->opcode) {
5807 case IORING_OP_READV:
5808 case IORING_OP_READ_FIXED:
5809 case IORING_OP_READ:
bcda7baa 5810 kfree((void *)(unsigned long)req->rw.addr);
0e1b6fe3
PB
5811 break;
5812 case IORING_OP_RECVMSG:
5813 case IORING_OP_RECV:
bcda7baa 5814 kfree(req->sr_msg.kbuf);
0e1b6fe3
PB
5815 break;
5816 }
5817 req->flags &= ~REQ_F_BUFFER_SELECTED;
99bc4c38
PB
5818 }
5819
0e1b6fe3
PB
5820 if (req->flags & REQ_F_NEED_CLEANUP) {
5821 switch (req->opcode) {
5822 case IORING_OP_READV:
5823 case IORING_OP_READ_FIXED:
5824 case IORING_OP_READ:
5825 case IORING_OP_WRITEV:
5826 case IORING_OP_WRITE_FIXED:
e8c2bc1f
JA
5827 case IORING_OP_WRITE: {
5828 struct io_async_rw *io = req->async_data;
5829 if (io->free_iovec)
5830 kfree(io->free_iovec);
0e1b6fe3 5831 break;
e8c2bc1f 5832 }
0e1b6fe3 5833 case IORING_OP_RECVMSG:
e8c2bc1f
JA
5834 case IORING_OP_SENDMSG: {
5835 struct io_async_msghdr *io = req->async_data;
5836 if (io->iov != io->fast_iov)
5837 kfree(io->iov);
0e1b6fe3 5838 break;
e8c2bc1f 5839 }
0e1b6fe3
PB
5840 case IORING_OP_SPLICE:
5841 case IORING_OP_TEE:
5842 io_put_file(req, req->splice.file_in,
5843 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5844 break;
f3cd4850
JA
5845 case IORING_OP_OPENAT:
5846 case IORING_OP_OPENAT2:
5847 if (req->open.filename)
5848 putname(req->open.filename);
5849 break;
0e1b6fe3
PB
5850 }
5851 req->flags &= ~REQ_F_NEED_CLEANUP;
99bc4c38 5852 }
bb175342 5853
f573d384
JA
5854 if (req->flags & REQ_F_INFLIGHT)
5855 io_req_drop_files(req);
99bc4c38
PB
5856}
5857
3529d8c2 5858static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
f13fad7b 5859 bool force_nonblock, struct io_comp_state *cs)
2b188cc1 5860{
a197f664 5861 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 5862 int ret;
2b188cc1 5863
d625c6ee 5864 switch (req->opcode) {
2b188cc1 5865 case IORING_OP_NOP:
229a7b63 5866 ret = io_nop(req, cs);
2b188cc1
JA
5867 break;
5868 case IORING_OP_READV:
edafccee 5869 case IORING_OP_READ_FIXED:
3a6820f2 5870 case IORING_OP_READ:
3529d8c2
JA
5871 if (sqe) {
5872 ret = io_read_prep(req, sqe, force_nonblock);
5873 if (ret < 0)
5874 break;
5875 }
a1d7c393 5876 ret = io_read(req, force_nonblock, cs);
edafccee 5877 break;
3529d8c2 5878 case IORING_OP_WRITEV:
edafccee 5879 case IORING_OP_WRITE_FIXED:
3a6820f2 5880 case IORING_OP_WRITE:
3529d8c2
JA
5881 if (sqe) {
5882 ret = io_write_prep(req, sqe, force_nonblock);
5883 if (ret < 0)
5884 break;
5885 }
a1d7c393 5886 ret = io_write(req, force_nonblock, cs);
2b188cc1 5887 break;
c992fe29 5888 case IORING_OP_FSYNC:
3529d8c2
JA
5889 if (sqe) {
5890 ret = io_prep_fsync(req, sqe);
5891 if (ret < 0)
5892 break;
5893 }
014db007 5894 ret = io_fsync(req, force_nonblock);
c992fe29 5895 break;
221c5eb2 5896 case IORING_OP_POLL_ADD:
3529d8c2
JA
5897 if (sqe) {
5898 ret = io_poll_add_prep(req, sqe);
5899 if (ret)
5900 break;
5901 }
014db007 5902 ret = io_poll_add(req);
221c5eb2
JA
5903 break;
5904 case IORING_OP_POLL_REMOVE:
3529d8c2
JA
5905 if (sqe) {
5906 ret = io_poll_remove_prep(req, sqe);
5907 if (ret < 0)
5908 break;
5909 }
fc4df999 5910 ret = io_poll_remove(req);
221c5eb2 5911 break;
5d17b4a4 5912 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2
JA
5913 if (sqe) {
5914 ret = io_prep_sfr(req, sqe);
5915 if (ret < 0)
5916 break;
5917 }
014db007 5918 ret = io_sync_file_range(req, force_nonblock);
5d17b4a4 5919 break;
0fa03c62 5920 case IORING_OP_SENDMSG:
fddaface 5921 case IORING_OP_SEND:
3529d8c2
JA
5922 if (sqe) {
5923 ret = io_sendmsg_prep(req, sqe);
5924 if (ret < 0)
5925 break;
5926 }
fddaface 5927 if (req->opcode == IORING_OP_SENDMSG)
229a7b63 5928 ret = io_sendmsg(req, force_nonblock, cs);
fddaface 5929 else
229a7b63 5930 ret = io_send(req, force_nonblock, cs);
0fa03c62 5931 break;
aa1fa28f 5932 case IORING_OP_RECVMSG:
fddaface 5933 case IORING_OP_RECV:
3529d8c2
JA
5934 if (sqe) {
5935 ret = io_recvmsg_prep(req, sqe);
5936 if (ret)
5937 break;
5938 }
fddaface 5939 if (req->opcode == IORING_OP_RECVMSG)
229a7b63 5940 ret = io_recvmsg(req, force_nonblock, cs);
fddaface 5941 else
229a7b63 5942 ret = io_recv(req, force_nonblock, cs);
aa1fa28f 5943 break;
5262f567 5944 case IORING_OP_TIMEOUT:
3529d8c2
JA
5945 if (sqe) {
5946 ret = io_timeout_prep(req, sqe, false);
5947 if (ret)
5948 break;
5949 }
fc4df999 5950 ret = io_timeout(req);
5262f567 5951 break;
11365043 5952 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2
JA
5953 if (sqe) {
5954 ret = io_timeout_remove_prep(req, sqe);
5955 if (ret)
5956 break;
5957 }
fc4df999 5958 ret = io_timeout_remove(req);
11365043 5959 break;
17f2fe35 5960 case IORING_OP_ACCEPT:
3529d8c2
JA
5961 if (sqe) {
5962 ret = io_accept_prep(req, sqe);
5963 if (ret)
5964 break;
5965 }
229a7b63 5966 ret = io_accept(req, force_nonblock, cs);
17f2fe35 5967 break;
f8e85cf2 5968 case IORING_OP_CONNECT:
3529d8c2
JA
5969 if (sqe) {
5970 ret = io_connect_prep(req, sqe);
5971 if (ret)
5972 break;
5973 }
229a7b63 5974 ret = io_connect(req, force_nonblock, cs);
f8e85cf2 5975 break;
62755e35 5976 case IORING_OP_ASYNC_CANCEL:
3529d8c2
JA
5977 if (sqe) {
5978 ret = io_async_cancel_prep(req, sqe);
5979 if (ret)
5980 break;
5981 }
014db007 5982 ret = io_async_cancel(req);
62755e35 5983 break;
d63d1b5e
JA
5984 case IORING_OP_FALLOCATE:
5985 if (sqe) {
5986 ret = io_fallocate_prep(req, sqe);
5987 if (ret)
5988 break;
5989 }
014db007 5990 ret = io_fallocate(req, force_nonblock);
d63d1b5e 5991 break;
15b71abe
JA
5992 case IORING_OP_OPENAT:
5993 if (sqe) {
5994 ret = io_openat_prep(req, sqe);
5995 if (ret)
5996 break;
5997 }
014db007 5998 ret = io_openat(req, force_nonblock);
15b71abe 5999 break;
b5dba59e
JA
6000 case IORING_OP_CLOSE:
6001 if (sqe) {
6002 ret = io_close_prep(req, sqe);
6003 if (ret)
6004 break;
6005 }
229a7b63 6006 ret = io_close(req, force_nonblock, cs);
b5dba59e 6007 break;
05f3fb3c
JA
6008 case IORING_OP_FILES_UPDATE:
6009 if (sqe) {
6010 ret = io_files_update_prep(req, sqe);
6011 if (ret)
6012 break;
6013 }
229a7b63 6014 ret = io_files_update(req, force_nonblock, cs);
05f3fb3c 6015 break;
eddc7ef5
JA
6016 case IORING_OP_STATX:
6017 if (sqe) {
6018 ret = io_statx_prep(req, sqe);
6019 if (ret)
6020 break;
6021 }
014db007 6022 ret = io_statx(req, force_nonblock);
eddc7ef5 6023 break;
4840e418
JA
6024 case IORING_OP_FADVISE:
6025 if (sqe) {
6026 ret = io_fadvise_prep(req, sqe);
6027 if (ret)
6028 break;
6029 }
014db007 6030 ret = io_fadvise(req, force_nonblock);
4840e418 6031 break;
c1ca757b
JA
6032 case IORING_OP_MADVISE:
6033 if (sqe) {
6034 ret = io_madvise_prep(req, sqe);
6035 if (ret)
6036 break;
6037 }
014db007 6038 ret = io_madvise(req, force_nonblock);
c1ca757b 6039 break;
cebdb986
JA
6040 case IORING_OP_OPENAT2:
6041 if (sqe) {
6042 ret = io_openat2_prep(req, sqe);
6043 if (ret)
6044 break;
6045 }
014db007 6046 ret = io_openat2(req, force_nonblock);
cebdb986 6047 break;
3e4827b0
JA
6048 case IORING_OP_EPOLL_CTL:
6049 if (sqe) {
6050 ret = io_epoll_ctl_prep(req, sqe);
6051 if (ret)
6052 break;
6053 }
229a7b63 6054 ret = io_epoll_ctl(req, force_nonblock, cs);
3e4827b0 6055 break;
7d67af2c
PB
6056 case IORING_OP_SPLICE:
6057 if (sqe) {
6058 ret = io_splice_prep(req, sqe);
6059 if (ret < 0)
6060 break;
6061 }
014db007 6062 ret = io_splice(req, force_nonblock);
7d67af2c 6063 break;
ddf0322d
JA
6064 case IORING_OP_PROVIDE_BUFFERS:
6065 if (sqe) {
6066 ret = io_provide_buffers_prep(req, sqe);
6067 if (ret)
6068 break;
6069 }
229a7b63 6070 ret = io_provide_buffers(req, force_nonblock, cs);
ddf0322d 6071 break;
067524e9
JA
6072 case IORING_OP_REMOVE_BUFFERS:
6073 if (sqe) {
6074 ret = io_remove_buffers_prep(req, sqe);
6075 if (ret)
6076 break;
6077 }
229a7b63 6078 ret = io_remove_buffers(req, force_nonblock, cs);
3e4827b0 6079 break;
f2a8d5c7
PB
6080 case IORING_OP_TEE:
6081 if (sqe) {
6082 ret = io_tee_prep(req, sqe);
6083 if (ret < 0)
6084 break;
6085 }
6086 ret = io_tee(req, force_nonblock);
6087 break;
2b188cc1
JA
6088 default:
6089 ret = -EINVAL;
6090 break;
6091 }
6092
def596e9
JA
6093 if (ret)
6094 return ret;
6095
b532576e
JA
6096 /* If the op doesn't have a file, we're not polling for it */
6097 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
11ba820b
JA
6098 const bool in_async = io_wq_current_is_worker();
6099
11ba820b
JA
6100 /* workqueue context doesn't hold uring_lock, grab it now */
6101 if (in_async)
6102 mutex_lock(&ctx->uring_lock);
6103
def596e9 6104 io_iopoll_req_issued(req);
11ba820b
JA
6105
6106 if (in_async)
6107 mutex_unlock(&ctx->uring_lock);
def596e9
JA
6108 }
6109
6110 return 0;
2b188cc1
JA
6111}
6112
f4db7182 6113static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
6114{
6115 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6df1db6b 6116 struct io_kiocb *timeout;
561fb04a 6117 int ret = 0;
2b188cc1 6118
6df1db6b
PB
6119 timeout = io_prep_linked_timeout(req);
6120 if (timeout)
6121 io_queue_linked_timeout(timeout);
d4c81f38 6122
0c9d5ccd
JA
6123 /* if NO_CANCEL is set, we must still run the work */
6124 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
6125 IO_WQ_WORK_CANCEL) {
561fb04a 6126 ret = -ECANCELED;
0c9d5ccd 6127 }
31b51510 6128
561fb04a 6129 if (!ret) {
561fb04a 6130 do {
f13fad7b 6131 ret = io_issue_sqe(req, NULL, false, NULL);
561fb04a
JA
6132 /*
6133 * We can get EAGAIN for polled IO even though we're
6134 * forcing a sync submission from here, since we can't
6135 * wait for request slots on the block side.
6136 */
6137 if (ret != -EAGAIN)
6138 break;
6139 cond_resched();
6140 } while (1);
6141 }
31b51510 6142
561fb04a 6143 if (ret) {
4e88d6e7 6144 req_set_fail_links(req);
e1e16097 6145 io_req_complete(req, ret);
edafccee 6146 }
2b188cc1 6147
f4db7182 6148 return io_steal_work(req);
2b188cc1
JA
6149}
6150
65e19f54
JA
6151static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6152 int index)
6153{
6154 struct fixed_file_table *table;
6155
05f3fb3c 6156 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
84695089 6157 return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
6158}
6159
8da11c19
PB
6160static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
6161 int fd, struct file **out_file, bool fixed)
09bb8394 6162{
a197f664 6163 struct io_ring_ctx *ctx = req->ctx;
8da11c19 6164 struct file *file;
09bb8394 6165
8da11c19 6166 if (fixed) {
05f3fb3c 6167 if (unlikely(!ctx->file_data ||
09bb8394
JA
6168 (unsigned) fd >= ctx->nr_user_files))
6169 return -EBADF;
b7620121 6170 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19 6171 file = io_file_from_index(ctx, fd);
fd2206e4
JA
6172 if (file) {
6173 req->fixed_file_refs = ctx->file_data->cur_refs;
6174 percpu_ref_get(req->fixed_file_refs);
6175 }
09bb8394 6176 } else {
c826bd7a 6177 trace_io_uring_file_get(ctx, fd);
8da11c19 6178 file = __io_file_get(state, fd);
09bb8394
JA
6179 }
6180
fd2206e4
JA
6181 if (file || io_op_defs[req->opcode].needs_file_no_error) {
6182 *out_file = file;
6183 return 0;
6184 }
6185 return -EBADF;
09bb8394
JA
6186}
6187
8da11c19 6188static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
63ff8223 6189 int fd)
8da11c19 6190{
8da11c19
PB
6191 bool fixed;
6192
63ff8223 6193 fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
0cdaf760 6194 if (unlikely(!fixed && io_async_submit(req->ctx)))
8da11c19
PB
6195 return -EBADF;
6196
6197 return io_file_get(state, req, fd, &req->file, fixed);
6198}
6199
a197f664 6200static int io_grab_files(struct io_kiocb *req)
fcb323cc 6201{
a197f664 6202 struct io_ring_ctx *ctx = req->ctx;
fcb323cc 6203
f56040b8
PB
6204 io_req_init_async(req);
6205
5b0bbee4 6206 if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
f86cd20c 6207 return 0;
b5dba59e 6208
0f212204 6209 req->work.files = get_files_struct(current);
9b828492
JA
6210 get_nsproxy(current->nsproxy);
6211 req->work.nsproxy = current->nsproxy;
0f212204
JA
6212 req->flags |= REQ_F_INFLIGHT;
6213
fcb323cc 6214 spin_lock_irq(&ctx->inflight_lock);
0f212204 6215 list_add(&req->inflight_entry, &ctx->inflight_list);
fcb323cc 6216 spin_unlock_irq(&ctx->inflight_lock);
0f212204 6217 return 0;
fcb323cc
JA
6218}
6219
f56040b8
PB
6220static inline int io_prep_work_files(struct io_kiocb *req)
6221{
6222 if (!io_op_defs[req->opcode].file_table)
6223 return 0;
6224 return io_grab_files(req);
6225}
6226
2665abfd 6227static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 6228{
ad8a48ac
JA
6229 struct io_timeout_data *data = container_of(timer,
6230 struct io_timeout_data, timer);
6231 struct io_kiocb *req = data->req;
2665abfd
JA
6232 struct io_ring_ctx *ctx = req->ctx;
6233 struct io_kiocb *prev = NULL;
6234 unsigned long flags;
2665abfd
JA
6235
6236 spin_lock_irqsave(&ctx->completion_lock, flags);
6237
6238 /*
 6239 * We don't expect the list to be empty; that will only happen if we
6240 * race with the completion of the linked work.
6241 */
4493233e
PB
6242 if (!list_empty(&req->link_list)) {
6243 prev = list_entry(req->link_list.prev, struct io_kiocb,
6244 link_list);
5d960724 6245 if (refcount_inc_not_zero(&prev->refs)) {
4493233e 6246 list_del_init(&req->link_list);
5d960724
JA
6247 prev->flags &= ~REQ_F_LINK_TIMEOUT;
6248 } else
76a46e06 6249 prev = NULL;
2665abfd
JA
6250 }
6251
6252 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6253
6254 if (prev) {
4e88d6e7 6255 req_set_fail_links(prev);
014db007 6256 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
76a46e06 6257 io_put_req(prev);
47f46768 6258 } else {
e1e16097 6259 io_req_complete(req, -ETIME);
2665abfd 6260 }
2665abfd
JA
6261 return HRTIMER_NORESTART;
6262}
6263
7271ef3a 6264static void __io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 6265{
76a46e06
JA
6266 /*
6267 * If the list is now empty, then our linked request finished before
 6268 * we got a chance to set up the timer
6269 */
4493233e 6270 if (!list_empty(&req->link_list)) {
e8c2bc1f 6271 struct io_timeout_data *data = req->async_data;
94ae5e77 6272
ad8a48ac
JA
6273 data->timer.function = io_link_timeout_fn;
6274 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6275 data->mode);
2665abfd 6276 }
7271ef3a
JA
6277}
6278
6279static void io_queue_linked_timeout(struct io_kiocb *req)
6280{
6281 struct io_ring_ctx *ctx = req->ctx;
6282
6283 spin_lock_irq(&ctx->completion_lock);
6284 __io_queue_linked_timeout(req);
76a46e06 6285 spin_unlock_irq(&ctx->completion_lock);
2665abfd 6286
2665abfd 6287 /* drop submission reference */
76a46e06
JA
6288 io_put_req(req);
6289}
2665abfd 6290
ad8a48ac 6291static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd
JA
6292{
6293 struct io_kiocb *nxt;
6294
dea3b49c 6295 if (!(req->flags & REQ_F_LINK_HEAD))
2665abfd 6296 return NULL;
6df1db6b 6297 if (req->flags & REQ_F_LINK_TIMEOUT)
d7718a9d 6298 return NULL;
2665abfd 6299
4493233e
PB
6300 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
6301 link_list);
d625c6ee 6302 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 6303 return NULL;
2665abfd 6304
76a46e06 6305 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 6306 return nxt;
2665abfd
JA
6307}
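
A linked timeout is simply the SQE that immediately follows a linked request: io_prep_linked_timeout() above peeks at the head of link_list and only arms it if that next opcode is IORING_OP_LINK_TIMEOUT, which io_timeout_prep() prepared with is_timeout_link == true (so its off must be 0). A hedged raw-SQE sketch of a read that is abandoned if it takes longer than one second:

#include <string.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

static void prep_read_with_deadline(struct io_uring_sqe *rd, struct io_uring_sqe *lt,
				    int fd, void *buf, unsigned len,
				    struct __kernel_timespec *ts)
{
	memset(rd, 0, sizeof(*rd));
	rd->opcode = IORING_OP_READ;
	rd->fd = fd;
	rd->addr = (unsigned long) buf;
	rd->len = len;
	rd->flags = IOSQE_IO_LINK;		/* the next SQE is chained to this one */
	rd->user_data = 1;

	ts->tv_sec = 1;
	ts->tv_nsec = 0;
	memset(lt, 0, sizeof(*lt));
	lt->opcode = IORING_OP_LINK_TIMEOUT;	/* picked up by io_prep_linked_timeout() */
	lt->addr = (unsigned long) ts;
	lt->len = 1;
	lt->user_data = 2;			/* completion tag for the timeout itself */
}
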
6308
f13fad7b
JA
6309static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6310 struct io_comp_state *cs)
2b188cc1 6311{
4a0a7a18 6312 struct io_kiocb *linked_timeout;
4bc4494e 6313 struct io_kiocb *nxt;
193155c8 6314 const struct cred *old_creds = NULL;
e0c5c576 6315 int ret;
2b188cc1 6316
4a0a7a18
JA
6317again:
6318 linked_timeout = io_prep_linked_timeout(req);
6319
7cdaf587
XW
6320 if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
6321 req->work.creds != current_cred()) {
193155c8
JA
6322 if (old_creds)
6323 revert_creds(old_creds);
6324 if (old_creds == req->work.creds)
6325 old_creds = NULL; /* restored original creds */
6326 else
6327 old_creds = override_creds(req->work.creds);
6328 }
6329
f13fad7b 6330 ret = io_issue_sqe(req, sqe, true, cs);
491381ce
JA
6331
6332 /*
6333 * We async punt it if the file wasn't marked NOWAIT, or if the file
6334 * doesn't support non-blocking read/write attempts
6335 */
24c74678 6336 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
f063c547 6337 if (!io_arm_poll_handler(req)) {
86a761f8 6338punt:
f063c547
PB
6339 ret = io_prep_work_files(req);
6340 if (unlikely(ret))
bbad27b2 6341 goto err;
f063c547
PB
6342 /*
6343 * Queued up for async execution, worker will release
6344 * submit reference when the iocb is actually submitted.
6345 */
6346 io_queue_async_work(req);
2b188cc1 6347 }
bbad27b2 6348
f063c547
PB
6349 if (linked_timeout)
6350 io_queue_linked_timeout(linked_timeout);
4bc4494e 6351 goto exit;
2b188cc1 6352 }
e65ef56d 6353
652532ad 6354 if (unlikely(ret)) {
fcb323cc 6355err:
652532ad
PB
 6356 /* un-prep timeout, so it'll be killed like any other linked request */
6357 req->flags &= ~REQ_F_LINK_TIMEOUT;
4e88d6e7 6358 req_set_fail_links(req);
e65ef56d 6359 io_put_req(req);
e1e16097 6360 io_req_complete(req, ret);
652532ad 6361 goto exit;
9e645e11 6362 }
652532ad
PB
6363
6364 /* drop submission reference */
6365 nxt = io_put_req_find_next(req);
6366 if (linked_timeout)
6367 io_queue_linked_timeout(linked_timeout);
6368
4a0a7a18
JA
6369 if (nxt) {
6370 req = nxt;
86a761f8
PB
6371
6372 if (req->flags & REQ_F_FORCE_ASYNC)
6373 goto punt;
4a0a7a18
JA
6374 goto again;
6375 }
4bc4494e 6376exit:
193155c8
JA
6377 if (old_creds)
6378 revert_creds(old_creds);
2b188cc1
JA
6379}
6380
f13fad7b
JA
6381static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6382 struct io_comp_state *cs)
4fe2c963
JL
6383{
6384 int ret;
6385
3529d8c2 6386 ret = io_req_defer(req, sqe);
4fe2c963
JL
6387 if (ret) {
6388 if (ret != -EIOCBQUEUED) {
1118591a 6389fail_req:
4e88d6e7 6390 req_set_fail_links(req);
e1e16097
JA
6391 io_put_req(req);
6392 io_req_complete(req, ret);
4fe2c963 6393 }
2550878f 6394 } else if (req->flags & REQ_F_FORCE_ASYNC) {
e8c2bc1f 6395 if (!req->async_data) {
bd2ab18a 6396 ret = io_req_defer_prep(req, sqe);
327d6d96 6397 if (unlikely(ret))
bd2ab18a
PB
6398 goto fail_req;
6399 }
6400
ce35a47a
JA
6401 /*
 6402 * Never try inline submit if IOSQE_ASYNC is set, go straight
6403 * to async execution.
6404 */
3e863ea3 6405 io_req_init_async(req);
ce35a47a
JA
6406 req->work.flags |= IO_WQ_WORK_CONCURRENT;
6407 io_queue_async_work(req);
6408 } else {
f13fad7b 6409 __io_queue_sqe(req, sqe, cs);
ce35a47a 6410 }
4fe2c963
JL
6411}
6412
f13fad7b
JA
6413static inline void io_queue_link_head(struct io_kiocb *req,
6414 struct io_comp_state *cs)
4fe2c963 6415{
94ae5e77 6416 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
e1e16097
JA
6417 io_put_req(req);
6418 io_req_complete(req, -ECANCELED);
1b4a51b6 6419 } else
f13fad7b 6420 io_queue_sqe(req, NULL, cs);
4fe2c963
JL
6421}
6422
1d4240cc 6423static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
f13fad7b 6424 struct io_kiocb **link, struct io_comp_state *cs)
9e645e11 6425{
a197f664 6426 struct io_ring_ctx *ctx = req->ctx;
ef4ff581 6427 int ret;
9e645e11 6428
9e645e11
JA
6429 /*
6430 * If we already have a head request, queue this one for async
6431 * submittal once the head completes. If we don't have a head but
6432 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6433 * submitted sync once the chain is complete. If none of those
6434 * conditions are true (normal request), then just queue it.
6435 */
6436 if (*link) {
9d76377f 6437 struct io_kiocb *head = *link;
4e88d6e7 6438
8cdf2193
PB
6439 /*
6440 * Taking sequential execution of a link, draining both sides
 6441 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
6442 * requests in the link. So, it drains the head and the
6443 * next after the link request. The last one is done via
6444 * drain_next flag to persist the effect across calls.
6445 */
ef4ff581 6446 if (req->flags & REQ_F_IO_DRAIN) {
711be031
PB
6447 head->flags |= REQ_F_IO_DRAIN;
6448 ctx->drain_next = 1;
6449 }
3529d8c2 6450 ret = io_req_defer_prep(req, sqe);
327d6d96 6451 if (unlikely(ret)) {
4e88d6e7 6452 /* fail even hard links since we don't submit */
9d76377f 6453 head->flags |= REQ_F_FAIL_LINK;
1d4240cc 6454 return ret;
2d28390a 6455 }
9d76377f
PB
6456 trace_io_uring_link(ctx, req, head);
6457 list_add_tail(&req->link_list, &head->link_list);
32fe525b
PB
6458
6459 /* last request of a link, enqueue the link */
ef4ff581 6460 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
f13fad7b 6461 io_queue_link_head(head, cs);
32fe525b
PB
6462 *link = NULL;
6463 }
9e645e11 6464 } else {
711be031
PB
6465 if (unlikely(ctx->drain_next)) {
6466 req->flags |= REQ_F_IO_DRAIN;
ef4ff581 6467 ctx->drain_next = 0;
711be031 6468 }
ef4ff581 6469 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
dea3b49c 6470 req->flags |= REQ_F_LINK_HEAD;
711be031 6471 INIT_LIST_HEAD(&req->link_list);
f1d96a8f 6472
711be031 6473 ret = io_req_defer_prep(req, sqe);
327d6d96 6474 if (unlikely(ret))
711be031
PB
6475 req->flags |= REQ_F_FAIL_LINK;
6476 *link = req;
6477 } else {
f13fad7b 6478 io_queue_sqe(req, sqe, cs);
711be031 6479 }
9e645e11 6480 }
2e6e1fde 6481
1d4240cc 6482 return 0;
9e645e11
JA
6483}
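
The bookkeeping in io_submit_sqe() means a chain is built purely from IOSQE_IO_LINK: every SQE that carries the flag extends the chain, and the first one without it terminates it, at which point io_queue_link_head() fires the whole thing. A small sketch of a write followed by an fsync that only runs once the write has finished:

#include <string.h>
#include <linux/io_uring.h>

/*
 * The fsync runs after the write; if the write fails, the chain is broken
 * and the fsync typically completes with -ECANCELED instead of running.
 */
static void prep_write_then_fsync(struct io_uring_sqe *wr, struct io_uring_sqe *fs,
				  int fd, const void *buf, unsigned len)
{
	memset(wr, 0, sizeof(*wr));
	wr->opcode = IORING_OP_WRITE;
	wr->fd = fd;
	wr->addr = (unsigned long) buf;
	wr->len = len;
	wr->flags = IOSQE_IO_LINK;	/* chain continues into the next SQE */
	wr->user_data = 1;

	memset(fs, 0, sizeof(*fs));
	fs->opcode = IORING_OP_FSYNC;
	fs->fd = fd;
	fs->user_data = 2;		/* no IOSQE_IO_LINK: this SQE ends the chain */
}
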
6484
9a56a232
JA
6485/*
6486 * Batched submission is done, ensure local IO is flushed out.
6487 */
6488static void io_submit_state_end(struct io_submit_state *state)
6489{
f13fad7b
JA
6490 if (!list_empty(&state->comp.list))
6491 io_submit_flush_completions(&state->comp);
9a56a232 6492 blk_finish_plug(&state->plug);
9f13c35b 6493 io_state_file_put(state);
2579f913 6494 if (state->free_reqs)
6c8a3134 6495 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9a56a232
JA
6496}
6497
6498/*
6499 * Start submission side cache.
6500 */
6501static void io_submit_state_start(struct io_submit_state *state,
013538bd 6502 struct io_ring_ctx *ctx, unsigned int max_ios)
9a56a232
JA
6503{
6504 blk_start_plug(&state->plug);
013538bd
JA
6505 state->comp.nr = 0;
6506 INIT_LIST_HEAD(&state->comp.list);
6507 state->comp.ctx = ctx;
2579f913 6508 state->free_reqs = 0;
9a56a232
JA
6509 state->file = NULL;
6510 state->ios_left = max_ios;
6511}
6512
2b188cc1
JA
6513static void io_commit_sqring(struct io_ring_ctx *ctx)
6514{
75b28aff 6515 struct io_rings *rings = ctx->rings;
2b188cc1 6516
caf582c6
PB
6517 /*
6518 * Ensure any loads from the SQEs are done at this point,
6519 * since once we write the new head, the application could
6520 * write new data to them.
6521 */
6522 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
6523}
6524
2b188cc1 6525/*
3529d8c2 6526 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
2b188cc1
JA
6527 * that is mapped by userspace. This means that care needs to be taken to
6528 * ensure that reads are stable, as we cannot rely on userspace always
6529 * being a good citizen. If members of the sqe are validated and then later
6530 * used, it's important that those reads are done through READ_ONCE() to
6531 * prevent a re-load down the line.
6532 */
709b302f 6533static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 6534{
75b28aff 6535 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
6536 unsigned head;
6537
6538 /*
6539 * The cached sq head (or cq tail) serves two purposes:
6540 *
6541 * 1) allows us to batch the cost of updating the user visible
6542 * head updates.
6543 * 2) allows the kernel side to track the head on its own, even
6544 * though the application is the one updating it.
6545 */
ee7d46d9 6546 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
709b302f
PB
6547 if (likely(head < ctx->sq_entries))
6548 return &ctx->sq_sqes[head];
2b188cc1
JA
6549
6550 /* drop invalid entries */
498ccd9e 6551 ctx->cached_sq_dropped++;
ee7d46d9 6552 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
709b302f
PB
6553 return NULL;
6554}
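
io_get_sqe() trusts nothing it reads from the ring: the index comes through READ_ONCE() and out-of-range entries are dropped, because the other side of this array is plain userspace memory. The userspace mirror of that contract is to fully fill the SQE and the sq_array slot before publishing the new tail with a release store. A minimal sketch, assuming the ring pointers (sqes, array, ktail, ring_mask) were captured when the rings were mmap()ed at setup time:

#include <linux/io_uring.h>

struct sq_view {
	struct io_uring_sqe *sqes;	/* mapped SQE array */
	unsigned *array;		/* mapped sq ring index array */
	unsigned *ktail;		/* mapped sq ring tail word */
	unsigned ring_mask;
	unsigned local_tail;		/* our private copy of the tail */
};

/* Publish one already-filled SQE slot to the kernel. */
static void sq_publish(struct sq_view *sq, unsigned sqe_idx)
{
	unsigned tail = sq->local_tail++;

	sq->array[tail & sq->ring_mask] = sqe_idx;
	/* order the SQE/array stores before the tail store the kernel acquires */
	__atomic_store_n(sq->ktail, sq->local_tail, __ATOMIC_RELEASE);
}
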
6555
6556static inline void io_consume_sqe(struct io_ring_ctx *ctx)
6557{
6558 ctx->cached_sq_head++;
2b188cc1
JA
6559}
6560
21b55dbc
SG
6561/*
6562 * Check SQE restrictions (opcode and flags).
6563 *
6564 * Returns 'true' if SQE is allowed, 'false' otherwise.
6565 */
6566static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6567 struct io_kiocb *req,
6568 unsigned int sqe_flags)
6569{
6570 if (!ctx->restricted)
6571 return true;
6572
6573 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6574 return false;
6575
6576 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6577 ctx->restrictions.sqe_flags_required)
6578 return false;
6579
6580 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6581 ctx->restrictions.sqe_flags_required))
6582 return false;
6583
6584 return true;
6585}
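
io_check_restriction() only filters; the allow-lists it consults are installed up front through io_uring_register(). A hedged sketch of the registering side: the IORING_REGISTER_RESTRICTIONS constant and the struct io_uring_restriction field names are written from memory of the same-era uapi header, so treat them as assumptions rather than a definitive interface.

#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <linux/io_uring.h>

/*
 * Allow only READV plus fixed-file SQEs; typically done on a ring created
 * with IORING_SETUP_R_DISABLED, before the rings are enabled.
 */
static int restrict_ring(int ring_fd)
{
	struct io_uring_restriction res[2];

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_READV;
	res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
	res[1].sqe_flags = IOSQE_FIXED_FILE;

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_RESTRICTIONS, res, 2);
}
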
6586
ef4ff581
PB
6587#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
6588 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
6589 IOSQE_BUFFER_SELECT)
6590
6591static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6592 const struct io_uring_sqe *sqe,
0cdaf760 6593 struct io_submit_state *state)
0553b8bd 6594{
ef4ff581 6595 unsigned int sqe_flags;
63ff8223 6596 int id;
ef4ff581 6597
0553b8bd
PB
6598 req->opcode = READ_ONCE(sqe->opcode);
6599 req->user_data = READ_ONCE(sqe->user_data);
e8c2bc1f 6600 req->async_data = NULL;
0553b8bd
PB
6601 req->file = NULL;
6602 req->ctx = ctx;
6603 req->flags = 0;
6604 /* one is dropped after submission, the other at completion */
6605 refcount_set(&req->refs, 2);
4dd2824d 6606 req->task = current;
e3bc8e9d 6607 get_task_struct(req->task);
0f212204 6608 atomic_long_inc(&req->task->io_uring->req_issue);
0553b8bd 6609 req->result = 0;
ef4ff581
PB
6610
6611 if (unlikely(req->opcode >= IORING_OP_LAST))
6612 return -EINVAL;
6613
9d8426a0
JA
6614 if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
6615 return -EFAULT;
ef4ff581
PB
6616
6617 sqe_flags = READ_ONCE(sqe->flags);
6618 /* enforce forwards compatibility on users */
6619 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6620 return -EINVAL;
6621
21b55dbc
SG
6622 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6623 return -EACCES;
6624
ef4ff581
PB
6625 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6626 !io_op_defs[req->opcode].buffer_select)
6627 return -EOPNOTSUPP;
6628
6629 id = READ_ONCE(sqe->personality);
6630 if (id) {
7cdaf587 6631 io_req_init_async(req);
ef4ff581
PB
6632 req->work.creds = idr_find(&ctx->personality_idr, id);
6633 if (unlikely(!req->work.creds))
6634 return -EINVAL;
6635 get_cred(req->work.creds);
6636 }
6637
6638 /* same numerical values with corresponding REQ_F_*, safe to copy */
c11368a5 6639 req->flags |= sqe_flags;
ef4ff581 6640
63ff8223
JA
6641 if (!io_op_defs[req->opcode].needs_file)
6642 return 0;
6643
6644 return io_req_set_file(state, req, READ_ONCE(sqe->fd));
0553b8bd
PB
6645}
6646
0f212204 6647static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
6c271ce2 6648{
ac8691c4 6649 struct io_submit_state state;
9e645e11 6650 struct io_kiocb *link = NULL;
9e645e11 6651 int i, submitted = 0;
6c271ce2 6652
c4a2ed72 6653 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8
JA
6654 if (test_bit(0, &ctx->sq_check_overflow)) {
6655 if (!list_empty(&ctx->cq_overflow_list) &&
e6c8aa9a 6656 !io_cqring_overflow_flush(ctx, false, NULL, NULL))
ad3eb2c8
JA
6657 return -EBUSY;
6658 }
6c271ce2 6659
ee7d46d9
PB
6660 /* make sure SQ entry isn't read before tail */
6661 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 6662
2b85edfc
PB
6663 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6664 return -EAGAIN;
6c271ce2 6665
013538bd 6666 io_submit_state_start(&state, ctx, nr);
6c271ce2
JA
6667
6668 for (i = 0; i < nr; i++) {
3529d8c2 6669 const struct io_uring_sqe *sqe;
196be95c 6670 struct io_kiocb *req;
1cb1edb2 6671 int err;
fb5ccc98 6672
b1e50e54
PB
6673 sqe = io_get_sqe(ctx);
6674 if (unlikely(!sqe)) {
6675 io_consume_sqe(ctx);
6676 break;
6677 }
ac8691c4 6678 req = io_alloc_req(ctx, &state);
196be95c
PB
6679 if (unlikely(!req)) {
6680 if (!submitted)
6681 submitted = -EAGAIN;
fb5ccc98 6682 break;
196be95c 6683 }
fb5ccc98 6684
ac8691c4 6685 err = io_init_req(ctx, req, sqe, &state);
709b302f 6686 io_consume_sqe(ctx);
d3656344
JA
6687 /* will complete beyond this point, count as submitted */
6688 submitted++;
6689
ef4ff581 6690 if (unlikely(err)) {
1cb1edb2 6691fail_req:
e1e16097
JA
6692 io_put_req(req);
6693 io_req_complete(req, err);
196be95c
PB
6694 break;
6695 }
fb5ccc98 6696
354420f7 6697 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
0cdaf760 6698 true, io_async_submit(ctx));
f13fad7b 6699 err = io_submit_sqe(req, sqe, &link, &state.comp);
1d4240cc
PB
6700 if (err)
6701 goto fail_req;
6c271ce2
JA
6702 }
6703
9466f437
PB
6704 if (unlikely(submitted != nr)) {
6705 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
6706
6707 percpu_ref_put_many(&ctx->refs, nr - ref_used);
6708 }
9e645e11 6709 if (link)
f13fad7b 6710 io_queue_link_head(link, &state.comp);
ac8691c4 6711 io_submit_state_end(&state);
6c271ce2 6712
ae9428ca
PB
6713 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6714 io_commit_sqring(ctx);
6715
6c271ce2
JA
6716 return submitted;
6717}
6718
23b3628e
XW
6719static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6720{
6721 /* Tell userspace we may need a wakeup call */
6722 spin_lock_irq(&ctx->completion_lock);
6723 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6724 spin_unlock_irq(&ctx->completion_lock);
6725}
6726
6727static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6728{
6729 spin_lock_irq(&ctx->completion_lock);
6730 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6731 spin_unlock_irq(&ctx->completion_lock);
6732}
6733
3f0e64d0
JA
6734static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode,
6735 int sync, void *key)
6736{
6737 struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry);
6738 int ret;
6739
6740 ret = autoremove_wake_function(wqe, mode, sync, key);
6741 if (ret) {
6742 unsigned long flags;
6743
6744 spin_lock_irqsave(&ctx->completion_lock, flags);
6745 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6746 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6747 }
6748 return ret;
6749}
6750
c8d1ba58
JA
6751enum sq_ret {
6752 SQT_IDLE = 1,
6753 SQT_SPIN = 2,
6754 SQT_DID_WORK = 4,
6755};
6756
6757static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
e95eee2d 6758 unsigned long start_jiffies, bool cap_entries)
6c271ce2 6759{
c8d1ba58 6760 unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
534ca6d6 6761 struct io_sq_data *sqd = ctx->sq_data;
c8d1ba58 6762 unsigned int to_submit;
bdcd3eab 6763 int ret = 0;
6c271ce2 6764
c8d1ba58
JA
6765again:
6766 if (!list_empty(&ctx->iopoll_list)) {
6767 unsigned nr_events = 0;
6a779382 6768
c8d1ba58
JA
6769 mutex_lock(&ctx->uring_lock);
6770 if (!list_empty(&ctx->iopoll_list) && !need_resched())
6771 io_do_iopoll(ctx, &nr_events, 0);
6772 mutex_unlock(&ctx->uring_lock);
6773 }
a4c0b3de 6774
c8d1ba58 6775 to_submit = io_sqring_entries(ctx);
6c271ce2 6776
c8d1ba58
JA
6777 /*
6778 * If submit got -EBUSY, flag us as needing the application
6779 * to enter the kernel to reap and flush events.
6780 */
6781 if (!to_submit || ret == -EBUSY || need_resched()) {
6782 /*
6783 * Drop cur_mm before scheduling, we can't hold it for
6784 * long periods (or over schedule()). Do this before
6785 * adding ourselves to the waitqueue, as the unuse/drop
6786 * may sleep.
6787 */
6788 io_sq_thread_drop_mm();
6789
6790 /*
6791 * We're polling. If we're within the defined idle
6792 * period, then let us spin without work before going
6793 * to sleep. The exception is if we got EBUSY doing
6794 * more IO, we should wait for the application to
6795 * reap events and wake us up.
6796 */
6797 if (!list_empty(&ctx->iopoll_list) || need_resched() ||
6798 (!time_after(jiffies, timeout) && ret != -EBUSY &&
6799 !percpu_ref_is_dying(&ctx->refs)))
6800 return SQT_SPIN;
6c271ce2 6801
534ca6d6 6802 prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry,
c8d1ba58 6803 TASK_INTERRUPTIBLE);
6c271ce2 6804
c8d1ba58
JA
6805 /*
6806 * While doing polled IO, before going to sleep, we need
6807 * to check if there are new reqs added to iopoll_list,
6808 * it is because reqs may have been punted to io worker
6809 * and will be added to iopoll_list later, hence check
6810 * the iopoll_list again.
6811 */
6812 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6813 !list_empty_careful(&ctx->iopoll_list)) {
534ca6d6 6814 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
c8d1ba58 6815 goto again;
6c271ce2
JA
6816 }
6817
fb5ccc98 6818 to_submit = io_sqring_entries(ctx);
c8d1ba58
JA
6819 if (!to_submit || ret == -EBUSY)
6820 return SQT_IDLE;
6821 }
c1edbf5f 6822
534ca6d6 6823 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
c8d1ba58 6824 io_ring_clear_wakeup_flag(ctx);
7143b5ac 6825
e95eee2d
JA
6826 /* if we're handling multiple rings, cap submit size for fairness */
6827 if (cap_entries && to_submit > 8)
6828 to_submit = 8;
6829
c8d1ba58
JA
6830 mutex_lock(&ctx->uring_lock);
6831 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6832 ret = io_submit_sqes(ctx, to_submit);
6833 mutex_unlock(&ctx->uring_lock);
90554200
JA
6834
6835 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6836 wake_up(&ctx->sqo_sq_wait);
6837
c8d1ba58
JA
6838 return SQT_DID_WORK;
6839}
6c271ce2 6840
69fb2131
JA
6841static void io_sqd_init_new(struct io_sq_data *sqd)
6842{
6843 struct io_ring_ctx *ctx;
6844
6845 while (!list_empty(&sqd->ctx_new_list)) {
6846 ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
6847 init_wait(&ctx->sqo_wait_entry);
6848 ctx->sqo_wait_entry.func = io_sq_wake_function;
6849 list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
6850 complete(&ctx->sq_thread_comp);
6851 }
6852}
6853
c8d1ba58
JA
6854static int io_sq_thread(void *data)
6855{
91d8f519 6856 struct cgroup_subsys_state *cur_css = NULL;
69fb2131
JA
6857 const struct cred *old_cred = NULL;
6858 struct io_sq_data *sqd = data;
6859 struct io_ring_ctx *ctx;
c8d1ba58 6860 unsigned long start_jiffies;
6c271ce2 6861
69fb2131
JA
6862 start_jiffies = jiffies;
6863 while (!kthread_should_stop()) {
6864 enum sq_ret ret = 0;
e95eee2d 6865 bool cap_entries;
6c271ce2 6866
69fb2131
JA
6867 /*
6868 * Any changes to the sqd lists are synchronized through the
6869 * kthread parking. This synchronizes the thread vs users,
6870 * the users are synchronized on the sqd->ctx_lock.
6871 */
6872 if (kthread_should_park())
6873 kthread_parkme();
6c271ce2 6874
69fb2131
JA
6875 if (unlikely(!list_empty(&sqd->ctx_new_list)))
6876 io_sqd_init_new(sqd);
c8d1ba58 6877
e95eee2d
JA
6878 cap_entries = !list_is_singular(&sqd->ctx_list);
6879
69fb2131
JA
6880 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6881 if (current->cred != ctx->creds) {
6882 if (old_cred)
6883 revert_creds(old_cred);
6884 old_cred = override_creds(ctx->creds);
6885 }
91d8f519 6886 io_sq_thread_associate_blkcg(ctx, &cur_css);
c8d1ba58 6887
e95eee2d 6888 ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
69fb2131
JA
6889
6890 io_sq_thread_drop_mm();
6891 }
6892
6893 if (ret & SQT_SPIN) {
c8d1ba58
JA
6894 io_run_task_work();
6895 cond_resched();
69fb2131
JA
6896 } else if (ret == SQT_IDLE) {
6897 if (kthread_should_park())
6898 continue;
6899 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6900 io_ring_set_wakeup_flag(ctx);
6901 schedule();
6902 start_jiffies = jiffies;
6903 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6904 io_ring_clear_wakeup_flag(ctx);
c8d1ba58 6905 }
6c271ce2
JA
6906 }
6907
4c6e277c 6908 io_run_task_work();
b41e9852 6909
91d8f519
DZ
6910 if (cur_css)
6911 io_sq_thread_unassociate_blkcg();
69fb2131
JA
6912 if (old_cred)
6913 revert_creds(old_cred);
06058632 6914
2bbcd6d3 6915 kthread_parkme();
06058632 6916
6c271ce2
JA
6917 return 0;
6918}
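
When the SQPOLL thread goes idle it sets IORING_SQ_NEED_WAKEUP in the shared sq ring flags (io_ring_set_wakeup_flag() above) and parks in schedule(); submissions written after that point are not noticed until userspace kicks the thread. A minimal sketch of the userspace check, assuming ksq_flags points at the mmap()ed sq ring flags word; the raw syscall is used instead of a library wrapper.

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* After bumping the SQ tail, wake the SQPOLL thread only if it asked for it. */
static void sq_kick_if_needed(int ring_fd, unsigned *ksq_flags)
{
	unsigned flags = __atomic_load_n(ksq_flags, __ATOMIC_ACQUIRE);

	if (flags & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
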
6919
bda52162
JA
6920struct io_wait_queue {
6921 struct wait_queue_entry wq;
6922 struct io_ring_ctx *ctx;
6923 unsigned to_wait;
6924 unsigned nr_timeouts;
6925};
6926
1d7bb1d5 6927static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
6928{
6929 struct io_ring_ctx *ctx = iowq->ctx;
6930
6931 /*
d195a66e 6932 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
6933 * started waiting. For timeouts, we always want to return to userspace,
6934 * regardless of event count.
6935 */
1d7bb1d5 6936 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
6937 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6938}
6939
6940static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6941 int wake_flags, void *key)
6942{
6943 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6944 wq);
6945
1d7bb1d5
JA
6946 /* use noflush == true, as we can't safely rely on locking context */
6947 if (!io_should_wake(iowq, true))
bda52162
JA
6948 return -1;
6949
6950 return autoremove_wake_function(curr, mode, wake_flags, key);
6951}
6952
af9c1a44
JA
6953static int io_run_task_work_sig(void)
6954{
6955 if (io_run_task_work())
6956 return 1;
6957 if (!signal_pending(current))
6958 return 0;
6959 if (current->jobctl & JOBCTL_TASK_WORK) {
6960 spin_lock_irq(&current->sighand->siglock);
6961 current->jobctl &= ~JOBCTL_TASK_WORK;
6962 recalc_sigpending();
6963 spin_unlock_irq(&current->sighand->siglock);
6964 return 1;
6965 }
6966 return -EINTR;
6967}
6968
2b188cc1
JA
6969/*
6970 * Wait until events become available, if we don't already have some. The
6971 * application must reap them itself, as they reside on the shared cq ring.
6972 */
6973static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6974 const sigset_t __user *sig, size_t sigsz)
6975{
bda52162
JA
6976 struct io_wait_queue iowq = {
6977 .wq = {
6978 .private = current,
6979 .func = io_wake_function,
6980 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6981 },
6982 .ctx = ctx,
6983 .to_wait = min_events,
6984 };
75b28aff 6985 struct io_rings *rings = ctx->rings;
e9ffa5c2 6986 int ret = 0;
2b188cc1 6987
b41e9852
JA
6988 do {
6989 if (io_cqring_events(ctx, false) >= min_events)
6990 return 0;
4c6e277c 6991 if (!io_run_task_work())
b41e9852 6992 break;
b41e9852 6993 } while (1);
2b188cc1
JA
6994
6995 if (sig) {
9e75ad5d
AB
6996#ifdef CONFIG_COMPAT
6997 if (in_compat_syscall())
6998 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 6999 sigsz);
9e75ad5d
AB
7000 else
7001#endif
b772434b 7002 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 7003
2b188cc1
JA
7004 if (ret)
7005 return ret;
7006 }
7007
bda52162 7008 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 7009 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
7010 do {
7011 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7012 TASK_INTERRUPTIBLE);
ce593a6c 7013 /* make sure we run task_work before checking for signals */
af9c1a44
JA
7014 ret = io_run_task_work_sig();
7015 if (ret > 0)
4c6e277c 7016 continue;
af9c1a44 7017 else if (ret < 0)
bda52162 7018 break;
ce593a6c
JA
7019 if (io_should_wake(&iowq, false))
7020 break;
7021 schedule();
bda52162
JA
7022 } while (1);
7023 finish_wait(&ctx->wait, &iowq.wq);
7024
b7db41c9 7025 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 7026
75b28aff 7027 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
7028}
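
io_cqring_wait() is what services the IORING_ENTER_GETEVENTS side of io_uring_enter(): the task sleeps on ctx->wait until io_should_wake() sees either min_events completions or an elapsed timeout. A hedged sketch of the matching userspace call and the acquire-ordered CQ read that follows it; khead, ktail and cqes are assumed to be the mmap()ed CQ ring pointers obtained at setup.

#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Block until at least one CQE is available, then consume it. */
static int wait_one_cqe(int ring_fd, unsigned *khead, unsigned *ktail,
			unsigned ring_mask, struct io_uring_cqe *cqes,
			struct io_uring_cqe *out)
{
	int ret = syscall(__NR_io_uring_enter, ring_fd, 0, 1,
			  IORING_ENTER_GETEVENTS, NULL, 0);
	if (ret < 0)
		return -errno;

	unsigned head = *khead;
	if (head == __atomic_load_n(ktail, __ATOMIC_ACQUIRE))
		return -EAGAIN;		/* e.g. interrupted before an event arrived */

	*out = cqes[head & ring_mask];
	/* hand the slot back to the kernel */
	__atomic_store_n(khead, head + 1, __ATOMIC_RELEASE);
	return 0;
}
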
7029
6b06314c
JA
7030static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7031{
7032#if defined(CONFIG_UNIX)
7033 if (ctx->ring_sock) {
7034 struct sock *sock = ctx->ring_sock->sk;
7035 struct sk_buff *skb;
7036
7037 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7038 kfree_skb(skb);
7039 }
7040#else
7041 int i;
7042
65e19f54
JA
7043 for (i = 0; i < ctx->nr_user_files; i++) {
7044 struct file *file;
7045
7046 file = io_file_from_index(ctx, i);
7047 if (file)
7048 fput(file);
7049 }
6b06314c
JA
7050#endif
7051}
7052
05f3fb3c
JA
7053static void io_file_ref_kill(struct percpu_ref *ref)
7054{
7055 struct fixed_file_data *data;
7056
7057 data = container_of(ref, struct fixed_file_data, refs);
7058 complete(&data->done);
7059}
7060
6b06314c
JA
7061static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7062{
05f3fb3c 7063 struct fixed_file_data *data = ctx->file_data;
05589553 7064 struct fixed_file_ref_node *ref_node = NULL;
65e19f54
JA
7065 unsigned nr_tables, i;
7066
05f3fb3c 7067 if (!data)
6b06314c
JA
7068 return -ENXIO;
7069
6a4d07cd 7070 spin_lock(&data->lock);
05589553
XW
7071 if (!list_empty(&data->ref_list))
7072 ref_node = list_first_entry(&data->ref_list,
7073 struct fixed_file_ref_node, node);
6a4d07cd 7074 spin_unlock(&data->lock);
05589553
XW
7075 if (ref_node)
7076 percpu_ref_kill(&ref_node->refs);
7077
7078 percpu_ref_kill(&data->refs);
7079
7080 /* wait for all refs nodes to complete */
4a38aed2 7081 flush_delayed_work(&ctx->file_put_work);
2faf852d 7082 wait_for_completion(&data->done);
05f3fb3c 7083
6b06314c 7084 __io_sqe_files_unregister(ctx);
65e19f54
JA
7085 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7086 for (i = 0; i < nr_tables; i++)
05f3fb3c
JA
7087 kfree(data->table[i].files);
7088 kfree(data->table);
05589553
XW
7089 percpu_ref_exit(&data->refs);
7090 kfree(data);
05f3fb3c 7091 ctx->file_data = NULL;
6b06314c
JA
7092 ctx->nr_user_files = 0;
7093 return 0;
7094}
7095
534ca6d6 7096static void io_put_sq_data(struct io_sq_data *sqd)
6c271ce2 7097{
534ca6d6 7098 if (refcount_dec_and_test(&sqd->refs)) {
2bbcd6d3
RP
7099 /*
7100 * The park is a bit of a workaround; without it we get
7101 * warning spews on shutdown with SQPOLL set and affinity
7102 * set to a single CPU.
7103 */
534ca6d6
JA
7104 if (sqd->thread) {
7105 kthread_park(sqd->thread);
7106 kthread_stop(sqd->thread);
7107 }
7108
7109 kfree(sqd);
7110 }
7111}
7112
aa06165d
JA
7113static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7114{
7115 struct io_ring_ctx *ctx_attach;
7116 struct io_sq_data *sqd;
7117 struct fd f;
7118
7119 f = fdget(p->wq_fd);
7120 if (!f.file)
7121 return ERR_PTR(-ENXIO);
7122 if (f.file->f_op != &io_uring_fops) {
7123 fdput(f);
7124 return ERR_PTR(-EINVAL);
7125 }
7126
7127 ctx_attach = f.file->private_data;
7128 sqd = ctx_attach->sq_data;
7129 if (!sqd) {
7130 fdput(f);
7131 return ERR_PTR(-EINVAL);
7132 }
7133
7134 refcount_inc(&sqd->refs);
7135 fdput(f);
7136 return sqd;
7137}
7138
534ca6d6
JA
7139static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7140{
7141 struct io_sq_data *sqd;
7142
aa06165d
JA
7143 if (p->flags & IORING_SETUP_ATTACH_WQ)
7144 return io_attach_sq_data(p);
7145
534ca6d6
JA
7146 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7147 if (!sqd)
7148 return ERR_PTR(-ENOMEM);
7149
7150 refcount_set(&sqd->refs, 1);
69fb2131
JA
7151 INIT_LIST_HEAD(&sqd->ctx_list);
7152 INIT_LIST_HEAD(&sqd->ctx_new_list);
7153 mutex_init(&sqd->ctx_lock);
7154 mutex_init(&sqd->lock);
534ca6d6
JA
7155 init_waitqueue_head(&sqd->wait);
7156 return sqd;
7157}
7158
69fb2131
JA
7159static void io_sq_thread_unpark(struct io_sq_data *sqd)
7160 __releases(&sqd->lock)
7161{
7162 if (!sqd->thread)
7163 return;
7164 kthread_unpark(sqd->thread);
7165 mutex_unlock(&sqd->lock);
7166}
7167
7168static void io_sq_thread_park(struct io_sq_data *sqd)
7169 __acquires(&sqd->lock)
7170{
7171 if (!sqd->thread)
7172 return;
7173 mutex_lock(&sqd->lock);
7174 kthread_park(sqd->thread);
7175}
7176
534ca6d6
JA
7177static void io_sq_thread_stop(struct io_ring_ctx *ctx)
7178{
7179 struct io_sq_data *sqd = ctx->sq_data;
7180
7181 if (sqd) {
7182 if (sqd->thread) {
7183 /*
7184 * We may arrive here from the error branch in
7185 * io_sq_offload_create() where the kthread is created
7186 * without being woken up, so wake it up now to make
7187 * sure the wait will complete.
7188 */
7189 wake_up_process(sqd->thread);
7190 wait_for_completion(&ctx->sq_thread_comp);
69fb2131
JA
7191
7192 io_sq_thread_park(sqd);
7193 }
7194
7195 mutex_lock(&sqd->ctx_lock);
7196 list_del(&ctx->sqd_list);
7197 mutex_unlock(&sqd->ctx_lock);
7198
7199 if (sqd->thread) {
7200 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
7201 io_sq_thread_unpark(sqd);
534ca6d6
JA
7202 }
7203
7204 io_put_sq_data(sqd);
7205 ctx->sq_data = NULL;
6c271ce2
JA
7206 }
7207}
7208
6b06314c
JA
7209static void io_finish_async(struct io_ring_ctx *ctx)
7210{
6c271ce2
JA
7211 io_sq_thread_stop(ctx);
7212
561fb04a
JA
7213 if (ctx->io_wq) {
7214 io_wq_destroy(ctx->io_wq);
7215 ctx->io_wq = NULL;
6b06314c
JA
7216 }
7217}
7218
7219#if defined(CONFIG_UNIX)
6b06314c
JA
7220/*
7221 * Ensure the UNIX gc is aware of our file set, so we are certain that
7222 * the io_uring can be safely unregistered on process exit, even if we have
7223 * loops in the file referencing.
7224 */
7225static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7226{
7227 struct sock *sk = ctx->ring_sock->sk;
7228 struct scm_fp_list *fpl;
7229 struct sk_buff *skb;
08a45173 7230 int i, nr_files;
6b06314c 7231
6b06314c
JA
7232 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7233 if (!fpl)
7234 return -ENOMEM;
7235
7236 skb = alloc_skb(0, GFP_KERNEL);
7237 if (!skb) {
7238 kfree(fpl);
7239 return -ENOMEM;
7240 }
7241
7242 skb->sk = sk;
6b06314c 7243
08a45173 7244 nr_files = 0;
6b06314c
JA
7245 fpl->user = get_uid(ctx->user);
7246 for (i = 0; i < nr; i++) {
65e19f54
JA
7247 struct file *file = io_file_from_index(ctx, i + offset);
7248
7249 if (!file)
08a45173 7250 continue;
65e19f54 7251 fpl->fp[nr_files] = get_file(file);
08a45173
JA
7252 unix_inflight(fpl->user, fpl->fp[nr_files]);
7253 nr_files++;
6b06314c
JA
7254 }
7255
08a45173
JA
7256 if (nr_files) {
7257 fpl->max = SCM_MAX_FD;
7258 fpl->count = nr_files;
7259 UNIXCB(skb).fp = fpl;
05f3fb3c 7260 skb->destructor = unix_destruct_scm;
08a45173
JA
7261 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7262 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 7263
08a45173
JA
7264 for (i = 0; i < nr_files; i++)
7265 fput(fpl->fp[i]);
7266 } else {
7267 kfree_skb(skb);
7268 kfree(fpl);
7269 }
6b06314c
JA
7270
7271 return 0;
7272}
7273
7274/*
7275 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7276 * causes regular reference counting to break down. We rely on the UNIX
7277 * garbage collection to take care of this problem for us.
7278 */
7279static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7280{
7281 unsigned left, total;
7282 int ret = 0;
7283
7284 total = 0;
7285 left = ctx->nr_user_files;
7286 while (left) {
7287 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
7288
7289 ret = __io_sqe_files_scm(ctx, this_files, total);
7290 if (ret)
7291 break;
7292 left -= this_files;
7293 total += this_files;
7294 }
7295
7296 if (!ret)
7297 return 0;
7298
7299 while (total < ctx->nr_user_files) {
65e19f54
JA
7300 struct file *file = io_file_from_index(ctx, total);
7301
7302 if (file)
7303 fput(file);
6b06314c
JA
7304 total++;
7305 }
7306
7307 return ret;
7308}
7309#else
7310static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7311{
7312 return 0;
7313}
7314#endif
7315
65e19f54
JA
7316static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
7317 unsigned nr_files)
7318{
7319 int i;
7320
7321 for (i = 0; i < nr_tables; i++) {
05f3fb3c 7322 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
7323 unsigned this_files;
7324
7325 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7326 table->files = kcalloc(this_files, sizeof(struct file *),
7327 GFP_KERNEL);
7328 if (!table->files)
7329 break;
7330 nr_files -= this_files;
7331 }
7332
7333 if (i == nr_tables)
7334 return 0;
7335
7336 for (i = 0; i < nr_tables; i++) {
05f3fb3c 7337 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
7338 kfree(table->files);
7339 }
7340 return 1;
7341}
7342
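/*
 * Editor note, a minimal sketch of the two-level lookup these tables enable:
 * a fixed file index selects a table via a shift and a slot via a mask.
 * The constants below are assumptions standing in for IORING_FILE_TABLE_SHIFT
 * and IORING_FILE_TABLE_MASK; the kernel derives them from
 * IORING_MAX_FILES_TABLE.
 */
#include <stdio.h>

#define EXAMPLE_TABLE_SHIFT	9		/* assumed: 512 slots per table */
#define EXAMPLE_TABLE_MASK	((1U << EXAMPLE_TABLE_SHIFT) - 1)

int main(void)
{
	unsigned int i = 1300;	/* arbitrary fixed-file index */

	printf("index %u -> table %u, slot %u\n",
	       i, i >> EXAMPLE_TABLE_SHIFT, i & EXAMPLE_TABLE_MASK);
	return 0;
}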
05f3fb3c
JA
7343static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
7344{
7345#if defined(CONFIG_UNIX)
7346 struct sock *sock = ctx->ring_sock->sk;
7347 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7348 struct sk_buff *skb;
7349 int i;
7350
7351 __skb_queue_head_init(&list);
7352
7353 /*
7354 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7355 * remove this entry and rearrange the file array.
7356 */
7357 skb = skb_dequeue(head);
7358 while (skb) {
7359 struct scm_fp_list *fp;
7360
7361 fp = UNIXCB(skb).fp;
7362 for (i = 0; i < fp->count; i++) {
7363 int left;
7364
7365 if (fp->fp[i] != file)
7366 continue;
7367
7368 unix_notinflight(fp->user, fp->fp[i]);
7369 left = fp->count - 1 - i;
7370 if (left) {
7371 memmove(&fp->fp[i], &fp->fp[i + 1],
7372 left * sizeof(struct file *));
7373 }
7374 fp->count--;
7375 if (!fp->count) {
7376 kfree_skb(skb);
7377 skb = NULL;
7378 } else {
7379 __skb_queue_tail(&list, skb);
7380 }
7381 fput(file);
7382 file = NULL;
7383 break;
7384 }
7385
7386 if (!file)
7387 break;
7388
7389 __skb_queue_tail(&list, skb);
7390
7391 skb = skb_dequeue(head);
7392 }
7393
7394 if (skb_peek(&list)) {
7395 spin_lock_irq(&head->lock);
7396 while ((skb = __skb_dequeue(&list)) != NULL)
7397 __skb_queue_tail(head, skb);
7398 spin_unlock_irq(&head->lock);
7399 }
7400#else
7401 fput(file);
7402#endif
7403}
7404
7405struct io_file_put {
05589553 7406 struct list_head list;
05f3fb3c 7407 struct file *file;
05f3fb3c
JA
7408};
7409
4a38aed2 7410static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
65e19f54 7411{
4a38aed2
JA
7412 struct fixed_file_data *file_data = ref_node->file_data;
7413 struct io_ring_ctx *ctx = file_data->ctx;
05f3fb3c 7414 struct io_file_put *pfile, *tmp;
05589553
XW
7415
7416 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
6a4d07cd 7417 list_del(&pfile->list);
05589553
XW
7418 io_ring_file_put(ctx, pfile->file);
7419 kfree(pfile);
65e19f54 7420 }
05589553 7421
6a4d07cd
JA
7422 spin_lock(&file_data->lock);
7423 list_del(&ref_node->node);
7424 spin_unlock(&file_data->lock);
05589553
XW
7425
7426 percpu_ref_exit(&ref_node->refs);
7427 kfree(ref_node);
7428 percpu_ref_put(&file_data->refs);
2faf852d 7429}
65e19f54 7430
4a38aed2
JA
7431static void io_file_put_work(struct work_struct *work)
7432{
7433 struct io_ring_ctx *ctx;
7434 struct llist_node *node;
7435
7436 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
7437 node = llist_del_all(&ctx->file_put_llist);
7438
7439 while (node) {
7440 struct fixed_file_ref_node *ref_node;
7441 struct llist_node *next = node->next;
7442
7443 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
7444 __io_file_put_work(ref_node);
7445 node = next;
7446 }
7447}
7448
05589553 7449static void io_file_data_ref_zero(struct percpu_ref *ref)
2faf852d 7450{
05589553 7451 struct fixed_file_ref_node *ref_node;
4a38aed2
JA
7452 struct io_ring_ctx *ctx;
7453 bool first_add;
7454 int delay = HZ;
65e19f54 7455
05589553 7456 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
4a38aed2 7457 ctx = ref_node->file_data->ctx;
05589553 7458
4a38aed2
JA
7459 if (percpu_ref_is_dying(&ctx->file_data->refs))
7460 delay = 0;
05589553 7461
4a38aed2
JA
7462 first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
7463 if (!delay)
7464 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
7465 else if (first_add)
7466 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
05f3fb3c 7467}
65e19f54 7468
05589553
XW
7469static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
7470 struct io_ring_ctx *ctx)
05f3fb3c 7471{
05589553 7472 struct fixed_file_ref_node *ref_node;
05f3fb3c 7473
05589553
XW
7474 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7475 if (!ref_node)
7476 return ERR_PTR(-ENOMEM);
05f3fb3c 7477
05589553
XW
7478 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
7479 0, GFP_KERNEL)) {
7480 kfree(ref_node);
7481 return ERR_PTR(-ENOMEM);
7482 }
7483 INIT_LIST_HEAD(&ref_node->node);
7484 INIT_LIST_HEAD(&ref_node->file_list);
05589553
XW
7485 ref_node->file_data = ctx->file_data;
7486 return ref_node;
05589553
XW
7487}
7488
7489static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
7490{
7491 percpu_ref_exit(&ref_node->refs);
7492 kfree(ref_node);
65e19f54
JA
7493}
7494
6b06314c
JA
7495static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7496 unsigned nr_args)
7497{
7498 __s32 __user *fds = (__s32 __user *) arg;
65e19f54 7499 unsigned nr_tables;
05f3fb3c 7500 struct file *file;
6b06314c
JA
7501 int fd, ret = 0;
7502 unsigned i;
05589553 7503 struct fixed_file_ref_node *ref_node;
6b06314c 7504
05f3fb3c 7505 if (ctx->file_data)
6b06314c
JA
7506 return -EBUSY;
7507 if (!nr_args)
7508 return -EINVAL;
7509 if (nr_args > IORING_MAX_FIXED_FILES)
7510 return -EMFILE;
7511
05f3fb3c
JA
7512 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
7513 if (!ctx->file_data)
7514 return -ENOMEM;
7515 ctx->file_data->ctx = ctx;
7516 init_completion(&ctx->file_data->done);
05589553 7517 INIT_LIST_HEAD(&ctx->file_data->ref_list);
f7fe9346 7518 spin_lock_init(&ctx->file_data->lock);
05f3fb3c 7519
65e19f54 7520 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
05f3fb3c
JA
7521 ctx->file_data->table = kcalloc(nr_tables,
7522 sizeof(struct fixed_file_table),
65e19f54 7523 GFP_KERNEL);
05f3fb3c
JA
7524 if (!ctx->file_data->table) {
7525 kfree(ctx->file_data);
7526 ctx->file_data = NULL;
6b06314c 7527 return -ENOMEM;
05f3fb3c
JA
7528 }
7529
05589553 7530 if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill,
05f3fb3c
JA
7531 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
7532 kfree(ctx->file_data->table);
7533 kfree(ctx->file_data);
7534 ctx->file_data = NULL;
6b06314c 7535 return -ENOMEM;
05f3fb3c 7536 }
6b06314c 7537
65e19f54 7538 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
05f3fb3c
JA
7539 percpu_ref_exit(&ctx->file_data->refs);
7540 kfree(ctx->file_data->table);
7541 kfree(ctx->file_data);
7542 ctx->file_data = NULL;
65e19f54
JA
7543 return -ENOMEM;
7544 }
7545
08a45173 7546 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
7547 struct fixed_file_table *table;
7548 unsigned index;
7549
6b06314c
JA
7550 ret = -EFAULT;
7551 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
7552 break;
08a45173
JA
7553 /* allow sparse sets */
7554 if (fd == -1) {
7555 ret = 0;
7556 continue;
7557 }
6b06314c 7558
05f3fb3c 7559 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54 7560 index = i & IORING_FILE_TABLE_MASK;
05f3fb3c 7561 file = fget(fd);
6b06314c
JA
7562
7563 ret = -EBADF;
05f3fb3c 7564 if (!file)
6b06314c 7565 break;
05f3fb3c 7566
6b06314c
JA
7567 /*
7568 * Don't allow io_uring instances to be registered. If UNIX
7569 * isn't enabled, then this causes a reference cycle and this
7570 * instance can never get freed. If UNIX is enabled we'll
7571 * handle it just fine, but there's still no point in allowing
7572 * a ring fd as it doesn't support regular read/write anyway.
7573 */
05f3fb3c
JA
7574 if (file->f_op == &io_uring_fops) {
7575 fput(file);
6b06314c
JA
7576 break;
7577 }
6b06314c 7578 ret = 0;
05f3fb3c 7579 table->files[index] = file;
6b06314c
JA
7580 }
7581
7582 if (ret) {
65e19f54 7583 for (i = 0; i < ctx->nr_user_files; i++) {
65e19f54
JA
7584 file = io_file_from_index(ctx, i);
7585 if (file)
7586 fput(file);
7587 }
7588 for (i = 0; i < nr_tables; i++)
05f3fb3c 7589 kfree(ctx->file_data->table[i].files);
6b06314c 7590
667e57da 7591 percpu_ref_exit(&ctx->file_data->refs);
05f3fb3c
JA
7592 kfree(ctx->file_data->table);
7593 kfree(ctx->file_data);
7594 ctx->file_data = NULL;
6b06314c
JA
7595 ctx->nr_user_files = 0;
7596 return ret;
7597 }
7598
7599 ret = io_sqe_files_scm(ctx);
05589553 7600 if (ret) {
6b06314c 7601 io_sqe_files_unregister(ctx);
05589553
XW
7602 return ret;
7603 }
6b06314c 7604
05589553
XW
7605 ref_node = alloc_fixed_file_ref_node(ctx);
7606 if (IS_ERR(ref_node)) {
7607 io_sqe_files_unregister(ctx);
7608 return PTR_ERR(ref_node);
7609 }
7610
7611 ctx->file_data->cur_refs = &ref_node->refs;
6a4d07cd 7612 spin_lock(&ctx->file_data->lock);
05589553 7613 list_add(&ref_node->node, &ctx->file_data->ref_list);
6a4d07cd 7614 spin_unlock(&ctx->file_data->lock);
05589553 7615 percpu_ref_get(&ctx->file_data->refs);
6b06314c
JA
7616 return ret;
7617}
7618
c3a31e60
JA
7619static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7620 int index)
7621{
7622#if defined(CONFIG_UNIX)
7623 struct sock *sock = ctx->ring_sock->sk;
7624 struct sk_buff_head *head = &sock->sk_receive_queue;
7625 struct sk_buff *skb;
7626
7627 /*
7628 * See if we can merge this file into an existing skb SCM_RIGHTS
7629 * file set. If there's no room, fall back to allocating a new skb
7630 * and filling it in.
7631 */
7632 spin_lock_irq(&head->lock);
7633 skb = skb_peek(head);
7634 if (skb) {
7635 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7636
7637 if (fpl->count < SCM_MAX_FD) {
7638 __skb_unlink(skb, head);
7639 spin_unlock_irq(&head->lock);
7640 fpl->fp[fpl->count] = get_file(file);
7641 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7642 fpl->count++;
7643 spin_lock_irq(&head->lock);
7644 __skb_queue_head(head, skb);
7645 } else {
7646 skb = NULL;
7647 }
7648 }
7649 spin_unlock_irq(&head->lock);
7650
7651 if (skb) {
7652 fput(file);
7653 return 0;
7654 }
7655
7656 return __io_sqe_files_scm(ctx, 1, index);
7657#else
7658 return 0;
7659#endif
7660}
7661
a5318d3c 7662static int io_queue_file_removal(struct fixed_file_data *data,
05589553 7663 struct file *file)
05f3fb3c 7664{
a5318d3c 7665 struct io_file_put *pfile;
05589553
XW
7666 struct percpu_ref *refs = data->cur_refs;
7667 struct fixed_file_ref_node *ref_node;
05f3fb3c 7668
05f3fb3c 7669 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
a5318d3c
HD
7670 if (!pfile)
7671 return -ENOMEM;
05f3fb3c 7672
05589553 7673 ref_node = container_of(refs, struct fixed_file_ref_node, refs);
05f3fb3c 7674 pfile->file = file;
05589553
XW
7675 list_add(&pfile->list, &ref_node->file_list);
7676
a5318d3c 7677 return 0;
05f3fb3c
JA
7678}
7679
7680static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7681 struct io_uring_files_update *up,
7682 unsigned nr_args)
7683{
7684 struct fixed_file_data *data = ctx->file_data;
05589553 7685 struct fixed_file_ref_node *ref_node;
05f3fb3c 7686 struct file *file;
c3a31e60
JA
7687 __s32 __user *fds;
7688 int fd, i, err;
7689 __u32 done;
05589553 7690 bool needs_switch = false;
c3a31e60 7691
05f3fb3c 7692 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
7693 return -EOVERFLOW;
7694 if (done > ctx->nr_user_files)
7695 return -EINVAL;
7696
05589553
XW
7697 ref_node = alloc_fixed_file_ref_node(ctx);
7698 if (IS_ERR(ref_node))
7699 return PTR_ERR(ref_node);
7700
c3a31e60 7701 done = 0;
05f3fb3c 7702 fds = u64_to_user_ptr(up->fds);
c3a31e60 7703 while (nr_args) {
65e19f54
JA
7704 struct fixed_file_table *table;
7705 unsigned index;
7706
c3a31e60
JA
7707 err = 0;
7708 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7709 err = -EFAULT;
7710 break;
7711 }
05f3fb3c
JA
7712 i = array_index_nospec(up->offset, ctx->nr_user_files);
7713 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54
JA
7714 index = i & IORING_FILE_TABLE_MASK;
7715 if (table->files[index]) {
98dfd502 7716 file = table->files[index];
a5318d3c
HD
7717 err = io_queue_file_removal(data, file);
7718 if (err)
7719 break;
65e19f54 7720 table->files[index] = NULL;
05589553 7721 needs_switch = true;
c3a31e60
JA
7722 }
7723 if (fd != -1) {
c3a31e60
JA
7724 file = fget(fd);
7725 if (!file) {
7726 err = -EBADF;
7727 break;
7728 }
7729 /*
7730 * Don't allow io_uring instances to be registered. If
7731 * UNIX isn't enabled, then this causes a reference
7732 * cycle and this instance can never get freed. If UNIX
7733 * is enabled we'll handle it just fine, but there's
7734 * still no point in allowing a ring fd as it doesn't
7735 * support regular read/write anyway.
7736 */
7737 if (file->f_op == &io_uring_fops) {
7738 fput(file);
7739 err = -EBADF;
7740 break;
7741 }
65e19f54 7742 table->files[index] = file;
c3a31e60 7743 err = io_sqe_file_register(ctx, file, i);
f3bd9dae 7744 if (err) {
95d1c8e5 7745 table->files[index] = NULL;
f3bd9dae 7746 fput(file);
c3a31e60 7747 break;
f3bd9dae 7748 }
c3a31e60
JA
7749 }
7750 nr_args--;
7751 done++;
05f3fb3c
JA
7752 up->offset++;
7753 }
7754
05589553
XW
7755 if (needs_switch) {
7756 percpu_ref_kill(data->cur_refs);
6a4d07cd 7757 spin_lock(&data->lock);
05589553
XW
7758 list_add(&ref_node->node, &data->ref_list);
7759 data->cur_refs = &ref_node->refs;
6a4d07cd 7760 spin_unlock(&data->lock);
05589553
XW
7761 percpu_ref_get(&ctx->file_data->refs);
7762 } else
7763 destroy_fixed_file_ref_node(ref_node);
c3a31e60
JA
7764
7765 return done ? done : err;
7766}
05589553 7767
05f3fb3c
JA
7768static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7769 unsigned nr_args)
7770{
7771 struct io_uring_files_update up;
7772
7773 if (!ctx->file_data)
7774 return -ENXIO;
7775 if (!nr_args)
7776 return -EINVAL;
7777 if (copy_from_user(&up, arg, sizeof(up)))
7778 return -EFAULT;
7779 if (up.resv)
7780 return -EINVAL;
7781
7782 return __io_sqe_files_update(ctx, &up, nr_args);
7783}
c3a31e60 7784
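/*
 * Illustrative userspace sketch (editor addition): __io_sqe_files_update()
 * above services IORING_REGISTER_FILES_UPDATE. A hypothetical helper that
 * replaces a single slot (passing fd == -1 would clear it, matching the
 * sparse-set handling) might look like this.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int update_fixed_file(int ring_fd, unsigned int slot, int new_fd)
{
	__s32 fds[1] = { new_fd };
	struct io_uring_files_update up = {
		.offset	= slot,
		.fds	= (unsigned long) fds,
	};

	return (int) syscall(__NR_io_uring_register, ring_fd,
			     IORING_REGISTER_FILES_UPDATE, &up, 1);
}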
e9fd9396 7785static void io_free_work(struct io_wq_work *work)
7d723065
JA
7786{
7787 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7788
e9fd9396 7789 /* Consider that io_steal_work() relies on this ref */
7d723065
JA
7790 io_put_req(req);
7791}
7792
24369c2e
PB
7793static int io_init_wq_offload(struct io_ring_ctx *ctx,
7794 struct io_uring_params *p)
7795{
7796 struct io_wq_data data;
7797 struct fd f;
7798 struct io_ring_ctx *ctx_attach;
7799 unsigned int concurrency;
7800 int ret = 0;
7801
7802 data.user = ctx->user;
e9fd9396 7803 data.free_work = io_free_work;
f5fa38c5 7804 data.do_work = io_wq_submit_work;
24369c2e
PB
7805
7806 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
7807 /* Do QD, or 4 * CPUS, whichever is smaller */
7808 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
7809
7810 ctx->io_wq = io_wq_create(concurrency, &data);
7811 if (IS_ERR(ctx->io_wq)) {
7812 ret = PTR_ERR(ctx->io_wq);
7813 ctx->io_wq = NULL;
7814 }
7815 return ret;
7816 }
7817
7818 f = fdget(p->wq_fd);
7819 if (!f.file)
7820 return -EBADF;
7821
7822 if (f.file->f_op != &io_uring_fops) {
7823 ret = -EINVAL;
7824 goto out_fput;
7825 }
7826
7827 ctx_attach = f.file->private_data;
7828 /* @io_wq is protected by holding the fd */
7829 if (!io_wq_get(ctx_attach->io_wq, &data)) {
7830 ret = -EINVAL;
7831 goto out_fput;
7832 }
7833
7834 ctx->io_wq = ctx_attach->io_wq;
7835out_fput:
7836 fdput(f);
7837 return ret;
7838}
7839
0f212204
JA
7840static int io_uring_alloc_task_context(struct task_struct *task)
7841{
7842 struct io_uring_task *tctx;
7843
7844 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7845 if (unlikely(!tctx))
7846 return -ENOMEM;
7847
7848 xa_init(&tctx->xa);
7849 init_waitqueue_head(&tctx->wait);
7850 tctx->last = NULL;
7851 tctx->in_idle = 0;
7852 atomic_long_set(&tctx->req_issue, 0);
7853 atomic_long_set(&tctx->req_complete, 0);
7854 task->io_uring = tctx;
7855 return 0;
7856}
7857
7858void __io_uring_free(struct task_struct *tsk)
7859{
7860 struct io_uring_task *tctx = tsk->io_uring;
7861
7862 WARN_ON_ONCE(!xa_empty(&tctx->xa));
7863 xa_destroy(&tctx->xa);
7864 kfree(tctx);
7865 tsk->io_uring = NULL;
7866}
7867
7e84e1c7
SG
7868static int io_sq_offload_create(struct io_ring_ctx *ctx,
7869 struct io_uring_params *p)
2b188cc1
JA
7870{
7871 int ret;
7872
6c271ce2 7873 if (ctx->flags & IORING_SETUP_SQPOLL) {
534ca6d6
JA
7874 struct io_sq_data *sqd;
7875
3ec482d1
JA
7876 ret = -EPERM;
7877 if (!capable(CAP_SYS_ADMIN))
7878 goto err;
7879
534ca6d6
JA
7880 sqd = io_get_sq_data(p);
7881 if (IS_ERR(sqd)) {
7882 ret = PTR_ERR(sqd);
7883 goto err;
7884 }
69fb2131 7885
534ca6d6 7886 ctx->sq_data = sqd;
69fb2131
JA
7887 io_sq_thread_park(sqd);
7888 mutex_lock(&sqd->ctx_lock);
7889 list_add(&ctx->sqd_list, &sqd->ctx_new_list);
7890 mutex_unlock(&sqd->ctx_lock);
7891 io_sq_thread_unpark(sqd);
534ca6d6 7892
917257da
JA
7893 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7894 if (!ctx->sq_thread_idle)
7895 ctx->sq_thread_idle = HZ;
7896
aa06165d
JA
7897 if (sqd->thread)
7898 goto done;
7899
6c271ce2 7900 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 7901 int cpu = p->sq_thread_cpu;
6c271ce2 7902
917257da 7903 ret = -EINVAL;
44a9bd18
JA
7904 if (cpu >= nr_cpu_ids)
7905 goto err;
7889f44d 7906 if (!cpu_online(cpu))
917257da
JA
7907 goto err;
7908
69fb2131 7909 sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
534ca6d6 7910 cpu, "io_uring-sq");
6c271ce2 7911 } else {
69fb2131 7912 sqd->thread = kthread_create(io_sq_thread, sqd,
6c271ce2
JA
7913 "io_uring-sq");
7914 }
534ca6d6
JA
7915 if (IS_ERR(sqd->thread)) {
7916 ret = PTR_ERR(sqd->thread);
7917 sqd->thread = NULL;
6c271ce2
JA
7918 goto err;
7919 }
534ca6d6 7920 ret = io_uring_alloc_task_context(sqd->thread);
0f212204
JA
7921 if (ret)
7922 goto err;
6c271ce2
JA
7923 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7924 /* Can't have SQ_AFF without SQPOLL */
7925 ret = -EINVAL;
7926 goto err;
7927 }
7928
aa06165d 7929done:
24369c2e
PB
7930 ret = io_init_wq_offload(ctx, p);
7931 if (ret)
2b188cc1 7932 goto err;
2b188cc1
JA
7933
7934 return 0;
7935err:
54a91f3b 7936 io_finish_async(ctx);
2b188cc1
JA
7937 return ret;
7938}
7939
7e84e1c7
SG
7940static void io_sq_offload_start(struct io_ring_ctx *ctx)
7941{
534ca6d6
JA
7942 struct io_sq_data *sqd = ctx->sq_data;
7943
7944 if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
7945 wake_up_process(sqd->thread);
7e84e1c7
SG
7946}
7947
a087e2b5
BM
7948static inline void __io_unaccount_mem(struct user_struct *user,
7949 unsigned long nr_pages)
2b188cc1
JA
7950{
7951 atomic_long_sub(nr_pages, &user->locked_vm);
7952}
7953
a087e2b5
BM
7954static inline int __io_account_mem(struct user_struct *user,
7955 unsigned long nr_pages)
2b188cc1
JA
7956{
7957 unsigned long page_limit, cur_pages, new_pages;
7958
7959 /* Don't allow more pages than we can safely lock */
7960 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7961
7962 do {
7963 cur_pages = atomic_long_read(&user->locked_vm);
7964 new_pages = cur_pages + nr_pages;
7965 if (new_pages > page_limit)
7966 return -ENOMEM;
7967 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7968 new_pages) != cur_pages);
7969
7970 return 0;
7971}
7972
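/*
 * Editor note, a standalone sketch of the same optimistic accounting pattern
 * used by __io_account_mem() above, written with C11 atomics instead of
 * atomic_long_cmpxchg(); names and the limit handling are illustrative only.
 */
#include <stdatomic.h>
#include <errno.h>

static atomic_ulong example_locked_pages;

static int example_account_pages(unsigned long nr_pages, unsigned long limit)
{
	unsigned long cur, new_total;

	do {
		cur = atomic_load(&example_locked_pages);
		new_total = cur + nr_pages;
		if (new_total > limit)
			return -ENOMEM;
		/* retry if another thread updated the counter in between */
	} while (!atomic_compare_exchange_weak(&example_locked_pages,
					       &cur, new_total));

	return 0;
}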
2e0464d4
BM
7973static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
7974 enum io_mem_account acct)
a087e2b5 7975{
aad5d8da 7976 if (ctx->limit_mem)
a087e2b5 7977 __io_unaccount_mem(ctx->user, nr_pages);
30975825 7978
2aede0e4 7979 if (ctx->mm_account) {
2e0464d4 7980 if (acct == ACCT_LOCKED)
2aede0e4 7981 ctx->mm_account->locked_vm -= nr_pages;
2e0464d4 7982 else if (acct == ACCT_PINNED)
2aede0e4 7983 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
2e0464d4 7984 }
a087e2b5
BM
7985}
7986
2e0464d4
BM
7987static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
7988 enum io_mem_account acct)
a087e2b5 7989{
30975825
BM
7990 int ret;
7991
7992 if (ctx->limit_mem) {
7993 ret = __io_account_mem(ctx->user, nr_pages);
7994 if (ret)
7995 return ret;
7996 }
7997
2aede0e4 7998 if (ctx->mm_account) {
2e0464d4 7999 if (acct == ACCT_LOCKED)
2aede0e4 8000 ctx->mm_account->locked_vm += nr_pages;
2e0464d4 8001 else if (acct == ACCT_PINNED)
2aede0e4 8002 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
2e0464d4 8003 }
a087e2b5
BM
8004
8005 return 0;
8006}
8007
2b188cc1
JA
8008static void io_mem_free(void *ptr)
8009{
52e04ef4
MR
8010 struct page *page;
8011
8012 if (!ptr)
8013 return;
2b188cc1 8014
52e04ef4 8015 page = virt_to_head_page(ptr);
2b188cc1
JA
8016 if (put_page_testzero(page))
8017 free_compound_page(page);
8018}
8019
8020static void *io_mem_alloc(size_t size)
8021{
8022 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
8023 __GFP_NORETRY;
8024
8025 return (void *) __get_free_pages(gfp_flags, get_order(size));
8026}
8027
75b28aff
HV
8028static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8029 size_t *sq_offset)
8030{
8031 struct io_rings *rings;
8032 size_t off, sq_array_size;
8033
8034 off = struct_size(rings, cqes, cq_entries);
8035 if (off == SIZE_MAX)
8036 return SIZE_MAX;
8037
8038#ifdef CONFIG_SMP
8039 off = ALIGN(off, SMP_CACHE_BYTES);
8040 if (off == 0)
8041 return SIZE_MAX;
8042#endif
8043
b36200f5
DV
8044 if (sq_offset)
8045 *sq_offset = off;
8046
75b28aff
HV
8047 sq_array_size = array_size(sizeof(u32), sq_entries);
8048 if (sq_array_size == SIZE_MAX)
8049 return SIZE_MAX;
8050
8051 if (check_add_overflow(off, sq_array_size, &off))
8052 return SIZE_MAX;
8053
75b28aff
HV
8054 return off;
8055}
8056
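/*
 * Illustrative userspace counterpart (editor addition): rings_size() above
 * determines the kernel-side allocation; the application sizes its SQ ring
 * mmap() from the offsets io_uring_setup() reports. map_sq_ring() is a
 * hypothetical helper name.
 */
#include <sys/mman.h>
#include <linux/io_uring.h>

static void *map_sq_ring(int ring_fd, const struct io_uring_params *p,
			 size_t *len)
{
	/* the SQ index array sits at sq_off.array, one __u32 per SQ entry */
	*len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	return mmap(NULL, *len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
}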
2b188cc1
JA
8057static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
8058{
75b28aff 8059 size_t pages;
2b188cc1 8060
75b28aff
HV
8061 pages = (size_t)1 << get_order(
8062 rings_size(sq_entries, cq_entries, NULL));
8063 pages += (size_t)1 << get_order(
8064 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 8065
75b28aff 8066 return pages;
2b188cc1
JA
8067}
8068
edafccee
JA
8069static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
8070{
8071 int i, j;
8072
8073 if (!ctx->user_bufs)
8074 return -ENXIO;
8075
8076 for (i = 0; i < ctx->nr_user_bufs; i++) {
8077 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8078
8079 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 8080 unpin_user_page(imu->bvec[j].bv_page);
edafccee 8081
de293938
JA
8082 if (imu->acct_pages)
8083 io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
d4ef6475 8084 kvfree(imu->bvec);
edafccee
JA
8085 imu->nr_bvecs = 0;
8086 }
8087
8088 kfree(ctx->user_bufs);
8089 ctx->user_bufs = NULL;
8090 ctx->nr_user_bufs = 0;
8091 return 0;
8092}
8093
8094static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8095 void __user *arg, unsigned index)
8096{
8097 struct iovec __user *src;
8098
8099#ifdef CONFIG_COMPAT
8100 if (ctx->compat) {
8101 struct compat_iovec __user *ciovs;
8102 struct compat_iovec ciov;
8103
8104 ciovs = (struct compat_iovec __user *) arg;
8105 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8106 return -EFAULT;
8107
d55e5f5b 8108 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
8109 dst->iov_len = ciov.iov_len;
8110 return 0;
8111 }
8112#endif
8113 src = (struct iovec __user *) arg;
8114 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8115 return -EFAULT;
8116 return 0;
8117}
8118
de293938
JA
8119/*
8120 * Not super efficient, but this only runs at registration time. And we do cache
8121 * the last compound head, so generally we'll only do a full search if we don't
8122 * match that one.
8123 *
8124 * We check if the given compound head page has already been accounted, to
8125 * avoid double accounting it. This allows us to account the full size of the
8126 * page, not just the constituent pages of a huge page.
8127 */
8128static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8129 int nr_pages, struct page *hpage)
8130{
8131 int i, j;
8132
8133 /* check current page array */
8134 for (i = 0; i < nr_pages; i++) {
8135 if (!PageCompound(pages[i]))
8136 continue;
8137 if (compound_head(pages[i]) == hpage)
8138 return true;
8139 }
8140
8141 /* check previously registered pages */
8142 for (i = 0; i < ctx->nr_user_bufs; i++) {
8143 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8144
8145 for (j = 0; j < imu->nr_bvecs; j++) {
8146 if (!PageCompound(imu->bvec[j].bv_page))
8147 continue;
8148 if (compound_head(imu->bvec[j].bv_page) == hpage)
8149 return true;
8150 }
8151 }
8152
8153 return false;
8154}
8155
8156static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8157 int nr_pages, struct io_mapped_ubuf *imu,
8158 struct page **last_hpage)
8159{
8160 int i, ret;
8161
8162 for (i = 0; i < nr_pages; i++) {
8163 if (!PageCompound(pages[i])) {
8164 imu->acct_pages++;
8165 } else {
8166 struct page *hpage;
8167
8168 hpage = compound_head(pages[i]);
8169 if (hpage == *last_hpage)
8170 continue;
8171 *last_hpage = hpage;
8172 if (headpage_already_acct(ctx, pages, i, hpage))
8173 continue;
8174 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8175 }
8176 }
8177
8178 if (!imu->acct_pages)
8179 return 0;
8180
8181 ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
8182 if (ret)
8183 imu->acct_pages = 0;
8184 return ret;
8185}
8186
edafccee
JA
8187static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
8188 unsigned nr_args)
8189{
8190 struct vm_area_struct **vmas = NULL;
8191 struct page **pages = NULL;
de293938 8192 struct page *last_hpage = NULL;
edafccee
JA
8193 int i, j, got_pages = 0;
8194 int ret = -EINVAL;
8195
8196 if (ctx->user_bufs)
8197 return -EBUSY;
8198 if (!nr_args || nr_args > UIO_MAXIOV)
8199 return -EINVAL;
8200
8201 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8202 GFP_KERNEL);
8203 if (!ctx->user_bufs)
8204 return -ENOMEM;
8205
8206 for (i = 0; i < nr_args; i++) {
8207 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8208 unsigned long off, start, end, ubuf;
8209 int pret, nr_pages;
8210 struct iovec iov;
8211 size_t size;
8212
8213 ret = io_copy_iov(ctx, &iov, arg, i);
8214 if (ret)
a278682d 8215 goto err;
edafccee
JA
8216
8217 /*
8218 * Don't impose further limits on the size and buffer
8219 * constraints here; we'll -EINVAL later when IO is
8220 * submitted if they are wrong.
8221 */
8222 ret = -EFAULT;
8223 if (!iov.iov_base || !iov.iov_len)
8224 goto err;
8225
8226 /* arbitrary limit, but we need something */
8227 if (iov.iov_len > SZ_1G)
8228 goto err;
8229
8230 ubuf = (unsigned long) iov.iov_base;
8231 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8232 start = ubuf >> PAGE_SHIFT;
8233 nr_pages = end - start;
8234
edafccee
JA
8235 ret = 0;
8236 if (!pages || nr_pages > got_pages) {
a8c73c1a
DE
8237 kvfree(vmas);
8238 kvfree(pages);
d4ef6475 8239 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 8240 GFP_KERNEL);
d4ef6475 8241 vmas = kvmalloc_array(nr_pages,
edafccee
JA
8242 sizeof(struct vm_area_struct *),
8243 GFP_KERNEL);
8244 if (!pages || !vmas) {
8245 ret = -ENOMEM;
edafccee
JA
8246 goto err;
8247 }
8248 got_pages = nr_pages;
8249 }
8250
d4ef6475 8251 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
8252 GFP_KERNEL);
8253 ret = -ENOMEM;
de293938 8254 if (!imu->bvec)
edafccee 8255 goto err;
edafccee
JA
8256
8257 ret = 0;
d8ed45c5 8258 mmap_read_lock(current->mm);
2113b05d 8259 pret = pin_user_pages(ubuf, nr_pages,
932f4a63
IW
8260 FOLL_WRITE | FOLL_LONGTERM,
8261 pages, vmas);
edafccee
JA
8262 if (pret == nr_pages) {
8263 /* don't support file backed memory */
8264 for (j = 0; j < nr_pages; j++) {
8265 struct vm_area_struct *vma = vmas[j];
8266
8267 if (vma->vm_file &&
8268 !is_file_hugepages(vma->vm_file)) {
8269 ret = -EOPNOTSUPP;
8270 break;
8271 }
8272 }
8273 } else {
8274 ret = pret < 0 ? pret : -EFAULT;
8275 }
d8ed45c5 8276 mmap_read_unlock(current->mm);
edafccee
JA
8277 if (ret) {
8278 /*
8279 * if we did a partial map, or found file-backed vmas,
8280 * release any pages we did get
8281 */
27c4d3a3 8282 if (pret > 0)
f1f6a7dd 8283 unpin_user_pages(pages, pret);
de293938
JA
8284 kvfree(imu->bvec);
8285 goto err;
8286 }
8287
8288 ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
8289 if (ret) {
8290 unpin_user_pages(pages, pret);
d4ef6475 8291 kvfree(imu->bvec);
edafccee
JA
8292 goto err;
8293 }
8294
8295 off = ubuf & ~PAGE_MASK;
8296 size = iov.iov_len;
8297 for (j = 0; j < nr_pages; j++) {
8298 size_t vec_len;
8299
8300 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8301 imu->bvec[j].bv_page = pages[j];
8302 imu->bvec[j].bv_len = vec_len;
8303 imu->bvec[j].bv_offset = off;
8304 off = 0;
8305 size -= vec_len;
8306 }
8307 /* store original address for later verification */
8308 imu->ubuf = ubuf;
8309 imu->len = iov.iov_len;
8310 imu->nr_bvecs = nr_pages;
8311
8312 ctx->nr_user_bufs++;
8313 }
d4ef6475
MR
8314 kvfree(pages);
8315 kvfree(vmas);
edafccee
JA
8316 return 0;
8317err:
d4ef6475
MR
8318 kvfree(pages);
8319 kvfree(vmas);
edafccee
JA
8320 io_sqe_buffer_unregister(ctx);
8321 return ret;
8322}
8323
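/*
 * Illustrative userspace sketch (editor addition): io_sqe_buffer_register()
 * above pins the pages backing each iovec with pin_user_pages(). Registering
 * a single page-aligned buffer could look like this; the helper name and the
 * 4 KiB alignment are assumptions.
 */
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <linux/io_uring.h>

static int register_one_buffer(int ring_fd, size_t len)
{
	void *buf = NULL;

	if (posix_memalign(&buf, 4096, len))
		return -1;

	struct iovec iov = { .iov_base = buf, .iov_len = len };

	return (int) syscall(__NR_io_uring_register, ring_fd,
			     IORING_REGISTER_BUFFERS, &iov, 1);
}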
9b402849
JA
8324static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8325{
8326 __s32 __user *fds = arg;
8327 int fd;
8328
8329 if (ctx->cq_ev_fd)
8330 return -EBUSY;
8331
8332 if (copy_from_user(&fd, fds, sizeof(*fds)))
8333 return -EFAULT;
8334
8335 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8336 if (IS_ERR(ctx->cq_ev_fd)) {
8337 int ret = PTR_ERR(ctx->cq_ev_fd);
8338 ctx->cq_ev_fd = NULL;
8339 return ret;
8340 }
8341
8342 return 0;
8343}
8344
8345static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8346{
8347 if (ctx->cq_ev_fd) {
8348 eventfd_ctx_put(ctx->cq_ev_fd);
8349 ctx->cq_ev_fd = NULL;
8350 return 0;
8351 }
8352
8353 return -ENXIO;
8354}
8355
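/*
 * Illustrative userspace sketch (editor addition): io_eventfd_register()
 * above ties an eventfd to CQ ring completions. attach_cq_eventfd() is a
 * hypothetical helper; it returns the eventfd on success so the caller can
 * poll it for completion notifications.
 */
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int attach_cq_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
		close(efd);
		return -1;
	}
	return efd;
}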
5a2e745d
JA
8356static int __io_destroy_buffers(int id, void *p, void *data)
8357{
8358 struct io_ring_ctx *ctx = data;
8359 struct io_buffer *buf = p;
8360
067524e9 8361 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
8362 return 0;
8363}
8364
8365static void io_destroy_buffers(struct io_ring_ctx *ctx)
8366{
8367 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
8368 idr_destroy(&ctx->io_buffer_idr);
8369}
8370
2b188cc1
JA
8371static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8372{
6b06314c 8373 io_finish_async(ctx);
5dbcad51 8374 io_sqe_buffer_unregister(ctx);
2aede0e4
JA
8375
8376 if (ctx->sqo_task) {
8377 put_task_struct(ctx->sqo_task);
8378 ctx->sqo_task = NULL;
8379 mmdrop(ctx->mm_account);
8380 ctx->mm_account = NULL;
30975825 8381 }
def596e9 8382
91d8f519
DZ
8383#ifdef CONFIG_BLK_CGROUP
8384 if (ctx->sqo_blkcg_css)
8385 css_put(ctx->sqo_blkcg_css);
8386#endif
8387
6b06314c 8388 io_sqe_files_unregister(ctx);
9b402849 8389 io_eventfd_unregister(ctx);
5a2e745d 8390 io_destroy_buffers(ctx);
41726c9a 8391 idr_destroy(&ctx->personality_idr);
def596e9 8392
2b188cc1 8393#if defined(CONFIG_UNIX)
355e8d26
EB
8394 if (ctx->ring_sock) {
8395 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 8396 sock_release(ctx->ring_sock);
355e8d26 8397 }
2b188cc1
JA
8398#endif
8399
75b28aff 8400 io_mem_free(ctx->rings);
2b188cc1 8401 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
8402
8403 percpu_ref_exit(&ctx->refs);
2b188cc1 8404 free_uid(ctx->user);
181e448d 8405 put_cred(ctx->creds);
78076bb6 8406 kfree(ctx->cancel_hash);
0ddf92e8 8407 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
8408 kfree(ctx);
8409}
8410
8411static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8412{
8413 struct io_ring_ctx *ctx = file->private_data;
8414 __poll_t mask = 0;
8415
8416 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
8417 /*
8418 * synchronizes with barrier from wq_has_sleeper call in
8419 * io_commit_cqring
8420 */
2b188cc1 8421 smp_rmb();
90554200 8422 if (!io_sqring_full(ctx))
2b188cc1 8423 mask |= EPOLLOUT | EPOLLWRNORM;
63e5d81f 8424 if (io_cqring_events(ctx, false))
2b188cc1
JA
8425 mask |= EPOLLIN | EPOLLRDNORM;
8426
8427 return mask;
8428}
8429
8430static int io_uring_fasync(int fd, struct file *file, int on)
8431{
8432 struct io_ring_ctx *ctx = file->private_data;
8433
8434 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8435}
8436
071698e1
JA
8437static int io_remove_personalities(int id, void *p, void *data)
8438{
8439 struct io_ring_ctx *ctx = data;
8440 const struct cred *cred;
8441
8442 cred = idr_remove(&ctx->personality_idr, id);
8443 if (cred)
8444 put_cred(cred);
8445 return 0;
8446}
8447
85faa7b8
JA
8448static void io_ring_exit_work(struct work_struct *work)
8449{
b2edc0a7
PB
8450 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8451 exit_work);
85faa7b8 8452
56952e91
JA
8453 /*
8454 * If we're doing polled IO and end up having requests being
8455 * submitted async (out-of-line), then completions can come in while
8456 * we're waiting for refs to drop. We need to reap these manually,
8457 * as nobody else will be looking for them.
8458 */
b2edc0a7 8459 do {
56952e91 8460 if (ctx->rings)
e6c8aa9a 8461 io_cqring_overflow_flush(ctx, true, NULL, NULL);
b2edc0a7
PB
8462 io_iopoll_try_reap_events(ctx);
8463 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
85faa7b8
JA
8464 io_ring_ctx_free(ctx);
8465}
8466
2b188cc1
JA
8467static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8468{
8469 mutex_lock(&ctx->uring_lock);
8470 percpu_ref_kill(&ctx->refs);
8471 mutex_unlock(&ctx->uring_lock);
8472
f3606e3a
JA
8473 io_kill_timeouts(ctx, NULL);
8474 io_poll_remove_all(ctx, NULL);
561fb04a
JA
8475
8476 if (ctx->io_wq)
8477 io_wq_cancel_all(ctx->io_wq);
8478
15dff286
JA
8479 /* if we failed setting up the ctx, we might not have any rings */
8480 if (ctx->rings)
e6c8aa9a 8481 io_cqring_overflow_flush(ctx, true, NULL, NULL);
b2edc0a7 8482 io_iopoll_try_reap_events(ctx);
071698e1 8483 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
309fc03a
JA
8484
8485 /*
8486 * Do this upfront, so we won't have a grace period where the ring
8487 * is closed but resources aren't reaped yet. This can cause
8488 * spurious failure in setting up a new ring.
8489 */
760618f7
JA
8490 io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
8491 ACCT_LOCKED);
309fc03a 8492
85faa7b8 8493 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
fc666777
JA
8494 /*
8495 * Use system_unbound_wq to avoid spawning tons of event kworkers
8496 * if we're exiting a ton of rings at the same time. It just adds
8497 * noise and overhead, and there's no discernible change in runtime
8498 * over using system_wq.
8499 */
8500 queue_work(system_unbound_wq, &ctx->exit_work);
2b188cc1
JA
8501}
8502
8503static int io_uring_release(struct inode *inode, struct file *file)
8504{
8505 struct io_ring_ctx *ctx = file->private_data;
8506
8507 file->private_data = NULL;
8508 io_ring_ctx_wait_and_kill(ctx);
8509 return 0;
8510}
8511
67c4d9e6
PB
8512static bool io_wq_files_match(struct io_wq_work *work, void *data)
8513{
8514 struct files_struct *files = data;
8515
0f212204 8516 return !files || work->files == files;
67c4d9e6
PB
8517}
8518
f254ac04
JA
8519/*
8520 * Returns true if 'preq' is the link parent of 'req'
8521 */
8522static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
8523{
8524 struct io_kiocb *link;
8525
8526 if (!(preq->flags & REQ_F_LINK_HEAD))
8527 return false;
8528
8529 list_for_each_entry(link, &preq->link_list, link_list) {
8530 if (link == req)
8531 return true;
8532 }
8533
8534 return false;
8535}
8536
c127a2a1
PB
8537static bool io_match_link_files(struct io_kiocb *req,
8538 struct files_struct *files)
8539{
8540 struct io_kiocb *link;
8541
8542 if (io_match_files(req, files))
8543 return true;
8544 if (req->flags & REQ_F_LINK_HEAD) {
8545 list_for_each_entry(link, &req->link_list, link_list) {
8546 if (io_match_files(link, files))
8547 return true;
8548 }
8549 }
8550 return false;
8551}
8552
f254ac04
JA
8553/*
8554 * We're looking to cancel 'req' because it's holding on to our files, but
8555 * 'req' could be a link to another request. See if it is, and cancel that
8556 * parent request if so.
8557 */
8558static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req)
8559{
8560 struct hlist_node *tmp;
8561 struct io_kiocb *preq;
8562 bool found = false;
8563 int i;
8564
8565 spin_lock_irq(&ctx->completion_lock);
8566 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
8567 struct hlist_head *list;
8568
8569 list = &ctx->cancel_hash[i];
8570 hlist_for_each_entry_safe(preq, tmp, list, hash_node) {
8571 found = io_match_link(preq, req);
8572 if (found) {
8573 io_poll_remove_one(preq);
8574 break;
8575 }
8576 }
8577 }
8578 spin_unlock_irq(&ctx->completion_lock);
8579 return found;
8580}
8581
8582static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
8583 struct io_kiocb *req)
8584{
8585 struct io_kiocb *preq;
8586 bool found = false;
8587
8588 spin_lock_irq(&ctx->completion_lock);
8589 list_for_each_entry(preq, &ctx->timeout_list, timeout.list) {
8590 found = io_match_link(preq, req);
8591 if (found) {
8592 __io_timeout_cancel(preq);
8593 break;
8594 }
8595 }
8596 spin_unlock_irq(&ctx->completion_lock);
8597 return found;
8598}
8599
b711d4ea
JA
8600static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
8601{
8602 return io_match_link(container_of(work, struct io_kiocb, work), data);
8603}
8604
8605static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
8606{
8607 enum io_wq_cancel cret;
8608
8609 /* cancel this particular work, if it's running */
8610 cret = io_wq_cancel_work(ctx->io_wq, &req->work);
8611 if (cret != IO_WQ_CANCEL_NOTFOUND)
8612 return;
8613
8614 /* find links that hold this pending, cancel those */
8615 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
8616 if (cret != IO_WQ_CANCEL_NOTFOUND)
8617 return;
8618
8619 /* if we have a poll link holding this pending, cancel that */
8620 if (io_poll_remove_link(ctx, req))
8621 return;
8622
8623 /* final option, timeout link is holding this req pending */
8624 io_timeout_remove_link(ctx, req);
8625}
8626
b7ddce3c
PB
8627static void io_cancel_defer_files(struct io_ring_ctx *ctx,
8628 struct files_struct *files)
8629{
8630 struct io_defer_entry *de = NULL;
8631 LIST_HEAD(list);
8632
8633 spin_lock_irq(&ctx->completion_lock);
8634 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
c127a2a1 8635 if (io_match_link_files(de->req, files)) {
b7ddce3c
PB
8636 list_cut_position(&list, &ctx->defer_list, &de->list);
8637 break;
8638 }
8639 }
8640 spin_unlock_irq(&ctx->completion_lock);
8641
8642 while (!list_empty(&list)) {
8643 de = list_first_entry(&list, struct io_defer_entry, list);
8644 list_del_init(&de->list);
8645 req_set_fail_links(de->req);
8646 io_put_req(de->req);
8647 io_req_complete(de->req, -ECANCELED);
8648 kfree(de);
8649 }
8650}
8651
76e1b642
JA
8652/*
8653 * Returns true if we found and killed one or more files pinning requests
8654 */
8655static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
fcb323cc
JA
8656 struct files_struct *files)
8657{
67c4d9e6 8658 if (list_empty_careful(&ctx->inflight_list))
76e1b642 8659 return false;
67c4d9e6 8660
b7ddce3c 8661 io_cancel_defer_files(ctx, files);
67c4d9e6
PB
8662 /* cancel all at once, should be faster than doing it one by one */
8663 io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
8664
fcb323cc 8665 while (!list_empty_careful(&ctx->inflight_list)) {
d8f1b971
XW
8666 struct io_kiocb *cancel_req = NULL, *req;
8667 DEFINE_WAIT(wait);
fcb323cc
JA
8668
8669 spin_lock_irq(&ctx->inflight_lock);
8670 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
0f212204 8671 if (files && req->work.files != files)
768134d4
JA
8672 continue;
8673 /* req is being completed, ignore */
8674 if (!refcount_inc_not_zero(&req->refs))
8675 continue;
8676 cancel_req = req;
8677 break;
fcb323cc 8678 }
768134d4 8679 if (cancel_req)
fcb323cc 8680 prepare_to_wait(&ctx->inflight_wait, &wait,
768134d4 8681 TASK_UNINTERRUPTIBLE);
fcb323cc
JA
8682 spin_unlock_irq(&ctx->inflight_lock);
8683
768134d4
JA
8684 /* We need to keep going until we don't find a matching req */
8685 if (!cancel_req)
fcb323cc 8686 break;
bb175342
PB
8687 /* cancel this request, or head link requests */
8688 io_attempt_cancel(ctx, cancel_req);
8689 io_put_req(cancel_req);
6200b0ae
JA
8690 /* cancellations _may_ trigger task work */
8691 io_run_task_work();
fcb323cc 8692 schedule();
d8f1b971 8693 finish_wait(&ctx->inflight_wait, &wait);
fcb323cc 8694 }
76e1b642
JA
8695
8696 return true;
fcb323cc
JA
8697}
8698
801dd57b 8699static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
44e728b8 8700{
801dd57b
PB
8701 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8702 struct task_struct *task = data;
44e728b8 8703
f3606e3a 8704 return io_task_match(req, task);
44e728b8
PB
8705}
8706
0f212204
JA
8707static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8708 struct task_struct *task,
8709 struct files_struct *files)
8710{
8711 bool ret;
8712
8713 ret = io_uring_cancel_files(ctx, files);
8714 if (!files) {
8715 enum io_wq_cancel cret;
8716
8717 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
8718 if (cret != IO_WQ_CANCEL_NOTFOUND)
8719 ret = true;
8720
8721 /* SQPOLL thread does its own polling */
8722 if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
8723 while (!list_empty_careful(&ctx->iopoll_list)) {
8724 io_iopoll_try_reap_events(ctx);
8725 ret = true;
8726 }
8727 }
8728
8729 ret |= io_poll_remove_all(ctx, task);
8730 ret |= io_kill_timeouts(ctx, task);
8731 }
8732
8733 return ret;
8734}
8735
8736/*
8737 * We need to iteratively cancel requests, in case a request has dependent
8738 * hard links. These persist even when cancelation fails, hence keep
8739 * looping until none are found.
8740 */
8741static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8742 struct files_struct *files)
8743{
8744 struct task_struct *task = current;
8745
534ca6d6
JA
8746 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data)
8747 task = ctx->sq_data->thread;
0f212204
JA
8748
8749 io_cqring_overflow_flush(ctx, true, task, files);
8750
8751 while (__io_uring_cancel_task_requests(ctx, task, files)) {
8752 io_run_task_work();
8753 cond_resched();
8754 }
8755}
8756
8757/*
8758 * Note that this task has used io_uring. We use it for cancelation purposes.
8759 */
8760static int io_uring_add_task_file(struct file *file)
8761{
8762 if (unlikely(!current->io_uring)) {
8763 int ret;
8764
8765 ret = io_uring_alloc_task_context(current);
8766 if (unlikely(ret))
8767 return ret;
8768 }
8769 if (current->io_uring->last != file) {
8770 XA_STATE(xas, &current->io_uring->xa, (unsigned long) file);
8771 void *old;
8772
8773 rcu_read_lock();
8774 old = xas_load(&xas);
8775 if (old != file) {
8776 get_file(file);
8777 xas_lock(&xas);
8778 xas_store(&xas, file);
8779 xas_unlock(&xas);
8780 }
8781 rcu_read_unlock();
8782 current->io_uring->last = file;
8783 }
8784
8785 return 0;
8786}
8787
8788/*
8789 * Remove this io_uring_file -> task mapping.
8790 */
8791static void io_uring_del_task_file(struct file *file)
8792{
8793 struct io_uring_task *tctx = current->io_uring;
8794 XA_STATE(xas, &tctx->xa, (unsigned long) file);
8795
8796 if (tctx->last == file)
8797 tctx->last = NULL;
8798
8799 xas_lock(&xas);
8800 file = xas_store(&xas, NULL);
8801 xas_unlock(&xas);
8802
8803 if (file)
8804 fput(file);
8805}
8806
8807static void __io_uring_attempt_task_drop(struct file *file)
8808{
8809 XA_STATE(xas, &current->io_uring->xa, (unsigned long) file);
8810 struct file *old;
8811
8812 rcu_read_lock();
8813 old = xas_load(&xas);
8814 rcu_read_unlock();
8815
8816 if (old == file)
8817 io_uring_del_task_file(file);
8818}
8819
8820/*
8821 * Drop task note for this file if we're the only ones that hold it after
8822 * pending fput()
8823 */
8824static void io_uring_attempt_task_drop(struct file *file, bool exiting)
8825{
8826 if (!current->io_uring)
8827 return;
8828 /*
8829 * fput() is pending, so f_count will be 2 if the only other ref is our
8830 * potential task file note. If the task is exiting, drop regardless of count.
8831 */
8832 if (!exiting && atomic_long_read(&file->f_count) != 2)
8833 return;
8834
8835 __io_uring_attempt_task_drop(file);
8836}
8837
8838void __io_uring_files_cancel(struct files_struct *files)
8839{
8840 struct io_uring_task *tctx = current->io_uring;
8841 XA_STATE(xas, &tctx->xa, 0);
8842
8843 /* make sure overflow events are dropped */
8844 tctx->in_idle = true;
8845
8846 do {
8847 struct io_ring_ctx *ctx;
8848 struct file *file;
8849
8850 xas_lock(&xas);
8851 file = xas_next_entry(&xas, ULONG_MAX);
8852 xas_unlock(&xas);
8853
8854 if (!file)
8855 break;
8856
8857 ctx = file->private_data;
8858
8859 io_uring_cancel_task_requests(ctx, files);
8860 if (files)
8861 io_uring_del_task_file(file);
8862 } while (1);
8863}
8864
8865static inline bool io_uring_task_idle(struct io_uring_task *tctx)
8866{
8867 return atomic_long_read(&tctx->req_issue) ==
8868 atomic_long_read(&tctx->req_complete);
8869}
8870
8871/*
8872 * Find any io_uring fd that this task has registered or done IO on, and cancel
8873 * requests.
8874 */
8875void __io_uring_task_cancel(void)
8876{
8877 struct io_uring_task *tctx = current->io_uring;
8878 DEFINE_WAIT(wait);
8879 long completions;
8880
8881 /* make sure overflow events are dropped */
8882 tctx->in_idle = true;
8883
8884 while (!io_uring_task_idle(tctx)) {
8885 /* read completions before cancelations */
8886 completions = atomic_long_read(&tctx->req_complete);
8887 __io_uring_files_cancel(NULL);
8888
8889 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8890
8891 /*
8892 * If we've seen completions, retry. This avoids a race where
8893 * a completion comes in before we did prepare_to_wait().
8894 */
8895 if (completions != atomic_long_read(&tctx->req_complete))
8896 continue;
8897 if (io_uring_task_idle(tctx))
8898 break;
8899 schedule();
8900 }
8901
8902 finish_wait(&tctx->wait, &wait);
8903 tctx->in_idle = false;
8904}
8905
fcb323cc
JA
8906static int io_uring_flush(struct file *file, void *data)
8907{
8908 struct io_ring_ctx *ctx = file->private_data;
8909
6ab23144
JA
8910 /*
8911 * If the task is going away, cancel work it may have pending
8912 */
801dd57b 8913 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
0f212204 8914 data = NULL;
6ab23144 8915
0f212204
JA
8916 io_uring_cancel_task_requests(ctx, data);
8917 io_uring_attempt_task_drop(file, !data);
fcb323cc
JA
8918 return 0;
8919}
8920
6c5c240e
RP
8921static void *io_uring_validate_mmap_request(struct file *file,
8922 loff_t pgoff, size_t sz)
2b188cc1 8923{
2b188cc1 8924 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 8925 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
8926 struct page *page;
8927 void *ptr;
8928
8929 switch (offset) {
8930 case IORING_OFF_SQ_RING:
75b28aff
HV
8931 case IORING_OFF_CQ_RING:
8932 ptr = ctx->rings;
2b188cc1
JA
8933 break;
8934 case IORING_OFF_SQES:
8935 ptr = ctx->sq_sqes;
8936 break;
2b188cc1 8937 default:
6c5c240e 8938 return ERR_PTR(-EINVAL);
2b188cc1
JA
8939 }
8940
8941 page = virt_to_head_page(ptr);
a50b854e 8942 if (sz > page_size(page))
6c5c240e
RP
8943 return ERR_PTR(-EINVAL);
8944
8945 return ptr;
8946}
8947
8948#ifdef CONFIG_MMU
8949
8950static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
8951{
8952 size_t sz = vma->vm_end - vma->vm_start;
8953 unsigned long pfn;
8954 void *ptr;
8955
8956 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
8957 if (IS_ERR(ptr))
8958 return PTR_ERR(ptr);
2b188cc1
JA
8959
8960 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
8961 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
8962}
8963
6c5c240e
RP
8964#else /* !CONFIG_MMU */
8965
8966static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
8967{
8968 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
8969}
8970
8971static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
8972{
8973 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
8974}
8975
8976static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
8977 unsigned long addr, unsigned long len,
8978 unsigned long pgoff, unsigned long flags)
8979{
8980 void *ptr;
8981
8982 ptr = io_uring_validate_mmap_request(file, pgoff, len);
8983 if (IS_ERR(ptr))
8984 return PTR_ERR(ptr);
8985
8986 return (unsigned long) ptr;
8987}
8988
8989#endif /* !CONFIG_MMU */
8990
90554200
JA
8991static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
8992{
8993 DEFINE_WAIT(wait);
8994
8995 do {
8996 if (!io_sqring_full(ctx))
8997 break;
8998
8999 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9000
9001 if (!io_sqring_full(ctx))
9002 break;
9003
9004 schedule();
9005 } while (!signal_pending(current));
9006
9007 finish_wait(&ctx->sqo_sq_wait, &wait);
9008}
9009
2b188cc1
JA
9010SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
9011 u32, min_complete, u32, flags, const sigset_t __user *, sig,
9012 size_t, sigsz)
9013{
9014 struct io_ring_ctx *ctx;
9015 long ret = -EBADF;
9016 int submitted = 0;
9017 struct fd f;
9018
4c6e277c 9019 io_run_task_work();
b41e9852 9020
90554200
JA
9021 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9022 IORING_ENTER_SQ_WAIT))
2b188cc1
JA
9023 return -EINVAL;
9024
9025 f = fdget(fd);
9026 if (!f.file)
9027 return -EBADF;
9028
9029 ret = -EOPNOTSUPP;
9030 if (f.file->f_op != &io_uring_fops)
9031 goto out_fput;
9032
9033 ret = -ENXIO;
9034 ctx = f.file->private_data;
9035 if (!percpu_ref_tryget(&ctx->refs))
9036 goto out_fput;
9037
7e84e1c7
SG
9038 ret = -EBADFD;
9039 if (ctx->flags & IORING_SETUP_R_DISABLED)
9040 goto out;
9041
6c271ce2
JA
9042 /*
9043 * For SQ polling, the thread will do all submissions and completions.
9044 * Just return the requested submit count, and wake the thread if
9045 * we were asked to.
9046 */
b2a9eada 9047 ret = 0;
6c271ce2 9048 if (ctx->flags & IORING_SETUP_SQPOLL) {
c1edbf5f 9049 if (!list_empty_careful(&ctx->cq_overflow_list))
e6c8aa9a 9050 io_cqring_overflow_flush(ctx, false, NULL, NULL);
6c271ce2 9051 if (flags & IORING_ENTER_SQ_WAKEUP)
534ca6d6 9052 wake_up(&ctx->sq_data->wait);
90554200
JA
9053 if (flags & IORING_ENTER_SQ_WAIT)
9054 io_sqpoll_wait_sq(ctx);
6c271ce2 9055 submitted = to_submit;
b2a9eada 9056 } else if (to_submit) {
0f212204
JA
9057 ret = io_uring_add_task_file(f.file);
9058 if (unlikely(ret))
9059 goto out;
2b188cc1 9060 mutex_lock(&ctx->uring_lock);
0f212204 9061 submitted = io_submit_sqes(ctx, to_submit);
2b188cc1 9062 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
9063
9064 if (submitted != to_submit)
9065 goto out;
2b188cc1
JA
9066 }
9067 if (flags & IORING_ENTER_GETEVENTS) {
9068 min_complete = min(min_complete, ctx->cq_entries);
9069
32b2244a
XW
9070 /*
9071 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, the
9072 * application doesn't need to poll for completion events
9073 * itself; it can rely on io_sq_thread to do that polling,
9074 * which reduces CPU usage and uring_lock contention.
9075 */
9076 if (ctx->flags & IORING_SETUP_IOPOLL &&
9077 !(ctx->flags & IORING_SETUP_SQPOLL)) {
7668b92a 9078 ret = io_iopoll_check(ctx, min_complete);
def596e9
JA
9079 } else {
9080 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
9081 }
2b188cc1
JA
9082 }
9083
7c504e65 9084out:
6805b32e 9085 percpu_ref_put(&ctx->refs);
2b188cc1
JA
9086out_fput:
9087 fdput(f);
9088 return submitted ? submitted : ret;
9089}
9090
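/*
 * Illustrative userspace sketch of calling the syscall defined above:
 * submit whatever the application has queued in the SQ ring and wait for at
 * least one completion. Raw syscall(2) is used since glibc has no wrapper;
 * __NR_io_uring_enter comes from the uapi headers.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int submit_and_wait_one(int ring_fd, unsigned to_submit)
{
        /* min_complete is only honoured when IORING_ENTER_GETEVENTS is set. */
        return syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
                       IORING_ENTER_GETEVENTS, NULL, 0);
}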
bebdb65e 9091#ifdef CONFIG_PROC_FS
87ce955b
JA
9092static int io_uring_show_cred(int id, void *p, void *data)
9093{
9094 const struct cred *cred = p;
9095 struct seq_file *m = data;
9096 struct user_namespace *uns = seq_user_ns(m);
9097 struct group_info *gi;
9098 kernel_cap_t cap;
9099 unsigned __capi;
9100 int g;
9101
9102 seq_printf(m, "%5d\n", id);
9103 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9104 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9105 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9106 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9107 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9108 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9109 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9110 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9111 seq_puts(m, "\n\tGroups:\t");
9112 gi = cred->group_info;
9113 for (g = 0; g < gi->ngroups; g++) {
9114 seq_put_decimal_ull(m, g ? " " : "",
9115 from_kgid_munged(uns, gi->gid[g]));
9116 }
9117 seq_puts(m, "\n\tCapEff:\t");
9118 cap = cred->cap_effective;
9119 CAP_FOR_EACH_U32(__capi)
9120 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9121 seq_putc(m, '\n');
9122 return 0;
9123}
9124
9125static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9126{
fad8e0de 9127 bool has_lock;
87ce955b
JA
9128 int i;
9129
fad8e0de
JA
9130 /*
9131 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9132	 * since the fdinfo case grabs it in the opposite direction of normal use
9133 * cases. If we fail to get the lock, we just don't iterate any
9134 * structures that could be going away outside the io_uring mutex.
9135 */
9136 has_lock = mutex_trylock(&ctx->uring_lock);
9137
87ce955b 9138 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
fad8e0de 9139 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
87ce955b
JA
9140 struct fixed_file_table *table;
9141 struct file *f;
9142
9143 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
9144 f = table->files[i & IORING_FILE_TABLE_MASK];
9145 if (f)
9146 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9147 else
9148 seq_printf(m, "%5u: <none>\n", i);
9149 }
9150 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
fad8e0de 9151 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
87ce955b
JA
9152 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9153
9154 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9155 (unsigned int) buf->len);
9156 }
fad8e0de 9157 if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
87ce955b
JA
9158 seq_printf(m, "Personalities:\n");
9159 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
9160 }
d7718a9d
JA
9161 seq_printf(m, "PollList:\n");
9162 spin_lock_irq(&ctx->completion_lock);
9163 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9164 struct hlist_head *list = &ctx->cancel_hash[i];
9165 struct io_kiocb *req;
9166
9167 hlist_for_each_entry(req, list, hash_node)
9168 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9169 req->task->task_works != NULL);
9170 }
9171 spin_unlock_irq(&ctx->completion_lock);
fad8e0de
JA
9172 if (has_lock)
9173 mutex_unlock(&ctx->uring_lock);
87ce955b
JA
9174}
9175
9176static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9177{
9178 struct io_ring_ctx *ctx = f->private_data;
9179
9180 if (percpu_ref_tryget(&ctx->refs)) {
9181 __io_uring_show_fdinfo(ctx, m);
9182 percpu_ref_put(&ctx->refs);
9183 }
9184}
bebdb65e 9185#endif
87ce955b 9186
2b188cc1
JA
9187static const struct file_operations io_uring_fops = {
9188 .release = io_uring_release,
fcb323cc 9189 .flush = io_uring_flush,
2b188cc1 9190 .mmap = io_uring_mmap,
6c5c240e
RP
9191#ifndef CONFIG_MMU
9192 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9193 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9194#endif
2b188cc1
JA
9195 .poll = io_uring_poll,
9196 .fasync = io_uring_fasync,
bebdb65e 9197#ifdef CONFIG_PROC_FS
87ce955b 9198 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 9199#endif
2b188cc1
JA
9200};
9201
9202static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9203 struct io_uring_params *p)
9204{
75b28aff
HV
9205 struct io_rings *rings;
9206 size_t size, sq_array_offset;
2b188cc1 9207
bd740481
JA
9208 /* make sure these are sane, as we already accounted them */
9209 ctx->sq_entries = p->sq_entries;
9210 ctx->cq_entries = p->cq_entries;
9211
75b28aff
HV
9212 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9213 if (size == SIZE_MAX)
9214 return -EOVERFLOW;
9215
9216 rings = io_mem_alloc(size);
9217 if (!rings)
2b188cc1
JA
9218 return -ENOMEM;
9219
75b28aff
HV
9220 ctx->rings = rings;
9221 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9222 rings->sq_ring_mask = p->sq_entries - 1;
9223 rings->cq_ring_mask = p->cq_entries - 1;
9224 rings->sq_ring_entries = p->sq_entries;
9225 rings->cq_ring_entries = p->cq_entries;
9226 ctx->sq_mask = rings->sq_ring_mask;
9227 ctx->cq_mask = rings->cq_ring_mask;
2b188cc1
JA
9228
9229 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
9230 if (size == SIZE_MAX) {
9231 io_mem_free(ctx->rings);
9232 ctx->rings = NULL;
2b188cc1 9233 return -EOVERFLOW;
eb065d30 9234 }
2b188cc1
JA
9235
9236 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
9237 if (!ctx->sq_sqes) {
9238 io_mem_free(ctx->rings);
9239 ctx->rings = NULL;
2b188cc1 9240 return -ENOMEM;
eb065d30 9241 }
2b188cc1 9242
2b188cc1
JA
9243 return 0;
9244}
9245
9246/*
9247 * Allocate an anonymous fd; this is what constitutes the application
9248 * visible backing of an io_uring instance. The application mmaps this
9249 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9250 * we have to tie this fd to a socket for file garbage collection purposes.
9251 */
9252static int io_uring_get_fd(struct io_ring_ctx *ctx)
9253{
9254 struct file *file;
9255 int ret;
9256
9257#if defined(CONFIG_UNIX)
9258 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9259 &ctx->ring_sock);
9260 if (ret)
9261 return ret;
9262#endif
9263
9264 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9265 if (ret < 0)
9266 goto err;
9267
9268 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9269 O_RDWR | O_CLOEXEC);
9270 if (IS_ERR(file)) {
0f212204 9271err_fd:
2b188cc1
JA
9272 put_unused_fd(ret);
9273 ret = PTR_ERR(file);
9274 goto err;
9275 }
9276
9277#if defined(CONFIG_UNIX)
9278 ctx->ring_sock->file = file;
9279#endif
0f212204
JA
9280 if (unlikely(io_uring_add_task_file(file))) {
9281 file = ERR_PTR(-ENOMEM);
9282 goto err_fd;
9283 }
2b188cc1
JA
9284 fd_install(ret, file);
9285 return ret;
9286err:
9287#if defined(CONFIG_UNIX)
9288 sock_release(ctx->ring_sock);
9289 ctx->ring_sock = NULL;
9290#endif
9291 return ret;
9292}
9293
7f13657d
XW
9294static int io_uring_create(unsigned entries, struct io_uring_params *p,
9295 struct io_uring_params __user *params)
2b188cc1
JA
9296{
9297 struct user_struct *user = NULL;
9298 struct io_ring_ctx *ctx;
aad5d8da 9299 bool limit_mem;
2b188cc1
JA
9300 int ret;
9301
8110c1a6 9302 if (!entries)
2b188cc1 9303 return -EINVAL;
8110c1a6
JA
9304 if (entries > IORING_MAX_ENTRIES) {
9305 if (!(p->flags & IORING_SETUP_CLAMP))
9306 return -EINVAL;
9307 entries = IORING_MAX_ENTRIES;
9308 }
2b188cc1
JA
9309
9310 /*
9311 * Use twice as many entries for the CQ ring. It's possible for the
9312 * application to drive a higher depth than the size of the SQ ring,
9313 * since the sqes are only used at submission time. This allows for
33a107f0
JA
9314	 * a bit of flexibility in overcommitting. If the application has
9315 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9316 * of CQ ring entries manually.
2b188cc1
JA
9317 */
9318 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
9319 if (p->flags & IORING_SETUP_CQSIZE) {
9320 /*
9321 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9322 * to a power-of-two, if it isn't already. We do NOT impose
9323 * any cq vs sq ring sizing.
9324 */
8110c1a6 9325 if (p->cq_entries < p->sq_entries)
33a107f0 9326 return -EINVAL;
8110c1a6
JA
9327 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9328 if (!(p->flags & IORING_SETUP_CLAMP))
9329 return -EINVAL;
9330 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9331 }
33a107f0
JA
9332 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9333 } else {
9334 p->cq_entries = 2 * p->sq_entries;
9335 }
2b188cc1
JA
9336
9337 user = get_uid(current_user());
aad5d8da 9338 limit_mem = !capable(CAP_IPC_LOCK);
2b188cc1 9339
aad5d8da 9340 if (limit_mem) {
a087e2b5 9341 ret = __io_account_mem(user,
2b188cc1
JA
9342 ring_pages(p->sq_entries, p->cq_entries));
9343 if (ret) {
9344 free_uid(user);
9345 return ret;
9346 }
9347 }
9348
9349 ctx = io_ring_ctx_alloc(p);
9350 if (!ctx) {
aad5d8da 9351 if (limit_mem)
a087e2b5 9352 __io_unaccount_mem(user, ring_pages(p->sq_entries,
2b188cc1
JA
9353 p->cq_entries));
9354 free_uid(user);
9355 return -ENOMEM;
9356 }
9357 ctx->compat = in_compat_syscall();
2b188cc1 9358 ctx->user = user;
0b8c0ec7 9359 ctx->creds = get_current_cred();
2b188cc1 9360
2aede0e4
JA
9361 ctx->sqo_task = get_task_struct(current);
9362
9363 /*
9364 * This is just grabbed for accounting purposes. When a process exits,
9365 * the mm is exited and dropped before the files, hence we need to hang
9366 * on to this mm purely for the purposes of being able to unaccount
9367 * memory (locked/pinned vm). It's not used for anything else.
9368 */
6b7898eb 9369 mmgrab(current->mm);
2aede0e4 9370 ctx->mm_account = current->mm;
6b7898eb 9371
91d8f519
DZ
9372#ifdef CONFIG_BLK_CGROUP
9373 /*
9374	 * The sq thread will belong to the original cgroup it was initialized in.
9375 * If the cgroup goes offline (e.g. disabling the io controller), then
9376 * issued bios will be associated with the closest cgroup later in the
9377 * block layer.
9378 */
9379 rcu_read_lock();
9380 ctx->sqo_blkcg_css = blkcg_css();
9381 ret = css_tryget_online(ctx->sqo_blkcg_css);
9382 rcu_read_unlock();
9383 if (!ret) {
9384		/* don't init against a dying cgroup; have the user try again */
9385 ctx->sqo_blkcg_css = NULL;
9386 ret = -ENODEV;
9387 goto err;
9388 }
9389#endif
9390
f74441e6
JA
9391 /*
9392 * Account memory _before_ installing the file descriptor. Once
9393 * the descriptor is installed, it can get closed at any time. Also
9394 * do this before hitting the general error path, as ring freeing
9395 * will un-account as well.
9396 */
9397 io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
9398 ACCT_LOCKED);
9399 ctx->limit_mem = limit_mem;
9400
2b188cc1
JA
9401 ret = io_allocate_scq_urings(ctx, p);
9402 if (ret)
9403 goto err;
9404
7e84e1c7 9405 ret = io_sq_offload_create(ctx, p);
2b188cc1
JA
9406 if (ret)
9407 goto err;
9408
7e84e1c7
SG
9409 if (!(p->flags & IORING_SETUP_R_DISABLED))
9410 io_sq_offload_start(ctx);
9411
2b188cc1 9412 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
9413 p->sq_off.head = offsetof(struct io_rings, sq.head);
9414 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9415 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9416 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9417 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9418 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9419 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
9420
9421 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
9422 p->cq_off.head = offsetof(struct io_rings, cq.head);
9423 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9424 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9425 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9426 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9427 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 9428 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 9429
7f13657d
XW
9430 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9431 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351
JX
9432 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
9433 IORING_FEAT_POLL_32BITS;
7f13657d
XW
9434
9435 if (copy_to_user(params, p, sizeof(*p))) {
9436 ret = -EFAULT;
9437 goto err;
9438 }
d1719f70 9439
044c1ab3
JA
9440 /*
9441 * Install ring fd as the very last thing, so we don't risk someone
9442 * having closed it before we finish setup
9443 */
9444 ret = io_uring_get_fd(ctx);
9445 if (ret < 0)
9446 goto err;
9447
c826bd7a 9448 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
9449 return ret;
9450err:
9451 io_ring_ctx_wait_and_kill(ctx);
9452 return ret;
9453}
9454
9455/*
9456 * Sets up an io_uring context and returns the fd. The application asks for a
9457 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9458 * params structure passed in.
9459 */
9460static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9461{
9462 struct io_uring_params p;
2b188cc1
JA
9463 int i;
9464
9465 if (copy_from_user(&p, params, sizeof(p)))
9466 return -EFAULT;
9467 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9468 if (p.resv[i])
9469 return -EINVAL;
9470 }
9471
6c271ce2 9472 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 9473 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
7e84e1c7
SG
9474 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9475 IORING_SETUP_R_DISABLED))
2b188cc1
JA
9476 return -EINVAL;
9477
7f13657d 9478 return io_uring_create(entries, &p, params);
2b188cc1
JA
9479}
9480
9481SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9482 struct io_uring_params __user *, params)
9483{
9484 return io_uring_setup(entries, params);
9485}
9486
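/*
 * Illustrative userspace sketch of the setup path above: ask for a queue
 * depth and let the kernel report the rounded-up ring geometry back through
 * struct io_uring_params. Raw syscall(2) is used; __NR_io_uring_setup comes
 * from the uapi headers, and the 4x CQ sizing is just an example.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_ring(unsigned entries, struct io_uring_params *p)
{
        memset(p, 0, sizeof(*p));
        /* Ask for a CQ ring four times the SQ size instead of the default 2x. */
        p->flags = IORING_SETUP_CQSIZE;
        p->cq_entries = 4 * entries;

        /* On success this returns the ring fd; p->sq_off/cq_off are filled in. */
        return syscall(__NR_io_uring_setup, entries, p);
}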
66f4af93
JA
9487static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9488{
9489 struct io_uring_probe *p;
9490 size_t size;
9491 int i, ret;
9492
9493 size = struct_size(p, ops, nr_args);
9494 if (size == SIZE_MAX)
9495 return -EOVERFLOW;
9496 p = kzalloc(size, GFP_KERNEL);
9497 if (!p)
9498 return -ENOMEM;
9499
9500 ret = -EFAULT;
9501 if (copy_from_user(p, arg, size))
9502 goto out;
9503 ret = -EINVAL;
9504 if (memchr_inv(p, 0, size))
9505 goto out;
9506
9507 p->last_op = IORING_OP_LAST - 1;
9508 if (nr_args > IORING_OP_LAST)
9509 nr_args = IORING_OP_LAST;
9510
9511 for (i = 0; i < nr_args; i++) {
9512 p->ops[i].op = i;
9513 if (!io_op_defs[i].not_supported)
9514 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9515 }
9516 p->ops_len = i;
9517
9518 ret = 0;
9519 if (copy_to_user(arg, p, size))
9520 ret = -EFAULT;
9521out:
9522 kfree(p);
9523 return ret;
9524}
9525
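/*
 * Illustrative userspace sketch of consuming io_probe() above, along the
 * lines of liburing's probe helpers: register a zeroed probe buffer sized
 * for up to 256 ops and test the IO_URING_OP_SUPPORTED flag for a given
 * opcode. Returns 1 if supported, 0 if not, -1 on error.
 */
#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static int opcode_supported(int ring_fd, unsigned opcode)
{
        size_t len = sizeof(struct io_uring_probe) +
                     256 * sizeof(struct io_uring_probe_op);
        struct io_uring_probe *probe = calloc(1, len);
        int ret;

        if (!probe)
                return -1;

        ret = syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
                      probe, 256);
        if (!ret)
                ret = opcode <= probe->last_op &&
                      (probe->ops[opcode].flags & IO_URING_OP_SUPPORTED);

        free(probe);
        return ret;
}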
071698e1
JA
9526static int io_register_personality(struct io_ring_ctx *ctx)
9527{
9528 const struct cred *creds = get_current_cred();
9529 int id;
9530
9531 id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
9532 USHRT_MAX, GFP_KERNEL);
9533 if (id < 0)
9534 put_cred(creds);
9535 return id;
9536}
9537
9538static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
9539{
9540 const struct cred *old_creds;
9541
9542 old_creds = idr_remove(&ctx->personality_idr, id);
9543 if (old_creds) {
9544 put_cred(old_creds);
9545 return 0;
9546 }
9547
9548 return -EINVAL;
9549}
9550
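/*
 * Illustrative userspace sketch of the personality registration above:
 * register the current credentials once (typically while still privileged),
 * then tag individual SQEs with the returned id so those requests are issued
 * with the registered credentials.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_current_creds(int ring_fd)
{
        /* Returns the idr-allocated personality id (>= 1) or -1 on error. */
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_PERSONALITY, NULL, 0);
}

static void sqe_set_personality(struct io_uring_sqe *sqe, unsigned short id)
{
        /* The sqe->personality offset is verified in io_uring_init() below. */
        sqe->personality = id;
}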
21b55dbc
SG
9551static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9552 unsigned int nr_args)
9553{
9554 struct io_uring_restriction *res;
9555 size_t size;
9556 int i, ret;
9557
7e84e1c7
SG
9558 /* Restrictions allowed only if rings started disabled */
9559 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9560 return -EBADFD;
9561
21b55dbc 9562 /* We allow only a single restrictions registration */
7e84e1c7 9563 if (ctx->restrictions.registered)
21b55dbc
SG
9564 return -EBUSY;
9565
9566 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9567 return -EINVAL;
9568
9569 size = array_size(nr_args, sizeof(*res));
9570 if (size == SIZE_MAX)
9571 return -EOVERFLOW;
9572
9573 res = memdup_user(arg, size);
9574 if (IS_ERR(res))
9575 return PTR_ERR(res);
9576
9577 ret = 0;
9578
9579 for (i = 0; i < nr_args; i++) {
9580 switch (res[i].opcode) {
9581 case IORING_RESTRICTION_REGISTER_OP:
9582 if (res[i].register_op >= IORING_REGISTER_LAST) {
9583 ret = -EINVAL;
9584 goto out;
9585 }
9586
9587 __set_bit(res[i].register_op,
9588 ctx->restrictions.register_op);
9589 break;
9590 case IORING_RESTRICTION_SQE_OP:
9591 if (res[i].sqe_op >= IORING_OP_LAST) {
9592 ret = -EINVAL;
9593 goto out;
9594 }
9595
9596 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9597 break;
9598 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9599 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9600 break;
9601 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9602 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9603 break;
9604 default:
9605 ret = -EINVAL;
9606 goto out;
9607 }
9608 }
9609
9610out:
9611 /* Reset all restrictions if an error happened */
9612 if (ret != 0)
9613 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9614 else
7e84e1c7 9615 ctx->restrictions.registered = true;
21b55dbc
SG
9616
9617 kfree(res);
9618 return ret;
9619}
9620
7e84e1c7
SG
9621static int io_register_enable_rings(struct io_ring_ctx *ctx)
9622{
9623 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9624 return -EBADFD;
9625
9626 if (ctx->restrictions.registered)
9627 ctx->restricted = 1;
9628
9629 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9630
9631 io_sq_offload_start(ctx);
9632
9633 return 0;
9634}
9635
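/*
 * Illustrative userspace sketch of the restriction flow implemented above:
 * the ring is created with IORING_SETUP_R_DISABLED, a whitelist of SQE
 * opcodes is registered while the ring is still disabled, and only then are
 * the rings enabled. The particular whitelist here is just an example.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int restrict_ring_to_rw(int ring_fd)
{
        struct io_uring_restriction res[] = {
                { .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_READV },
                { .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_WRITEV },
        };
        int ret;

        /* Only allowed while the ring still has IORING_SETUP_R_DISABLED set. */
        ret = syscall(__NR_io_uring_register, ring_fd,
                      IORING_REGISTER_RESTRICTIONS, res, 2);
        if (ret)
                return ret;

        /* Flip the ring live; restricted mode is enforced from here on. */
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}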
071698e1
JA
9636static bool io_register_op_must_quiesce(int op)
9637{
9638 switch (op) {
9639 case IORING_UNREGISTER_FILES:
9640 case IORING_REGISTER_FILES_UPDATE:
9641 case IORING_REGISTER_PROBE:
9642 case IORING_REGISTER_PERSONALITY:
9643 case IORING_UNREGISTER_PERSONALITY:
9644 return false;
9645 default:
9646 return true;
9647 }
9648}
9649
edafccee
JA
9650static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9651 void __user *arg, unsigned nr_args)
b19062a5
JA
9652 __releases(ctx->uring_lock)
9653 __acquires(ctx->uring_lock)
edafccee
JA
9654{
9655 int ret;
9656
35fa71a0
JA
9657 /*
9658	 * We're inside the ring mutex; if the ref is already dying, then
9659 * someone else killed the ctx or is already going through
9660 * io_uring_register().
9661 */
9662 if (percpu_ref_is_dying(&ctx->refs))
9663 return -ENXIO;
9664
071698e1 9665 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 9666 percpu_ref_kill(&ctx->refs);
b19062a5 9667
05f3fb3c
JA
9668 /*
9669 * Drop uring mutex before waiting for references to exit. If
9670 * another thread is currently inside io_uring_enter() it might
9671 * need to grab the uring_lock to make progress. If we hold it
9672 * here across the drain wait, then we can deadlock. It's safe
9673 * to drop the mutex here, since no new references will come in
9674 * after we've killed the percpu ref.
9675 */
9676 mutex_unlock(&ctx->uring_lock);
af9c1a44
JA
9677 do {
9678 ret = wait_for_completion_interruptible(&ctx->ref_comp);
9679 if (!ret)
9680 break;
9681 if (io_run_task_work_sig() > 0)
9682 continue;
9683 } while (1);
9684
05f3fb3c 9685 mutex_lock(&ctx->uring_lock);
af9c1a44 9686
c150368b
JA
9687 if (ret) {
9688 percpu_ref_resurrect(&ctx->refs);
9689 ret = -EINTR;
21b55dbc
SG
9690 goto out_quiesce;
9691 }
9692 }
9693
9694 if (ctx->restricted) {
9695 if (opcode >= IORING_REGISTER_LAST) {
9696 ret = -EINVAL;
9697 goto out;
9698 }
9699
9700 if (!test_bit(opcode, ctx->restrictions.register_op)) {
9701 ret = -EACCES;
c150368b
JA
9702 goto out;
9703 }
05f3fb3c 9704 }
edafccee
JA
9705
9706 switch (opcode) {
9707 case IORING_REGISTER_BUFFERS:
9708 ret = io_sqe_buffer_register(ctx, arg, nr_args);
9709 break;
9710 case IORING_UNREGISTER_BUFFERS:
9711 ret = -EINVAL;
9712 if (arg || nr_args)
9713 break;
9714 ret = io_sqe_buffer_unregister(ctx);
9715 break;
6b06314c
JA
9716 case IORING_REGISTER_FILES:
9717 ret = io_sqe_files_register(ctx, arg, nr_args);
9718 break;
9719 case IORING_UNREGISTER_FILES:
9720 ret = -EINVAL;
9721 if (arg || nr_args)
9722 break;
9723 ret = io_sqe_files_unregister(ctx);
9724 break;
c3a31e60
JA
9725 case IORING_REGISTER_FILES_UPDATE:
9726 ret = io_sqe_files_update(ctx, arg, nr_args);
9727 break;
9b402849 9728 case IORING_REGISTER_EVENTFD:
f2842ab5 9729 case IORING_REGISTER_EVENTFD_ASYNC:
9b402849
JA
9730 ret = -EINVAL;
9731 if (nr_args != 1)
9732 break;
9733 ret = io_eventfd_register(ctx, arg);
f2842ab5
JA
9734 if (ret)
9735 break;
9736 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
9737 ctx->eventfd_async = 1;
9738 else
9739 ctx->eventfd_async = 0;
9b402849
JA
9740 break;
9741 case IORING_UNREGISTER_EVENTFD:
9742 ret = -EINVAL;
9743 if (arg || nr_args)
9744 break;
9745 ret = io_eventfd_unregister(ctx);
9746 break;
66f4af93
JA
9747 case IORING_REGISTER_PROBE:
9748 ret = -EINVAL;
9749 if (!arg || nr_args > 256)
9750 break;
9751 ret = io_probe(ctx, arg, nr_args);
9752 break;
071698e1
JA
9753 case IORING_REGISTER_PERSONALITY:
9754 ret = -EINVAL;
9755 if (arg || nr_args)
9756 break;
9757 ret = io_register_personality(ctx);
9758 break;
9759 case IORING_UNREGISTER_PERSONALITY:
9760 ret = -EINVAL;
9761 if (arg)
9762 break;
9763 ret = io_unregister_personality(ctx, nr_args);
9764 break;
7e84e1c7
SG
9765 case IORING_REGISTER_ENABLE_RINGS:
9766 ret = -EINVAL;
9767 if (arg || nr_args)
9768 break;
9769 ret = io_register_enable_rings(ctx);
9770 break;
21b55dbc
SG
9771 case IORING_REGISTER_RESTRICTIONS:
9772 ret = io_register_restrictions(ctx, arg, nr_args);
9773 break;
edafccee
JA
9774 default:
9775 ret = -EINVAL;
9776 break;
9777 }
9778
21b55dbc 9779out:
071698e1 9780 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 9781 /* bring the ctx back to life */
05f3fb3c 9782 percpu_ref_reinit(&ctx->refs);
21b55dbc 9783out_quiesce:
0f158b4c 9784 reinit_completion(&ctx->ref_comp);
05f3fb3c 9785 }
edafccee
JA
9786 return ret;
9787}
9788
9789SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
9790 void __user *, arg, unsigned int, nr_args)
9791{
9792 struct io_ring_ctx *ctx;
9793 long ret = -EBADF;
9794 struct fd f;
9795
9796 f = fdget(fd);
9797 if (!f.file)
9798 return -EBADF;
9799
9800 ret = -EOPNOTSUPP;
9801 if (f.file->f_op != &io_uring_fops)
9802 goto out_fput;
9803
9804 ctx = f.file->private_data;
9805
9806 mutex_lock(&ctx->uring_lock);
9807 ret = __io_uring_register(ctx, opcode, arg, nr_args);
9808 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
9809 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
9810 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
9811out_fput:
9812 fdput(f);
9813 return ret;
9814}
9815
2b188cc1
JA
9816static int __init io_uring_init(void)
9817{
d7f62e82
SM
9818#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
9819 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
9820 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
9821} while (0)
9822
9823#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
9824 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
9825 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
9826 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
9827 BUILD_BUG_SQE_ELEM(1, __u8, flags);
9828 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
9829 BUILD_BUG_SQE_ELEM(4, __s32, fd);
9830 BUILD_BUG_SQE_ELEM(8, __u64, off);
9831 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
9832 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 9833 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
9834 BUILD_BUG_SQE_ELEM(24, __u32, len);
9835 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
9836 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
9837 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
9838 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
5769a351
JX
9839 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
9840 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
d7f62e82
SM
9841 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
9842 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
9843 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
9844 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
9845 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
9846 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
9847 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
9848 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 9849 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
9850 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
9851 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
9852 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 9853 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
d7f62e82 9854
d3656344 9855 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
84557871 9856 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
2b188cc1
JA
9857 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
9858 return 0;
9859};
9860__initcall(io_uring_init);