1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
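 *
 * As an illustrative sketch (not part of this file), the application side
 * of the CQ reap described above might look roughly like this, using
 * liburing-style names for the mapped ring fields (cq_khead, cq_ktail,
 * cq_kmask, cqes); consume_cqe() is application-defined:
 *
 *	unsigned head = *cq_khead;
 *	unsigned tail = smp_load_acquire(cq_ktail);
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_kmask];
 *		consume_cqe(cqe);		// application-defined
 *		head++;
 *	}
 *	smp_store_release(cq_khead, head);	// also orders the entry loads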
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes and to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
41 */
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
52de1fe1 47#include <net/compat.h>
2b188cc1
JA
48#include <linux/refcount.h>
49#include <linux/uio.h>
6b47ee6e 50#include <linux/bits.h>
2b188cc1
JA
51
52#include <linux/sched/signal.h>
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/fdtable.h>
56#include <linux/mm.h>
57#include <linux/mman.h>
58#include <linux/mmu_context.h>
59#include <linux/percpu.h>
60#include <linux/slab.h>
6c271ce2 61#include <linux/kthread.h>
2b188cc1 62#include <linux/blkdev.h>
edafccee 63#include <linux/bvec.h>
2b188cc1
JA
64#include <linux/net.h>
65#include <net/sock.h>
66#include <net/af_unix.h>
6b06314c 67#include <net/scm.h>
2b188cc1
JA
68#include <linux/anon_inodes.h>
69#include <linux/sched/mm.h>
70#include <linux/uaccess.h>
71#include <linux/nospec.h>
edafccee
JA
72#include <linux/sizes.h>
73#include <linux/hugetlb.h>
aa4c3967 74#include <linux/highmem.h>
15b71abe
JA
75#include <linux/namei.h>
76#include <linux/fsnotify.h>
4840e418 77#include <linux/fadvise.h>
3e4827b0 78#include <linux/eventpoll.h>
ff002b30 79#include <linux/fs_struct.h>
7d67af2c 80#include <linux/splice.h>
b41e9852 81#include <linux/task_work.h>
2b188cc1 82
c826bd7a
DD
83#define CREATE_TRACE_POINTS
84#include <trace/events/io_uring.h>
85
2b188cc1
JA
86#include <uapi/linux/io_uring.h>
87
88#include "internal.h"
561fb04a 89#include "io-wq.h"
2b188cc1 90
5277deaa 91#define IORING_MAX_ENTRIES 32768
33a107f0 92#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
65e19f54
JA
93
94/*
95 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
96 */
97#define IORING_FILE_TABLE_SHIFT 9
98#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
99#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
100#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
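/*
 * Illustrative sketch (not code from this file): a fixed file index is
 * resolved with the constants above through a two-level table, roughly
 *
 *	table = &file_data->table[idx >> IORING_FILE_TABLE_SHIFT];
 *	file  = table->files[idx & IORING_FILE_TABLE_MASK];
 */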
2b188cc1
JA
101
102struct io_uring {
103 u32 head ____cacheline_aligned_in_smp;
104 u32 tail ____cacheline_aligned_in_smp;
105};
106
1e84b97b 107/*
75b28aff
HV
108 * This data is shared with the application through the mmap at offsets
109 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
1e84b97b
SB
110 *
111 * The offsets to the member fields are published through struct
112 * io_sqring_offsets when calling io_uring_setup.
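 *
 * Illustrative application-side mapping (a sketch, not part of this file;
 * 'p' stands for the struct io_uring_params filled in by io_uring_setup()):
 *
 *	sq = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		  ring_fd, IORING_OFF_SQ_RING);
 *	cq = mmap(NULL, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		  ring_fd, IORING_OFF_CQ_RING);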
113 */
75b28aff 114struct io_rings {
1e84b97b
SB
115 /*
116 * Head and tail offsets into the ring; the offsets need to be
117 * masked to get valid indices.
118 *
75b28aff
HV
119 * The kernel controls head of the sq ring and the tail of the cq ring,
120 * and the application controls tail of the sq ring and the head of the
121 * cq ring.
1e84b97b 122 */
75b28aff 123 struct io_uring sq, cq;
1e84b97b 124 /*
75b28aff 125 * Bitmasks to apply to head and tail offsets (constant, equals
1e84b97b
SB
126 * ring_entries - 1)
127 */
75b28aff
HV
128 u32 sq_ring_mask, cq_ring_mask;
129 /* Ring sizes (constant, power of 2) */
130 u32 sq_ring_entries, cq_ring_entries;
1e84b97b
SB
131 /*
132 * Number of invalid entries dropped by the kernel due to
133 * invalid index stored in array
134 *
135 * Written by the kernel, shouldn't be modified by the
136 * application (i.e. get number of "new events" by comparing to
137 * cached value).
138 *
139 * After a new SQ head value has been read by the application, this
140 * counter includes all submissions that were dropped while advancing
141 * to the new SQ head (and possibly more).
142 */
75b28aff 143 u32 sq_dropped;
1e84b97b 144 /*
0d9b5b3a 145 * Runtime SQ flags
1e84b97b
SB
146 *
147 * Written by the kernel, shouldn't be modified by the
148 * application.
149 *
150 * The application needs a full memory barrier before checking
151 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
152 */
75b28aff 153 u32 sq_flags;
0d9b5b3a
SG
154 /*
155 * Runtime CQ flags
156 *
157 * Written by the application, shouldn't be modified by the
158 * kernel.
159 */
160 u32 cq_flags;
1e84b97b
SB
161 /*
162 * Number of completion events lost because the queue was full;
163 * this should be avoided by the application by making sure
0b4295b5 164 * there are not more requests pending than there is space in
1e84b97b
SB
165 * the completion queue.
166 *
167 * Written by the kernel, shouldn't be modified by the
168 * application (i.e. get number of "new events" by comparing to
169 * cached value).
170 *
171 * As completion events come in out of order this counter is not
172 * ordered with any other data.
173 */
75b28aff 174 u32 cq_overflow;
1e84b97b
SB
175 /*
176 * Ring buffer of completion events.
177 *
178 * The kernel writes completion events fresh every time they are
179 * produced, so the application is allowed to modify pending
180 * entries.
181 */
75b28aff 182 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
2b188cc1
JA
183};
184
edafccee
JA
185struct io_mapped_ubuf {
186 u64 ubuf;
187 size_t len;
188 struct bio_vec *bvec;
189 unsigned int nr_bvecs;
190};
191
65e19f54
JA
192struct fixed_file_table {
193 struct file **files;
31b51510
JA
194};
195
05589553
XW
196struct fixed_file_ref_node {
197 struct percpu_ref refs;
198 struct list_head node;
199 struct list_head file_list;
200 struct fixed_file_data *file_data;
4a38aed2 201 struct llist_node llist;
05589553
XW
202};
203
05f3fb3c
JA
204struct fixed_file_data {
205 struct fixed_file_table *table;
206 struct io_ring_ctx *ctx;
207
05589553 208 struct percpu_ref *cur_refs;
05f3fb3c 209 struct percpu_ref refs;
05f3fb3c 210 struct completion done;
05589553
XW
211 struct list_head ref_list;
212 spinlock_t lock;
05f3fb3c
JA
213};
214
5a2e745d
JA
215struct io_buffer {
216 struct list_head list;
217 __u64 addr;
218 __s32 len;
219 __u16 bid;
220};
221
2b188cc1
JA
222struct io_ring_ctx {
223 struct {
224 struct percpu_ref refs;
225 } ____cacheline_aligned_in_smp;
226
227 struct {
228 unsigned int flags;
e1d85334
RD
229 unsigned int compat: 1;
230 unsigned int account_mem: 1;
231 unsigned int cq_overflow_flushed: 1;
232 unsigned int drain_next: 1;
233 unsigned int eventfd_async: 1;
2b188cc1 234
75b28aff
HV
235 /*
236 * Ring buffer of indices into array of io_uring_sqe, which is
237 * mmapped by the application using the IORING_OFF_SQES offset.
238 *
239 * This indirection could e.g. be used to assign fixed
240 * io_uring_sqe entries to operations and only submit them to
241 * the queue when needed.
242 *
243 * The kernel modifies neither the indices array nor the entries
244 * array.
245 */
246 u32 *sq_array;
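	/*
	 * Illustrative sketch (not code from this file): with the
	 * indirection above, fetching a submission entry is a double
	 * index, roughly
	 *
	 *	idx = READ_ONCE(sq_array[cached_sq_head & sq_mask]);
	 *	sqe = &sq_sqes[idx];	// valid if idx < sq_entries
	 */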
2b188cc1
JA
247 unsigned cached_sq_head;
248 unsigned sq_entries;
249 unsigned sq_mask;
6c271ce2 250 unsigned sq_thread_idle;
498ccd9e 251 unsigned cached_sq_dropped;
206aefde 252 atomic_t cached_cq_overflow;
ad3eb2c8 253 unsigned long sq_check_overflow;
de0617e4
JA
254
255 struct list_head defer_list;
5262f567 256 struct list_head timeout_list;
1d7bb1d5 257 struct list_head cq_overflow_list;
fcb323cc
JA
258
259 wait_queue_head_t inflight_wait;
ad3eb2c8 260 struct io_uring_sqe *sq_sqes;
2b188cc1
JA
261 } ____cacheline_aligned_in_smp;
262
206aefde
JA
263 struct io_rings *rings;
264
2b188cc1 265 /* IO offload */
561fb04a 266 struct io_wq *io_wq;
6c271ce2 267 struct task_struct *sqo_thread; /* if using sq thread polling */
2b188cc1 268 struct mm_struct *sqo_mm;
6c271ce2 269 wait_queue_head_t sqo_wait;
75b28aff 270
6b06314c
JA
271 /*
272 * If used, fixed file set. Writers must ensure that ->refs is dead,
273 * readers must ensure that ->refs is alive as long as the file* is
274 * used. Only updated through io_uring_register(2).
275 */
05f3fb3c 276 struct fixed_file_data *file_data;
6b06314c 277 unsigned nr_user_files;
b14cca0c
PB
278 int ring_fd;
279 struct file *ring_file;
6b06314c 280
edafccee
JA
281 /* if used, fixed mapped user buffers */
282 unsigned nr_user_bufs;
283 struct io_mapped_ubuf *user_bufs;
284
2b188cc1
JA
285 struct user_struct *user;
286
0b8c0ec7 287 const struct cred *creds;
181e448d 288
0f158b4c
JA
289 struct completion ref_comp;
290 struct completion sq_thread_comp;
206aefde 291
0ddf92e8
JA
292 /* if all else fails... */
293 struct io_kiocb *fallback_req;
294
206aefde
JA
295#if defined(CONFIG_UNIX)
296 struct socket *ring_sock;
297#endif
298
5a2e745d
JA
299 struct idr io_buffer_idr;
300
071698e1
JA
301 struct idr personality_idr;
302
206aefde
JA
303 struct {
304 unsigned cached_cq_tail;
305 unsigned cq_entries;
306 unsigned cq_mask;
307 atomic_t cq_timeouts;
ad3eb2c8 308 unsigned long cq_check_overflow;
206aefde
JA
309 struct wait_queue_head cq_wait;
310 struct fasync_struct *cq_fasync;
311 struct eventfd_ctx *cq_ev_fd;
312 } ____cacheline_aligned_in_smp;
2b188cc1
JA
313
314 struct {
315 struct mutex uring_lock;
316 wait_queue_head_t wait;
317 } ____cacheline_aligned_in_smp;
318
319 struct {
320 spinlock_t completion_lock;
e94f141b 321
def596e9
JA
322 /*
323 * ->poll_list is protected by the ctx->uring_lock for
324 * io_uring instances that don't use IORING_SETUP_SQPOLL.
325 * For SQPOLL, only the single threaded io_sq_thread() will
326 * manipulate the list, hence no extra locking is needed there.
327 */
328 struct list_head poll_list;
78076bb6
JA
329 struct hlist_head *cancel_hash;
330 unsigned cancel_hash_bits;
e94f141b 331 bool poll_multi_file;
31b51510 332
fcb323cc
JA
333 spinlock_t inflight_lock;
334 struct list_head inflight_list;
2b188cc1 335 } ____cacheline_aligned_in_smp;
85faa7b8 336
4a38aed2
JA
337 struct delayed_work file_put_work;
338 struct llist_head file_put_llist;
339
85faa7b8 340 struct work_struct exit_work;
2b188cc1
JA
341};
342
09bb8394
JA
343/*
344 * First field must be the file pointer in all the
345 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
346 */
221c5eb2
JA
347struct io_poll_iocb {
348 struct file *file;
0969e783
JA
349 union {
350 struct wait_queue_head *head;
351 u64 addr;
352 };
221c5eb2 353 __poll_t events;
8c838788 354 bool done;
221c5eb2 355 bool canceled;
392edb45 356 struct wait_queue_entry wait;
221c5eb2
JA
357};
358
b5dba59e
JA
359struct io_close {
360 struct file *file;
361 struct file *put_file;
362 int fd;
363};
364
ad8a48ac
JA
365struct io_timeout_data {
366 struct io_kiocb *req;
367 struct hrtimer timer;
368 struct timespec64 ts;
369 enum hrtimer_mode mode;
370};
371
8ed8d3c3
JA
372struct io_accept {
373 struct file *file;
374 struct sockaddr __user *addr;
375 int __user *addr_len;
376 int flags;
09952e3e 377 unsigned long nofile;
8ed8d3c3
JA
378};
379
380struct io_sync {
381 struct file *file;
382 loff_t len;
383 loff_t off;
384 int flags;
d63d1b5e 385 int mode;
8ed8d3c3
JA
386};
387
fbf23849
JA
388struct io_cancel {
389 struct file *file;
390 u64 addr;
391};
392
b29472ee
JA
393struct io_timeout {
394 struct file *file;
395 u64 addr;
396 int flags;
b55ce732 397 u32 count;
b29472ee
JA
398};
399
9adbd45d
JA
400struct io_rw {
401 /* NOTE: kiocb has the file as the first member, so don't do it here */
402 struct kiocb kiocb;
403 u64 addr;
404 u64 len;
405};
406
3fbb51c1
JA
407struct io_connect {
408 struct file *file;
409 struct sockaddr __user *addr;
410 int addr_len;
411};
412
e47293fd
JA
413struct io_sr_msg {
414 struct file *file;
fddaface
JA
415 union {
416 struct user_msghdr __user *msg;
417 void __user *buf;
418 };
e47293fd 419 int msg_flags;
bcda7baa 420 int bgid;
fddaface 421 size_t len;
bcda7baa 422 struct io_buffer *kbuf;
e47293fd
JA
423};
424
15b71abe
JA
425struct io_open {
426 struct file *file;
427 int dfd;
15b71abe 428 struct filename *filename;
c12cedf2 429 struct open_how how;
4022e7af 430 unsigned long nofile;
15b71abe
JA
431};
432
05f3fb3c
JA
433struct io_files_update {
434 struct file *file;
435 u64 arg;
436 u32 nr_args;
437 u32 offset;
438};
439
4840e418
JA
440struct io_fadvise {
441 struct file *file;
442 u64 offset;
443 u32 len;
444 u32 advice;
445};
446
c1ca757b
JA
447struct io_madvise {
448 struct file *file;
449 u64 addr;
450 u32 len;
451 u32 advice;
452};
453
3e4827b0
JA
454struct io_epoll {
455 struct file *file;
456 int epfd;
457 int op;
458 int fd;
459 struct epoll_event event;
e47293fd
JA
460};
461
7d67af2c
PB
462struct io_splice {
463 struct file *file_out;
464 struct file *file_in;
465 loff_t off_out;
466 loff_t off_in;
467 u64 len;
468 unsigned int flags;
469};
470
ddf0322d
JA
471struct io_provide_buf {
472 struct file *file;
473 __u64 addr;
474 __s32 len;
475 __u32 bgid;
476 __u16 nbufs;
477 __u16 bid;
478};
479
1d9e1288
BM
480struct io_statx {
481 struct file *file;
482 int dfd;
483 unsigned int mask;
484 unsigned int flags;
e62753e4 485 const char __user *filename;
1d9e1288
BM
486 struct statx __user *buffer;
487};
488
f499a021
JA
489struct io_async_connect {
490 struct sockaddr_storage address;
491};
492
03b1230c
JA
493struct io_async_msghdr {
494 struct iovec fast_iov[UIO_FASTIOV];
495 struct iovec *iov;
496 struct sockaddr __user *uaddr;
497 struct msghdr msg;
b537916c 498 struct sockaddr_storage addr;
03b1230c
JA
499};
500
f67676d1
JA
501struct io_async_rw {
502 struct iovec fast_iov[UIO_FASTIOV];
503 struct iovec *iov;
504 ssize_t nr_segs;
505 ssize_t size;
506};
507
1a6b74fc 508struct io_async_ctx {
f67676d1
JA
509 union {
510 struct io_async_rw rw;
03b1230c 511 struct io_async_msghdr msg;
f499a021 512 struct io_async_connect connect;
2d28390a 513 struct io_timeout_data timeout;
f67676d1 514 };
1a6b74fc
JA
515};
516
6b47ee6e
PB
517enum {
518 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
519 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
520 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
521 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
522 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
bcda7baa 523 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
6b47ee6e 524
dea3b49c 525 REQ_F_LINK_HEAD_BIT,
6b47ee6e
PB
526 REQ_F_LINK_NEXT_BIT,
527 REQ_F_FAIL_LINK_BIT,
528 REQ_F_INFLIGHT_BIT,
529 REQ_F_CUR_POS_BIT,
530 REQ_F_NOWAIT_BIT,
531 REQ_F_IOPOLL_COMPLETED_BIT,
532 REQ_F_LINK_TIMEOUT_BIT,
533 REQ_F_TIMEOUT_BIT,
534 REQ_F_ISREG_BIT,
535 REQ_F_MUST_PUNT_BIT,
536 REQ_F_TIMEOUT_NOSEQ_BIT,
537 REQ_F_COMP_LOCKED_BIT,
99bc4c38 538 REQ_F_NEED_CLEANUP_BIT,
2ca10259 539 REQ_F_OVERFLOW_BIT,
d7718a9d 540 REQ_F_POLLED_BIT,
bcda7baa 541 REQ_F_BUFFER_SELECTED_BIT,
5b0bbee4 542 REQ_F_NO_FILE_TABLE_BIT,
84557871
JA
543
544 /* not a real bit, just to check we're not overflowing the space */
545 __REQ_F_LAST_BIT,
6b47ee6e
PB
546};
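/*
 * Illustrative sketch (not code from this file): __REQ_F_LAST_BIT exists so
 * that a compile-time assertion can verify the flag bits still fit in the
 * 'unsigned int flags' member of struct io_kiocb, e.g.
 *
 *	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(unsigned int));
 */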
547
548enum {
549 /* ctx owns file */
550 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
551 /* drain existing IO first */
552 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
553 /* linked sqes */
554 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
555 /* doesn't sever on completion < 0 */
556 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
557 /* IOSQE_ASYNC */
558 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
bcda7baa
JA
559 /* IOSQE_BUFFER_SELECT */
560 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
6b47ee6e 561
dea3b49c
PB
562 /* head of a link */
563 REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
6b47ee6e
PB
564 /* already grabbed next link */
565 REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
566 /* fail rest of links */
567 REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
568 /* on inflight list */
569 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
570 /* read/write uses file position */
571 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
572 /* must not punt to workers */
573 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
574 /* polled IO has completed */
575 REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
576 /* has linked timeout */
577 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
578 /* timeout request */
579 REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
580 /* regular file */
581 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
582 /* must be punted even for NONBLOCK */
583 REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
584 /* no timeout sequence */
585 REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
586 /* completion under lock */
587 REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
99bc4c38
PB
588 /* needs cleanup */
589 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
2ca10259
JA
590 /* in overflow list */
591 REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
d7718a9d
JA
592 /* already went through poll handler */
593 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
bcda7baa
JA
594 /* buffer already selected */
595 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
5b0bbee4
JA
596 /* doesn't need file table for this request */
597 REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
d7718a9d
JA
598};
599
600struct async_poll {
601 struct io_poll_iocb poll;
602 struct io_wq_work work;
6b47ee6e
PB
603};
604
09bb8394
JA
605/*
606 * NOTE! Each of the iocb union members has the file pointer
607 * as the first entry in their struct definition. So you can
608 * access the file pointer through any of the sub-structs,
609 * or directly as just 'ki_filp' in this struct.
610 */
2b188cc1 611struct io_kiocb {
221c5eb2 612 union {
09bb8394 613 struct file *file;
9adbd45d 614 struct io_rw rw;
221c5eb2 615 struct io_poll_iocb poll;
8ed8d3c3
JA
616 struct io_accept accept;
617 struct io_sync sync;
fbf23849 618 struct io_cancel cancel;
b29472ee 619 struct io_timeout timeout;
3fbb51c1 620 struct io_connect connect;
e47293fd 621 struct io_sr_msg sr_msg;
15b71abe 622 struct io_open open;
b5dba59e 623 struct io_close close;
05f3fb3c 624 struct io_files_update files_update;
4840e418 625 struct io_fadvise fadvise;
c1ca757b 626 struct io_madvise madvise;
3e4827b0 627 struct io_epoll epoll;
7d67af2c 628 struct io_splice splice;
ddf0322d 629 struct io_provide_buf pbuf;
1d9e1288 630 struct io_statx statx;
221c5eb2 631 };
2b188cc1 632
1a6b74fc 633 struct io_async_ctx *io;
c398ecb3 634 int cflags;
d625c6ee 635 u8 opcode;
2b188cc1
JA
636
637 struct io_ring_ctx *ctx;
d7718a9d 638 struct list_head list;
2b188cc1 639 unsigned int flags;
c16361c1 640 refcount_t refs;
3537b6a7
JA
641 struct task_struct *task;
642 unsigned long fsize;
2b188cc1 643 u64 user_data;
9e645e11 644 u32 result;
de0617e4 645 u32 sequence;
2b188cc1 646
d7718a9d
JA
647 struct list_head link_list;
648
fcb323cc
JA
649 struct list_head inflight_entry;
650
05589553
XW
651 struct percpu_ref *fixed_file_refs;
652
b41e9852
JA
653 union {
654 /*
655 * Only commands that never go async can use the below fields,
656 * obviously. Right now only IORING_OP_POLL_ADD uses them, as do the
657 * async armed poll handlers for regular commands. The latter
658 * restore the original work, if needed.
b41e9852
JA
659 */
660 struct {
b41e9852 661 struct callback_head task_work;
d7718a9d
JA
662 struct hlist_node hash_node;
663 struct async_poll *apoll;
b41e9852
JA
664 };
665 struct io_wq_work work;
666 };
2b188cc1
JA
667};
668
669#define IO_PLUG_THRESHOLD 2
def596e9 670#define IO_IOPOLL_BATCH 8
2b188cc1 671
9a56a232
JA
672struct io_submit_state {
673 struct blk_plug plug;
674
2579f913
JA
675 /*
676 * io_kiocb alloc cache
677 */
678 void *reqs[IO_IOPOLL_BATCH];
6c8a3134 679 unsigned int free_reqs;
2579f913 680
9a56a232
JA
681 /*
682 * File reference cache
683 */
684 struct file *file;
685 unsigned int fd;
686 unsigned int has_refs;
687 unsigned int used_refs;
688 unsigned int ios_left;
689};
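/*
 * Descriptive note on the file reference cache above: when several
 * consecutive SQEs in one submit batch target the same fd, the first lookup
 * takes a batch of references on the struct file (up to 'ios_left' via
 * fget_many()); later requests in the batch reuse it by bumping 'used_refs'
 * instead of calling fget() again, and the surplus references are dropped
 * when the submission state is torn down.
 */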
690
d3656344
JA
691struct io_op_def {
692 /* needs req->io allocated for deferral/async */
693 unsigned async_ctx : 1;
694 /* needs current->mm setup, does mm access */
695 unsigned needs_mm : 1;
696 /* needs req->file assigned */
697 unsigned needs_file : 1;
d3656344
JA
698 /* hash wq insertion if file is a regular file */
699 unsigned hash_reg_file : 1;
700 /* unbound wq insertion if file is a non-regular file */
701 unsigned unbound_nonreg_file : 1;
66f4af93
JA
702 /* opcode is not supported by this kernel */
703 unsigned not_supported : 1;
f86cd20c
JA
704 /* needs file table */
705 unsigned file_table : 1;
ff002b30
JA
706 /* needs ->fs */
707 unsigned needs_fs : 1;
8a72758c
JA
708 /* set if opcode supports polled "wait" */
709 unsigned pollin : 1;
710 unsigned pollout : 1;
bcda7baa
JA
711 /* op supports buffer selection */
712 unsigned buffer_select : 1;
d3656344
JA
713};
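/*
 * Illustrative sketch (not code from this file) of how the table below is
 * consulted: the per-opcode definition drives what each request needs,
 * roughly
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_file && unlikely(!req->file))
 *		return -EBADF;
 *	if ((req->flags & REQ_F_ISREG) && def->hash_reg_file)
 *		io_wq_hash_work(&req->work, file_inode(req->file));
 */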
714
715static const struct io_op_def io_op_defs[] = {
0463b6c5
PB
716 [IORING_OP_NOP] = {},
717 [IORING_OP_READV] = {
d3656344
JA
718 .async_ctx = 1,
719 .needs_mm = 1,
720 .needs_file = 1,
721 .unbound_nonreg_file = 1,
8a72758c 722 .pollin = 1,
4d954c25 723 .buffer_select = 1,
d3656344 724 },
0463b6c5 725 [IORING_OP_WRITEV] = {
d3656344
JA
726 .async_ctx = 1,
727 .needs_mm = 1,
728 .needs_file = 1,
729 .hash_reg_file = 1,
730 .unbound_nonreg_file = 1,
8a72758c 731 .pollout = 1,
d3656344 732 },
0463b6c5 733 [IORING_OP_FSYNC] = {
d3656344
JA
734 .needs_file = 1,
735 },
0463b6c5 736 [IORING_OP_READ_FIXED] = {
d3656344
JA
737 .needs_file = 1,
738 .unbound_nonreg_file = 1,
8a72758c 739 .pollin = 1,
d3656344 740 },
0463b6c5 741 [IORING_OP_WRITE_FIXED] = {
d3656344
JA
742 .needs_file = 1,
743 .hash_reg_file = 1,
744 .unbound_nonreg_file = 1,
8a72758c 745 .pollout = 1,
d3656344 746 },
0463b6c5 747 [IORING_OP_POLL_ADD] = {
d3656344
JA
748 .needs_file = 1,
749 .unbound_nonreg_file = 1,
750 },
0463b6c5
PB
751 [IORING_OP_POLL_REMOVE] = {},
752 [IORING_OP_SYNC_FILE_RANGE] = {
d3656344
JA
753 .needs_file = 1,
754 },
0463b6c5 755 [IORING_OP_SENDMSG] = {
d3656344
JA
756 .async_ctx = 1,
757 .needs_mm = 1,
758 .needs_file = 1,
759 .unbound_nonreg_file = 1,
ff002b30 760 .needs_fs = 1,
8a72758c 761 .pollout = 1,
d3656344 762 },
0463b6c5 763 [IORING_OP_RECVMSG] = {
d3656344
JA
764 .async_ctx = 1,
765 .needs_mm = 1,
766 .needs_file = 1,
767 .unbound_nonreg_file = 1,
ff002b30 768 .needs_fs = 1,
8a72758c 769 .pollin = 1,
52de1fe1 770 .buffer_select = 1,
d3656344 771 },
0463b6c5 772 [IORING_OP_TIMEOUT] = {
d3656344
JA
773 .async_ctx = 1,
774 .needs_mm = 1,
775 },
0463b6c5
PB
776 [IORING_OP_TIMEOUT_REMOVE] = {},
777 [IORING_OP_ACCEPT] = {
d3656344
JA
778 .needs_mm = 1,
779 .needs_file = 1,
780 .unbound_nonreg_file = 1,
f86cd20c 781 .file_table = 1,
8a72758c 782 .pollin = 1,
d3656344 783 },
0463b6c5
PB
784 [IORING_OP_ASYNC_CANCEL] = {},
785 [IORING_OP_LINK_TIMEOUT] = {
d3656344
JA
786 .async_ctx = 1,
787 .needs_mm = 1,
788 },
0463b6c5 789 [IORING_OP_CONNECT] = {
d3656344
JA
790 .async_ctx = 1,
791 .needs_mm = 1,
792 .needs_file = 1,
793 .unbound_nonreg_file = 1,
8a72758c 794 .pollout = 1,
d3656344 795 },
0463b6c5 796 [IORING_OP_FALLOCATE] = {
d3656344
JA
797 .needs_file = 1,
798 },
0463b6c5 799 [IORING_OP_OPENAT] = {
f86cd20c 800 .file_table = 1,
ff002b30 801 .needs_fs = 1,
d3656344 802 },
0463b6c5 803 [IORING_OP_CLOSE] = {
f86cd20c 804 .file_table = 1,
d3656344 805 },
0463b6c5 806 [IORING_OP_FILES_UPDATE] = {
d3656344 807 .needs_mm = 1,
f86cd20c 808 .file_table = 1,
d3656344 809 },
0463b6c5 810 [IORING_OP_STATX] = {
d3656344 811 .needs_mm = 1,
ff002b30 812 .needs_fs = 1,
5b0bbee4 813 .file_table = 1,
d3656344 814 },
0463b6c5 815 [IORING_OP_READ] = {
3a6820f2
JA
816 .needs_mm = 1,
817 .needs_file = 1,
818 .unbound_nonreg_file = 1,
8a72758c 819 .pollin = 1,
bcda7baa 820 .buffer_select = 1,
3a6820f2 821 },
0463b6c5 822 [IORING_OP_WRITE] = {
3a6820f2
JA
823 .needs_mm = 1,
824 .needs_file = 1,
825 .unbound_nonreg_file = 1,
8a72758c 826 .pollout = 1,
3a6820f2 827 },
0463b6c5 828 [IORING_OP_FADVISE] = {
4840e418
JA
829 .needs_file = 1,
830 },
0463b6c5 831 [IORING_OP_MADVISE] = {
c1ca757b
JA
832 .needs_mm = 1,
833 },
0463b6c5 834 [IORING_OP_SEND] = {
fddaface
JA
835 .needs_mm = 1,
836 .needs_file = 1,
837 .unbound_nonreg_file = 1,
8a72758c 838 .pollout = 1,
fddaface 839 },
0463b6c5 840 [IORING_OP_RECV] = {
fddaface
JA
841 .needs_mm = 1,
842 .needs_file = 1,
843 .unbound_nonreg_file = 1,
8a72758c 844 .pollin = 1,
bcda7baa 845 .buffer_select = 1,
fddaface 846 },
0463b6c5 847 [IORING_OP_OPENAT2] = {
f86cd20c 848 .file_table = 1,
ff002b30 849 .needs_fs = 1,
cebdb986 850 },
3e4827b0
JA
851 [IORING_OP_EPOLL_CTL] = {
852 .unbound_nonreg_file = 1,
853 .file_table = 1,
854 },
7d67af2c
PB
855 [IORING_OP_SPLICE] = {
856 .needs_file = 1,
857 .hash_reg_file = 1,
858 .unbound_nonreg_file = 1,
ddf0322d
JA
859 },
860 [IORING_OP_PROVIDE_BUFFERS] = {},
067524e9 861 [IORING_OP_REMOVE_BUFFERS] = {},
f2a8d5c7
PB
862 [IORING_OP_TEE] = {
863 .needs_file = 1,
864 .hash_reg_file = 1,
865 .unbound_nonreg_file = 1,
866 },
d3656344
JA
867};
868
561fb04a 869static void io_wq_submit_work(struct io_wq_work **workptr);
78e19bbe 870static void io_cqring_fill_event(struct io_kiocb *req, long res);
ec9c02ad 871static void io_put_req(struct io_kiocb *req);
978db57e 872static void __io_double_put_req(struct io_kiocb *req);
94ae5e77
JA
873static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
874static void io_queue_linked_timeout(struct io_kiocb *req);
05f3fb3c
JA
875static int __io_sqe_files_update(struct io_ring_ctx *ctx,
876 struct io_uring_files_update *ip,
877 unsigned nr_args);
f86cd20c 878static int io_grab_files(struct io_kiocb *req);
99bc4c38 879static void io_cleanup_req(struct io_kiocb *req);
b41e9852
JA
880static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
881 int fd, struct file **out_file, bool fixed);
882static void __io_queue_sqe(struct io_kiocb *req,
883 const struct io_uring_sqe *sqe);
de0617e4 884
2b188cc1
JA
885static struct kmem_cache *req_cachep;
886
887static const struct file_operations io_uring_fops;
888
889struct sock *io_uring_get_socket(struct file *file)
890{
891#if defined(CONFIG_UNIX)
892 if (file->f_op == &io_uring_fops) {
893 struct io_ring_ctx *ctx = file->private_data;
894
895 return ctx->ring_sock->sk;
896 }
897#endif
898 return NULL;
899}
900EXPORT_SYMBOL(io_uring_get_socket);
901
4a38aed2
JA
902static void io_file_put_work(struct work_struct *work);
903
0cdaf760
PB
904static inline bool io_async_submit(struct io_ring_ctx *ctx)
905{
906 return ctx->flags & IORING_SETUP_SQPOLL;
907}
908
2b188cc1
JA
909static void io_ring_ctx_ref_free(struct percpu_ref *ref)
910{
911 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
912
0f158b4c 913 complete(&ctx->ref_comp);
2b188cc1
JA
914}
915
916static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
917{
918 struct io_ring_ctx *ctx;
78076bb6 919 int hash_bits;
2b188cc1
JA
920
921 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
922 if (!ctx)
923 return NULL;
924
0ddf92e8
JA
925 ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
926 if (!ctx->fallback_req)
927 goto err;
928
78076bb6
JA
929 /*
930 * Use 5 bits less than the max cq entries; that should give us around
931 * 32 entries per hash list if totally full and uniformly spread.
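 *
 * For example (sketch of the arithmetic only): cq_entries == 4096 gives
 * ilog2(4096) == 12, so hash_bits == 7, i.e. 128 hash lists of roughly
 * 4096 / 128 == 32 entries each when completely full.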
932 */
933 hash_bits = ilog2(p->cq_entries);
934 hash_bits -= 5;
935 if (hash_bits <= 0)
936 hash_bits = 1;
937 ctx->cancel_hash_bits = hash_bits;
938 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
939 GFP_KERNEL);
940 if (!ctx->cancel_hash)
941 goto err;
942 __hash_init(ctx->cancel_hash, 1U << hash_bits);
943
21482896 944 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
206aefde
JA
945 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
946 goto err;
2b188cc1
JA
947
948 ctx->flags = p->flags;
949 init_waitqueue_head(&ctx->cq_wait);
1d7bb1d5 950 INIT_LIST_HEAD(&ctx->cq_overflow_list);
0f158b4c
JA
951 init_completion(&ctx->ref_comp);
952 init_completion(&ctx->sq_thread_comp);
5a2e745d 953 idr_init(&ctx->io_buffer_idr);
071698e1 954 idr_init(&ctx->personality_idr);
2b188cc1
JA
955 mutex_init(&ctx->uring_lock);
956 init_waitqueue_head(&ctx->wait);
957 spin_lock_init(&ctx->completion_lock);
def596e9 958 INIT_LIST_HEAD(&ctx->poll_list);
de0617e4 959 INIT_LIST_HEAD(&ctx->defer_list);
5262f567 960 INIT_LIST_HEAD(&ctx->timeout_list);
fcb323cc
JA
961 init_waitqueue_head(&ctx->inflight_wait);
962 spin_lock_init(&ctx->inflight_lock);
963 INIT_LIST_HEAD(&ctx->inflight_list);
4a38aed2
JA
964 INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
965 init_llist_head(&ctx->file_put_llist);
2b188cc1 966 return ctx;
206aefde 967err:
0ddf92e8
JA
968 if (ctx->fallback_req)
969 kmem_cache_free(req_cachep, ctx->fallback_req);
78076bb6 970 kfree(ctx->cancel_hash);
206aefde
JA
971 kfree(ctx);
972 return NULL;
2b188cc1
JA
973}
974
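/*
 * IOSQE_IO_DRAIN support (descriptive note): a drained request may only be
 * issued once every request submitted before it has completed.  The helpers
 * below compare the sequence snapshot taken at submission time against the
 * number of completions produced so far (CQ tail plus entries lost to
 * overflow) and keep the request deferred until the counts catch up.
 */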
9d858b21 975static inline bool __req_need_defer(struct io_kiocb *req)
7adf4eaf 976{
a197f664
JL
977 struct io_ring_ctx *ctx = req->ctx;
978
31af27c7
PB
979 return req->sequence != ctx->cached_cq_tail
980 + atomic_read(&ctx->cached_cq_overflow);
7adf4eaf
JA
981}
982
9d858b21 983static inline bool req_need_defer(struct io_kiocb *req)
de0617e4 984{
87987898 985 if (unlikely(req->flags & REQ_F_IO_DRAIN))
9d858b21 986 return __req_need_defer(req);
de0617e4 987
9d858b21 988 return false;
de0617e4
JA
989}
990
5262f567
JA
991static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
992{
7adf4eaf
JA
993 struct io_kiocb *req;
994
995 req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
93bd25bb
JA
996 if (req) {
997 if (req->flags & REQ_F_TIMEOUT_NOSEQ)
998 return NULL;
fb4b3d3f 999 if (!__req_need_defer(req)) {
93bd25bb
JA
1000 list_del_init(&req->list);
1001 return req;
1002 }
7adf4eaf
JA
1003 }
1004
1005 return NULL;
5262f567
JA
1006}
1007
de0617e4 1008static void __io_commit_cqring(struct io_ring_ctx *ctx)
2b188cc1 1009{
75b28aff 1010 struct io_rings *rings = ctx->rings;
2b188cc1 1011
07910158
PB
1012 /* order cqe stores with ring update */
1013 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
2b188cc1 1014
07910158
PB
1015 if (wq_has_sleeper(&ctx->cq_wait)) {
1016 wake_up_interruptible(&ctx->cq_wait);
1017 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
2b188cc1
JA
1018 }
1019}
1020
cccf0ee8
JA
1021static inline void io_req_work_grab_env(struct io_kiocb *req,
1022 const struct io_op_def *def)
1023{
1024 if (!req->work.mm && def->needs_mm) {
1025 mmgrab(current->mm);
1026 req->work.mm = current->mm;
2b188cc1 1027 }
cccf0ee8
JA
1028 if (!req->work.creds)
1029 req->work.creds = get_current_cred();
ff002b30
JA
1030 if (!req->work.fs && def->needs_fs) {
1031 spin_lock(&current->fs->lock);
1032 if (!current->fs->in_exec) {
1033 req->work.fs = current->fs;
1034 req->work.fs->users++;
1035 } else {
1036 req->work.flags |= IO_WQ_WORK_CANCEL;
1037 }
1038 spin_unlock(&current->fs->lock);
1039 }
6ab23144
JA
1040 if (!req->work.task_pid)
1041 req->work.task_pid = task_pid_vnr(current);
2b188cc1
JA
1042}
1043
cccf0ee8 1044static inline void io_req_work_drop_env(struct io_kiocb *req)
18d9be1a 1045{
cccf0ee8
JA
1046 if (req->work.mm) {
1047 mmdrop(req->work.mm);
1048 req->work.mm = NULL;
1049 }
1050 if (req->work.creds) {
1051 put_cred(req->work.creds);
1052 req->work.creds = NULL;
1053 }
ff002b30
JA
1054 if (req->work.fs) {
1055 struct fs_struct *fs = req->work.fs;
1056
1057 spin_lock(&req->work.fs->lock);
1058 if (--fs->users)
1059 fs = NULL;
1060 spin_unlock(&req->work.fs->lock);
1061 if (fs)
1062 free_fs_struct(fs);
1063 }
561fb04a
JA
1064}
1065
8766dd51 1066static inline void io_prep_async_work(struct io_kiocb *req,
94ae5e77 1067 struct io_kiocb **link)
18d9be1a 1068{
d3656344 1069 const struct io_op_def *def = &io_op_defs[req->opcode];
54a91f3b 1070
d3656344
JA
1071 if (req->flags & REQ_F_ISREG) {
1072 if (def->hash_reg_file)
8766dd51 1073 io_wq_hash_work(&req->work, file_inode(req->file));
d3656344
JA
1074 } else {
1075 if (def->unbound_nonreg_file)
3529d8c2 1076 req->work.flags |= IO_WQ_WORK_UNBOUND;
54a91f3b 1077 }
cccf0ee8
JA
1078
1079 io_req_work_grab_env(req, def);
54a91f3b 1080
94ae5e77 1081 *link = io_prep_linked_timeout(req);
561fb04a
JA
1082}
1083
a197f664 1084static inline void io_queue_async_work(struct io_kiocb *req)
561fb04a 1085{
a197f664 1086 struct io_ring_ctx *ctx = req->ctx;
94ae5e77 1087 struct io_kiocb *link;
94ae5e77 1088
8766dd51 1089 io_prep_async_work(req, &link);
561fb04a 1090
8766dd51
PB
1091 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1092 &req->work, req->flags);
1093 io_wq_enqueue(ctx->io_wq, &req->work);
94ae5e77
JA
1094
1095 if (link)
1096 io_queue_linked_timeout(link);
18d9be1a
JA
1097}
1098
5262f567
JA
1099static void io_kill_timeout(struct io_kiocb *req)
1100{
1101 int ret;
1102
2d28390a 1103 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
5262f567
JA
1104 if (ret != -1) {
1105 atomic_inc(&req->ctx->cq_timeouts);
842f9612 1106 list_del_init(&req->list);
f0e20b89 1107 req->flags |= REQ_F_COMP_LOCKED;
78e19bbe 1108 io_cqring_fill_event(req, 0);
ec9c02ad 1109 io_put_req(req);
5262f567
JA
1110 }
1111}
1112
1113static void io_kill_timeouts(struct io_ring_ctx *ctx)
1114{
1115 struct io_kiocb *req, *tmp;
1116
1117 spin_lock_irq(&ctx->completion_lock);
1118 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
1119 io_kill_timeout(req);
1120 spin_unlock_irq(&ctx->completion_lock);
1121}
1122
04518945
PB
1123static void __io_queue_deferred(struct io_ring_ctx *ctx)
1124{
1125 do {
1126 struct io_kiocb *req = list_first_entry(&ctx->defer_list,
1127 struct io_kiocb, list);
1128
1129 if (req_need_defer(req))
1130 break;
1131 list_del_init(&req->list);
1132 io_queue_async_work(req);
1133 } while (!list_empty(&ctx->defer_list));
1134}
1135
de0617e4
JA
1136static void io_commit_cqring(struct io_ring_ctx *ctx)
1137{
1138 struct io_kiocb *req;
1139
5262f567
JA
1140 while ((req = io_get_timeout_req(ctx)) != NULL)
1141 io_kill_timeout(req);
1142
de0617e4
JA
1143 __io_commit_cqring(ctx);
1144
04518945
PB
1145 if (unlikely(!list_empty(&ctx->defer_list)))
1146 __io_queue_deferred(ctx);
de0617e4
JA
1147}
1148
2b188cc1
JA
1149static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1150{
75b28aff 1151 struct io_rings *rings = ctx->rings;
2b188cc1
JA
1152 unsigned tail;
1153
1154 tail = ctx->cached_cq_tail;
115e12e5
SB
1155 /*
1156 * writes to the cq entry need to come after reading head; the
1157 * control dependency is enough as we're using WRITE_ONCE to
1158 * fill the cq entry
1159 */
75b28aff 1160 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
2b188cc1
JA
1161 return NULL;
1162
1163 ctx->cached_cq_tail++;
75b28aff 1164 return &rings->cqes[tail & ctx->cq_mask];
2b188cc1
JA
1165}
1166
f2842ab5
JA
1167static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1168{
f0b493e6
JA
1169 if (!ctx->cq_ev_fd)
1170 return false;
7e55a19c
SG
1171 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1172 return false;
f2842ab5
JA
1173 if (!ctx->eventfd_async)
1174 return true;
b41e9852 1175 return io_wq_current_is_worker();
f2842ab5
JA
1176}
1177
b41e9852 1178static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1d7bb1d5
JA
1179{
1180 if (waitqueue_active(&ctx->wait))
1181 wake_up(&ctx->wait);
1182 if (waitqueue_active(&ctx->sqo_wait))
1183 wake_up(&ctx->sqo_wait);
b41e9852 1184 if (io_should_trigger_evfd(ctx))
1d7bb1d5
JA
1185 eventfd_signal(ctx->cq_ev_fd, 1);
1186}
1187
c4a2ed72
JA
1188/* Returns true if there are no backlogged entries after the flush */
1189static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1d7bb1d5
JA
1190{
1191 struct io_rings *rings = ctx->rings;
1192 struct io_uring_cqe *cqe;
1193 struct io_kiocb *req;
1194 unsigned long flags;
1195 LIST_HEAD(list);
1196
1197 if (!force) {
1198 if (list_empty_careful(&ctx->cq_overflow_list))
c4a2ed72 1199 return true;
1d7bb1d5
JA
1200 if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
1201 rings->cq_ring_entries))
c4a2ed72 1202 return false;
1d7bb1d5
JA
1203 }
1204
1205 spin_lock_irqsave(&ctx->completion_lock, flags);
1206
1207 /* if force is set, the ring is going away. always drop after that */
1208 if (force)
69b3e546 1209 ctx->cq_overflow_flushed = 1;
1d7bb1d5 1210
c4a2ed72 1211 cqe = NULL;
1d7bb1d5
JA
1212 while (!list_empty(&ctx->cq_overflow_list)) {
1213 cqe = io_get_cqring(ctx);
1214 if (!cqe && !force)
1215 break;
1216
1217 req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
1218 list);
1219 list_move(&req->list, &list);
2ca10259 1220 req->flags &= ~REQ_F_OVERFLOW;
1d7bb1d5
JA
1221 if (cqe) {
1222 WRITE_ONCE(cqe->user_data, req->user_data);
1223 WRITE_ONCE(cqe->res, req->result);
bcda7baa 1224 WRITE_ONCE(cqe->flags, req->cflags);
1d7bb1d5
JA
1225 } else {
1226 WRITE_ONCE(ctx->rings->cq_overflow,
1227 atomic_inc_return(&ctx->cached_cq_overflow));
1228 }
1229 }
1230
1231 io_commit_cqring(ctx);
ad3eb2c8
JA
1232 if (cqe) {
1233 clear_bit(0, &ctx->sq_check_overflow);
1234 clear_bit(0, &ctx->cq_check_overflow);
1235 }
1d7bb1d5
JA
1236 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1237 io_cqring_ev_posted(ctx);
1238
1239 while (!list_empty(&list)) {
1240 req = list_first_entry(&list, struct io_kiocb, list);
1241 list_del(&req->list);
ec9c02ad 1242 io_put_req(req);
1d7bb1d5 1243 }
c4a2ed72
JA
1244
1245 return cqe != NULL;
1d7bb1d5
JA
1246}
1247
bcda7baa 1248static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
2b188cc1 1249{
78e19bbe 1250 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1251 struct io_uring_cqe *cqe;
1252
78e19bbe 1253 trace_io_uring_complete(ctx, req->user_data, res);
51c3ff62 1254
2b188cc1
JA
1255 /*
1256 * If we can't get a cq entry, userspace overflowed the
1257 * submission (by quite a lot). Increment the overflow count in
1258 * the ring.
1259 */
1260 cqe = io_get_cqring(ctx);
1d7bb1d5 1261 if (likely(cqe)) {
78e19bbe 1262 WRITE_ONCE(cqe->user_data, req->user_data);
2b188cc1 1263 WRITE_ONCE(cqe->res, res);
bcda7baa 1264 WRITE_ONCE(cqe->flags, cflags);
1d7bb1d5 1265 } else if (ctx->cq_overflow_flushed) {
498ccd9e
JA
1266 WRITE_ONCE(ctx->rings->cq_overflow,
1267 atomic_inc_return(&ctx->cached_cq_overflow));
1d7bb1d5 1268 } else {
ad3eb2c8
JA
1269 if (list_empty(&ctx->cq_overflow_list)) {
1270 set_bit(0, &ctx->sq_check_overflow);
1271 set_bit(0, &ctx->cq_check_overflow);
1272 }
2ca10259 1273 req->flags |= REQ_F_OVERFLOW;
1d7bb1d5
JA
1274 refcount_inc(&req->refs);
1275 req->result = res;
bcda7baa 1276 req->cflags = cflags;
1d7bb1d5 1277 list_add_tail(&req->list, &ctx->cq_overflow_list);
2b188cc1
JA
1278 }
1279}
1280
bcda7baa
JA
1281static void io_cqring_fill_event(struct io_kiocb *req, long res)
1282{
1283 __io_cqring_fill_event(req, res, 0);
1284}
1285
1286static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
2b188cc1 1287{
78e19bbe 1288 struct io_ring_ctx *ctx = req->ctx;
2b188cc1
JA
1289 unsigned long flags;
1290
1291 spin_lock_irqsave(&ctx->completion_lock, flags);
bcda7baa 1292 __io_cqring_fill_event(req, res, cflags);
2b188cc1
JA
1293 io_commit_cqring(ctx);
1294 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1295
8c838788 1296 io_cqring_ev_posted(ctx);
2b188cc1
JA
1297}
1298
bcda7baa
JA
1299static void io_cqring_add_event(struct io_kiocb *req, long res)
1300{
1301 __io_cqring_add_event(req, res, 0);
1302}
1303
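/*
 * Descriptive note: ctx->fallback_req is a single spare request reserved at
 * ring setup for when normal allocation fails.  Bit 0 of the pointer doubles
 * as an "in use" lock; it is taken with test_and_set_bit_lock() in
 * io_get_fallback_req() and released with clear_bit_unlock() in
 * __io_free_req(), which is why the comparison below masks it off.
 */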
0ddf92e8
JA
1304static inline bool io_is_fallback_req(struct io_kiocb *req)
1305{
1306 return req == (struct io_kiocb *)
1307 ((unsigned long) req->ctx->fallback_req & ~1UL);
1308}
1309
1310static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
1311{
1312 struct io_kiocb *req;
1313
1314 req = ctx->fallback_req;
dd461af6 1315 if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
0ddf92e8
JA
1316 return req;
1317
1318 return NULL;
1319}
1320
0553b8bd
PB
1321static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
1322 struct io_submit_state *state)
2b188cc1 1323{
fd6fab2c 1324 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
2b188cc1
JA
1325 struct io_kiocb *req;
1326
2579f913 1327 if (!state) {
fd6fab2c 1328 req = kmem_cache_alloc(req_cachep, gfp);
2579f913 1329 if (unlikely(!req))
0ddf92e8 1330 goto fallback;
2579f913
JA
1331 } else if (!state->free_reqs) {
1332 size_t sz;
1333 int ret;
1334
1335 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
fd6fab2c
JA
1336 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
1337
1338 /*
1339 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1340 * retry single alloc to be on the safe side.
1341 */
1342 if (unlikely(ret <= 0)) {
1343 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1344 if (!state->reqs[0])
0ddf92e8 1345 goto fallback;
fd6fab2c
JA
1346 ret = 1;
1347 }
2579f913 1348 state->free_reqs = ret - 1;
6c8a3134 1349 req = state->reqs[ret - 1];
2579f913 1350 } else {
2579f913 1351 state->free_reqs--;
6c8a3134 1352 req = state->reqs[state->free_reqs];
2b188cc1
JA
1353 }
1354
2579f913 1355 return req;
0ddf92e8 1356fallback:
0553b8bd 1357 return io_get_fallback_req(ctx);
2b188cc1
JA
1358}
1359
8da11c19
PB
1360static inline void io_put_file(struct io_kiocb *req, struct file *file,
1361 bool fixed)
1362{
1363 if (fixed)
05589553 1364 percpu_ref_put(req->fixed_file_refs);
8da11c19
PB
1365 else
1366 fput(file);
1367}
1368
c6ca97b3 1369static void __io_req_aux_free(struct io_kiocb *req)
2b188cc1 1370{
929a3af9
PB
1371 if (req->flags & REQ_F_NEED_CLEANUP)
1372 io_cleanup_req(req);
1373
96fd84d8 1374 kfree(req->io);
8da11c19
PB
1375 if (req->file)
1376 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
3537b6a7
JA
1377 if (req->task)
1378 put_task_struct(req->task);
cccf0ee8
JA
1379
1380 io_req_work_drop_env(req);
def596e9
JA
1381}
1382
9e645e11 1383static void __io_free_req(struct io_kiocb *req)
2b188cc1 1384{
c6ca97b3 1385 __io_req_aux_free(req);
fcb323cc 1386
fcb323cc 1387 if (req->flags & REQ_F_INFLIGHT) {
c6ca97b3 1388 struct io_ring_ctx *ctx = req->ctx;
fcb323cc
JA
1389 unsigned long flags;
1390
1391 spin_lock_irqsave(&ctx->inflight_lock, flags);
1392 list_del(&req->inflight_entry);
1393 if (waitqueue_active(&ctx->inflight_wait))
1394 wake_up(&ctx->inflight_wait);
1395 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1396 }
2b85edfc
PB
1397
1398 percpu_ref_put(&req->ctx->refs);
b1e50e54
PB
1399 if (likely(!io_is_fallback_req(req)))
1400 kmem_cache_free(req_cachep, req);
1401 else
dd461af6 1402 clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
e65ef56d
JA
1403}
1404
c6ca97b3
JA
1405struct req_batch {
1406 void *reqs[IO_IOPOLL_BATCH];
1407 int to_free;
1408 int need_iter;
1409};
1410
1411static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
1412{
1413 if (!rb->to_free)
1414 return;
1415 if (rb->need_iter) {
1416 int i, inflight = 0;
1417 unsigned long flags;
1418
1419 for (i = 0; i < rb->to_free; i++) {
1420 struct io_kiocb *req = rb->reqs[i];
1421
10fef4be 1422 if (req->flags & REQ_F_FIXED_FILE) {
c6ca97b3 1423 req->file = NULL;
05589553 1424 percpu_ref_put(req->fixed_file_refs);
10fef4be 1425 }
c6ca97b3
JA
1426 if (req->flags & REQ_F_INFLIGHT)
1427 inflight++;
c6ca97b3
JA
1428 __io_req_aux_free(req);
1429 }
1430 if (!inflight)
1431 goto do_free;
1432
1433 spin_lock_irqsave(&ctx->inflight_lock, flags);
1434 for (i = 0; i < rb->to_free; i++) {
1435 struct io_kiocb *req = rb->reqs[i];
1436
10fef4be 1437 if (req->flags & REQ_F_INFLIGHT) {
c6ca97b3
JA
1438 list_del(&req->inflight_entry);
1439 if (!--inflight)
1440 break;
1441 }
1442 }
1443 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1444
1445 if (waitqueue_active(&ctx->inflight_wait))
1446 wake_up(&ctx->inflight_wait);
1447 }
1448do_free:
1449 kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
1450 percpu_ref_put_many(&ctx->refs, rb->to_free);
c6ca97b3 1451 rb->to_free = rb->need_iter = 0;
e65ef56d
JA
1452}
1453
a197f664 1454static bool io_link_cancel_timeout(struct io_kiocb *req)
2665abfd 1455{
a197f664 1456 struct io_ring_ctx *ctx = req->ctx;
2665abfd
JA
1457 int ret;
1458
2d28390a 1459 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
2665abfd 1460 if (ret != -1) {
78e19bbe 1461 io_cqring_fill_event(req, -ECANCELED);
2665abfd 1462 io_commit_cqring(ctx);
dea3b49c 1463 req->flags &= ~REQ_F_LINK_HEAD;
ec9c02ad 1464 io_put_req(req);
2665abfd
JA
1465 return true;
1466 }
1467
1468 return false;
e65ef56d
JA
1469}
1470
ba816ad6 1471static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
9e645e11 1472{
2665abfd 1473 struct io_ring_ctx *ctx = req->ctx;
2665abfd 1474 bool wake_ev = false;
9e645e11 1475
4d7dd462
JA
1476 /* Already got next link */
1477 if (req->flags & REQ_F_LINK_NEXT)
1478 return;
1479
9e645e11
JA
1480 /*
1481 * The list should never be empty when we are called here, but it could
1482 * potentially happen if the chain is messed up, so check to be on the
1483 * safe side.
1484 */
4493233e
PB
1485 while (!list_empty(&req->link_list)) {
1486 struct io_kiocb *nxt = list_first_entry(&req->link_list,
1487 struct io_kiocb, link_list);
94ae5e77 1488
4493233e
PB
1489 if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
1490 (nxt->flags & REQ_F_TIMEOUT))) {
1491 list_del_init(&nxt->link_list);
94ae5e77 1492 wake_ev |= io_link_cancel_timeout(nxt);
94ae5e77
JA
1493 req->flags &= ~REQ_F_LINK_TIMEOUT;
1494 continue;
1495 }
9e645e11 1496
4493233e
PB
1497 list_del_init(&req->link_list);
1498 if (!list_empty(&nxt->link_list))
dea3b49c 1499 nxt->flags |= REQ_F_LINK_HEAD;
b18fdf71 1500 *nxtptr = nxt;
94ae5e77 1501 break;
9e645e11 1502 }
2665abfd 1503
4d7dd462 1504 req->flags |= REQ_F_LINK_NEXT;
2665abfd
JA
1505 if (wake_ev)
1506 io_cqring_ev_posted(ctx);
9e645e11
JA
1507}
1508
1509/*
dea3b49c 1510 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
9e645e11
JA
1511 */
1512static void io_fail_links(struct io_kiocb *req)
1513{
2665abfd 1514 struct io_ring_ctx *ctx = req->ctx;
2665abfd
JA
1515 unsigned long flags;
1516
1517 spin_lock_irqsave(&ctx->completion_lock, flags);
9e645e11
JA
1518
1519 while (!list_empty(&req->link_list)) {
4493233e
PB
1520 struct io_kiocb *link = list_first_entry(&req->link_list,
1521 struct io_kiocb, link_list);
9e645e11 1522
4493233e 1523 list_del_init(&link->link_list);
c826bd7a 1524 trace_io_uring_fail_link(req, link);
2665abfd
JA
1525
1526 if ((req->flags & REQ_F_LINK_TIMEOUT) &&
d625c6ee 1527 link->opcode == IORING_OP_LINK_TIMEOUT) {
a197f664 1528 io_link_cancel_timeout(link);
2665abfd 1529 } else {
78e19bbe 1530 io_cqring_fill_event(link, -ECANCELED);
978db57e 1531 __io_double_put_req(link);
2665abfd 1532 }
5d960724 1533 req->flags &= ~REQ_F_LINK_TIMEOUT;
9e645e11 1534 }
2665abfd
JA
1535
1536 io_commit_cqring(ctx);
1537 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1538 io_cqring_ev_posted(ctx);
9e645e11
JA
1539}
1540
4d7dd462 1541static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
9e645e11 1542{
dea3b49c 1543 if (likely(!(req->flags & REQ_F_LINK_HEAD)))
2665abfd 1544 return;
2665abfd 1545
9e645e11
JA
1546 /*
1547 * If LINK is set, we have dependent requests in this chain. If we
1548 * didn't fail this request, queue the first one up, moving any other
1549 * dependencies to the next request. In case of failure, fail the rest
1550 * of the chain.
1551 */
2665abfd
JA
1552 if (req->flags & REQ_F_FAIL_LINK) {
1553 io_fail_links(req);
7c9e7f0f
JA
1554 } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
1555 REQ_F_LINK_TIMEOUT) {
2665abfd
JA
1556 struct io_ring_ctx *ctx = req->ctx;
1557 unsigned long flags;
1558
1559 /*
1560 * If this is a timeout link, we could be racing with the
1561 * timeout timer. Grab the completion lock for this case to
7c9e7f0f 1562 * protect against that.
2665abfd
JA
1563 */
1564 spin_lock_irqsave(&ctx->completion_lock, flags);
1565 io_req_link_next(req, nxt);
1566 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1567 } else {
1568 io_req_link_next(req, nxt);
9e645e11 1569 }
4d7dd462 1570}
9e645e11 1571
c69f8dbe
JL
1572static void io_free_req(struct io_kiocb *req)
1573{
944e58bf
PB
1574 struct io_kiocb *nxt = NULL;
1575
1576 io_req_find_next(req, &nxt);
70cf9f32 1577 __io_free_req(req);
944e58bf
PB
1578
1579 if (nxt)
1580 io_queue_async_work(nxt);
c69f8dbe
JL
1581}
1582
7a743e22
PB
1583static void io_link_work_cb(struct io_wq_work **workptr)
1584{
18a542ff
PB
1585 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
1586 struct io_kiocb *link;
7a743e22 1587
18a542ff 1588 link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
7a743e22
PB
1589 io_queue_linked_timeout(link);
1590 io_wq_submit_work(workptr);
1591}
1592
1593static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
1594{
1595 struct io_kiocb *link;
8766dd51
PB
1596 const struct io_op_def *def = &io_op_defs[nxt->opcode];
1597
1598 if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
1599 io_wq_hash_work(&nxt->work, file_inode(nxt->file));
7a743e22
PB
1600
1601 *workptr = &nxt->work;
1602 link = io_prep_linked_timeout(nxt);
18a542ff 1603 if (link)
7a743e22 1604 nxt->work.func = io_link_work_cb;
7a743e22
PB
1605}
1606
ba816ad6
JA
1607/*
1608 * Drop reference to request, return next in chain (if there is one) if this
1609 * was the last reference to this request.
1610 */
f9bd67f6 1611__attribute__((nonnull))
ec9c02ad 1612static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
e65ef56d 1613{
2a44f467
JA
1614 if (refcount_dec_and_test(&req->refs)) {
1615 io_req_find_next(req, nxtptr);
4d7dd462 1616 __io_free_req(req);
2a44f467 1617 }
2b188cc1
JA
1618}
1619
e65ef56d
JA
1620static void io_put_req(struct io_kiocb *req)
1621{
1622 if (refcount_dec_and_test(&req->refs))
1623 io_free_req(req);
2b188cc1
JA
1624}
1625
e9fd9396
PB
1626static void io_steal_work(struct io_kiocb *req,
1627 struct io_wq_work **workptr)
7a743e22
PB
1628{
1629 /*
1630 * It's in an io-wq worker, so there should always be at least
1631 * one reference, which will be dropped in io_put_work() just
1632 * after the current handler returns.
1633 *
1634 * It also means, that if the counter dropped to 1, then there is
1635 * no asynchronous users left, so it's safe to steal the next work.
1636 */
7a743e22
PB
1637 if (refcount_read(&req->refs) == 1) {
1638 struct io_kiocb *nxt = NULL;
1639
1640 io_req_find_next(req, &nxt);
1641 if (nxt)
1642 io_wq_assign_next(workptr, nxt);
1643 }
1644}
1645
978db57e
JA
1646/*
1647 * Must only be used if we don't need to care about links, usually from
1648 * within the completion handling itself.
1649 */
1650static void __io_double_put_req(struct io_kiocb *req)
78e19bbe
JA
1651{
1652 /* drop both submit and complete references */
1653 if (refcount_sub_and_test(2, &req->refs))
1654 __io_free_req(req);
1655}
1656
978db57e
JA
1657static void io_double_put_req(struct io_kiocb *req)
1658{
1659 /* drop both submit and complete references */
1660 if (refcount_sub_and_test(2, &req->refs))
1661 io_free_req(req);
1662}
1663
1d7bb1d5 1664static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
a3a0e43f 1665{
84f97dc2
JA
1666 struct io_rings *rings = ctx->rings;
1667
ad3eb2c8
JA
1668 if (test_bit(0, &ctx->cq_check_overflow)) {
1669 /*
1670 * noflush == true is from the waitqueue handler, just ensure
1671 * we wake up the task, and the next invocation will flush the
1672 * entries. We cannot safely do it from here.
1673 */
1674 if (noflush && !list_empty(&ctx->cq_overflow_list))
1675 return -1U;
1d7bb1d5 1676
ad3eb2c8
JA
1677 io_cqring_overflow_flush(ctx, false);
1678 }
1d7bb1d5 1679
a3a0e43f
JA
1680 /* See comment at the top of this file */
1681 smp_rmb();
ad3eb2c8 1682 return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
a3a0e43f
JA
1683}
1684
fb5ccc98
PB
1685static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
1686{
1687 struct io_rings *rings = ctx->rings;
1688
1689 /* make sure SQ entry isn't read before tail */
1690 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
1691}
1692
8237e045 1693static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
e94f141b 1694{
dea3b49c 1695 if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
c6ca97b3 1696 return false;
e94f141b 1697
c6ca97b3
JA
1698 if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
1699 rb->need_iter++;
1700
1701 rb->reqs[rb->to_free++] = req;
1702 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
1703 io_free_req_many(req->ctx, rb);
1704 return true;
e94f141b
JA
1705}
1706
bcda7baa
JA
1707static int io_put_kbuf(struct io_kiocb *req)
1708{
4d954c25 1709 struct io_buffer *kbuf;
bcda7baa
JA
1710 int cflags;
1711
4d954c25 1712 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
bcda7baa
JA
1713 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
1714 cflags |= IORING_CQE_F_BUFFER;
1715 req->rw.addr = 0;
1716 kfree(kbuf);
1717 return cflags;
1718}
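/*
 * On the application side (sketch only, not code from this file), the
 * selected buffer id travels back in the CQE flags and can be recovered as
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */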
1719
def596e9
JA
1720/*
1721 * Find and free completed poll iocbs
1722 */
1723static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
1724 struct list_head *done)
1725{
8237e045 1726 struct req_batch rb;
def596e9 1727 struct io_kiocb *req;
def596e9 1728
c6ca97b3 1729 rb.to_free = rb.need_iter = 0;
def596e9 1730 while (!list_empty(done)) {
bcda7baa
JA
1731 int cflags = 0;
1732
def596e9
JA
1733 req = list_first_entry(done, struct io_kiocb, list);
1734 list_del(&req->list);
1735
bcda7baa
JA
1736 if (req->flags & REQ_F_BUFFER_SELECTED)
1737 cflags = io_put_kbuf(req);
1738
1739 __io_cqring_fill_event(req, req->result, cflags);
def596e9
JA
1740 (*nr_events)++;
1741
8237e045
JA
1742 if (refcount_dec_and_test(&req->refs) &&
1743 !io_req_multi_free(&rb, req))
1744 io_free_req(req);
def596e9 1745 }
def596e9 1746
09bb8394 1747 io_commit_cqring(ctx);
32b2244a
XW
1748 if (ctx->flags & IORING_SETUP_SQPOLL)
1749 io_cqring_ev_posted(ctx);
8237e045 1750 io_free_req_many(ctx, &rb);
def596e9
JA
1751}
1752
581f9810
BM
1753static void io_iopoll_queue(struct list_head *again)
1754{
1755 struct io_kiocb *req;
1756
1757 do {
1758 req = list_first_entry(again, struct io_kiocb, list);
1759 list_del(&req->list);
1760 refcount_inc(&req->refs);
1761 io_queue_async_work(req);
1762 } while (!list_empty(again));
1763}
1764
def596e9
JA
1765static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
1766 long min)
1767{
1768 struct io_kiocb *req, *tmp;
1769 LIST_HEAD(done);
581f9810 1770 LIST_HEAD(again);
def596e9
JA
1771 bool spin;
1772 int ret;
1773
1774 /*
1775 * Only spin for completions if we don't have multiple devices hanging
1776 * off our complete list, and we're under the requested amount.
1777 */
1778 spin = !ctx->poll_multi_file && *nr_events < min;
1779
1780 ret = 0;
1781 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
9adbd45d 1782 struct kiocb *kiocb = &req->rw.kiocb;
def596e9
JA
1783
1784 /*
581f9810
BM
1785 * Move completed and retryable entries to our local lists.
1786 * If we find a request that requires polling, break out
1787 * and complete those lists first, if we have entries there.
def596e9
JA
1788 */
1789 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
1790 list_move_tail(&req->list, &done);
1791 continue;
1792 }
1793 if (!list_empty(&done))
1794 break;
1795
581f9810
BM
1796 if (req->result == -EAGAIN) {
1797 list_move_tail(&req->list, &again);
1798 continue;
1799 }
1800 if (!list_empty(&again))
1801 break;
1802
def596e9
JA
1803 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
1804 if (ret < 0)
1805 break;
1806
1807 if (ret && spin)
1808 spin = false;
1809 ret = 0;
1810 }
1811
1812 if (!list_empty(&done))
1813 io_iopoll_complete(ctx, nr_events, &done);
1814
581f9810
BM
1815 if (!list_empty(&again))
1816 io_iopoll_queue(&again);
1817
def596e9
JA
1818 return ret;
1819}
1820
1821/*
d195a66e 1822 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
1823 * non-spinning poll check - we'll still enter the driver poll loop, but only
1824 * as a non-spinning completion check.
1825 */
1826static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
1827 long min)
1828{
08f5439f 1829 while (!list_empty(&ctx->poll_list) && !need_resched()) {
def596e9
JA
1830 int ret;
1831
1832 ret = io_do_iopoll(ctx, nr_events, min);
1833 if (ret < 0)
1834 return ret;
1835 if (!min || *nr_events >= min)
1836 return 0;
1837 }
1838
1839 return 1;
1840}
1841
1842/*
1843 * We can't just wait for polled events to come to us, we have to actively
1844 * find and complete them.
1845 */
1846static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
1847{
1848 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1849 return;
1850
1851 mutex_lock(&ctx->uring_lock);
1852 while (!list_empty(&ctx->poll_list)) {
1853 unsigned int nr_events = 0;
1854
1855 io_iopoll_getevents(ctx, &nr_events, 1);
08f5439f
JA
1856
1857 /*
1858 * Ensure we allow local-to-the-cpu processing to take place,
1859 * in this case we need to ensure that we reap all events.
1860 */
1861 cond_resched();
def596e9
JA
1862 }
1863 mutex_unlock(&ctx->uring_lock);
1864}
1865
c7849be9
XW
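/*
 * Reap IOPOLL completions: loop on io_iopoll_getevents() until at
 * least 'min' events have been found, dropping the uring_lock
 * periodically so punted submissions can make progress.
 */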
1866static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1867 long min)
def596e9 1868{
2b2ed975 1869 int iters = 0, ret = 0;
500f9fba 1870
c7849be9
XW
1871 /*
1872 * We disallow the app entering submit/complete with polling, but we
1873 * still need to lock the ring to prevent racing with polled issue
1874 * that got punted to a workqueue.
1875 */
1876 mutex_lock(&ctx->uring_lock);
def596e9
JA
1877 do {
1878 int tmin = 0;
1879
a3a0e43f
JA
1880 /*
1881 * Don't enter poll loop if we already have events pending.
1882 * If we do, we can potentially be spinning for commands that
 1883	 * already triggered a CQE (e.g. in error).
1884 */
1d7bb1d5 1885 if (io_cqring_events(ctx, false))
a3a0e43f
JA
1886 break;
1887
500f9fba
JA
1888 /*
1889 * If a submit got punted to a workqueue, we can have the
1890 * application entering polling for a command before it gets
1891 * issued. That app will hold the uring_lock for the duration
1892 * of the poll right here, so we need to take a breather every
1893 * now and then to ensure that the issue has a chance to add
1894 * the poll to the issued list. Otherwise we can spin here
1895 * forever, while the workqueue is stuck trying to acquire the
1896 * very same mutex.
1897 */
1898 if (!(++iters & 7)) {
1899 mutex_unlock(&ctx->uring_lock);
1900 mutex_lock(&ctx->uring_lock);
1901 }
1902
def596e9
JA
1903 if (*nr_events < min)
1904 tmin = min - *nr_events;
1905
1906 ret = io_iopoll_getevents(ctx, nr_events, tmin);
1907 if (ret <= 0)
1908 break;
1909 ret = 0;
1910 } while (min && !*nr_events && !need_resched());
1911
500f9fba 1912 mutex_unlock(&ctx->uring_lock);
def596e9
JA
1913 return ret;
1914}
1915
491381ce 1916static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 1917{
491381ce
JA
1918 /*
1919 * Tell lockdep we inherited freeze protection from submission
1920 * thread.
1921 */
1922 if (req->flags & REQ_F_ISREG) {
1923 struct inode *inode = file_inode(req->file);
2b188cc1 1924
491381ce 1925 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 1926 }
491381ce 1927 file_end_write(req->file);
2b188cc1
JA
1928}
1929
4e88d6e7
JA
1930static inline void req_set_fail_links(struct io_kiocb *req)
1931{
1932 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1933 req->flags |= REQ_F_FAIL_LINK;
1934}
1935
ba816ad6 1936static void io_complete_rw_common(struct kiocb *kiocb, long res)
2b188cc1 1937{
9adbd45d 1938 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
bcda7baa 1939 int cflags = 0;
2b188cc1 1940
491381ce
JA
1941 if (kiocb->ki_flags & IOCB_WRITE)
1942 kiocb_end_write(req);
2b188cc1 1943
4e88d6e7
JA
1944 if (res != req->result)
1945 req_set_fail_links(req);
bcda7baa
JA
1946 if (req->flags & REQ_F_BUFFER_SELECTED)
1947 cflags = io_put_kbuf(req);
1948 __io_cqring_add_event(req, res, cflags);
ba816ad6
JA
1949}
1950
1951static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
1952{
9adbd45d 1953 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6
JA
1954
1955 io_complete_rw_common(kiocb, res);
e65ef56d 1956 io_put_req(req);
2b188cc1
JA
1957}
1958
def596e9
JA
1959static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
1960{
9adbd45d 1961 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 1962
491381ce
JA
1963 if (kiocb->ki_flags & IOCB_WRITE)
1964 kiocb_end_write(req);
def596e9 1965
4e88d6e7
JA
1966 if (res != req->result)
1967 req_set_fail_links(req);
9e645e11 1968 req->result = res;
def596e9
JA
1969 if (res != -EAGAIN)
1970 req->flags |= REQ_F_IOPOLL_COMPLETED;
1971}
1972
1973/*
1974 * After the iocb has been issued, it's safe to be found on the poll list.
1975 * Adding the kiocb to the list AFTER submission ensures that we don't
1976 * find it from a io_iopoll_getevents() thread before the issuer is done
1977 * accessing the kiocb cookie.
1978 */
1979static void io_iopoll_req_issued(struct io_kiocb *req)
1980{
1981 struct io_ring_ctx *ctx = req->ctx;
1982
1983 /*
1984 * Track whether we have multiple files in our lists. This will impact
1985 * how we do polling eventually, not spinning if we're on potentially
1986 * different devices.
1987 */
1988 if (list_empty(&ctx->poll_list)) {
1989 ctx->poll_multi_file = false;
1990 } else if (!ctx->poll_multi_file) {
1991 struct io_kiocb *list_req;
1992
1993 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
1994 list);
9adbd45d 1995 if (list_req->file != req->file)
def596e9
JA
1996 ctx->poll_multi_file = true;
1997 }
1998
1999 /*
2000 * For fast devices, IO may have already completed. If it has, add
2001 * it to the front so we find it first.
2002 */
2003 if (req->flags & REQ_F_IOPOLL_COMPLETED)
2004 list_add(&req->list, &ctx->poll_list);
2005 else
2006 list_add_tail(&req->list, &ctx->poll_list);
bdcd3eab
XW
2007
2008 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2009 wq_has_sleeper(&ctx->sqo_wait))
2010 wake_up(&ctx->sqo_wait);
def596e9
JA
2011}
2012
9f13c35b 2013static void __io_state_file_put(struct io_submit_state *state)
9a56a232 2014{
9f13c35b 2015 int diff = state->has_refs - state->used_refs;
9a56a232 2016
9f13c35b
PB
2017 if (diff)
2018 fput_many(state->file, diff);
2019 state->file = NULL;
2020}
2021
2022static inline void io_state_file_put(struct io_submit_state *state)
2023{
2024 if (state->file)
2025 __io_state_file_put(state);
9a56a232
JA
2026}
2027
2028/*
2029 * Get as many references to a file as we have IOs left in this submission,
2030 * assuming most submissions are for one file, or at least that each file
2031 * has more than one submission.
2032 */
8da11c19 2033static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
2034{
2035 if (!state)
2036 return fget(fd);
2037
2038 if (state->file) {
2039 if (state->fd == fd) {
2040 state->used_refs++;
2041 state->ios_left--;
2042 return state->file;
2043 }
9f13c35b 2044 __io_state_file_put(state);
9a56a232
JA
2045 }
2046 state->file = fget_many(fd, state->ios_left);
2047 if (!state->file)
2048 return NULL;
2049
2050 state->fd = fd;
2051 state->has_refs = state->ios_left;
2052 state->used_refs = 1;
2053 state->ios_left--;
2054 return state->file;
2055}
2056
2b188cc1
JA
2057/*
2058 * If we tracked the file through the SCM inflight mechanism, we could support
2059 * any file. For now, just ensure that anything potentially problematic is done
2060 * inline.
2061 */
af197f50 2062static bool io_file_supports_async(struct file *file, int rw)
2b188cc1
JA
2063{
2064 umode_t mode = file_inode(file)->i_mode;
2065
10d59345 2066 if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
2b188cc1
JA
2067 return true;
2068 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
2069 return true;
2070
af197f50
JA
2071 if (!(file->f_mode & FMODE_NOWAIT))
2072 return false;
2073
2074 if (rw == READ)
2075 return file->f_op->read_iter != NULL;
2076
2077 return file->f_op->write_iter != NULL;
2b188cc1
JA
2078}
2079
3529d8c2
JA
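/*
 * Common read/write preparation: pull the offset, ioprio and rw flags
 * out of the SQE into the kiocb, honor RWF_NOWAIT/O_NONBLOCK, and pick
 * the completion handler depending on whether the ring is in IOPOLL
 * mode.
 */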
2080static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2081 bool force_nonblock)
2b188cc1 2082{
def596e9 2083 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2084 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
2085 unsigned ioprio;
2086 int ret;
2b188cc1 2087
491381ce
JA
2088 if (S_ISREG(file_inode(req->file)->i_mode))
2089 req->flags |= REQ_F_ISREG;
2090
2b188cc1 2091 kiocb->ki_pos = READ_ONCE(sqe->off);
ba04291e
JA
2092 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2093 req->flags |= REQ_F_CUR_POS;
2094 kiocb->ki_pos = req->file->f_pos;
2095 }
2b188cc1 2096 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2097 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2098 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2099 if (unlikely(ret))
2100 return ret;
2b188cc1
JA
2101
2102 ioprio = READ_ONCE(sqe->ioprio);
2103 if (ioprio) {
2104 ret = ioprio_check_cap(ioprio);
2105 if (ret)
09bb8394 2106 return ret;
2b188cc1
JA
2107
2108 kiocb->ki_ioprio = ioprio;
2109 } else
2110 kiocb->ki_ioprio = get_current_ioprio();
2111
8449eeda 2112 /* don't allow async punt if RWF_NOWAIT was requested */
491381ce
JA
2113 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
2114 (req->file->f_flags & O_NONBLOCK))
8449eeda
SB
2115 req->flags |= REQ_F_NOWAIT;
2116
2117 if (force_nonblock)
2b188cc1 2118 kiocb->ki_flags |= IOCB_NOWAIT;
8449eeda 2119
def596e9 2120 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2121 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2122 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2123 return -EOPNOTSUPP;
2b188cc1 2124
def596e9
JA
2125 kiocb->ki_flags |= IOCB_HIPRI;
2126 kiocb->ki_complete = io_complete_rw_iopoll;
6873e0bd 2127 req->result = 0;
def596e9 2128 } else {
09bb8394
JA
2129 if (kiocb->ki_flags & IOCB_HIPRI)
2130 return -EINVAL;
def596e9
JA
2131 kiocb->ki_complete = io_complete_rw;
2132 }
9adbd45d 2133
3529d8c2
JA
2134 req->rw.addr = READ_ONCE(sqe->addr);
2135 req->rw.len = READ_ONCE(sqe->len);
bcda7baa 2136 /* we own ->private, reuse it for the buffer index / buffer ID */
9adbd45d 2137 req->rw.kiocb.private = (void *) (unsigned long)
3529d8c2 2138 READ_ONCE(sqe->buf_index);
2b188cc1 2139 return 0;
2b188cc1
JA
2140}
2141
2142static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2143{
2144 switch (ret) {
2145 case -EIOCBQUEUED:
2146 break;
2147 case -ERESTARTSYS:
2148 case -ERESTARTNOINTR:
2149 case -ERESTARTNOHAND:
2150 case -ERESTART_RESTARTBLOCK:
2151 /*
2152 * We can't just restart the syscall, since previously
2153 * submitted sqes may already be in progress. Just fail this
2154 * IO with EINTR.
2155 */
2156 ret = -EINTR;
2157 /* fall through */
2158 default:
2159 kiocb->ki_complete(kiocb, ret, 0);
2160 }
2161}
2162
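/*
 * Finish a read/write: propagate the updated position back to the file
 * for requests using the current file position (REQ_F_CUR_POS), then
 * complete either inline or through io_rw_done().
 */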
014db007 2163static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
ba816ad6 2164{
ba04291e
JA
2165 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2166
2167 if (req->flags & REQ_F_CUR_POS)
2168 req->file->f_pos = kiocb->ki_pos;
bcaec089 2169 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
014db007 2170 io_complete_rw(kiocb, ret, 0);
ba816ad6
JA
2171 else
2172 io_rw_done(kiocb, ret);
2173}
2174
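/*
 * Map a request that targets a registered (fixed) buffer onto a bvec
 * iterator, after checking that the requested range falls entirely
 * within the registered buffer.
 */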
9adbd45d 2175static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
7d009165 2176 struct iov_iter *iter)
edafccee 2177{
9adbd45d
JA
2178 struct io_ring_ctx *ctx = req->ctx;
2179 size_t len = req->rw.len;
edafccee
JA
2180 struct io_mapped_ubuf *imu;
2181 unsigned index, buf_index;
2182 size_t offset;
2183 u64 buf_addr;
2184
2185 /* attempt to use fixed buffers without having provided iovecs */
2186 if (unlikely(!ctx->user_bufs))
2187 return -EFAULT;
2188
9adbd45d 2189 buf_index = (unsigned long) req->rw.kiocb.private;
edafccee
JA
2190 if (unlikely(buf_index >= ctx->nr_user_bufs))
2191 return -EFAULT;
2192
2193 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2194 imu = &ctx->user_bufs[index];
9adbd45d 2195 buf_addr = req->rw.addr;
edafccee
JA
2196
2197 /* overflow */
2198 if (buf_addr + len < buf_addr)
2199 return -EFAULT;
2200 /* not inside the mapped region */
2201 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2202 return -EFAULT;
2203
2204 /*
2205 * May not be a start of buffer, set size appropriately
2206 * and advance us to the beginning.
2207 */
2208 offset = buf_addr - imu->ubuf;
2209 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
2210
2211 if (offset) {
2212 /*
2213 * Don't use iov_iter_advance() here, as it's really slow for
2214 * using the latter parts of a big fixed buffer - it iterates
2215 * over each segment manually. We can cheat a bit here, because
2216 * we know that:
2217 *
2218 * 1) it's a BVEC iter, we set it up
2219 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2220 * first and last bvec
2221 *
2222 * So just find our index, and adjust the iterator afterwards.
2223 * If the offset is within the first bvec (or the whole first
 2224		 * bvec), just use iov_iter_advance(). This makes it easier
2225 * since we can just skip the first segment, which may not
2226 * be PAGE_SIZE aligned.
2227 */
2228 const struct bio_vec *bvec = imu->bvec;
2229
2230 if (offset <= bvec->bv_len) {
2231 iov_iter_advance(iter, offset);
2232 } else {
2233 unsigned long seg_skip;
2234
2235 /* skip first vec */
2236 offset -= bvec->bv_len;
2237 seg_skip = 1 + (offset >> PAGE_SHIFT);
2238
2239 iter->bvec = bvec + seg_skip;
2240 iter->nr_segs -= seg_skip;
99c79f66 2241 iter->count -= bvec->bv_len + offset;
bd11b3a3 2242 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
2243 }
2244 }
2245
5e559561 2246 return len;
edafccee
JA
2247}
2248
bcda7baa
JA
2249static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2250{
2251 if (needs_lock)
2252 mutex_unlock(&ctx->uring_lock);
2253}
2254
2255static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2256{
2257 /*
2258 * "Normal" inline submissions always hold the uring_lock, since we
2259 * grab it from the system call. Same is true for the SQPOLL offload.
2260 * The only exception is when we've detached the request and issue it
2261 * from an async worker thread, grab the lock for that case.
2262 */
2263 if (needs_lock)
2264 mutex_lock(&ctx->uring_lock);
2265}
2266
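/*
 * Pick a buffer from the provided-buffer group 'bgid'. The group IDR
 * and buffer lists are protected by the uring_lock, which is taken
 * here if the caller doesn't already hold it.
 */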
2267static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2268 int bgid, struct io_buffer *kbuf,
2269 bool needs_lock)
2270{
2271 struct io_buffer *head;
2272
2273 if (req->flags & REQ_F_BUFFER_SELECTED)
2274 return kbuf;
2275
2276 io_ring_submit_lock(req->ctx, needs_lock);
2277
2278 lockdep_assert_held(&req->ctx->uring_lock);
2279
2280 head = idr_find(&req->ctx->io_buffer_idr, bgid);
2281 if (head) {
2282 if (!list_empty(&head->list)) {
2283 kbuf = list_last_entry(&head->list, struct io_buffer,
2284 list);
2285 list_del(&kbuf->list);
2286 } else {
2287 kbuf = head;
2288 idr_remove(&req->ctx->io_buffer_idr, bgid);
2289 }
2290 if (*len > kbuf->len)
2291 *len = kbuf->len;
2292 } else {
2293 kbuf = ERR_PTR(-ENOBUFS);
2294 }
2295
2296 io_ring_submit_unlock(req->ctx, needs_lock);
2297
2298 return kbuf;
2299}
2300
4d954c25
JA
2301static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2302 bool needs_lock)
2303{
2304 struct io_buffer *kbuf;
2305 int bgid;
2306
2307 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2308 bgid = (int) (unsigned long) req->rw.kiocb.private;
2309 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2310 if (IS_ERR(kbuf))
2311 return kbuf;
2312 req->rw.addr = (u64) (unsigned long) kbuf;
2313 req->flags |= REQ_F_BUFFER_SELECTED;
2314 return u64_to_user_ptr(kbuf->addr);
2315}
2316
2317#ifdef CONFIG_COMPAT
2318static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2319 bool needs_lock)
2320{
2321 struct compat_iovec __user *uiov;
2322 compat_ssize_t clen;
2323 void __user *buf;
2324 ssize_t len;
2325
2326 uiov = u64_to_user_ptr(req->rw.addr);
2327 if (!access_ok(uiov, sizeof(*uiov)))
2328 return -EFAULT;
2329 if (__get_user(clen, &uiov->iov_len))
2330 return -EFAULT;
2331 if (clen < 0)
2332 return -EINVAL;
2333
2334 len = clen;
2335 buf = io_rw_buffer_select(req, &len, needs_lock);
2336 if (IS_ERR(buf))
2337 return PTR_ERR(buf);
2338 iov[0].iov_base = buf;
2339 iov[0].iov_len = (compat_size_t) len;
2340 return 0;
2341}
2342#endif
2343
2344static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2345 bool needs_lock)
2346{
2347 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2348 void __user *buf;
2349 ssize_t len;
2350
2351 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2352 return -EFAULT;
2353
2354 len = iov[0].iov_len;
2355 if (len < 0)
2356 return -EINVAL;
2357 buf = io_rw_buffer_select(req, &len, needs_lock);
2358 if (IS_ERR(buf))
2359 return PTR_ERR(buf);
2360 iov[0].iov_base = buf;
2361 iov[0].iov_len = len;
2362 return 0;
2363}
2364
2365static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2366 bool needs_lock)
2367{
2368 if (req->flags & REQ_F_BUFFER_SELECTED)
2369 return 0;
2370 if (!req->rw.len)
2371 return 0;
2372 else if (req->rw.len > 1)
2373 return -EINVAL;
2374
2375#ifdef CONFIG_COMPAT
2376 if (req->ctx->compat)
2377 return io_compat_import(req, iov, needs_lock);
2378#endif
2379
2380 return __io_iov_buffer_select(req, iov, needs_lock);
2381}
2382
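/*
 * Turn the buffer description in the SQE into an iov_iter. Handles
 * fixed buffers, the single-range READ/WRITE opcodes, iovecs already
 * imported into the async context, provided-buffer selection, and the
 * plain userspace iovec array case.
 */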
cf6fd4bd 2383static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
bcda7baa
JA
2384 struct iovec **iovec, struct iov_iter *iter,
2385 bool needs_lock)
2b188cc1 2386{
9adbd45d
JA
2387 void __user *buf = u64_to_user_ptr(req->rw.addr);
2388 size_t sqe_len = req->rw.len;
4d954c25 2389 ssize_t ret;
edafccee
JA
2390 u8 opcode;
2391
d625c6ee 2392 opcode = req->opcode;
7d009165 2393 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 2394 *iovec = NULL;
9adbd45d 2395 return io_import_fixed(req, rw, iter);
edafccee 2396 }
2b188cc1 2397
bcda7baa
JA
2398 /* buffer index only valid with fixed read/write, or buffer select */
2399 if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
2400 return -EINVAL;
2401
3a6820f2 2402 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 2403 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25
JA
2404 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
2405 if (IS_ERR(buf)) {
bcda7baa 2406 *iovec = NULL;
4d954c25 2407 return PTR_ERR(buf);
bcda7baa 2408 }
3f9d6441 2409 req->rw.len = sqe_len;
bcda7baa
JA
2410 }
2411
3a6820f2
JA
2412 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2413 *iovec = NULL;
3a901598 2414 return ret < 0 ? ret : sqe_len;
3a6820f2
JA
2415 }
2416
f67676d1
JA
2417 if (req->io) {
2418 struct io_async_rw *iorw = &req->io->rw;
2419
2420 *iovec = iorw->iov;
2421 iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
2422 if (iorw->iov == iorw->fast_iov)
2423 *iovec = NULL;
2424 return iorw->size;
2425 }
2426
4d954c25
JA
2427 if (req->flags & REQ_F_BUFFER_SELECT) {
2428 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3f9d6441
JA
2429 if (!ret) {
2430 ret = (*iovec)->iov_len;
2431 iov_iter_init(iter, rw, *iovec, 1, ret);
2432 }
4d954c25
JA
2433 *iovec = NULL;
2434 return ret;
2435 }
2436
2b188cc1 2437#ifdef CONFIG_COMPAT
cf6fd4bd 2438 if (req->ctx->compat)
2b188cc1
JA
2439 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
2440 iovec, iter);
2441#endif
2442
2443 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
2444}
2445
31b51510 2446/*
32960613
JA
2447 * For files that don't have ->read_iter() and ->write_iter(), handle them
2448 * by looping over ->read() or ->write() manually.
31b51510 2449 */
32960613
JA
2450static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2451 struct iov_iter *iter)
2452{
2453 ssize_t ret = 0;
2454
2455 /*
2456 * Don't support polled IO through this interface, and we can't
2457 * support non-blocking either. For the latter, this just causes
2458 * the kiocb to be handled from an async context.
2459 */
2460 if (kiocb->ki_flags & IOCB_HIPRI)
2461 return -EOPNOTSUPP;
2462 if (kiocb->ki_flags & IOCB_NOWAIT)
2463 return -EAGAIN;
2464
2465 while (iov_iter_count(iter)) {
311ae9e1 2466 struct iovec iovec;
32960613
JA
2467 ssize_t nr;
2468
311ae9e1
PB
2469 if (!iov_iter_is_bvec(iter)) {
2470 iovec = iov_iter_iovec(iter);
2471 } else {
2472 /* fixed buffers import bvec */
2473 iovec.iov_base = kmap(iter->bvec->bv_page)
2474 + iter->iov_offset;
2475 iovec.iov_len = min(iter->count,
2476 iter->bvec->bv_len - iter->iov_offset);
2477 }
2478
32960613
JA
2479 if (rw == READ) {
2480 nr = file->f_op->read(file, iovec.iov_base,
2481 iovec.iov_len, &kiocb->ki_pos);
2482 } else {
2483 nr = file->f_op->write(file, iovec.iov_base,
2484 iovec.iov_len, &kiocb->ki_pos);
2485 }
2486
311ae9e1
PB
2487 if (iov_iter_is_bvec(iter))
2488 kunmap(iter->bvec->bv_page);
2489
32960613
JA
2490 if (nr < 0) {
2491 if (!ret)
2492 ret = nr;
2493 break;
2494 }
2495 ret += nr;
2496 if (nr != iovec.iov_len)
2497 break;
2498 iov_iter_advance(iter, nr);
2499 }
2500
2501 return ret;
2502}
2503
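/*
 * Stash the iterator state (iovec, segment count, size) in the
 * request's async context so the read/write can be retried later from
 * a worker thread.
 */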
b7bb4f7d 2504static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
f67676d1
JA
2505 struct iovec *iovec, struct iovec *fast_iov,
2506 struct iov_iter *iter)
2507{
2508 req->io->rw.nr_segs = iter->nr_segs;
2509 req->io->rw.size = io_size;
2510 req->io->rw.iov = iovec;
2511 if (!req->io->rw.iov) {
2512 req->io->rw.iov = req->io->rw.fast_iov;
45097dae
XW
2513 if (req->io->rw.iov != fast_iov)
2514 memcpy(req->io->rw.iov, fast_iov,
2515 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
2516 } else {
2517 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
2518 }
2519}
2520
3d9932a8
XW
2521static inline int __io_alloc_async_ctx(struct io_kiocb *req)
2522{
2523 req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
2524 return req->io == NULL;
2525}
2526
b7bb4f7d 2527static int io_alloc_async_ctx(struct io_kiocb *req)
f67676d1 2528{
d3656344
JA
2529 if (!io_op_defs[req->opcode].async_ctx)
2530 return 0;
3d9932a8
XW
2531
2532 return __io_alloc_async_ctx(req);
b7bb4f7d
JA
2533}
2534
b7bb4f7d
JA
2535static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2536 struct iovec *iovec, struct iovec *fast_iov,
2537 struct iov_iter *iter)
2538{
980ad263 2539 if (!io_op_defs[req->opcode].async_ctx)
74566df3 2540 return 0;
5d204bcf 2541 if (!req->io) {
3d9932a8 2542 if (__io_alloc_async_ctx(req))
5d204bcf 2543 return -ENOMEM;
b7bb4f7d 2544
5d204bcf
JA
2545 io_req_map_rw(req, io_size, iovec, fast_iov, iter);
2546 }
b7bb4f7d 2547 return 0;
f67676d1
JA
2548}
2549
3529d8c2
JA
2550static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2551 bool force_nonblock)
f67676d1 2552{
3529d8c2
JA
2553 struct io_async_ctx *io;
2554 struct iov_iter iter;
f67676d1
JA
2555 ssize_t ret;
2556
3529d8c2
JA
2557 ret = io_prep_rw(req, sqe, force_nonblock);
2558 if (ret)
2559 return ret;
f67676d1 2560
3529d8c2
JA
2561 if (unlikely(!(req->file->f_mode & FMODE_READ)))
2562 return -EBADF;
f67676d1 2563
5f798bea
PB
2564 /* either don't need iovec imported or already have it */
2565 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2
JA
2566 return 0;
2567
2568 io = req->io;
2569 io->rw.iov = io->rw.fast_iov;
2570 req->io = NULL;
bcda7baa 2571 ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
3529d8c2
JA
2572 req->io = io;
2573 if (ret < 0)
2574 return ret;
2575
2576 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2577 return 0;
f67676d1
JA
2578}
2579
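/*
 * Issue a read. If the file can't serve it without blocking under
 * force_nonblock, the iovec is copied into the async context and the
 * request is punted with -EAGAIN.
 */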
014db007 2580static int io_read(struct io_kiocb *req, bool force_nonblock)
2b188cc1
JA
2581{
2582 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 2583 struct kiocb *kiocb = &req->rw.kiocb;
2b188cc1 2584 struct iov_iter iter;
31b51510 2585 size_t iov_count;
f67676d1 2586 ssize_t io_size, ret;
2b188cc1 2587
bcda7baa 2588 ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
06b76d44
JA
2589 if (ret < 0)
2590 return ret;
2b188cc1 2591
fd6c2e4c
JA
2592 /* Ensure we clear previously set non-block flag */
2593 if (!force_nonblock)
29de5f6a 2594 kiocb->ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 2595
797f3f53 2596 req->result = 0;
f67676d1 2597 io_size = ret;
dea3b49c 2598 if (req->flags & REQ_F_LINK_HEAD)
f67676d1
JA
2599 req->result = io_size;
2600
2601 /*
2602 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2603 * we know to async punt it even if it was opened O_NONBLOCK
2604 */
af197f50 2605 if (force_nonblock && !io_file_supports_async(req->file, READ))
f67676d1 2606 goto copy_iov;
9e645e11 2607
31b51510 2608 iov_count = iov_iter_count(&iter);
9adbd45d 2609 ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
2b188cc1
JA
2610 if (!ret) {
2611 ssize_t ret2;
2612
9adbd45d
JA
2613 if (req->file->f_op->read_iter)
2614 ret2 = call_read_iter(req->file, kiocb, &iter);
32960613 2615 else
9adbd45d 2616 ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
32960613 2617
9d93a3f5 2618 /* Catch -EAGAIN return for forced non-blocking submission */
f67676d1 2619 if (!force_nonblock || ret2 != -EAGAIN) {
014db007 2620 kiocb_done(kiocb, ret2);
f67676d1
JA
2621 } else {
2622copy_iov:
b7bb4f7d 2623 ret = io_setup_async_rw(req, io_size, iovec,
f67676d1
JA
2624 inline_vecs, &iter);
2625 if (ret)
2626 goto out_free;
29de5f6a 2627 /* any defer here is final, must blocking retry */
490e8967
JA
2628 if (!(req->flags & REQ_F_NOWAIT) &&
2629 !file_can_poll(req->file))
29de5f6a 2630 req->flags |= REQ_F_MUST_PUNT;
f67676d1
JA
2631 return -EAGAIN;
2632 }
2b188cc1 2633 }
f67676d1 2634out_free:
1e95081c 2635 kfree(iovec);
99bc4c38 2636 req->flags &= ~REQ_F_NEED_CLEANUP;
2b188cc1
JA
2637 return ret;
2638}
2639
3529d8c2
JA
2640static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2641 bool force_nonblock)
f67676d1 2642{
3529d8c2
JA
2643 struct io_async_ctx *io;
2644 struct iov_iter iter;
f67676d1
JA
2645 ssize_t ret;
2646
3529d8c2
JA
2647 ret = io_prep_rw(req, sqe, force_nonblock);
2648 if (ret)
2649 return ret;
f67676d1 2650
3529d8c2
JA
2651 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
2652 return -EBADF;
f67676d1 2653
4ed734b0
JA
2654 req->fsize = rlimit(RLIMIT_FSIZE);
2655
5f798bea
PB
2656 /* either don't need iovec imported or already have it */
2657 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2
JA
2658 return 0;
2659
2660 io = req->io;
2661 io->rw.iov = io->rw.fast_iov;
2662 req->io = NULL;
bcda7baa 2663 ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
3529d8c2
JA
2664 req->io = io;
2665 if (ret < 0)
2666 return ret;
2667
2668 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2669 return 0;
f67676d1
JA
2670}
2671
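/*
 * Issue a write. Mirrors io_read(), but also open-codes the freeze
 * protection handling for regular files and applies the submitter's
 * RLIMIT_FSIZE around the actual write.
 */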
014db007 2672static int io_write(struct io_kiocb *req, bool force_nonblock)
2b188cc1
JA
2673{
2674 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 2675 struct kiocb *kiocb = &req->rw.kiocb;
2b188cc1 2676 struct iov_iter iter;
31b51510 2677 size_t iov_count;
f67676d1 2678 ssize_t ret, io_size;
2b188cc1 2679
bcda7baa 2680 ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
06b76d44
JA
2681 if (ret < 0)
2682 return ret;
2b188cc1 2683
fd6c2e4c
JA
2684 /* Ensure we clear previously set non-block flag */
2685 if (!force_nonblock)
9adbd45d 2686 req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 2687
797f3f53 2688 req->result = 0;
f67676d1 2689 io_size = ret;
dea3b49c 2690 if (req->flags & REQ_F_LINK_HEAD)
f67676d1 2691 req->result = io_size;
9e645e11 2692
f67676d1
JA
2693 /*
2694 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2695 * we know to async punt it even if it was opened O_NONBLOCK
2696 */
af197f50 2697 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
f67676d1 2698 goto copy_iov;
31b51510 2699
10d59345
JA
2700 /* file path doesn't support NOWAIT for non-direct_IO */
2701 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
2702 (req->flags & REQ_F_ISREG))
f67676d1 2703 goto copy_iov;
31b51510 2704
f67676d1 2705 iov_count = iov_iter_count(&iter);
9adbd45d 2706 ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
2b188cc1 2707 if (!ret) {
9bf7933f
RP
2708 ssize_t ret2;
2709
2b188cc1
JA
2710 /*
2711 * Open-code file_start_write here to grab freeze protection,
2712 * which will be released by another thread in
2713 * io_complete_rw(). Fool lockdep by telling it the lock got
2714 * released so that it doesn't complain about the held lock when
2715 * we return to userspace.
2716 */
491381ce 2717 if (req->flags & REQ_F_ISREG) {
9adbd45d 2718 __sb_start_write(file_inode(req->file)->i_sb,
2b188cc1 2719 SB_FREEZE_WRITE, true);
9adbd45d 2720 __sb_writers_release(file_inode(req->file)->i_sb,
2b188cc1
JA
2721 SB_FREEZE_WRITE);
2722 }
2723 kiocb->ki_flags |= IOCB_WRITE;
9bf7933f 2724
4ed734b0
JA
2725 if (!force_nonblock)
2726 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2727
9adbd45d
JA
2728 if (req->file->f_op->write_iter)
2729 ret2 = call_write_iter(req->file, kiocb, &iter);
32960613 2730 else
9adbd45d 2731 ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
4ed734b0
JA
2732
2733 if (!force_nonblock)
2734 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2735
faac996c 2736 /*
bff6035d 2737 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
faac996c
JA
2738 * retry them without IOCB_NOWAIT.
2739 */
2740 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
2741 ret2 = -EAGAIN;
f67676d1 2742 if (!force_nonblock || ret2 != -EAGAIN) {
014db007 2743 kiocb_done(kiocb, ret2);
f67676d1
JA
2744 } else {
2745copy_iov:
b7bb4f7d 2746 ret = io_setup_async_rw(req, io_size, iovec,
f67676d1
JA
2747 inline_vecs, &iter);
2748 if (ret)
2749 goto out_free;
29de5f6a 2750 /* any defer here is final, must blocking retry */
490e8967
JA
2751 if (!file_can_poll(req->file))
2752 req->flags |= REQ_F_MUST_PUNT;
f67676d1
JA
2753 return -EAGAIN;
2754 }
2b188cc1 2755 }
31b51510 2756out_free:
99bc4c38 2757 req->flags &= ~REQ_F_NEED_CLEANUP;
1e95081c 2758 kfree(iovec);
2b188cc1
JA
2759 return ret;
2760}
2761
f2a8d5c7
PB
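/*
 * Common prep for splice and tee: validate the flags and grab a
 * reference to the input file, which may come from the fixed file
 * table if SPLICE_F_FD_IN_FIXED is set.
 */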
2762static int __io_splice_prep(struct io_kiocb *req,
2763 const struct io_uring_sqe *sqe)
7d67af2c
PB
2764{
 2765	struct io_splice *sp = &req->splice;
2766 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
2767 int ret;
2768
2769 if (req->flags & REQ_F_NEED_CLEANUP)
2770 return 0;
2771
2772 sp->file_in = NULL;
7d67af2c
PB
2773 sp->len = READ_ONCE(sqe->len);
2774 sp->flags = READ_ONCE(sqe->splice_flags);
2775
2776 if (unlikely(sp->flags & ~valid_flags))
2777 return -EINVAL;
2778
2779 ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
2780 (sp->flags & SPLICE_F_FD_IN_FIXED));
2781 if (ret)
2782 return ret;
2783 req->flags |= REQ_F_NEED_CLEANUP;
2784
2785 if (!S_ISREG(file_inode(sp->file_in)->i_mode))
2786 req->work.flags |= IO_WQ_WORK_UNBOUND;
2787
2788 return 0;
2789}
2790
f2a8d5c7
PB
2791static int io_tee_prep(struct io_kiocb *req,
2792 const struct io_uring_sqe *sqe)
2793{
2794 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
2795 return -EINVAL;
2796 return __io_splice_prep(req, sqe);
2797}
2798
2799static int io_tee(struct io_kiocb *req, bool force_nonblock)
2800{
2801 struct io_splice *sp = &req->splice;
2802 struct file *in = sp->file_in;
2803 struct file *out = sp->file_out;
2804 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2805 long ret = 0;
2806
2807 if (force_nonblock)
2808 return -EAGAIN;
2809 if (sp->len)
2810 ret = do_tee(in, out, sp->len, flags);
2811
2812 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2813 req->flags &= ~REQ_F_NEED_CLEANUP;
2814
2815 io_cqring_add_event(req, ret);
2816 if (ret != sp->len)
2817 req_set_fail_links(req);
2818 io_put_req(req);
2819 return 0;
2820}
2821
2822static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2823{
 2824	struct io_splice *sp = &req->splice;
2825
2826 sp->off_in = READ_ONCE(sqe->splice_off_in);
2827 sp->off_out = READ_ONCE(sqe->off);
2828 return __io_splice_prep(req, sqe);
2829}
2830
014db007 2831static int io_splice(struct io_kiocb *req, bool force_nonblock)
7d67af2c
PB
2832{
2833 struct io_splice *sp = &req->splice;
2834 struct file *in = sp->file_in;
2835 struct file *out = sp->file_out;
2836 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2837 loff_t *poff_in, *poff_out;
2838 long ret;
2839
2fb3e822
PB
2840 if (force_nonblock)
2841 return -EAGAIN;
7d67af2c
PB
2842
2843 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
2844 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
2845 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
2846 if (force_nonblock && ret == -EAGAIN)
2847 return -EAGAIN;
2848
2849 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2850 req->flags &= ~REQ_F_NEED_CLEANUP;
2851
2852 io_cqring_add_event(req, ret);
2853 if (ret != sp->len)
2854 req_set_fail_links(req);
014db007 2855 io_put_req(req);
7d67af2c
PB
2856 return 0;
2857}
2858
2b188cc1
JA
2859/*
2860 * IORING_OP_NOP just posts a completion event, nothing else.
2861 */
78e19bbe 2862static int io_nop(struct io_kiocb *req)
2b188cc1
JA
2863{
2864 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 2865
def596e9
JA
2866 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2867 return -EINVAL;
2868
78e19bbe 2869 io_cqring_add_event(req, 0);
e65ef56d 2870 io_put_req(req);
2b188cc1
JA
2871 return 0;
2872}
2873
3529d8c2 2874static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 2875{
6b06314c 2876 struct io_ring_ctx *ctx = req->ctx;
c992fe29 2877
09bb8394
JA
2878 if (!req->file)
2879 return -EBADF;
c992fe29 2880
6b06314c 2881 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 2882 return -EINVAL;
edafccee 2883 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
2884 return -EINVAL;
2885
8ed8d3c3
JA
2886 req->sync.flags = READ_ONCE(sqe->fsync_flags);
2887 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
2888 return -EINVAL;
2889
2890 req->sync.off = READ_ONCE(sqe->off);
2891 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
2892 return 0;
2893}
2894
8ed8d3c3
JA
2895static bool io_req_cancelled(struct io_kiocb *req)
2896{
2897 if (req->work.flags & IO_WQ_WORK_CANCEL) {
2898 req_set_fail_links(req);
2899 io_cqring_add_event(req, -ECANCELED);
2900 io_put_req(req);
2901 return true;
2902 }
2903
2904 return false;
2905}
2906
014db007 2907static void __io_fsync(struct io_kiocb *req)
8ed8d3c3 2908{
8ed8d3c3 2909 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
2910 int ret;
2911
9adbd45d 2912 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
2913 end > 0 ? end : LLONG_MAX,
2914 req->sync.flags & IORING_FSYNC_DATASYNC);
2915 if (ret < 0)
2916 req_set_fail_links(req);
2917 io_cqring_add_event(req, ret);
014db007 2918 io_put_req(req);
8ed8d3c3
JA
2919}
2920
5ea62161 2921static void io_fsync_finish(struct io_wq_work **workptr)
c992fe29 2922{
5ea62161 2923 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
5ea62161
PB
2924
2925 if (io_req_cancelled(req))
2926 return;
014db007 2927 __io_fsync(req);
e9fd9396 2928 io_steal_work(req, workptr);
8ed8d3c3 2929}
c992fe29 2930
014db007 2931static int io_fsync(struct io_kiocb *req, bool force_nonblock)
c992fe29 2932{
c992fe29 2933 /* fsync always requires a blocking context */
8ed8d3c3 2934 if (force_nonblock) {
8ed8d3c3 2935 req->work.func = io_fsync_finish;
c992fe29 2936 return -EAGAIN;
8ed8d3c3 2937 }
014db007 2938 __io_fsync(req);
c992fe29
CH
2939 return 0;
2940}
2941
014db007 2942static void __io_fallocate(struct io_kiocb *req)
8ed8d3c3 2943{
8ed8d3c3
JA
2944 int ret;
2945
4ed734b0 2946 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
d63d1b5e
JA
2947 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
2948 req->sync.len);
4ed734b0 2949 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
8ed8d3c3
JA
2950 if (ret < 0)
2951 req_set_fail_links(req);
2952 io_cqring_add_event(req, ret);
014db007 2953 io_put_req(req);
5ea62161
PB
2954}
2955
2956static void io_fallocate_finish(struct io_wq_work **workptr)
2957{
2958 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
5ea62161 2959
594506fe
PB
2960 if (io_req_cancelled(req))
2961 return;
014db007 2962 __io_fallocate(req);
e9fd9396 2963 io_steal_work(req, workptr);
5d17b4a4
JA
2964}
2965
d63d1b5e
JA
2966static int io_fallocate_prep(struct io_kiocb *req,
2967 const struct io_uring_sqe *sqe)
2968{
2969 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
2970 return -EINVAL;
2971
2972 req->sync.off = READ_ONCE(sqe->off);
2973 req->sync.len = READ_ONCE(sqe->addr);
2974 req->sync.mode = READ_ONCE(sqe->len);
4ed734b0 2975 req->fsize = rlimit(RLIMIT_FSIZE);
d63d1b5e
JA
2976 return 0;
2977}
2978
014db007 2979static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
5d17b4a4 2980{
d63d1b5e 2981	/* fallocate always requires a blocking context */
8ed8d3c3 2982 if (force_nonblock) {
d63d1b5e 2983 req->work.func = io_fallocate_finish;
5d17b4a4 2984 return -EAGAIN;
8ed8d3c3 2985 }
5d17b4a4 2986
014db007 2987 __io_fallocate(req);
5d17b4a4
JA
2988 return 0;
2989}
2990
15b71abe 2991static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 2992{
f8748881 2993 const char __user *fname;
15b71abe 2994 int ret;
b7bb4f7d 2995
15b71abe
JA
2996 if (sqe->ioprio || sqe->buf_index)
2997 return -EINVAL;
9c280f90 2998 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 2999 return -EBADF;
0bdbdd08
PB
3000 if (req->flags & REQ_F_NEED_CLEANUP)
3001 return 0;
03b1230c 3002
15b71abe 3003 req->open.dfd = READ_ONCE(sqe->fd);
c12cedf2 3004 req->open.how.mode = READ_ONCE(sqe->len);
f8748881 3005 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
c12cedf2 3006 req->open.how.flags = READ_ONCE(sqe->open_flags);
08a1d26e
JA
3007 if (force_o_largefile())
3008 req->open.how.flags |= O_LARGEFILE;
3529d8c2 3009
f8748881 3010 req->open.filename = getname(fname);
15b71abe
JA
3011 if (IS_ERR(req->open.filename)) {
3012 ret = PTR_ERR(req->open.filename);
3013 req->open.filename = NULL;
3014 return ret;
3015 }
3529d8c2 3016
4022e7af 3017 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 3018 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 3019 return 0;
03b1230c
JA
3020}
3021
cebdb986 3022static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 3023{
cebdb986
JA
3024 struct open_how __user *how;
3025 const char __user *fname;
3026 size_t len;
0fa03c62
JA
3027 int ret;
3028
cebdb986 3029 if (sqe->ioprio || sqe->buf_index)
0fa03c62 3030 return -EINVAL;
9c280f90 3031 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3032 return -EBADF;
0bdbdd08
PB
3033 if (req->flags & REQ_F_NEED_CLEANUP)
3034 return 0;
0fa03c62 3035
cebdb986
JA
3036 req->open.dfd = READ_ONCE(sqe->fd);
3037 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3038 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3039 len = READ_ONCE(sqe->len);
0fa03c62 3040
cebdb986
JA
3041 if (len < OPEN_HOW_SIZE_VER0)
3042 return -EINVAL;
3529d8c2 3043
cebdb986
JA
3044 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3045 len);
3046 if (ret)
3047 return ret;
3529d8c2 3048
cebdb986
JA
3049 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
3050 req->open.how.flags |= O_LARGEFILE;
0fa03c62 3051
cebdb986
JA
3052 req->open.filename = getname(fname);
3053 if (IS_ERR(req->open.filename)) {
3054 ret = PTR_ERR(req->open.filename);
3055 req->open.filename = NULL;
3056 return ret;
3057 }
3058
4022e7af 3059 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 3060 req->flags |= REQ_F_NEED_CLEANUP;
cebdb986
JA
3061 return 0;
3062}
3063
014db007 3064static int io_openat2(struct io_kiocb *req, bool force_nonblock)
15b71abe
JA
3065{
3066 struct open_flags op;
15b71abe
JA
3067 struct file *file;
3068 int ret;
3069
f86cd20c 3070 if (force_nonblock)
15b71abe 3071 return -EAGAIN;
15b71abe 3072
cebdb986 3073 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
3074 if (ret)
3075 goto err;
3076
4022e7af 3077 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
15b71abe
JA
3078 if (ret < 0)
3079 goto err;
3080
3081 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3082 if (IS_ERR(file)) {
3083 put_unused_fd(ret);
3084 ret = PTR_ERR(file);
3085 } else {
3086 fsnotify_open(file);
3087 fd_install(ret, file);
3088 }
3089err:
3090 putname(req->open.filename);
8fef80bf 3091 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe
JA
3092 if (ret < 0)
3093 req_set_fail_links(req);
3094 io_cqring_add_event(req, ret);
014db007 3095 io_put_req(req);
15b71abe
JA
3096 return 0;
3097}
3098
014db007 3099static int io_openat(struct io_kiocb *req, bool force_nonblock)
cebdb986
JA
3100{
3101 req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
014db007 3102 return io_openat2(req, force_nonblock);
cebdb986
JA
3103}
3104
067524e9
JA
3105static int io_remove_buffers_prep(struct io_kiocb *req,
3106 const struct io_uring_sqe *sqe)
3107{
3108 struct io_provide_buf *p = &req->pbuf;
3109 u64 tmp;
3110
3111 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3112 return -EINVAL;
3113
3114 tmp = READ_ONCE(sqe->fd);
3115 if (!tmp || tmp > USHRT_MAX)
3116 return -EINVAL;
3117
3118 memset(p, 0, sizeof(*p));
3119 p->nbufs = tmp;
3120 p->bgid = READ_ONCE(sqe->buf_group);
3121 return 0;
3122}
3123
3124static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3125 int bgid, unsigned nbufs)
3126{
3127 unsigned i = 0;
3128
3129 /* shouldn't happen */
3130 if (!nbufs)
3131 return 0;
3132
3133 /* the head kbuf is the list itself */
3134 while (!list_empty(&buf->list)) {
3135 struct io_buffer *nxt;
3136
3137 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3138 list_del(&nxt->list);
3139 kfree(nxt);
3140 if (++i == nbufs)
3141 return i;
3142 }
3143 i++;
3144 kfree(buf);
3145 idr_remove(&ctx->io_buffer_idr, bgid);
3146
3147 return i;
3148}
3149
3150static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock)
3151{
3152 struct io_provide_buf *p = &req->pbuf;
3153 struct io_ring_ctx *ctx = req->ctx;
3154 struct io_buffer *head;
3155 int ret = 0;
3156
3157 io_ring_submit_lock(ctx, !force_nonblock);
3158
3159 lockdep_assert_held(&ctx->uring_lock);
3160
3161 ret = -ENOENT;
3162 head = idr_find(&ctx->io_buffer_idr, p->bgid);
3163 if (head)
3164 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3165
 3166	io_ring_submit_unlock(ctx, !force_nonblock);
3167 if (ret < 0)
3168 req_set_fail_links(req);
3169 io_cqring_add_event(req, ret);
3170 io_put_req(req);
3171 return 0;
3172}
3173
ddf0322d
JA
3174static int io_provide_buffers_prep(struct io_kiocb *req,
3175 const struct io_uring_sqe *sqe)
3176{
3177 struct io_provide_buf *p = &req->pbuf;
3178 u64 tmp;
3179
3180 if (sqe->ioprio || sqe->rw_flags)
3181 return -EINVAL;
3182
3183 tmp = READ_ONCE(sqe->fd);
3184 if (!tmp || tmp > USHRT_MAX)
3185 return -E2BIG;
3186 p->nbufs = tmp;
3187 p->addr = READ_ONCE(sqe->addr);
3188 p->len = READ_ONCE(sqe->len);
3189
3190 if (!access_ok(u64_to_user_ptr(p->addr), p->len))
3191 return -EFAULT;
3192
3193 p->bgid = READ_ONCE(sqe->buf_group);
3194 tmp = READ_ONCE(sqe->off);
3195 if (tmp > USHRT_MAX)
3196 return -E2BIG;
3197 p->bid = tmp;
3198 return 0;
3199}
3200
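/*
 * Allocate 'nbufs' io_buffer entries describing consecutive chunks of
 * 'len' bytes starting at 'addr', chaining them onto *head. Returns the
 * number of buffers added, or -ENOMEM if none could be allocated.
 */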
3201static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3202{
3203 struct io_buffer *buf;
3204 u64 addr = pbuf->addr;
3205 int i, bid = pbuf->bid;
3206
3207 for (i = 0; i < pbuf->nbufs; i++) {
3208 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3209 if (!buf)
3210 break;
3211
3212 buf->addr = addr;
3213 buf->len = pbuf->len;
3214 buf->bid = bid;
3215 addr += pbuf->len;
3216 bid++;
3217 if (!*head) {
3218 INIT_LIST_HEAD(&buf->list);
3219 *head = buf;
3220 } else {
3221 list_add_tail(&buf->list, &(*head)->list);
3222 }
3223 }
3224
3225 return i ? i : -ENOMEM;
3226}
3227
ddf0322d
JA
3228static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
3229{
3230 struct io_provide_buf *p = &req->pbuf;
3231 struct io_ring_ctx *ctx = req->ctx;
3232 struct io_buffer *head, *list;
3233 int ret = 0;
3234
3235 io_ring_submit_lock(ctx, !force_nonblock);
3236
3237 lockdep_assert_held(&ctx->uring_lock);
3238
3239 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
3240
3241 ret = io_add_buffers(p, &head);
3242 if (ret < 0)
3243 goto out;
3244
3245 if (!list) {
3246 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
3247 GFP_KERNEL);
3248 if (ret < 0) {
067524e9 3249 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d
JA
3250 goto out;
3251 }
3252 }
3253out:
3254 io_ring_submit_unlock(ctx, !force_nonblock);
3255 if (ret < 0)
3256 req_set_fail_links(req);
3257 io_cqring_add_event(req, ret);
3258 io_put_req(req);
3259 return 0;
cebdb986
JA
3260}
3261
3e4827b0
JA
3262static int io_epoll_ctl_prep(struct io_kiocb *req,
3263 const struct io_uring_sqe *sqe)
3264{
3265#if defined(CONFIG_EPOLL)
3266 if (sqe->ioprio || sqe->buf_index)
3267 return -EINVAL;
3268
3269 req->epoll.epfd = READ_ONCE(sqe->fd);
3270 req->epoll.op = READ_ONCE(sqe->len);
3271 req->epoll.fd = READ_ONCE(sqe->off);
3272
3273 if (ep_op_has_event(req->epoll.op)) {
3274 struct epoll_event __user *ev;
3275
3276 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
3277 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
3278 return -EFAULT;
3279 }
3280
3281 return 0;
3282#else
3283 return -EOPNOTSUPP;
3284#endif
3285}
3286
014db007 3287static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
3e4827b0
JA
3288{
3289#if defined(CONFIG_EPOLL)
3290 struct io_epoll *ie = &req->epoll;
3291 int ret;
3292
3293 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
3294 if (force_nonblock && ret == -EAGAIN)
3295 return -EAGAIN;
3296
3297 if (ret < 0)
3298 req_set_fail_links(req);
3299 io_cqring_add_event(req, ret);
014db007 3300 io_put_req(req);
3e4827b0
JA
3301 return 0;
3302#else
3303 return -EOPNOTSUPP;
3304#endif
3305}
3306
c1ca757b
JA
3307static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3308{
3309#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3310 if (sqe->ioprio || sqe->buf_index || sqe->off)
3311 return -EINVAL;
3312
3313 req->madvise.addr = READ_ONCE(sqe->addr);
3314 req->madvise.len = READ_ONCE(sqe->len);
3315 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
3316 return 0;
3317#else
3318 return -EOPNOTSUPP;
3319#endif
3320}
3321
014db007 3322static int io_madvise(struct io_kiocb *req, bool force_nonblock)
c1ca757b
JA
3323{
3324#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3325 struct io_madvise *ma = &req->madvise;
3326 int ret;
3327
3328 if (force_nonblock)
3329 return -EAGAIN;
3330
3331 ret = do_madvise(ma->addr, ma->len, ma->advice);
3332 if (ret < 0)
3333 req_set_fail_links(req);
3334 io_cqring_add_event(req, ret);
014db007 3335 io_put_req(req);
c1ca757b
JA
3336 return 0;
3337#else
3338 return -EOPNOTSUPP;
3339#endif
3340}
3341
4840e418
JA
3342static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3343{
3344 if (sqe->ioprio || sqe->buf_index || sqe->addr)
3345 return -EINVAL;
3346
3347 req->fadvise.offset = READ_ONCE(sqe->off);
3348 req->fadvise.len = READ_ONCE(sqe->len);
3349 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
3350 return 0;
3351}
3352
014db007 3353static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
4840e418
JA
3354{
3355 struct io_fadvise *fa = &req->fadvise;
3356 int ret;
3357
3e69426d
JA
3358 if (force_nonblock) {
3359 switch (fa->advice) {
3360 case POSIX_FADV_NORMAL:
3361 case POSIX_FADV_RANDOM:
3362 case POSIX_FADV_SEQUENTIAL:
3363 break;
3364 default:
3365 return -EAGAIN;
3366 }
3367 }
4840e418
JA
3368
3369 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
3370 if (ret < 0)
3371 req_set_fail_links(req);
3372 io_cqring_add_event(req, ret);
014db007 3373 io_put_req(req);
4840e418
JA
3374 return 0;
3375}
3376
eddc7ef5
JA
3377static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3378{
eddc7ef5
JA
3379 if (sqe->ioprio || sqe->buf_index)
3380 return -EINVAL;
9c280f90 3381 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3382 return -EBADF;
eddc7ef5 3383
1d9e1288
BM
3384 req->statx.dfd = READ_ONCE(sqe->fd);
3385 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 3386 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
3387 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3388 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5 3389
eddc7ef5
JA
3390 return 0;
3391}
3392
014db007 3393static int io_statx(struct io_kiocb *req, bool force_nonblock)
eddc7ef5 3394{
1d9e1288 3395 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
3396 int ret;
3397
5b0bbee4
JA
3398 if (force_nonblock) {
3399 /* only need file table for an actual valid fd */
3400 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
3401 req->flags |= REQ_F_NO_FILE_TABLE;
eddc7ef5 3402 return -EAGAIN;
5b0bbee4 3403 }
eddc7ef5 3404
e62753e4
BM
3405 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
3406 ctx->buffer);
eddc7ef5 3407
eddc7ef5
JA
3408 if (ret < 0)
3409 req_set_fail_links(req);
3410 io_cqring_add_event(req, ret);
014db007 3411 io_put_req(req);
eddc7ef5
JA
3412 return 0;
3413}
3414
b5dba59e
JA
3415static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3416{
3417 /*
3418 * If we queue this for async, it must not be cancellable. That would
 3419	 * leave the 'file' in an indeterminate state.
3420 */
3421 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
3422
3423 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
3424 sqe->rw_flags || sqe->buf_index)
3425 return -EINVAL;
9c280f90 3426 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3427 return -EBADF;
b5dba59e
JA
3428
3429 req->close.fd = READ_ONCE(sqe->fd);
b5dba59e
JA
3430 return 0;
3431}
3432
a93b3331 3433/* only called when __close_fd_get_file() is done */
014db007 3434static void __io_close_finish(struct io_kiocb *req)
a93b3331
PB
3435{
3436 int ret;
3437
3438 ret = filp_close(req->close.put_file, req->work.files);
3439 if (ret < 0)
3440 req_set_fail_links(req);
3441 io_cqring_add_event(req, ret);
3442 fput(req->close.put_file);
014db007 3443 io_put_req(req);
a93b3331
PB
3444}
3445
b5dba59e
JA
3446static void io_close_finish(struct io_wq_work **workptr)
3447{
3448 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
b5dba59e 3449
7fbeb95d 3450 /* not cancellable, don't do io_req_cancelled() */
014db007 3451 __io_close_finish(req);
e9fd9396 3452 io_steal_work(req, workptr);
b5dba59e
JA
3453}
3454
014db007 3455static int io_close(struct io_kiocb *req, bool force_nonblock)
b5dba59e
JA
3456{
3457 int ret;
3458
3459 req->close.put_file = NULL;
3460 ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
0bf0eefd
PB
3461 if (ret < 0)
3462 return (ret == -ENOENT) ? -EBADF : ret;
b5dba59e
JA
3463
3464 /* if the file has a flush method, be safe and punt to async */
a2100672 3465 if (req->close.put_file->f_op->flush && force_nonblock) {
0bf0eefd
PB
3466 /* avoid grabbing files - we don't need the files */
3467 req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT;
a2100672 3468 req->work.func = io_close_finish;
0bf0eefd 3469 return -EAGAIN;
a2100672 3470 }
b5dba59e
JA
3471
3472 /*
3473 * No ->flush(), safely close from here and just punt the
3474 * fput() to async context.
3475 */
014db007 3476 __io_close_finish(req);
1a417f4e 3477 return 0;
b5dba59e
JA
3478}
3479
3529d8c2 3480static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
3481{
3482 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4
JA
3483
3484 if (!req->file)
3485 return -EBADF;
5d17b4a4
JA
3486
3487 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3488 return -EINVAL;
3489 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3490 return -EINVAL;
3491
8ed8d3c3
JA
3492 req->sync.off = READ_ONCE(sqe->off);
3493 req->sync.len = READ_ONCE(sqe->len);
3494 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
3495 return 0;
3496}
3497
014db007 3498static void __io_sync_file_range(struct io_kiocb *req)
8ed8d3c3 3499{
8ed8d3c3
JA
3500 int ret;
3501
9adbd45d 3502 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
3503 req->sync.flags);
3504 if (ret < 0)
3505 req_set_fail_links(req);
3506 io_cqring_add_event(req, ret);
014db007 3507 io_put_req(req);
5ea62161
PB
3508}
3509
3510
3511static void io_sync_file_range_finish(struct io_wq_work **workptr)
3512{
3513 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
5ea62161
PB
3514
3515 if (io_req_cancelled(req))
3516 return;
014db007 3517 __io_sync_file_range(req);
7759a0bf 3518 io_steal_work(req, workptr);
5d17b4a4
JA
3519}
3520
014db007 3521static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
5d17b4a4 3522{
5d17b4a4 3523 /* sync_file_range always requires a blocking context */
8ed8d3c3 3524 if (force_nonblock) {
8ed8d3c3 3525 req->work.func = io_sync_file_range_finish;
5d17b4a4 3526 return -EAGAIN;
8ed8d3c3 3527 }
5d17b4a4 3528
014db007 3529 __io_sync_file_range(req);
5d17b4a4
JA
3530 return 0;
3531}
3532
469956e8 3533#if defined(CONFIG_NET)
02d27d89
PB
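/*
 * Preserve the prepared msghdr (and any allocated iovec) in the
 * request's async context so a sendmsg/recvmsg that hit -EAGAIN can be
 * retried from a worker thread.
 */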
3534static int io_setup_async_msg(struct io_kiocb *req,
3535 struct io_async_msghdr *kmsg)
3536{
3537 if (req->io)
3538 return -EAGAIN;
3539 if (io_alloc_async_ctx(req)) {
3540 if (kmsg->iov != kmsg->fast_iov)
3541 kfree(kmsg->iov);
3542 return -ENOMEM;
3543 }
3544 req->flags |= REQ_F_NEED_CLEANUP;
3545 memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
3546 return -EAGAIN;
3547}
3548
3529d8c2 3549static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 3550{
e47293fd 3551 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3552 struct io_async_ctx *io = req->io;
99bc4c38 3553 int ret;
03b1230c 3554
e47293fd
JA
3555 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3556 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 3557 sr->len = READ_ONCE(sqe->len);
3529d8c2 3558
d8768362
JA
3559#ifdef CONFIG_COMPAT
3560 if (req->ctx->compat)
3561 sr->msg_flags |= MSG_CMSG_COMPAT;
3562#endif
3563
fddaface 3564 if (!io || req->opcode == IORING_OP_SEND)
3529d8c2 3565 return 0;
5f798bea
PB
3566 /* iovec is already imported */
3567 if (req->flags & REQ_F_NEED_CLEANUP)
3568 return 0;
3529d8c2 3569
d9688565 3570 io->msg.iov = io->msg.fast_iov;
99bc4c38 3571 ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
e47293fd 3572 &io->msg.iov);
99bc4c38
PB
3573 if (!ret)
3574 req->flags |= REQ_F_NEED_CLEANUP;
3575 return ret;
03b1230c
JA
3576}
3577
014db007 3578static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
aa1fa28f 3579{
0b416c3e 3580 struct io_async_msghdr *kmsg = NULL;
0fa03c62
JA
3581 struct socket *sock;
3582 int ret;
3583
3584 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3585 return -EINVAL;
3586
3587 sock = sock_from_file(req->file, &ret);
3588 if (sock) {
b7bb4f7d 3589 struct io_async_ctx io;
0fa03c62
JA
3590 unsigned flags;
3591
03b1230c 3592 if (req->io) {
0b416c3e 3593 kmsg = &req->io->msg;
b537916c 3594 kmsg->msg.msg_name = &req->io->msg.addr;
0b416c3e
JA
3595 /* if iov is set, it's allocated already */
3596 if (!kmsg->iov)
3597 kmsg->iov = kmsg->fast_iov;
3598 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3599 } else {
3529d8c2
JA
3600 struct io_sr_msg *sr = &req->sr_msg;
3601
0b416c3e 3602 kmsg = &io.msg;
b537916c 3603 kmsg->msg.msg_name = &io.msg.addr;
3529d8c2
JA
3604
3605 io.msg.iov = io.msg.fast_iov;
3606 ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
3607 sr->msg_flags, &io.msg.iov);
03b1230c 3608 if (ret)
3529d8c2 3609 return ret;
03b1230c 3610 }
0fa03c62 3611
e47293fd
JA
3612 flags = req->sr_msg.msg_flags;
3613 if (flags & MSG_DONTWAIT)
3614 req->flags |= REQ_F_NOWAIT;
3615 else if (force_nonblock)
3616 flags |= MSG_DONTWAIT;
3617
0b416c3e 3618 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
02d27d89
PB
3619 if (force_nonblock && ret == -EAGAIN)
3620 return io_setup_async_msg(req, kmsg);
441cdbd5
JA
3621 if (ret == -ERESTARTSYS)
3622 ret = -EINTR;
0fa03c62
JA
3623 }
3624
1e95081c 3625 if (kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3626 kfree(kmsg->iov);
99bc4c38 3627 req->flags &= ~REQ_F_NEED_CLEANUP;
78e19bbe 3628 io_cqring_add_event(req, ret);
4e88d6e7
JA
3629 if (ret < 0)
3630 req_set_fail_links(req);
014db007 3631 io_put_req(req);
5d17b4a4 3632 return 0;
03b1230c 3633}
aa1fa28f 3634
014db007 3635static int io_send(struct io_kiocb *req, bool force_nonblock)
fddaface 3636{
fddaface
JA
3637 struct socket *sock;
3638 int ret;
3639
3640 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3641 return -EINVAL;
3642
3643 sock = sock_from_file(req->file, &ret);
3644 if (sock) {
3645 struct io_sr_msg *sr = &req->sr_msg;
3646 struct msghdr msg;
3647 struct iovec iov;
3648 unsigned flags;
3649
3650 ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
3651 &msg.msg_iter);
3652 if (ret)
3653 return ret;
3654
3655 msg.msg_name = NULL;
3656 msg.msg_control = NULL;
3657 msg.msg_controllen = 0;
3658 msg.msg_namelen = 0;
3659
3660 flags = req->sr_msg.msg_flags;
3661 if (flags & MSG_DONTWAIT)
3662 req->flags |= REQ_F_NOWAIT;
3663 else if (force_nonblock)
3664 flags |= MSG_DONTWAIT;
3665
0b7b21e4
JA
3666 msg.msg_flags = flags;
3667 ret = sock_sendmsg(sock, &msg);
fddaface
JA
3668 if (force_nonblock && ret == -EAGAIN)
3669 return -EAGAIN;
3670 if (ret == -ERESTARTSYS)
3671 ret = -EINTR;
3672 }
3673
3674 io_cqring_add_event(req, ret);
3675 if (ret < 0)
3676 req_set_fail_links(req);
014db007 3677 io_put_req(req);
fddaface 3678 return 0;
fddaface
JA
3679}
3680
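/*
 * Editor's sketch (not part of the kernel source): how the IORING_OP_SEND /
 * IORING_OP_SENDMSG paths above are typically driven from userspace with
 * liburing. `sockfd` is an assumed connected socket; helper names are
 * liburing's, everything else is illustrative.
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *	static const char buf[] = "hello";
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send(sqe, sockfd, buf, sizeof(buf) - 1, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);	/* cqe->res mirrors the kernel-side ret above */
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */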
52de1fe1
JA
3681static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3682{
3683 struct io_sr_msg *sr = &req->sr_msg;
3684 struct iovec __user *uiov;
3685 size_t iov_len;
3686 int ret;
3687
3688 ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr,
3689 &uiov, &iov_len);
3690 if (ret)
3691 return ret;
3692
3693 if (req->flags & REQ_F_BUFFER_SELECT) {
3694 if (iov_len > 1)
3695 return -EINVAL;
3696 if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov)))
3697 return -EFAULT;
3698 sr->len = io->msg.iov[0].iov_len;
3699 iov_iter_init(&io->msg.msg.msg_iter, READ, io->msg.iov, 1,
3700 sr->len);
3701 io->msg.iov = NULL;
3702 } else {
3703 ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
3704 &io->msg.iov, &io->msg.msg.msg_iter);
3705 if (ret > 0)
3706 ret = 0;
3707 }
3708
3709 return ret;
3710}
3711
3712#ifdef CONFIG_COMPAT
3713static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
3714 struct io_async_ctx *io)
3715{
3716 struct compat_msghdr __user *msg_compat;
3717 struct io_sr_msg *sr = &req->sr_msg;
3718 struct compat_iovec __user *uiov;
3719 compat_uptr_t ptr;
3720 compat_size_t len;
3721 int ret;
3722
3723 msg_compat = (struct compat_msghdr __user *) sr->msg;
3724 ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr,
3725 &ptr, &len);
3726 if (ret)
3727 return ret;
3728
3729 uiov = compat_ptr(ptr);
3730 if (req->flags & REQ_F_BUFFER_SELECT) {
3731 compat_ssize_t clen;
3732
3733 if (len > 1)
3734 return -EINVAL;
3735 if (!access_ok(uiov, sizeof(*uiov)))
3736 return -EFAULT;
3737 if (__get_user(clen, &uiov->iov_len))
3738 return -EFAULT;
3739 if (clen < 0)
3740 return -EINVAL;
3741 sr->len = io->msg.iov[0].iov_len;
3742 io->msg.iov = NULL;
3743 } else {
3744 ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
3745 &io->msg.iov,
3746 &io->msg.msg.msg_iter);
3747 if (ret < 0)
3748 return ret;
3749 }
3750
3751 return 0;
3752}
3753#endif
3754
3755static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3756{
3757 io->msg.iov = io->msg.fast_iov;
3758
3759#ifdef CONFIG_COMPAT
3760 if (req->ctx->compat)
3761 return __io_compat_recvmsg_copy_hdr(req, io);
fddaface 3762#endif
52de1fe1
JA
3763
3764 return __io_recvmsg_copy_hdr(req, io);
3765}
3766
bcda7baa
JA
3767static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
3768 int *cflags, bool needs_lock)
3769{
3770 struct io_sr_msg *sr = &req->sr_msg;
3771 struct io_buffer *kbuf;
3772
3773 if (!(req->flags & REQ_F_BUFFER_SELECT))
3774 return NULL;
3775
3776 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
3777 if (IS_ERR(kbuf))
3778 return kbuf;
3779
3780 sr->kbuf = kbuf;
3781 req->flags |= REQ_F_BUFFER_SELECTED;
3782
3783 *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
3784 *cflags |= IORING_CQE_F_BUFFER;
3785 return kbuf;
fddaface
JA
3786}
3787
3529d8c2
JA
3788static int io_recvmsg_prep(struct io_kiocb *req,
3789 const struct io_uring_sqe *sqe)
aa1fa28f 3790{
e47293fd 3791 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3792 struct io_async_ctx *io = req->io;
99bc4c38 3793 int ret;
3529d8c2
JA
3794
3795 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3796 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 3797 sr->len = READ_ONCE(sqe->len);
bcda7baa 3798 sr->bgid = READ_ONCE(sqe->buf_group);
06b76d44 3799
d8768362
JA
3800#ifdef CONFIG_COMPAT
3801 if (req->ctx->compat)
3802 sr->msg_flags |= MSG_CMSG_COMPAT;
3803#endif
3804
fddaface 3805 if (!io || req->opcode == IORING_OP_RECV)
06b76d44 3806 return 0;
5f798bea
PB
3807 /* iovec is already imported */
3808 if (req->flags & REQ_F_NEED_CLEANUP)
3809 return 0;
03b1230c 3810
52de1fe1 3811 ret = io_recvmsg_copy_hdr(req, io);
99bc4c38
PB
3812 if (!ret)
3813 req->flags |= REQ_F_NEED_CLEANUP;
3814 return ret;
aa1fa28f
JA
3815}
3816
014db007 3817static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
aa1fa28f 3818{
0b416c3e 3819 struct io_async_msghdr *kmsg = NULL;
03b1230c 3820 struct socket *sock;
52de1fe1 3821 int ret, cflags = 0;
03b1230c
JA
3822
3823 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3824 return -EINVAL;
3825
3826 sock = sock_from_file(req->file, &ret);
3827 if (sock) {
52de1fe1 3828 struct io_buffer *kbuf;
b7bb4f7d 3829 struct io_async_ctx io;
03b1230c
JA
3830 unsigned flags;
3831
03b1230c 3832 if (req->io) {
0b416c3e 3833 kmsg = &req->io->msg;
b537916c 3834 kmsg->msg.msg_name = &req->io->msg.addr;
0b416c3e
JA
3835 /* if iov is set, it's allocated already */
3836 if (!kmsg->iov)
3837 kmsg->iov = kmsg->fast_iov;
3838 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3839 } else {
0b416c3e 3840 kmsg = &io.msg;
b537916c 3841 kmsg->msg.msg_name = &io.msg.addr;
3529d8c2 3842
52de1fe1 3843 ret = io_recvmsg_copy_hdr(req, &io);
03b1230c 3844 if (ret)
3529d8c2 3845 return ret;
03b1230c
JA
3846 }
3847
52de1fe1
JA
3848 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3849 if (IS_ERR(kbuf)) {
3850 return PTR_ERR(kbuf);
3851 } else if (kbuf) {
3852 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3853 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
3854 1, req->sr_msg.len);
3855 }
3856
e47293fd
JA
3857 flags = req->sr_msg.msg_flags;
3858 if (flags & MSG_DONTWAIT)
3859 req->flags |= REQ_F_NOWAIT;
3860 else if (force_nonblock)
3861 flags |= MSG_DONTWAIT;
3862
3863 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
3864 kmsg->uaddr, flags);
02d27d89
PB
3865 if (force_nonblock && ret == -EAGAIN)
3866 return io_setup_async_msg(req, kmsg);
03b1230c
JA
3867 if (ret == -ERESTARTSYS)
3868 ret = -EINTR;
3869 }
3870
1e95081c 3871 if (kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3872 kfree(kmsg->iov);
99bc4c38 3873 req->flags &= ~REQ_F_NEED_CLEANUP;
52de1fe1 3874 __io_cqring_add_event(req, ret, cflags);
4e88d6e7
JA
3875 if (ret < 0)
3876 req_set_fail_links(req);
014db007 3877 io_put_req(req);
03b1230c 3878 return 0;
0fa03c62 3879}
5d17b4a4 3880
014db007 3881static int io_recv(struct io_kiocb *req, bool force_nonblock)
fddaface 3882{
bcda7baa 3883 struct io_buffer *kbuf = NULL;
fddaface 3884 struct socket *sock;
bcda7baa 3885 int ret, cflags = 0;
fddaface
JA
3886
3887 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3888 return -EINVAL;
3889
3890 sock = sock_from_file(req->file, &ret);
3891 if (sock) {
3892 struct io_sr_msg *sr = &req->sr_msg;
bcda7baa 3893 void __user *buf = sr->buf;
fddaface
JA
3894 struct msghdr msg;
3895 struct iovec iov;
3896 unsigned flags;
3897
bcda7baa
JA
3898 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3899 if (IS_ERR(kbuf))
3900 return PTR_ERR(kbuf);
3901 else if (kbuf)
3902 buf = u64_to_user_ptr(kbuf->addr);
3903
3904 ret = import_single_range(READ, buf, sr->len, &iov,
fddaface 3905 &msg.msg_iter);
bcda7baa
JA
3906 if (ret) {
3907 kfree(kbuf);
fddaface 3908 return ret;
bcda7baa 3909 }
fddaface 3910
bcda7baa 3911 req->flags |= REQ_F_NEED_CLEANUP;
fddaface
JA
3912 msg.msg_name = NULL;
3913 msg.msg_control = NULL;
3914 msg.msg_controllen = 0;
3915 msg.msg_namelen = 0;
3916 msg.msg_iocb = NULL;
3917 msg.msg_flags = 0;
3918
3919 flags = req->sr_msg.msg_flags;
3920 if (flags & MSG_DONTWAIT)
3921 req->flags |= REQ_F_NOWAIT;
3922 else if (force_nonblock)
3923 flags |= MSG_DONTWAIT;
3924
0b7b21e4 3925 ret = sock_recvmsg(sock, &msg, flags);
fddaface
JA
3926 if (force_nonblock && ret == -EAGAIN)
3927 return -EAGAIN;
3928 if (ret == -ERESTARTSYS)
3929 ret = -EINTR;
3930 }
3931
bcda7baa
JA
3932 kfree(kbuf);
3933 req->flags &= ~REQ_F_NEED_CLEANUP;
3934 __io_cqring_add_event(req, ret, cflags);
fddaface
JA
3935 if (ret < 0)
3936 req_set_fail_links(req);
014db007 3937 io_put_req(req);
fddaface 3938 return 0;
fddaface
JA
3939}
3940
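/*
 * Editor's sketch (not kernel code): driving the buffer-select path of
 * io_recv()/io_recvmsg() above from userspace with liburing. Buffer group
 * id 1, `sockfd` and the already-initialised `ring`/`sqe`/`cqe` are
 * illustrative assumptions; IOSQE_BUFFER_SELECT and the CQE buffer flags
 * are the uapi names used by the kernel code above.
 *
 *	char bufs[4][256];
 *	unsigned buf_id;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, bufs, 256, 4, 1, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);		/* reap PROVIDE_BUFFERS completion */
 *	io_uring_cqe_seen(&ring, cqe);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, 256, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;	/* let the kernel pick a buffer */
 *	sqe->buf_group = 1;			/* matches sr->bgid above */
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		buf_id = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *	io_uring_cqe_seen(&ring, cqe);
 */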
3529d8c2 3941static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 3942{
8ed8d3c3
JA
3943 struct io_accept *accept = &req->accept;
3944
17f2fe35
JA
3945 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3946 return -EINVAL;
8042d6ce 3947 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
3948 return -EINVAL;
3949
d55e5f5b
JA
3950 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3951 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 3952 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 3953 accept->nofile = rlimit(RLIMIT_NOFILE);
8ed8d3c3 3954 return 0;
8ed8d3c3 3955}
17f2fe35 3956
014db007 3957static int __io_accept(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3
JA
3958{
3959 struct io_accept *accept = &req->accept;
3960 unsigned file_flags;
3961 int ret;
3962
3963 file_flags = force_nonblock ? O_NONBLOCK : 0;
3964 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
09952e3e
JA
3965 accept->addr_len, accept->flags,
3966 accept->nofile);
8ed8d3c3 3967 if (ret == -EAGAIN && force_nonblock)
17f2fe35 3968 return -EAGAIN;
8e3cca12
JA
3969 if (ret == -ERESTARTSYS)
3970 ret = -EINTR;
4e88d6e7
JA
3971 if (ret < 0)
3972 req_set_fail_links(req);
78e19bbe 3973 io_cqring_add_event(req, ret);
014db007 3974 io_put_req(req);
17f2fe35 3975 return 0;
8ed8d3c3
JA
3976}
3977
3978static void io_accept_finish(struct io_wq_work **workptr)
3979{
3980 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
8ed8d3c3
JA
3981
3982 if (io_req_cancelled(req))
3983 return;
014db007 3984 __io_accept(req, false);
e9fd9396 3985 io_steal_work(req, workptr);
8ed8d3c3 3986}
8ed8d3c3 3987
014db007 3988static int io_accept(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 3989{
8ed8d3c3
JA
3990 int ret;
3991
014db007 3992 ret = __io_accept(req, force_nonblock);
8ed8d3c3
JA
3993 if (ret == -EAGAIN && force_nonblock) {
3994 req->work.func = io_accept_finish;
8ed8d3c3
JA
3995 return -EAGAIN;
3996 }
3997 return 0;
0fa03c62 3998}
5d17b4a4 3999
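/*
 * Editor's sketch (not kernel code): the userspace counterpart of the
 * IORING_OP_ACCEPT handling above, using liburing. `listen_fd` is an
 * assumed listening socket; `ring`/`sqe`/`cqe` as in the earlier sketches.
 *
 *	struct sockaddr_storage peer;
 *	socklen_t peer_len = sizeof(peer);
 *	int conn_fd;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&peer, &peer_len, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (cqe->res >= 0)
 *		conn_fd = cqe->res;	/* accepted socket, as with accept4() */
 *	io_uring_cqe_seen(&ring, cqe);
 */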
3529d8c2 4000static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 4001{
3529d8c2
JA
4002 struct io_connect *conn = &req->connect;
4003 struct io_async_ctx *io = req->io;
f499a021 4004
3fbb51c1
JA
4005 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4006 return -EINVAL;
4007 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4008 return -EINVAL;
4009
3529d8c2
JA
4010 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4011 conn->addr_len = READ_ONCE(sqe->addr2);
4012
4013 if (!io)
4014 return 0;
4015
4016 return move_addr_to_kernel(conn->addr, conn->addr_len,
3fbb51c1 4017 &io->connect.address);
f499a021
JA
4018}
4019
014db007 4020static int io_connect(struct io_kiocb *req, bool force_nonblock)
f8e85cf2 4021{
f499a021 4022 struct io_async_ctx __io, *io;
f8e85cf2 4023 unsigned file_flags;
3fbb51c1 4024 int ret;
f8e85cf2 4025
f499a021
JA
4026 if (req->io) {
4027 io = req->io;
4028 } else {
3529d8c2
JA
4029 ret = move_addr_to_kernel(req->connect.addr,
4030 req->connect.addr_len,
4031 &__io.connect.address);
f499a021
JA
4032 if (ret)
4033 goto out;
4034 io = &__io;
4035 }
4036
3fbb51c1
JA
4037 file_flags = force_nonblock ? O_NONBLOCK : 0;
4038
4039 ret = __sys_connect_file(req->file, &io->connect.address,
4040 req->connect.addr_len, file_flags);
87f80d62 4041 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
b7bb4f7d
JA
4042 if (req->io)
4043 return -EAGAIN;
4044 if (io_alloc_async_ctx(req)) {
f499a021
JA
4045 ret = -ENOMEM;
4046 goto out;
4047 }
b7bb4f7d 4048 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
f8e85cf2 4049 return -EAGAIN;
f499a021 4050 }
f8e85cf2
JA
4051 if (ret == -ERESTARTSYS)
4052 ret = -EINTR;
f499a021 4053out:
4e88d6e7
JA
4054 if (ret < 0)
4055 req_set_fail_links(req);
f8e85cf2 4056 io_cqring_add_event(req, ret);
014db007 4057 io_put_req(req);
f8e85cf2 4058 return 0;
469956e8
Y
4059}
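/*
 * Editor's sketch (not kernel code): IORING_OP_CONNECT from userspace with
 * liburing. The kernel path above retries -EAGAIN/-EINPROGRESS results
 * asynchronously, so userspace just waits for a single CQE. `sockfd` and
 * the partially filled sockaddr are illustrative assumptions.
 *
 *	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(80) };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&dst, sizeof(dst));
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);	/* cqe->res is 0 or -errno */
 *	io_uring_cqe_seen(&ring, cqe);
 */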
4060#else /* !CONFIG_NET */
4061static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4062{
f8e85cf2 4063 return -EOPNOTSUPP;
f8e85cf2
JA
4064}
4065
469956e8
Y
4066static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
4067{
4068 return -EOPNOTSUPP;
4069}
4070
4071static int io_send(struct io_kiocb *req, bool force_nonblock)
4072{
4073 return -EOPNOTSUPP;
4074}
4075
4076static int io_recvmsg_prep(struct io_kiocb *req,
4077 const struct io_uring_sqe *sqe)
4078{
4079 return -EOPNOTSUPP;
4080}
4081
4082static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
4083{
4084 return -EOPNOTSUPP;
4085}
4086
4087static int io_recv(struct io_kiocb *req, bool force_nonblock)
4088{
4089 return -EOPNOTSUPP;
4090}
4091
4092static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4093{
4094 return -EOPNOTSUPP;
4095}
4096
4097static int io_accept(struct io_kiocb *req, bool force_nonblock)
4098{
4099 return -EOPNOTSUPP;
4100}
4101
4102static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4103{
4104 return -EOPNOTSUPP;
4105}
4106
4107static int io_connect(struct io_kiocb *req, bool force_nonblock)
4108{
f8e85cf2 4109 return -EOPNOTSUPP;
f8e85cf2 4110}
469956e8 4111#endif /* CONFIG_NET */
f8e85cf2 4112
d7718a9d
JA
4113struct io_poll_table {
4114 struct poll_table_struct pt;
4115 struct io_kiocb *req;
4116 int error;
4117};
4118
d7718a9d
JA
4119static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4120 __poll_t mask, task_work_func_t func)
4121{
4122 struct task_struct *tsk;
aa96bf8a 4123 int ret;
d7718a9d
JA
4124
4125 /* for instances that support it check for an event match first: */
4126 if (mask && !(mask & poll->events))
4127 return 0;
4128
4129 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4130
4131 list_del_init(&poll->wait.entry);
4132
4133 tsk = req->task;
4134 req->result = mask;
4135 init_task_work(&req->task_work, func);
4136 /*
aa96bf8a
JA
4137 * If this fails, then the task is exiting. Punt to one of the io-wq
4138 * threads to ensure the work gets run; we can't always rely on exit
4139 * cancelation taking care of this.
d7718a9d 4140 */
aa96bf8a
JA
4141 ret = task_work_add(tsk, &req->task_work, true);
4142 if (unlikely(ret)) {
4143 tsk = io_wq_get_task(req->ctx->io_wq);
4144 task_work_add(tsk, &req->task_work, true);
4145 }
d7718a9d
JA
4146 wake_up_process(tsk);
4147 return 1;
4148}
4149
74ce6ce4
JA
4150static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4151 __acquires(&req->ctx->completion_lock)
4152{
4153 struct io_ring_ctx *ctx = req->ctx;
4154
4155 if (!req->result && !READ_ONCE(poll->canceled)) {
4156 struct poll_table_struct pt = { ._key = poll->events };
4157
4158 req->result = vfs_poll(req->file, &pt) & poll->events;
4159 }
4160
4161 spin_lock_irq(&ctx->completion_lock);
4162 if (!req->result && !READ_ONCE(poll->canceled)) {
4163 add_wait_queue(poll->head, &poll->wait);
4164 return true;
4165 }
4166
4167 return false;
4168}
4169
18bceab1
JA
4170static void io_poll_remove_double(struct io_kiocb *req)
4171{
4172 struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
4173
4174 lockdep_assert_held(&req->ctx->completion_lock);
4175
4176 if (poll && poll->head) {
4177 struct wait_queue_head *head = poll->head;
4178
4179 spin_lock(&head->lock);
4180 list_del_init(&poll->wait.entry);
4181 if (poll->wait.private)
4182 refcount_dec(&req->refs);
4183 poll->head = NULL;
4184 spin_unlock(&head->lock);
4185 }
4186}
4187
4188static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4189{
4190 struct io_ring_ctx *ctx = req->ctx;
4191
4192 io_poll_remove_double(req);
4193 req->poll.done = true;
4194 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4195 io_commit_cqring(ctx);
4196}
4197
4198static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4199{
4200 struct io_ring_ctx *ctx = req->ctx;
4201
4202 if (io_poll_rewait(req, &req->poll)) {
4203 spin_unlock_irq(&ctx->completion_lock);
4204 return;
4205 }
4206
4207 hash_del(&req->hash_node);
4208 io_poll_complete(req, req->result, 0);
4209 req->flags |= REQ_F_COMP_LOCKED;
4210 io_put_req_find_next(req, nxt);
4211 spin_unlock_irq(&ctx->completion_lock);
4212
4213 io_cqring_ev_posted(ctx);
4214}
4215
4216static void io_poll_task_func(struct callback_head *cb)
4217{
4218 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4219 struct io_kiocb *nxt = NULL;
4220
4221 io_poll_task_handler(req, &nxt);
4222 if (nxt) {
4223 struct io_ring_ctx *ctx = nxt->ctx;
4224
4225 mutex_lock(&ctx->uring_lock);
4226 __io_queue_sqe(nxt, NULL);
4227 mutex_unlock(&ctx->uring_lock);
4228 }
4229}
4230
4231static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4232 int sync, void *key)
4233{
4234 struct io_kiocb *req = wait->private;
4235 struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
4236 __poll_t mask = key_to_poll(key);
4237
4238 /* for instances that support it check for an event match first: */
4239 if (mask && !(mask & poll->events))
4240 return 0;
4241
4242 if (req->poll.head) {
4243 bool done;
4244
4245 spin_lock(&req->poll.head->lock);
4246 done = list_empty(&req->poll.wait.entry);
4247 if (!done)
4248 list_del_init(&req->poll.wait.entry);
4249 spin_unlock(&req->poll.head->lock);
4250 if (!done)
4251 __io_async_wake(req, poll, mask, io_poll_task_func);
4252 }
4253 refcount_dec(&req->refs);
4254 return 1;
4255}
4256
4257static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4258 wait_queue_func_t wake_func)
4259{
4260 poll->head = NULL;
4261 poll->done = false;
4262 poll->canceled = false;
4263 poll->events = events;
4264 INIT_LIST_HEAD(&poll->wait.entry);
4265 init_waitqueue_func_entry(&poll->wait, wake_func);
4266}
4267
4268static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
4269 struct wait_queue_head *head)
4270{
4271 struct io_kiocb *req = pt->req;
4272
4273 /*
4274 * If poll->head is already set, it's because the file being polled
4275 * uses multiple waitqueues for poll handling (e.g. one for read, one
4276 * for write). Set up a separate io_poll_iocb if this happens.
4277 */
4278 if (unlikely(poll->head)) {
4279 /* already have a 2nd entry, fail a third attempt */
4280 if (req->io) {
4281 pt->error = -EINVAL;
4282 return;
4283 }
4284 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4285 if (!poll) {
4286 pt->error = -ENOMEM;
4287 return;
4288 }
4289 io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
4290 refcount_inc(&req->refs);
4291 poll->wait.private = req;
4292 req->io = (void *) poll;
4293 }
4294
4295 pt->error = 0;
4296 poll->head = head;
4297 add_wait_queue(head, &poll->wait);
4298}
4299
4300static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4301 struct poll_table_struct *p)
4302{
4303 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4304
4305 __io_queue_proc(&pt->req->apoll->poll, pt, head);
4306}
4307
d7718a9d
JA
4308static void io_async_task_func(struct callback_head *cb)
4309{
4310 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4311 struct async_poll *apoll = req->apoll;
4312 struct io_ring_ctx *ctx = req->ctx;
31067255 4313 bool canceled = false;
d7718a9d
JA
4314
4315 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4316
74ce6ce4 4317 if (io_poll_rewait(req, &apoll->poll)) {
d7718a9d 4318 spin_unlock_irq(&ctx->completion_lock);
74ce6ce4 4319 return;
d7718a9d
JA
4320 }
4321
31067255
JA
4322 /* If req is still hashed, it cannot have been canceled. Don't check. */
4323 if (hash_hashed(&req->hash_node)) {
74ce6ce4 4324 hash_del(&req->hash_node);
31067255
JA
4325 } else {
4326 canceled = READ_ONCE(apoll->poll.canceled);
4327 if (canceled) {
4328 io_cqring_fill_event(req, -ECANCELED);
4329 io_commit_cqring(ctx);
4330 }
2bae047e
JA
4331 }
4332
74ce6ce4
JA
4333 spin_unlock_irq(&ctx->completion_lock);
4334
44575a67
XW
4335 /* restore ->work in case we need to retry */
4336 memcpy(&req->work, &apoll->work, sizeof(req->work));
31067255 4337 kfree(apoll);
44575a67 4338
31067255
JA
4339 if (!canceled) {
4340 __set_current_state(TASK_RUNNING);
4341 mutex_lock(&ctx->uring_lock);
4342 __io_queue_sqe(req, NULL);
4343 mutex_unlock(&ctx->uring_lock);
4344 } else {
2bae047e
JA
4345 io_cqring_ev_posted(ctx);
4346 req_set_fail_links(req);
44575a67 4347 io_double_put_req(req);
2bae047e 4348 }
d7718a9d
JA
4349}
4350
4351static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4352 void *key)
4353{
4354 struct io_kiocb *req = wait->private;
4355 struct io_poll_iocb *poll = &req->apoll->poll;
4356
4357 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
4358 key_to_poll(key));
4359
4360 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
4361}
4362
4363static void io_poll_req_insert(struct io_kiocb *req)
4364{
4365 struct io_ring_ctx *ctx = req->ctx;
4366 struct hlist_head *list;
4367
4368 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
4369 hlist_add_head(&req->hash_node, list);
4370}
4371
4372static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
4373 struct io_poll_iocb *poll,
4374 struct io_poll_table *ipt, __poll_t mask,
4375 wait_queue_func_t wake_func)
4376 __acquires(&ctx->completion_lock)
4377{
4378 struct io_ring_ctx *ctx = req->ctx;
4379 bool cancel = false;
4380
4381 poll->file = req->file;
18bceab1
JA
4382 io_init_poll_iocb(poll, mask, wake_func);
4383 poll->wait.private = req;
d7718a9d
JA
4384
4385 ipt->pt._key = mask;
4386 ipt->req = req;
4387 ipt->error = -EINVAL;
4388
d7718a9d
JA
4389 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
4390
4391 spin_lock_irq(&ctx->completion_lock);
4392 if (likely(poll->head)) {
4393 spin_lock(&poll->head->lock);
4394 if (unlikely(list_empty(&poll->wait.entry))) {
4395 if (ipt->error)
4396 cancel = true;
4397 ipt->error = 0;
4398 mask = 0;
4399 }
4400 if (mask || ipt->error)
4401 list_del_init(&poll->wait.entry);
4402 else if (cancel)
4403 WRITE_ONCE(poll->canceled, true);
4404 else if (!poll->done) /* actually waiting for an event */
4405 io_poll_req_insert(req);
4406 spin_unlock(&poll->head->lock);
4407 }
4408
4409 return mask;
4410}
4411
4412static bool io_arm_poll_handler(struct io_kiocb *req)
4413{
4414 const struct io_op_def *def = &io_op_defs[req->opcode];
4415 struct io_ring_ctx *ctx = req->ctx;
4416 struct async_poll *apoll;
4417 struct io_poll_table ipt;
4418 __poll_t mask, ret;
18bceab1 4419 bool had_io;
d7718a9d
JA
4420
4421 if (!req->file || !file_can_poll(req->file))
4422 return false;
4423 if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
4424 return false;
4425 if (!def->pollin && !def->pollout)
4426 return false;
4427
4428 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
4429 if (unlikely(!apoll))
4430 return false;
4431
4432 req->flags |= REQ_F_POLLED;
4433 memcpy(&apoll->work, &req->work, sizeof(req->work));
18bceab1 4434 had_io = req->io != NULL;
d7718a9d 4435
3537b6a7 4436 get_task_struct(current);
d7718a9d
JA
4437 req->task = current;
4438 req->apoll = apoll;
4439 INIT_HLIST_NODE(&req->hash_node);
4440
8755d97a 4441 mask = 0;
d7718a9d 4442 if (def->pollin)
8755d97a 4443 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
4444 if (def->pollout)
4445 mask |= POLLOUT | POLLWRNORM;
4446 mask |= POLLERR | POLLPRI;
4447
4448 ipt.pt._qproc = io_async_queue_proc;
4449
4450 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
4451 io_async_wake);
4452 if (ret) {
4453 ipt.error = 0;
18bceab1
JA
4454 /* only remove double add if we did it here */
4455 if (!had_io)
4456 io_poll_remove_double(req);
d7718a9d
JA
4457 spin_unlock_irq(&ctx->completion_lock);
4458 memcpy(&req->work, &apoll->work, sizeof(req->work));
4459 kfree(apoll);
4460 return false;
4461 }
4462 spin_unlock_irq(&ctx->completion_lock);
4463 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4464 apoll->poll.events);
4465 return true;
4466}
4467
4468static bool __io_poll_remove_one(struct io_kiocb *req,
4469 struct io_poll_iocb *poll)
221c5eb2 4470{
b41e9852 4471 bool do_complete = false;
221c5eb2
JA
4472
4473 spin_lock(&poll->head->lock);
4474 WRITE_ONCE(poll->canceled, true);
392edb45
JA
4475 if (!list_empty(&poll->wait.entry)) {
4476 list_del_init(&poll->wait.entry);
b41e9852 4477 do_complete = true;
221c5eb2
JA
4478 }
4479 spin_unlock(&poll->head->lock);
3bfa5bcb 4480 hash_del(&req->hash_node);
d7718a9d
JA
4481 return do_complete;
4482}
4483
4484static bool io_poll_remove_one(struct io_kiocb *req)
4485{
4486 bool do_complete;
4487
4488 if (req->opcode == IORING_OP_POLL_ADD) {
18bceab1 4489 io_poll_remove_double(req);
d7718a9d
JA
4490 do_complete = __io_poll_remove_one(req, &req->poll);
4491 } else {
3bfa5bcb
JA
4492 struct async_poll *apoll = req->apoll;
4493
d7718a9d 4494 /* non-poll requests have submit ref still */
3bfa5bcb
JA
4495 do_complete = __io_poll_remove_one(req, &apoll->poll);
4496 if (do_complete) {
d7718a9d 4497 io_put_req(req);
3bfa5bcb
JA
4498 /*
4499 * restore ->work because we will call
4500 * io_req_work_drop_env below when dropping the
4501 * final reference.
4502 */
4503 memcpy(&req->work, &apoll->work, sizeof(req->work));
4504 kfree(apoll);
4505 }
b1f573bd
XW
4506 }
4507
b41e9852
JA
4508 if (do_complete) {
4509 io_cqring_fill_event(req, -ECANCELED);
4510 io_commit_cqring(req->ctx);
4511 req->flags |= REQ_F_COMP_LOCKED;
4512 io_put_req(req);
4513 }
4514
4515 return do_complete;
221c5eb2
JA
4516}
4517
4518static void io_poll_remove_all(struct io_ring_ctx *ctx)
4519{
78076bb6 4520 struct hlist_node *tmp;
221c5eb2 4521 struct io_kiocb *req;
8e2e1faf 4522 int posted = 0, i;
221c5eb2
JA
4523
4524 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
4525 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4526 struct hlist_head *list;
4527
4528 list = &ctx->cancel_hash[i];
4529 hlist_for_each_entry_safe(req, tmp, list, hash_node)
8e2e1faf 4530 posted += io_poll_remove_one(req);
221c5eb2
JA
4531 }
4532 spin_unlock_irq(&ctx->completion_lock);
b41e9852 4533
8e2e1faf
JA
4534 if (posted)
4535 io_cqring_ev_posted(ctx);
221c5eb2
JA
4536}
4537
47f46768
JA
4538static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4539{
78076bb6 4540 struct hlist_head *list;
47f46768
JA
4541 struct io_kiocb *req;
4542
78076bb6
JA
4543 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4544 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
4545 if (sqe_addr != req->user_data)
4546 continue;
4547 if (io_poll_remove_one(req))
eac406c6 4548 return 0;
b41e9852 4549 return -EALREADY;
47f46768
JA
4550 }
4551
4552 return -ENOENT;
4553}
4554
3529d8c2
JA
4555static int io_poll_remove_prep(struct io_kiocb *req,
4556 const struct io_uring_sqe *sqe)
0969e783 4557{
0969e783
JA
4558 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4559 return -EINVAL;
4560 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4561 sqe->poll_events)
4562 return -EINVAL;
4563
4564 req->poll.addr = READ_ONCE(sqe->addr);
0969e783
JA
4565 return 0;
4566}
4567
221c5eb2
JA
4568/*
4569 * Find a running poll command that matches one specified in sqe->addr,
4570 * and remove it if found.
4571 */
fc4df999 4572static int io_poll_remove(struct io_kiocb *req)
221c5eb2
JA
4573{
4574 struct io_ring_ctx *ctx = req->ctx;
0969e783 4575 u64 addr;
47f46768 4576 int ret;
221c5eb2 4577
0969e783 4578 addr = req->poll.addr;
221c5eb2 4579 spin_lock_irq(&ctx->completion_lock);
0969e783 4580 ret = io_poll_cancel(ctx, addr);
221c5eb2
JA
4581 spin_unlock_irq(&ctx->completion_lock);
4582
78e19bbe 4583 io_cqring_add_event(req, ret);
4e88d6e7
JA
4584 if (ret < 0)
4585 req_set_fail_links(req);
e65ef56d 4586 io_put_req(req);
221c5eb2
JA
4587 return 0;
4588}
4589
221c5eb2
JA
4590static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4591 void *key)
4592{
c2f2eb7d
JA
4593 struct io_kiocb *req = wait->private;
4594 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4595
d7718a9d 4596 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
4597}
4598
221c5eb2
JA
4599static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4600 struct poll_table_struct *p)
4601{
4602 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4603
d7718a9d 4604 __io_queue_proc(&pt->req->poll, pt, head);
eac406c6
JA
4605}
4606
3529d8c2 4607static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
4608{
4609 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4610 u16 events;
221c5eb2
JA
4611
4612 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4613 return -EINVAL;
4614 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4615 return -EINVAL;
09bb8394
JA
4616 if (!poll->file)
4617 return -EBADF;
221c5eb2 4618
221c5eb2
JA
4619 events = READ_ONCE(sqe->poll_events);
4620 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
b41e9852 4621
3537b6a7 4622 get_task_struct(current);
b41e9852 4623 req->task = current;
0969e783
JA
4624 return 0;
4625}
4626
014db007 4627static int io_poll_add(struct io_kiocb *req)
0969e783
JA
4628{
4629 struct io_poll_iocb *poll = &req->poll;
4630 struct io_ring_ctx *ctx = req->ctx;
4631 struct io_poll_table ipt;
0969e783 4632 __poll_t mask;
0969e783 4633
78076bb6 4634 INIT_HLIST_NODE(&req->hash_node);
36703247 4635 INIT_LIST_HEAD(&req->list);
d7718a9d 4636 ipt.pt._qproc = io_poll_queue_proc;
36703247 4637
d7718a9d
JA
4638 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4639 io_poll_wake);
221c5eb2 4640
8c838788 4641 if (mask) { /* no async, we'd stolen it */
221c5eb2 4642 ipt.error = 0;
b0dd8a41 4643 io_poll_complete(req, mask, 0);
221c5eb2 4644 }
221c5eb2
JA
4645 spin_unlock_irq(&ctx->completion_lock);
4646
8c838788
JA
4647 if (mask) {
4648 io_cqring_ev_posted(ctx);
014db007 4649 io_put_req(req);
221c5eb2 4650 }
8c838788 4651 return ipt.error;
221c5eb2
JA
4652}
4653
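/*
 * Editor's sketch (not kernel code): a one-shot IORING_OP_POLL_ADD from
 * userspace with liburing, i.e. the submission side of io_poll_add()
 * above. `fd` is any pollable file descriptor (assumption).
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);	/* cqe->res carries the signalled poll mask */
 *	io_uring_cqe_seen(&ring, cqe);
 */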
5262f567
JA
4654static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4655{
ad8a48ac
JA
4656 struct io_timeout_data *data = container_of(timer,
4657 struct io_timeout_data, timer);
4658 struct io_kiocb *req = data->req;
4659 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
4660 unsigned long flags;
4661
5262f567
JA
4662 atomic_inc(&ctx->cq_timeouts);
4663
4664 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 4665 /*
11365043
JA
4666 * We could be racing with timeout deletion. If the list is empty,
4667 * then timeout lookup already found it and will be handling it.
ef03681a 4668 */
842f9612 4669 if (!list_empty(&req->list)) {
11365043 4670 struct io_kiocb *prev;
5262f567 4671
11365043
JA
4672 /*
4673 * Adjust the reqs sequence before the current one because it
d195a66e 4674 * will consume a slot in the cq_ring and the cq_tail
11365043
JA
4675 * pointer will be increased; otherwise other timeout reqs may
4676 * return in advance without waiting for enough wait_nr.
4677 */
4678 prev = req;
4679 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
4680 prev->sequence++;
11365043 4681 list_del_init(&req->list);
11365043 4682 }
5262f567 4683
78e19bbe 4684 io_cqring_fill_event(req, -ETIME);
5262f567
JA
4685 io_commit_cqring(ctx);
4686 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4687
4688 io_cqring_ev_posted(ctx);
4e88d6e7 4689 req_set_fail_links(req);
5262f567
JA
4690 io_put_req(req);
4691 return HRTIMER_NORESTART;
4692}
4693
47f46768
JA
4694static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
4695{
4696 struct io_kiocb *req;
4697 int ret = -ENOENT;
4698
4699 list_for_each_entry(req, &ctx->timeout_list, list) {
4700 if (user_data == req->user_data) {
4701 list_del_init(&req->list);
4702 ret = 0;
4703 break;
4704 }
4705 }
4706
4707 if (ret == -ENOENT)
4708 return ret;
4709
2d28390a 4710 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
47f46768
JA
4711 if (ret == -1)
4712 return -EALREADY;
4713
4e88d6e7 4714 req_set_fail_links(req);
47f46768
JA
4715 io_cqring_fill_event(req, -ECANCELED);
4716 io_put_req(req);
4717 return 0;
4718}
4719
3529d8c2
JA
4720static int io_timeout_remove_prep(struct io_kiocb *req,
4721 const struct io_uring_sqe *sqe)
b29472ee 4722{
b29472ee
JA
4723 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4724 return -EINVAL;
4725 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
4726 return -EINVAL;
4727
4728 req->timeout.addr = READ_ONCE(sqe->addr);
4729 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
4730 if (req->timeout.flags)
4731 return -EINVAL;
4732
b29472ee
JA
4733 return 0;
4734}
4735
11365043
JA
4736/*
4737 * Remove or update an existing timeout command
4738 */
fc4df999 4739static int io_timeout_remove(struct io_kiocb *req)
11365043
JA
4740{
4741 struct io_ring_ctx *ctx = req->ctx;
47f46768 4742 int ret;
11365043 4743
11365043 4744 spin_lock_irq(&ctx->completion_lock);
b29472ee 4745 ret = io_timeout_cancel(ctx, req->timeout.addr);
11365043 4746
47f46768 4747 io_cqring_fill_event(req, ret);
11365043
JA
4748 io_commit_cqring(ctx);
4749 spin_unlock_irq(&ctx->completion_lock);
5262f567 4750 io_cqring_ev_posted(ctx);
4e88d6e7
JA
4751 if (ret < 0)
4752 req_set_fail_links(req);
ec9c02ad 4753 io_put_req(req);
11365043 4754 return 0;
5262f567
JA
4755}
4756
3529d8c2 4757static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 4758 bool is_timeout_link)
5262f567 4759{
ad8a48ac 4760 struct io_timeout_data *data;
a41525ab 4761 unsigned flags;
56080b02 4762 u32 off = READ_ONCE(sqe->off);
5262f567 4763
ad8a48ac 4764 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 4765 return -EINVAL;
ad8a48ac 4766 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 4767 return -EINVAL;
56080b02 4768 if (off && is_timeout_link)
2d28390a 4769 return -EINVAL;
a41525ab
JA
4770 flags = READ_ONCE(sqe->timeout_flags);
4771 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 4772 return -EINVAL;
bdf20073 4773
56080b02 4774 req->timeout.count = off;
26a61679 4775
3529d8c2 4776 if (!req->io && io_alloc_async_ctx(req))
26a61679
JA
4777 return -ENOMEM;
4778
4779 data = &req->io->timeout;
ad8a48ac 4780 data->req = req;
ad8a48ac
JA
4781 req->flags |= REQ_F_TIMEOUT;
4782
4783 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
4784 return -EFAULT;
4785
11365043 4786 if (flags & IORING_TIMEOUT_ABS)
ad8a48ac 4787 data->mode = HRTIMER_MODE_ABS;
11365043 4788 else
ad8a48ac 4789 data->mode = HRTIMER_MODE_REL;
11365043 4790
ad8a48ac
JA
4791 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
4792 return 0;
4793}
4794
fc4df999 4795static int io_timeout(struct io_kiocb *req)
ad8a48ac 4796{
ad8a48ac
JA
4797 struct io_ring_ctx *ctx = req->ctx;
4798 struct io_timeout_data *data;
4799 struct list_head *entry;
4800 unsigned span = 0;
b55ce732 4801 u32 count = req->timeout.count;
22cad158 4802 u32 seq = req->sequence;
ad8a48ac 4803
2d28390a 4804 data = &req->io->timeout;
733f5c95 4805 spin_lock_irq(&ctx->completion_lock);
93bd25bb 4806
5262f567
JA
4807 /*
4808 * sqe->off holds how many events need to occur for this
93bd25bb
JA
4809 * timeout event to be satisfied. If it isn't set, then this is
4810 * a pure timeout request, sequence isn't used.
5262f567 4811 */
93bd25bb
JA
4812 if (!count) {
4813 req->flags |= REQ_F_TIMEOUT_NOSEQ;
93bd25bb
JA
4814 entry = ctx->timeout_list.prev;
4815 goto add;
4816 }
5262f567 4817
22cad158 4818 req->sequence = seq + count;
5262f567
JA
4819
4820 /*
4821 * Insertion sort, ensuring the first entry in the list is always
4822 * the one we need first.
4823 */
5262f567
JA
4824 list_for_each_prev(entry, &ctx->timeout_list) {
4825 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
22cad158 4826 unsigned nxt_seq;
5da0fb1a 4827 long long tmp, tmp_nxt;
b55ce732 4828 u32 nxt_offset = nxt->timeout.count;
5262f567 4829
93bd25bb
JA
4830 if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
4831 continue;
4832
5da0fb1a 4833 /*
22cad158 4834 * Since seq + count can overflow, use type long
5da0fb1a 4835 * long to store it.
4836 */
22cad158
PB
4837 tmp = (long long)seq + count;
4838 nxt_seq = nxt->sequence - nxt_offset;
4839 tmp_nxt = (long long)nxt_seq + nxt_offset;
5da0fb1a 4840
4841 /*
4842 * cached_sq_head may overflow, and it will never overflow twice
4843 * once there is some timeout req still valid.
4844 */
22cad158 4845 if (seq < nxt_seq)
8b07a65a 4846 tmp += UINT_MAX;
5da0fb1a 4847
a1f58ba4 4848 if (tmp > tmp_nxt)
5262f567 4849 break;
a1f58ba4 4850
4851 /*
4852 * Sequence of reqs after the inserted one and itself should
4853 * be adjusted because each timeout req consumes a slot.
4854 */
4855 span++;
4856 nxt->sequence++;
5262f567 4857 }
a1f58ba4 4858 req->sequence -= span;
93bd25bb 4859add:
5262f567 4860 list_add(&req->list, entry);
ad8a48ac
JA
4861 data->timer.function = io_timeout_fn;
4862 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 4863 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
4864 return 0;
4865}
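/*
 * Editor's sketch (not kernel code): the userspace view of the timeout
 * machinery above. A pure timeout (off/count == 0) completes with -ETIME
 * when the timer fires; a non-zero count completes early once that many
 * other CQEs have been posted. `ring`/`sqe`/`cqe` as in earlier sketches.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);	/* 0 events: pure timeout */
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);		/* expect cqe->res == -ETIME */
 *	io_uring_cqe_seen(&ring, cqe);
 */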
5262f567 4866
62755e35
JA
4867static bool io_cancel_cb(struct io_wq_work *work, void *data)
4868{
4869 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
4870
4871 return req->user_data == (unsigned long) data;
4872}
4873
e977d6d3 4874static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 4875{
62755e35 4876 enum io_wq_cancel cancel_ret;
62755e35
JA
4877 int ret = 0;
4878
62755e35
JA
4879 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
4880 switch (cancel_ret) {
4881 case IO_WQ_CANCEL_OK:
4882 ret = 0;
4883 break;
4884 case IO_WQ_CANCEL_RUNNING:
4885 ret = -EALREADY;
4886 break;
4887 case IO_WQ_CANCEL_NOTFOUND:
4888 ret = -ENOENT;
4889 break;
4890 }
4891
e977d6d3
JA
4892 return ret;
4893}
4894
47f46768
JA
4895static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
4896 struct io_kiocb *req, __u64 sqe_addr,
014db007 4897 int success_ret)
47f46768
JA
4898{
4899 unsigned long flags;
4900 int ret;
4901
4902 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
4903 if (ret != -ENOENT) {
4904 spin_lock_irqsave(&ctx->completion_lock, flags);
4905 goto done;
4906 }
4907
4908 spin_lock_irqsave(&ctx->completion_lock, flags);
4909 ret = io_timeout_cancel(ctx, sqe_addr);
4910 if (ret != -ENOENT)
4911 goto done;
4912 ret = io_poll_cancel(ctx, sqe_addr);
4913done:
b0dd8a41
JA
4914 if (!ret)
4915 ret = success_ret;
47f46768
JA
4916 io_cqring_fill_event(req, ret);
4917 io_commit_cqring(ctx);
4918 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4919 io_cqring_ev_posted(ctx);
4920
4e88d6e7
JA
4921 if (ret < 0)
4922 req_set_fail_links(req);
014db007 4923 io_put_req(req);
47f46768
JA
4924}
4925
3529d8c2
JA
4926static int io_async_cancel_prep(struct io_kiocb *req,
4927 const struct io_uring_sqe *sqe)
e977d6d3 4928{
fbf23849 4929 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3
JA
4930 return -EINVAL;
4931 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
4932 sqe->cancel_flags)
4933 return -EINVAL;
4934
fbf23849
JA
4935 req->cancel.addr = READ_ONCE(sqe->addr);
4936 return 0;
4937}
4938
014db007 4939static int io_async_cancel(struct io_kiocb *req)
fbf23849
JA
4940{
4941 struct io_ring_ctx *ctx = req->ctx;
fbf23849 4942
014db007 4943 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
4944 return 0;
4945}
4946
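/*
 * Editor's sketch (not kernel code): cancelling an in-flight request from
 * userspace, which lands in io_async_cancel()/io_async_cancel_one() above.
 * The target is identified by its user_data. Note that the liburing helper
 * signature has varied across releases (pointer vs. 64-bit user_data), so
 * treat this as illustrative.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data(sqe, (void *)0x1234);	/* tag to cancel by */
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_cancel(sqe, (void *)0x1234, 0);
 *	io_uring_submit(&ring);
 *	/* cancel CQE: 0 on success, -EALREADY if already running, -ENOENT if not found */
 */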
05f3fb3c
JA
4947static int io_files_update_prep(struct io_kiocb *req,
4948 const struct io_uring_sqe *sqe)
4949{
4950 if (sqe->flags || sqe->ioprio || sqe->rw_flags)
4951 return -EINVAL;
4952
4953 req->files_update.offset = READ_ONCE(sqe->off);
4954 req->files_update.nr_args = READ_ONCE(sqe->len);
4955 if (!req->files_update.nr_args)
4956 return -EINVAL;
4957 req->files_update.arg = READ_ONCE(sqe->addr);
4958 return 0;
4959}
4960
4961static int io_files_update(struct io_kiocb *req, bool force_nonblock)
fbf23849
JA
4962{
4963 struct io_ring_ctx *ctx = req->ctx;
05f3fb3c
JA
4964 struct io_uring_files_update up;
4965 int ret;
fbf23849 4966
f86cd20c 4967 if (force_nonblock)
05f3fb3c 4968 return -EAGAIN;
05f3fb3c
JA
4969
4970 up.offset = req->files_update.offset;
4971 up.fds = req->files_update.arg;
4972
4973 mutex_lock(&ctx->uring_lock);
4974 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
4975 mutex_unlock(&ctx->uring_lock);
4976
4977 if (ret < 0)
4978 req_set_fail_links(req);
4979 io_cqring_add_event(req, ret);
4980 io_put_req(req);
5262f567
JA
4981 return 0;
4982}
4983
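/*
 * Editor's sketch (not kernel code): updating a registered (fixed) file
 * table, which is what IORING_OP_FILES_UPDATE above implements. The helper
 * names are assumed from liburing and may differ slightly by version;
 * `sock_a`, `sock_b`, `listen_fd` are illustrative.
 *
 *	int fds[2] = { sock_a, sock_b };
 *
 *	io_uring_register_files(&ring, fds, 2);		/* initial fixed-file table */
 *
 *	int new_fd = accept(listen_fd, NULL, NULL);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_files_update(sqe, &new_fd, 1, 1);	/* replace slot 1 in-flight */
 *	io_uring_submit(&ring);
 */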
3529d8c2
JA
4984static int io_req_defer_prep(struct io_kiocb *req,
4985 const struct io_uring_sqe *sqe)
f67676d1 4986{
e781573e 4987 ssize_t ret = 0;
f67676d1 4988
f1d96a8f
PB
4989 if (!sqe)
4990 return 0;
4991
f86cd20c
JA
4992 if (io_op_defs[req->opcode].file_table) {
4993 ret = io_grab_files(req);
4994 if (unlikely(ret))
4995 return ret;
4996 }
4997
cccf0ee8
JA
4998 io_req_work_grab_env(req, &io_op_defs[req->opcode]);
4999
d625c6ee 5000 switch (req->opcode) {
e781573e
JA
5001 case IORING_OP_NOP:
5002 break;
f67676d1
JA
5003 case IORING_OP_READV:
5004 case IORING_OP_READ_FIXED:
3a6820f2 5005 case IORING_OP_READ:
3529d8c2 5006 ret = io_read_prep(req, sqe, true);
f67676d1
JA
5007 break;
5008 case IORING_OP_WRITEV:
5009 case IORING_OP_WRITE_FIXED:
3a6820f2 5010 case IORING_OP_WRITE:
3529d8c2 5011 ret = io_write_prep(req, sqe, true);
f67676d1 5012 break;
0969e783 5013 case IORING_OP_POLL_ADD:
3529d8c2 5014 ret = io_poll_add_prep(req, sqe);
0969e783
JA
5015 break;
5016 case IORING_OP_POLL_REMOVE:
3529d8c2 5017 ret = io_poll_remove_prep(req, sqe);
0969e783 5018 break;
8ed8d3c3 5019 case IORING_OP_FSYNC:
3529d8c2 5020 ret = io_prep_fsync(req, sqe);
8ed8d3c3
JA
5021 break;
5022 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2 5023 ret = io_prep_sfr(req, sqe);
8ed8d3c3 5024 break;
03b1230c 5025 case IORING_OP_SENDMSG:
fddaface 5026 case IORING_OP_SEND:
3529d8c2 5027 ret = io_sendmsg_prep(req, sqe);
03b1230c
JA
5028 break;
5029 case IORING_OP_RECVMSG:
fddaface 5030 case IORING_OP_RECV:
3529d8c2 5031 ret = io_recvmsg_prep(req, sqe);
03b1230c 5032 break;
f499a021 5033 case IORING_OP_CONNECT:
3529d8c2 5034 ret = io_connect_prep(req, sqe);
f499a021 5035 break;
2d28390a 5036 case IORING_OP_TIMEOUT:
3529d8c2 5037 ret = io_timeout_prep(req, sqe, false);
b7bb4f7d 5038 break;
b29472ee 5039 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2 5040 ret = io_timeout_remove_prep(req, sqe);
b29472ee 5041 break;
fbf23849 5042 case IORING_OP_ASYNC_CANCEL:
3529d8c2 5043 ret = io_async_cancel_prep(req, sqe);
fbf23849 5044 break;
2d28390a 5045 case IORING_OP_LINK_TIMEOUT:
3529d8c2 5046 ret = io_timeout_prep(req, sqe, true);
b7bb4f7d 5047 break;
8ed8d3c3 5048 case IORING_OP_ACCEPT:
3529d8c2 5049 ret = io_accept_prep(req, sqe);
8ed8d3c3 5050 break;
d63d1b5e
JA
5051 case IORING_OP_FALLOCATE:
5052 ret = io_fallocate_prep(req, sqe);
5053 break;
15b71abe
JA
5054 case IORING_OP_OPENAT:
5055 ret = io_openat_prep(req, sqe);
5056 break;
b5dba59e
JA
5057 case IORING_OP_CLOSE:
5058 ret = io_close_prep(req, sqe);
5059 break;
05f3fb3c
JA
5060 case IORING_OP_FILES_UPDATE:
5061 ret = io_files_update_prep(req, sqe);
5062 break;
eddc7ef5
JA
5063 case IORING_OP_STATX:
5064 ret = io_statx_prep(req, sqe);
5065 break;
4840e418
JA
5066 case IORING_OP_FADVISE:
5067 ret = io_fadvise_prep(req, sqe);
5068 break;
c1ca757b
JA
5069 case IORING_OP_MADVISE:
5070 ret = io_madvise_prep(req, sqe);
5071 break;
cebdb986
JA
5072 case IORING_OP_OPENAT2:
5073 ret = io_openat2_prep(req, sqe);
5074 break;
3e4827b0
JA
5075 case IORING_OP_EPOLL_CTL:
5076 ret = io_epoll_ctl_prep(req, sqe);
5077 break;
7d67af2c
PB
5078 case IORING_OP_SPLICE:
5079 ret = io_splice_prep(req, sqe);
5080 break;
ddf0322d
JA
5081 case IORING_OP_PROVIDE_BUFFERS:
5082 ret = io_provide_buffers_prep(req, sqe);
5083 break;
067524e9
JA
5084 case IORING_OP_REMOVE_BUFFERS:
5085 ret = io_remove_buffers_prep(req, sqe);
5086 break;
f2a8d5c7
PB
5087 case IORING_OP_TEE:
5088 ret = io_tee_prep(req, sqe);
5089 break;
f67676d1 5090 default:
e781573e
JA
5091 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5092 req->opcode);
5093 ret = -EINVAL;
b7bb4f7d 5094 break;
f67676d1
JA
5095 }
5096
b7bb4f7d 5097 return ret;
f67676d1
JA
5098}
5099
3529d8c2 5100static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
de0617e4 5101{
a197f664 5102 struct io_ring_ctx *ctx = req->ctx;
f67676d1 5103 int ret;
de0617e4 5104
9d858b21 5105 /* Still need defer if there is pending req in defer list. */
4ee36314 5106 if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
de0617e4
JA
5107 return 0;
5108
3529d8c2 5109 if (!req->io && io_alloc_async_ctx(req))
de0617e4
JA
5110 return -EAGAIN;
5111
3529d8c2 5112 ret = io_req_defer_prep(req, sqe);
b7bb4f7d 5113 if (ret < 0)
2d28390a 5114 return ret;
2d28390a 5115
de0617e4 5116 spin_lock_irq(&ctx->completion_lock);
9d858b21 5117 if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
de0617e4 5118 spin_unlock_irq(&ctx->completion_lock);
de0617e4
JA
5119 return 0;
5120 }
5121
915967f6 5122 trace_io_uring_defer(ctx, req, req->user_data);
de0617e4
JA
5123 list_add_tail(&req->list, &ctx->defer_list);
5124 spin_unlock_irq(&ctx->completion_lock);
5125 return -EIOCBQUEUED;
5126}
5127
99bc4c38
PB
5128static void io_cleanup_req(struct io_kiocb *req)
5129{
5130 struct io_async_ctx *io = req->io;
5131
5132 switch (req->opcode) {
5133 case IORING_OP_READV:
5134 case IORING_OP_READ_FIXED:
5135 case IORING_OP_READ:
bcda7baa
JA
5136 if (req->flags & REQ_F_BUFFER_SELECTED)
5137 kfree((void *)(unsigned long)req->rw.addr);
5138 /* fallthrough */
99bc4c38
PB
5139 case IORING_OP_WRITEV:
5140 case IORING_OP_WRITE_FIXED:
5141 case IORING_OP_WRITE:
5142 if (io->rw.iov != io->rw.fast_iov)
5143 kfree(io->rw.iov);
5144 break;
99bc4c38 5145 case IORING_OP_RECVMSG:
52de1fe1
JA
5146 if (req->flags & REQ_F_BUFFER_SELECTED)
5147 kfree(req->sr_msg.kbuf);
5148 /* fallthrough */
5149 case IORING_OP_SENDMSG:
99bc4c38
PB
5150 if (io->msg.iov != io->msg.fast_iov)
5151 kfree(io->msg.iov);
5152 break;
bcda7baa
JA
5153 case IORING_OP_RECV:
5154 if (req->flags & REQ_F_BUFFER_SELECTED)
5155 kfree(req->sr_msg.kbuf);
5156 break;
8fef80bf
PB
5157 case IORING_OP_OPENAT:
5158 case IORING_OP_OPENAT2:
8fef80bf 5159 break;
7d67af2c 5160 case IORING_OP_SPLICE:
f2a8d5c7 5161 case IORING_OP_TEE:
7d67af2c
PB
5162 io_put_file(req, req->splice.file_in,
5163 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5164 break;
99bc4c38
PB
5165 }
5166
5167 req->flags &= ~REQ_F_NEED_CLEANUP;
5168}
5169
3529d8c2 5170static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
014db007 5171 bool force_nonblock)
2b188cc1 5172{
a197f664 5173 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 5174 int ret;
2b188cc1 5175
d625c6ee 5176 switch (req->opcode) {
2b188cc1 5177 case IORING_OP_NOP:
78e19bbe 5178 ret = io_nop(req);
2b188cc1
JA
5179 break;
5180 case IORING_OP_READV:
edafccee 5181 case IORING_OP_READ_FIXED:
3a6820f2 5182 case IORING_OP_READ:
3529d8c2
JA
5183 if (sqe) {
5184 ret = io_read_prep(req, sqe, force_nonblock);
5185 if (ret < 0)
5186 break;
5187 }
014db007 5188 ret = io_read(req, force_nonblock);
edafccee 5189 break;
3529d8c2 5190 case IORING_OP_WRITEV:
edafccee 5191 case IORING_OP_WRITE_FIXED:
3a6820f2 5192 case IORING_OP_WRITE:
3529d8c2
JA
5193 if (sqe) {
5194 ret = io_write_prep(req, sqe, force_nonblock);
5195 if (ret < 0)
5196 break;
5197 }
014db007 5198 ret = io_write(req, force_nonblock);
2b188cc1 5199 break;
c992fe29 5200 case IORING_OP_FSYNC:
3529d8c2
JA
5201 if (sqe) {
5202 ret = io_prep_fsync(req, sqe);
5203 if (ret < 0)
5204 break;
5205 }
014db007 5206 ret = io_fsync(req, force_nonblock);
c992fe29 5207 break;
221c5eb2 5208 case IORING_OP_POLL_ADD:
3529d8c2
JA
5209 if (sqe) {
5210 ret = io_poll_add_prep(req, sqe);
5211 if (ret)
5212 break;
5213 }
014db007 5214 ret = io_poll_add(req);
221c5eb2
JA
5215 break;
5216 case IORING_OP_POLL_REMOVE:
3529d8c2
JA
5217 if (sqe) {
5218 ret = io_poll_remove_prep(req, sqe);
5219 if (ret < 0)
5220 break;
5221 }
fc4df999 5222 ret = io_poll_remove(req);
221c5eb2 5223 break;
5d17b4a4 5224 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2
JA
5225 if (sqe) {
5226 ret = io_prep_sfr(req, sqe);
5227 if (ret < 0)
5228 break;
5229 }
014db007 5230 ret = io_sync_file_range(req, force_nonblock);
5d17b4a4 5231 break;
0fa03c62 5232 case IORING_OP_SENDMSG:
fddaface 5233 case IORING_OP_SEND:
3529d8c2
JA
5234 if (sqe) {
5235 ret = io_sendmsg_prep(req, sqe);
5236 if (ret < 0)
5237 break;
5238 }
fddaface 5239 if (req->opcode == IORING_OP_SENDMSG)
014db007 5240 ret = io_sendmsg(req, force_nonblock);
fddaface 5241 else
014db007 5242 ret = io_send(req, force_nonblock);
0fa03c62 5243 break;
aa1fa28f 5244 case IORING_OP_RECVMSG:
fddaface 5245 case IORING_OP_RECV:
3529d8c2
JA
5246 if (sqe) {
5247 ret = io_recvmsg_prep(req, sqe);
5248 if (ret)
5249 break;
5250 }
fddaface 5251 if (req->opcode == IORING_OP_RECVMSG)
014db007 5252 ret = io_recvmsg(req, force_nonblock);
fddaface 5253 else
014db007 5254 ret = io_recv(req, force_nonblock);
aa1fa28f 5255 break;
5262f567 5256 case IORING_OP_TIMEOUT:
3529d8c2
JA
5257 if (sqe) {
5258 ret = io_timeout_prep(req, sqe, false);
5259 if (ret)
5260 break;
5261 }
fc4df999 5262 ret = io_timeout(req);
5262f567 5263 break;
11365043 5264 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2
JA
5265 if (sqe) {
5266 ret = io_timeout_remove_prep(req, sqe);
5267 if (ret)
5268 break;
5269 }
fc4df999 5270 ret = io_timeout_remove(req);
11365043 5271 break;
17f2fe35 5272 case IORING_OP_ACCEPT:
3529d8c2
JA
5273 if (sqe) {
5274 ret = io_accept_prep(req, sqe);
5275 if (ret)
5276 break;
5277 }
014db007 5278 ret = io_accept(req, force_nonblock);
17f2fe35 5279 break;
f8e85cf2 5280 case IORING_OP_CONNECT:
3529d8c2
JA
5281 if (sqe) {
5282 ret = io_connect_prep(req, sqe);
5283 if (ret)
5284 break;
5285 }
014db007 5286 ret = io_connect(req, force_nonblock);
f8e85cf2 5287 break;
62755e35 5288 case IORING_OP_ASYNC_CANCEL:
3529d8c2
JA
5289 if (sqe) {
5290 ret = io_async_cancel_prep(req, sqe);
5291 if (ret)
5292 break;
5293 }
014db007 5294 ret = io_async_cancel(req);
62755e35 5295 break;
d63d1b5e
JA
5296 case IORING_OP_FALLOCATE:
5297 if (sqe) {
5298 ret = io_fallocate_prep(req, sqe);
5299 if (ret)
5300 break;
5301 }
014db007 5302 ret = io_fallocate(req, force_nonblock);
d63d1b5e 5303 break;
15b71abe
JA
5304 case IORING_OP_OPENAT:
5305 if (sqe) {
5306 ret = io_openat_prep(req, sqe);
5307 if (ret)
5308 break;
5309 }
014db007 5310 ret = io_openat(req, force_nonblock);
15b71abe 5311 break;
b5dba59e
JA
5312 case IORING_OP_CLOSE:
5313 if (sqe) {
5314 ret = io_close_prep(req, sqe);
5315 if (ret)
5316 break;
5317 }
014db007 5318 ret = io_close(req, force_nonblock);
b5dba59e 5319 break;
05f3fb3c
JA
5320 case IORING_OP_FILES_UPDATE:
5321 if (sqe) {
5322 ret = io_files_update_prep(req, sqe);
5323 if (ret)
5324 break;
5325 }
5326 ret = io_files_update(req, force_nonblock);
5327 break;
eddc7ef5
JA
5328 case IORING_OP_STATX:
5329 if (sqe) {
5330 ret = io_statx_prep(req, sqe);
5331 if (ret)
5332 break;
5333 }
014db007 5334 ret = io_statx(req, force_nonblock);
eddc7ef5 5335 break;
4840e418
JA
5336 case IORING_OP_FADVISE:
5337 if (sqe) {
5338 ret = io_fadvise_prep(req, sqe);
5339 if (ret)
5340 break;
5341 }
014db007 5342 ret = io_fadvise(req, force_nonblock);
4840e418 5343 break;
c1ca757b
JA
5344 case IORING_OP_MADVISE:
5345 if (sqe) {
5346 ret = io_madvise_prep(req, sqe);
5347 if (ret)
5348 break;
5349 }
014db007 5350 ret = io_madvise(req, force_nonblock);
c1ca757b 5351 break;
cebdb986
JA
5352 case IORING_OP_OPENAT2:
5353 if (sqe) {
5354 ret = io_openat2_prep(req, sqe);
5355 if (ret)
5356 break;
5357 }
014db007 5358 ret = io_openat2(req, force_nonblock);
cebdb986 5359 break;
3e4827b0
JA
5360 case IORING_OP_EPOLL_CTL:
5361 if (sqe) {
5362 ret = io_epoll_ctl_prep(req, sqe);
5363 if (ret)
5364 break;
5365 }
014db007 5366 ret = io_epoll_ctl(req, force_nonblock);
3e4827b0 5367 break;
7d67af2c
PB
5368 case IORING_OP_SPLICE:
5369 if (sqe) {
5370 ret = io_splice_prep(req, sqe);
5371 if (ret < 0)
5372 break;
5373 }
014db007 5374 ret = io_splice(req, force_nonblock);
7d67af2c 5375 break;
ddf0322d
JA
5376 case IORING_OP_PROVIDE_BUFFERS:
5377 if (sqe) {
5378 ret = io_provide_buffers_prep(req, sqe);
5379 if (ret)
5380 break;
5381 }
5382 ret = io_provide_buffers(req, force_nonblock);
5383 break;
067524e9
JA
5384 case IORING_OP_REMOVE_BUFFERS:
5385 if (sqe) {
5386 ret = io_remove_buffers_prep(req, sqe);
5387 if (ret)
5388 break;
5389 }
5390 ret = io_remove_buffers(req, force_nonblock);
3e4827b0 5391 break;
f2a8d5c7
PB
5392 case IORING_OP_TEE:
5393 if (sqe) {
5394 ret = io_tee_prep(req, sqe);
5395 if (ret < 0)
5396 break;
5397 }
5398 ret = io_tee(req, force_nonblock);
5399 break;
2b188cc1
JA
5400 default:
5401 ret = -EINVAL;
5402 break;
5403 }
5404
def596e9
JA
5405 if (ret)
5406 return ret;
5407
5408 if (ctx->flags & IORING_SETUP_IOPOLL) {
11ba820b
JA
5409 const bool in_async = io_wq_current_is_worker();
5410
9e645e11 5411 if (req->result == -EAGAIN)
def596e9
JA
5412 return -EAGAIN;
5413
11ba820b
JA
5414 /* workqueue context doesn't hold uring_lock, grab it now */
5415 if (in_async)
5416 mutex_lock(&ctx->uring_lock);
5417
def596e9 5418 io_iopoll_req_issued(req);
11ba820b
JA
5419
5420 if (in_async)
5421 mutex_unlock(&ctx->uring_lock);
def596e9
JA
5422 }
5423
5424 return 0;
2b188cc1
JA
5425}
5426
561fb04a 5427static void io_wq_submit_work(struct io_wq_work **workptr)
2b188cc1 5428{
561fb04a 5429 struct io_wq_work *work = *workptr;
2b188cc1 5430 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
561fb04a 5431 int ret = 0;
2b188cc1 5432
0c9d5ccd
JA
5433 /* if NO_CANCEL is set, we must still run the work */
5434 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
5435 IO_WQ_WORK_CANCEL) {
561fb04a 5436 ret = -ECANCELED;
0c9d5ccd 5437 }
31b51510 5438
561fb04a 5439 if (!ret) {
561fb04a 5440 do {
014db007 5441 ret = io_issue_sqe(req, NULL, false);
561fb04a
JA
5442 /*
5443 * We can get EAGAIN for polled IO even though we're
5444 * forcing a sync submission from here, since we can't
5445 * wait for request slots on the block side.
5446 */
5447 if (ret != -EAGAIN)
5448 break;
5449 cond_resched();
5450 } while (1);
5451 }
31b51510 5452
561fb04a 5453 if (ret) {
4e88d6e7 5454 req_set_fail_links(req);
78e19bbe 5455 io_cqring_add_event(req, ret);
817869d2 5456 io_put_req(req);
edafccee 5457 }
2b188cc1 5458
e9fd9396 5459 io_steal_work(req, workptr);
2b188cc1
JA
5460}
5461
65e19f54
JA
5462static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5463 int index)
5464{
5465 struct fixed_file_table *table;
5466
05f3fb3c 5467 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
84695089 5468 return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
5469}
5470
8da11c19
PB
5471static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5472 int fd, struct file **out_file, bool fixed)
09bb8394 5473{
a197f664 5474 struct io_ring_ctx *ctx = req->ctx;
8da11c19 5475 struct file *file;
09bb8394 5476
8da11c19 5477 if (fixed) {
05f3fb3c 5478 if (unlikely(!ctx->file_data ||
09bb8394
JA
5479 (unsigned) fd >= ctx->nr_user_files))
5480 return -EBADF;
b7620121 5481 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19
PB
5482 file = io_file_from_index(ctx, fd);
5483 if (!file)
08a45173 5484 return -EBADF;
05589553
XW
5485 req->fixed_file_refs = ctx->file_data->cur_refs;
5486 percpu_ref_get(req->fixed_file_refs);
09bb8394 5487 } else {
c826bd7a 5488 trace_io_uring_file_get(ctx, fd);
8da11c19
PB
5489 file = __io_file_get(state, fd);
5490 if (unlikely(!file))
09bb8394
JA
5491 return -EBADF;
5492 }
5493
8da11c19 5494 *out_file = file;
09bb8394
JA
5495 return 0;
5496}
5497
8da11c19 5498static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
63ff8223 5499 int fd)
8da11c19 5500{
8da11c19
PB
5501 bool fixed;
5502
63ff8223 5503 fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
0cdaf760 5504 if (unlikely(!fixed && io_async_submit(req->ctx)))
8da11c19
PB
5505 return -EBADF;
5506
5507 return io_file_get(state, req, fd, &req->file, fixed);
5508}
5509
a197f664 5510static int io_grab_files(struct io_kiocb *req)
fcb323cc
JA
5511{
5512 int ret = -EBADF;
a197f664 5513 struct io_ring_ctx *ctx = req->ctx;
fcb323cc 5514
5b0bbee4 5515 if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
f86cd20c 5516 return 0;
b14cca0c 5517 if (!ctx->ring_file)
b5dba59e
JA
5518 return -EBADF;
5519
fcb323cc
JA
5520 rcu_read_lock();
5521 spin_lock_irq(&ctx->inflight_lock);
5522 /*
5523 * We use the f_ops->flush() handler to ensure that we can flush
5524 * out work accessing these files if the fd is closed. Check if
5525 * the fd has changed since we started down this path, and disallow
5526 * this operation if it has.
5527 */
b14cca0c 5528 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
fcb323cc
JA
5529 list_add(&req->inflight_entry, &ctx->inflight_list);
5530 req->flags |= REQ_F_INFLIGHT;
5531 req->work.files = current->files;
5532 ret = 0;
5533 }
5534 spin_unlock_irq(&ctx->inflight_lock);
5535 rcu_read_unlock();
5536
5537 return ret;
5538}
5539
2665abfd 5540static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 5541{
ad8a48ac
JA
5542 struct io_timeout_data *data = container_of(timer,
5543 struct io_timeout_data, timer);
5544 struct io_kiocb *req = data->req;
2665abfd
JA
5545 struct io_ring_ctx *ctx = req->ctx;
5546 struct io_kiocb *prev = NULL;
5547 unsigned long flags;
2665abfd
JA
5548
5549 spin_lock_irqsave(&ctx->completion_lock, flags);
5550
5551 /*
5552	 * We don't expect the list to be empty; that will only happen if we
5553 * race with the completion of the linked work.
5554 */
4493233e
PB
5555 if (!list_empty(&req->link_list)) {
5556 prev = list_entry(req->link_list.prev, struct io_kiocb,
5557 link_list);
5d960724 5558 if (refcount_inc_not_zero(&prev->refs)) {
4493233e 5559 list_del_init(&req->link_list);
5d960724
JA
5560 prev->flags &= ~REQ_F_LINK_TIMEOUT;
5561 } else
76a46e06 5562 prev = NULL;
2665abfd
JA
5563 }
5564
5565 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5566
5567 if (prev) {
4e88d6e7 5568 req_set_fail_links(prev);
014db007 5569 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
76a46e06 5570 io_put_req(prev);
47f46768
JA
5571 } else {
5572 io_cqring_add_event(req, -ETIME);
5573 io_put_req(req);
2665abfd 5574 }
2665abfd
JA
5575 return HRTIMER_NORESTART;
5576}
5577
ad8a48ac 5578static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 5579{
76a46e06 5580 struct io_ring_ctx *ctx = req->ctx;
2665abfd 5581
76a46e06
JA
5582 /*
5583 * If the list is now empty, then our linked request finished before
5584	 * we got a chance to set up the timer.
5585 */
5586 spin_lock_irq(&ctx->completion_lock);
4493233e 5587 if (!list_empty(&req->link_list)) {
2d28390a 5588 struct io_timeout_data *data = &req->io->timeout;
94ae5e77 5589
ad8a48ac
JA
5590 data->timer.function = io_link_timeout_fn;
5591 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5592 data->mode);
2665abfd 5593 }
76a46e06 5594 spin_unlock_irq(&ctx->completion_lock);
2665abfd 5595
2665abfd 5596 /* drop submission reference */
76a46e06
JA
5597 io_put_req(req);
5598}
2665abfd 5599
ad8a48ac 5600static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd
JA
5601{
5602 struct io_kiocb *nxt;
5603
dea3b49c 5604 if (!(req->flags & REQ_F_LINK_HEAD))
2665abfd 5605 return NULL;
d7718a9d
JA
5606 /* for polled retry, if flag is set, we already went through here */
5607 if (req->flags & REQ_F_POLLED)
5608 return NULL;
2665abfd 5609
4493233e
PB
5610 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5611 link_list);
d625c6ee 5612 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 5613 return NULL;
2665abfd 5614
76a46e06 5615 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 5616 return nxt;
2665abfd
JA
5617}
5618
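The linked-timeout path above is driven entirely from userspace by queueing an IORING_OP_LINK_TIMEOUT SQE directly behind a request that has IOSQE_IO_LINK set. A minimal liburing sketch of that pairing (fd/buffer names are illustrative, error handling trimmed; this is not part of this file):

#include <liburing.h>

/* Issue a read that is cancelled if it has not completed within one
 * second: the second SQE becomes the linked timeout for the first. */
static int read_with_timeout(struct io_uring *ring, int fd,
			     void *buf, unsigned len)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;		/* the next SQE is linked to this one */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	return io_uring_submit(ring);
}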
3529d8c2 5619static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 5620{
4a0a7a18 5621 struct io_kiocb *linked_timeout;
4bc4494e 5622 struct io_kiocb *nxt;
193155c8 5623 const struct cred *old_creds = NULL;
e0c5c576 5624 int ret;
2b188cc1 5625
4a0a7a18
JA
5626again:
5627 linked_timeout = io_prep_linked_timeout(req);
5628
193155c8
JA
5629 if (req->work.creds && req->work.creds != current_cred()) {
5630 if (old_creds)
5631 revert_creds(old_creds);
5632 if (old_creds == req->work.creds)
5633 old_creds = NULL; /* restored original creds */
5634 else
5635 old_creds = override_creds(req->work.creds);
5636 }
5637
014db007 5638 ret = io_issue_sqe(req, sqe, true);
491381ce
JA
5639
5640 /*
5641 * We async punt it if the file wasn't marked NOWAIT, or if the file
5642 * doesn't support non-blocking read/write attempts
5643 */
5644 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
5645 (req->flags & REQ_F_MUST_PUNT))) {
d7718a9d
JA
5646 if (io_arm_poll_handler(req)) {
5647 if (linked_timeout)
5648 io_queue_linked_timeout(linked_timeout);
4bc4494e 5649 goto exit;
d7718a9d 5650 }
86a761f8 5651punt:
f86cd20c 5652 if (io_op_defs[req->opcode].file_table) {
bbad27b2
PB
5653 ret = io_grab_files(req);
5654 if (ret)
5655 goto err;
2b188cc1 5656 }
bbad27b2
PB
5657
5658 /*
5659 * Queued up for async execution, worker will release
5660 * submit reference when the iocb is actually submitted.
5661 */
5662 io_queue_async_work(req);
4bc4494e 5663 goto exit;
2b188cc1 5664 }
e65ef56d 5665
fcb323cc 5666err:
4bc4494e 5667 nxt = NULL;
76a46e06 5668 /* drop submission reference */
2a44f467 5669 io_put_req_find_next(req, &nxt);
e65ef56d 5670
f9bd67f6 5671 if (linked_timeout) {
76a46e06 5672 if (!ret)
f9bd67f6 5673 io_queue_linked_timeout(linked_timeout);
76a46e06 5674 else
f9bd67f6 5675 io_put_req(linked_timeout);
76a46e06
JA
5676 }
5677
e65ef56d 5678 /* and drop final reference, if we failed */
9e645e11 5679 if (ret) {
78e19bbe 5680 io_cqring_add_event(req, ret);
4e88d6e7 5681 req_set_fail_links(req);
e65ef56d 5682 io_put_req(req);
9e645e11 5683 }
4a0a7a18
JA
5684 if (nxt) {
5685 req = nxt;
86a761f8
PB
5686
5687 if (req->flags & REQ_F_FORCE_ASYNC)
5688 goto punt;
4a0a7a18
JA
5689 goto again;
5690 }
4bc4494e 5691exit:
193155c8
JA
5692 if (old_creds)
5693 revert_creds(old_creds);
2b188cc1
JA
5694}
5695
3529d8c2 5696static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4fe2c963
JL
5697{
5698 int ret;
5699
3529d8c2 5700 ret = io_req_defer(req, sqe);
4fe2c963
JL
5701 if (ret) {
5702 if (ret != -EIOCBQUEUED) {
1118591a 5703fail_req:
78e19bbe 5704 io_cqring_add_event(req, ret);
4e88d6e7 5705 req_set_fail_links(req);
78e19bbe 5706 io_double_put_req(req);
4fe2c963 5707 }
2550878f 5708 } else if (req->flags & REQ_F_FORCE_ASYNC) {
1118591a
PB
5709 ret = io_req_defer_prep(req, sqe);
5710 if (unlikely(ret < 0))
5711 goto fail_req;
ce35a47a
JA
5712 /*
5713		 * Never try inline submit if IOSQE_ASYNC is set, go straight
5714 * to async execution.
5715 */
5716 req->work.flags |= IO_WQ_WORK_CONCURRENT;
5717 io_queue_async_work(req);
5718 } else {
3529d8c2 5719 __io_queue_sqe(req, sqe);
ce35a47a 5720 }
4fe2c963
JL
5721}
5722
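io_queue_sqe() above never attempts inline submission when IOSQE_ASYNC is set; the request is prepped and handed straight to the async workers. From the application side that behaviour is requested with a single flag bit. A hedged sketch, assuming a liburing-managed ring (names are illustrative):

#include <errno.h>
#include <liburing.h>

/* Force the read to run in io-wq context instead of being tried inline
 * first; in the kernel this maps to REQ_F_FORCE_ASYNC. */
static int submit_forced_async(struct io_uring *ring, int fd,
			       void *buf, unsigned len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EAGAIN;		/* SQ ring currently full */
	io_uring_prep_read(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_ASYNC;
	return io_uring_submit(ring);
}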
1b4a51b6 5723static inline void io_queue_link_head(struct io_kiocb *req)
4fe2c963 5724{
94ae5e77 5725 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
1b4a51b6
PB
5726 io_cqring_add_event(req, -ECANCELED);
5727 io_double_put_req(req);
5728 } else
3529d8c2 5729 io_queue_sqe(req, NULL);
4fe2c963
JL
5730}
5731
1d4240cc 5732static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
7d01bd74 5733 struct io_kiocb **link)
9e645e11 5734{
a197f664 5735 struct io_ring_ctx *ctx = req->ctx;
ef4ff581 5736 int ret;
9e645e11 5737
9e645e11
JA
5738 /*
5739 * If we already have a head request, queue this one for async
5740 * submittal once the head completes. If we don't have a head but
5741 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
5742 * submitted sync once the chain is complete. If none of those
5743 * conditions are true (normal request), then just queue it.
5744 */
5745 if (*link) {
9d76377f 5746 struct io_kiocb *head = *link;
4e88d6e7 5747
8cdf2193
PB
5748 /*
5749 * Taking sequential execution of a link, draining both sides
5750	 * of the link also fulfills IOSQE_IO_DRAIN semantics for all
5751 * requests in the link. So, it drains the head and the
5752 * next after the link request. The last one is done via
5753 * drain_next flag to persist the effect across calls.
5754 */
ef4ff581 5755 if (req->flags & REQ_F_IO_DRAIN) {
711be031
PB
5756 head->flags |= REQ_F_IO_DRAIN;
5757 ctx->drain_next = 1;
5758 }
1d4240cc
PB
5759 if (io_alloc_async_ctx(req))
5760 return -EAGAIN;
9e645e11 5761
3529d8c2 5762 ret = io_req_defer_prep(req, sqe);
2d28390a 5763 if (ret) {
4e88d6e7 5764 /* fail even hard links since we don't submit */
9d76377f 5765 head->flags |= REQ_F_FAIL_LINK;
1d4240cc 5766 return ret;
2d28390a 5767 }
9d76377f
PB
5768 trace_io_uring_link(ctx, req, head);
5769 list_add_tail(&req->link_list, &head->link_list);
32fe525b
PB
5770
5771 /* last request of a link, enqueue the link */
ef4ff581 5772 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
32fe525b
PB
5773 io_queue_link_head(head);
5774 *link = NULL;
5775 }
9e645e11 5776 } else {
711be031
PB
5777 if (unlikely(ctx->drain_next)) {
5778 req->flags |= REQ_F_IO_DRAIN;
ef4ff581 5779 ctx->drain_next = 0;
711be031 5780 }
ef4ff581 5781 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
dea3b49c 5782 req->flags |= REQ_F_LINK_HEAD;
711be031 5783 INIT_LIST_HEAD(&req->link_list);
f1d96a8f 5784
1d4240cc
PB
5785 if (io_alloc_async_ctx(req))
5786 return -EAGAIN;
5787
711be031
PB
5788 ret = io_req_defer_prep(req, sqe);
5789 if (ret)
5790 req->flags |= REQ_F_FAIL_LINK;
5791 *link = req;
5792 } else {
5793 io_queue_sqe(req, sqe);
5794 }
9e645e11 5795 }
2e6e1fde 5796
1d4240cc 5797 return 0;
9e645e11
JA
5798}
5799
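io_submit_sqe() keeps extending the chain while IOSQE_IO_LINK or IOSQE_IO_HARDLINK stays set and only enqueues the head once a request without the flag arrives. The classic userspace use is an ordered write-then-fsync pair; a minimal liburing sketch (helper name is illustrative, error handling trimmed):

#include <sys/uio.h>
#include <liburing.h>

/* The fsync only starts after the linked write has completed; if the
 * write fails, the fsync completes with -ECANCELED. */
static int write_then_fsync(struct io_uring *ring, int fd,
			    const struct iovec *iov)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_writev(sqe, fd, iov, 1, 0);
	sqe->flags |= IOSQE_IO_LINK;		/* keep the chain open */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_fsync(sqe, fd, 0);	/* no link flag: chain is queued */

	return io_uring_submit(ring);
}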
9a56a232
JA
5800/*
5801 * Batched submission is done, ensure local IO is flushed out.
5802 */
5803static void io_submit_state_end(struct io_submit_state *state)
5804{
5805 blk_finish_plug(&state->plug);
9f13c35b 5806 io_state_file_put(state);
2579f913 5807 if (state->free_reqs)
6c8a3134 5808 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9a56a232
JA
5809}
5810
5811/*
5812 * Start submission side cache.
5813 */
5814static void io_submit_state_start(struct io_submit_state *state,
22efde59 5815 unsigned int max_ios)
9a56a232
JA
5816{
5817 blk_start_plug(&state->plug);
2579f913 5818 state->free_reqs = 0;
9a56a232
JA
5819 state->file = NULL;
5820 state->ios_left = max_ios;
5821}
5822
2b188cc1
JA
5823static void io_commit_sqring(struct io_ring_ctx *ctx)
5824{
75b28aff 5825 struct io_rings *rings = ctx->rings;
2b188cc1 5826
caf582c6
PB
5827 /*
5828 * Ensure any loads from the SQEs are done at this point,
5829 * since once we write the new head, the application could
5830 * write new data to them.
5831 */
5832 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
5833}
5834
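io_commit_sqring() is the kernel half of the ordering contract described at the top of this file: the SQ head is published with a release store once the SQE loads are done. The application half is symmetric, filling the SQE, writing its index into the SQ array, and then publishing the new tail with a release store. A sketch of that producer side over the mmap'ed ring; the struct and field names here are illustrative, not from this file:

#include <stdatomic.h>
#include <linux/io_uring.h>

/* Hypothetical application-side view of the SQ ring, built from the
 * pointers returned by mmap()ing the ring after io_uring_setup(). */
struct app_sq {
	_Atomic unsigned *head;		/* written by the kernel */
	_Atomic unsigned *tail;		/* written by the application */
	unsigned *array;		/* indices into the SQE array */
	struct io_uring_sqe *sqes;
	unsigned ring_mask;
	unsigned ring_entries;
};

static int app_push_sqe(struct app_sq *sq, const struct io_uring_sqe *src)
{
	unsigned tail = atomic_load_explicit(sq->tail, memory_order_relaxed);
	unsigned head = atomic_load_explicit(sq->head, memory_order_acquire);

	if (tail - head == sq->ring_entries)
		return -1;			/* ring full */

	sq->sqes[tail & sq->ring_mask] = *src;
	sq->array[tail & sq->ring_mask] = tail & sq->ring_mask;
	/* release store pairs with the kernel's acquire load of the tail,
	 * so the SQE contents are visible before the new tail is */
	atomic_store_explicit(sq->tail, tail + 1, memory_order_release);
	return 0;
}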
2b188cc1 5835/*
3529d8c2 5836 * Fetch an sqe, if one is available. Note that the returned sqe points to memory
2b188cc1
JA
5837 * that is mapped by userspace. This means that care needs to be taken to
5838 * ensure that reads are stable, as we cannot rely on userspace always
5839 * being a good citizen. If members of the sqe are validated and then later
5840 * used, it's important that those reads are done through READ_ONCE() to
5841 * prevent a re-load down the line.
5842 */
709b302f 5843static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 5844{
75b28aff 5845 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
5846 unsigned head;
5847
5848 /*
5849 * The cached sq head (or cq tail) serves two purposes:
5850 *
5851	 * 1) allows us to batch the cost of updating the user visible
5852	 *    head.
5853 * 2) allows the kernel side to track the head on its own, even
5854 * though the application is the one updating it.
5855 */
ee7d46d9 5856 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
709b302f
PB
5857 if (likely(head < ctx->sq_entries))
5858 return &ctx->sq_sqes[head];
2b188cc1
JA
5859
5860 /* drop invalid entries */
498ccd9e 5861 ctx->cached_sq_dropped++;
ee7d46d9 5862 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
709b302f
PB
5863 return NULL;
5864}
5865
5866static inline void io_consume_sqe(struct io_ring_ctx *ctx)
5867{
5868 ctx->cached_sq_head++;
2b188cc1
JA
5869}
5870
ef4ff581
PB
5871#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5872 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5873 IOSQE_BUFFER_SELECT)
5874
5875static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
5876 const struct io_uring_sqe *sqe,
0cdaf760 5877 struct io_submit_state *state)
0553b8bd 5878{
ef4ff581 5879 unsigned int sqe_flags;
63ff8223 5880 int id;
ef4ff581 5881
0553b8bd
PB
5882 /*
5883	 * All IO needs to record the previous position; when LINK is
5884	 * combined with DRAIN, it can be used to mark the position of
5885	 * the first IO in the link list.
5886 */
31af27c7 5887 req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
0553b8bd
PB
5888 req->opcode = READ_ONCE(sqe->opcode);
5889 req->user_data = READ_ONCE(sqe->user_data);
5890 req->io = NULL;
5891 req->file = NULL;
5892 req->ctx = ctx;
5893 req->flags = 0;
5894 /* one is dropped after submission, the other at completion */
5895 refcount_set(&req->refs, 2);
5896 req->task = NULL;
5897 req->result = 0;
5898 INIT_IO_WORK(&req->work, io_wq_submit_work);
ef4ff581
PB
5899
5900 if (unlikely(req->opcode >= IORING_OP_LAST))
5901 return -EINVAL;
5902
5903 if (io_op_defs[req->opcode].needs_mm && !current->mm) {
5904 if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
5905 return -EFAULT;
5906 use_mm(ctx->sqo_mm);
5907 }
5908
5909 sqe_flags = READ_ONCE(sqe->flags);
5910 /* enforce forwards compatibility on users */
5911 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
5912 return -EINVAL;
5913
5914 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5915 !io_op_defs[req->opcode].buffer_select)
5916 return -EOPNOTSUPP;
5917
5918 id = READ_ONCE(sqe->personality);
5919 if (id) {
5920 req->work.creds = idr_find(&ctx->personality_idr, id);
5921 if (unlikely(!req->work.creds))
5922 return -EINVAL;
5923 get_cred(req->work.creds);
5924 }
5925
5926 /* same numerical values with corresponding REQ_F_*, safe to copy */
c11368a5 5927 req->flags |= sqe_flags;
ef4ff581 5928
63ff8223
JA
5929 if (!io_op_defs[req->opcode].needs_file)
5930 return 0;
5931
5932 return io_req_set_file(state, req, READ_ONCE(sqe->fd));
0553b8bd
PB
5933}
5934
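io_init_req() above resolves sqe->personality against ctx->personality_idr and attaches the registered credentials to the work item. Userspace obtains such an id with the IORING_REGISTER_PERSONALITY opcode of io_uring_register() (liburing wraps this as io_uring_register_personality()). A raw-syscall sketch, purely illustrative:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Snapshot the calling task's current credentials in the ring; the
 * returned id can later be placed in sqe->personality so that request
 * runs with these credentials instead of the submitter's. */
static int register_personality(int ring_fd)
{
	return (int) syscall(__NR_io_uring_register, ring_fd,
			     IORING_REGISTER_PERSONALITY, NULL, 0);
}

A request then simply stores the returned id in sqe->personality before submission.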
fb5ccc98 5935static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
0cdaf760 5936 struct file *ring_file, int ring_fd)
6c271ce2
JA
5937{
5938 struct io_submit_state state, *statep = NULL;
9e645e11 5939 struct io_kiocb *link = NULL;
9e645e11 5940 int i, submitted = 0;
6c271ce2 5941
c4a2ed72 5942 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8
JA
5943 if (test_bit(0, &ctx->sq_check_overflow)) {
5944 if (!list_empty(&ctx->cq_overflow_list) &&
5945 !io_cqring_overflow_flush(ctx, false))
5946 return -EBUSY;
5947 }
6c271ce2 5948
ee7d46d9
PB
5949 /* make sure SQ entry isn't read before tail */
5950 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 5951
2b85edfc
PB
5952 if (!percpu_ref_tryget_many(&ctx->refs, nr))
5953 return -EAGAIN;
6c271ce2
JA
5954
5955 if (nr > IO_PLUG_THRESHOLD) {
22efde59 5956 io_submit_state_start(&state, nr);
6c271ce2
JA
5957 statep = &state;
5958 }
5959
b14cca0c
PB
5960 ctx->ring_fd = ring_fd;
5961 ctx->ring_file = ring_file;
5962
6c271ce2 5963 for (i = 0; i < nr; i++) {
3529d8c2 5964 const struct io_uring_sqe *sqe;
196be95c 5965 struct io_kiocb *req;
1cb1edb2 5966 int err;
fb5ccc98 5967
b1e50e54
PB
5968 sqe = io_get_sqe(ctx);
5969 if (unlikely(!sqe)) {
5970 io_consume_sqe(ctx);
5971 break;
5972 }
0553b8bd 5973 req = io_alloc_req(ctx, statep);
196be95c
PB
5974 if (unlikely(!req)) {
5975 if (!submitted)
5976 submitted = -EAGAIN;
fb5ccc98 5977 break;
196be95c 5978 }
fb5ccc98 5979
0cdaf760 5980 err = io_init_req(ctx, req, sqe, statep);
709b302f 5981 io_consume_sqe(ctx);
d3656344
JA
5982 /* will complete beyond this point, count as submitted */
5983 submitted++;
5984
ef4ff581 5985 if (unlikely(err)) {
1cb1edb2
PB
5986fail_req:
5987 io_cqring_add_event(req, err);
d3656344 5988 io_double_put_req(req);
196be95c
PB
5989 break;
5990 }
fb5ccc98 5991
354420f7 5992 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
0cdaf760 5993 true, io_async_submit(ctx));
7d01bd74 5994 err = io_submit_sqe(req, sqe, &link);
1d4240cc
PB
5995 if (err)
5996 goto fail_req;
6c271ce2
JA
5997 }
5998
9466f437
PB
5999 if (unlikely(submitted != nr)) {
6000 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
6001
6002 percpu_ref_put_many(&ctx->refs, nr - ref_used);
6003 }
9e645e11 6004 if (link)
1b4a51b6 6005 io_queue_link_head(link);
6c271ce2
JA
6006 if (statep)
6007 io_submit_state_end(&state);
6008
ae9428ca
PB
6009 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6010 io_commit_sqring(ctx);
6011
6c271ce2
JA
6012 return submitted;
6013}
6014
bf9c2f1c
PB
6015static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
6016{
6017 struct mm_struct *mm = current->mm;
6018
6019 if (mm) {
6020 unuse_mm(mm);
6021 mmput(mm);
6022 }
6023}
6024
6c271ce2
JA
6025static int io_sq_thread(void *data)
6026{
6c271ce2 6027 struct io_ring_ctx *ctx = data;
181e448d 6028 const struct cred *old_cred;
6c271ce2
JA
6029 mm_segment_t old_fs;
6030 DEFINE_WAIT(wait);
6c271ce2 6031 unsigned long timeout;
bdcd3eab 6032 int ret = 0;
6c271ce2 6033
0f158b4c 6034 complete(&ctx->sq_thread_comp);
a4c0b3de 6035
6c271ce2
JA
6036 old_fs = get_fs();
6037 set_fs(USER_DS);
181e448d 6038 old_cred = override_creds(ctx->creds);
6c271ce2 6039
bdcd3eab 6040 timeout = jiffies + ctx->sq_thread_idle;
2bbcd6d3 6041 while (!kthread_should_park()) {
fb5ccc98 6042 unsigned int to_submit;
6c271ce2 6043
bdcd3eab 6044 if (!list_empty(&ctx->poll_list)) {
6c271ce2
JA
6045 unsigned nr_events = 0;
6046
bdcd3eab
XW
6047 mutex_lock(&ctx->uring_lock);
6048 if (!list_empty(&ctx->poll_list))
6049 io_iopoll_getevents(ctx, &nr_events, 0);
6050 else
6c271ce2 6051 timeout = jiffies + ctx->sq_thread_idle;
bdcd3eab 6052 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
6053 }
6054
fb5ccc98 6055 to_submit = io_sqring_entries(ctx);
c1edbf5f
JA
6056
6057 /*
6058 * If submit got -EBUSY, flag us as needing the application
6059 * to enter the kernel to reap and flush events.
6060 */
6061 if (!to_submit || ret == -EBUSY) {
7143b5ac
SG
6062 /*
6063 * Drop cur_mm before scheduling, we can't hold it for
6064 * long periods (or over schedule()). Do this before
6065 * adding ourselves to the waitqueue, as the unuse/drop
6066 * may sleep.
6067 */
bf9c2f1c 6068 io_sq_thread_drop_mm(ctx);
7143b5ac 6069
6c271ce2
JA
6070 /*
6071 * We're polling. If we're within the defined idle
6072 * period, then let us spin without work before going
c1edbf5f
JA
6073			 * to sleep. The exception is if we got EBUSY doing
6074			 * more IO; in that case, wait for the application to
6075 * reap events and wake us up.
6c271ce2 6076 */
bdcd3eab 6077 if (!list_empty(&ctx->poll_list) ||
df069d80
JA
6078 (!time_after(jiffies, timeout) && ret != -EBUSY &&
6079 !percpu_ref_is_dying(&ctx->refs))) {
b41e9852
JA
6080 if (current->task_works)
6081 task_work_run();
9831a90c 6082 cond_resched();
6c271ce2
JA
6083 continue;
6084 }
6085
6c271ce2
JA
6086 prepare_to_wait(&ctx->sqo_wait, &wait,
6087 TASK_INTERRUPTIBLE);
6088
bdcd3eab
XW
6089 /*
6090			 * While doing polled IO, before going to sleep, we need
6091			 * to check if there are new reqs added to poll_list;
6092			 * reqs may have been punted to the io worker and will be
6093			 * added to poll_list later, hence check the
6094			 * poll_list again.
6095 */
6096 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6097 !list_empty_careful(&ctx->poll_list)) {
6098 finish_wait(&ctx->sqo_wait, &wait);
6099 continue;
6100 }
6101
6c271ce2 6102 /* Tell userspace we may need a wakeup call */
75b28aff 6103 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
0d7bae69
SB
6104 /* make sure to read SQ tail after writing flags */
6105 smp_mb();
6c271ce2 6106
fb5ccc98 6107 to_submit = io_sqring_entries(ctx);
c1edbf5f 6108 if (!to_submit || ret == -EBUSY) {
2bbcd6d3 6109 if (kthread_should_park()) {
6c271ce2
JA
6110 finish_wait(&ctx->sqo_wait, &wait);
6111 break;
6112 }
b41e9852
JA
6113 if (current->task_works) {
6114 task_work_run();
10bea96d 6115 finish_wait(&ctx->sqo_wait, &wait);
b41e9852
JA
6116 continue;
6117 }
6c271ce2
JA
6118 if (signal_pending(current))
6119 flush_signals(current);
6120 schedule();
6121 finish_wait(&ctx->sqo_wait, &wait);
6122
75b28aff 6123 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
6124 continue;
6125 }
6126 finish_wait(&ctx->sqo_wait, &wait);
6127
75b28aff 6128 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
6129 }
6130
8a4955ff 6131 mutex_lock(&ctx->uring_lock);
6b668c9b
XW
6132 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6133 ret = io_submit_sqes(ctx, to_submit, NULL, -1);
8a4955ff 6134 mutex_unlock(&ctx->uring_lock);
bdcd3eab 6135 timeout = jiffies + ctx->sq_thread_idle;
6c271ce2
JA
6136 }
6137
b41e9852
JA
6138 if (current->task_works)
6139 task_work_run();
6140
6c271ce2 6141 set_fs(old_fs);
bf9c2f1c 6142 io_sq_thread_drop_mm(ctx);
181e448d 6143 revert_creds(old_cred);
06058632 6144
2bbcd6d3 6145 kthread_parkme();
06058632 6146
6c271ce2
JA
6147 return 0;
6148}
6149
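The sq_flags handling in io_sq_thread() is mirrored on the application side: after publishing new tail entries, the submitter must re-check IORING_SQ_NEED_WAKEUP (with a full barrier in between, matching the smp_mb() above) and call io_uring_enter() with IORING_ENTER_SQ_WAKEUP if the poller went to sleep. A hedged sketch over the mmap'ed flags word (names illustrative, error handling omitted):

#include <stdatomic.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Submission path for an IORING_SETUP_SQPOLL ring: wake the poller
 * thread only if it advertised that it went to sleep. */
static void sqpoll_kick(int ring_fd, _Atomic unsigned *sq_flags)
{
	/* full fence between our SQ tail update and the flags read,
	 * pairing with the kernel's smp_mb() above */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load_explicit(sq_flags, memory_order_relaxed) &
	    IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}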
bda52162
JA
6150struct io_wait_queue {
6151 struct wait_queue_entry wq;
6152 struct io_ring_ctx *ctx;
6153 unsigned to_wait;
6154 unsigned nr_timeouts;
6155};
6156
1d7bb1d5 6157static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
6158{
6159 struct io_ring_ctx *ctx = iowq->ctx;
6160
6161 /*
d195a66e 6162 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
6163 * started waiting. For timeouts, we always want to return to userspace,
6164 * regardless of event count.
6165 */
1d7bb1d5 6166 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
6167 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6168}
6169
6170static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6171 int wake_flags, void *key)
6172{
6173 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6174 wq);
6175
1d7bb1d5
JA
6176 /* use noflush == true, as we can't safely rely on locking context */
6177 if (!io_should_wake(iowq, true))
bda52162
JA
6178 return -1;
6179
6180 return autoremove_wake_function(curr, mode, wake_flags, key);
6181}
6182
2b188cc1
JA
6183/*
6184 * Wait until events become available, if we don't already have some. The
6185 * application must reap them itself, as they reside on the shared cq ring.
6186 */
6187static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6188 const sigset_t __user *sig, size_t sigsz)
6189{
bda52162
JA
6190 struct io_wait_queue iowq = {
6191 .wq = {
6192 .private = current,
6193 .func = io_wake_function,
6194 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6195 },
6196 .ctx = ctx,
6197 .to_wait = min_events,
6198 };
75b28aff 6199 struct io_rings *rings = ctx->rings;
e9ffa5c2 6200 int ret = 0;
2b188cc1 6201
b41e9852
JA
6202 do {
6203 if (io_cqring_events(ctx, false) >= min_events)
6204 return 0;
6205 if (!current->task_works)
6206 break;
6207 task_work_run();
6208 } while (1);
2b188cc1
JA
6209
6210 if (sig) {
9e75ad5d
AB
6211#ifdef CONFIG_COMPAT
6212 if (in_compat_syscall())
6213 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 6214 sigsz);
9e75ad5d
AB
6215 else
6216#endif
b772434b 6217 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 6218
2b188cc1
JA
6219 if (ret)
6220 return ret;
6221 }
6222
bda52162 6223 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 6224 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
6225 do {
6226 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6227 TASK_INTERRUPTIBLE);
b41e9852
JA
6228 if (current->task_works)
6229 task_work_run();
1d7bb1d5 6230 if (io_should_wake(&iowq, false))
bda52162
JA
6231 break;
6232 schedule();
6233 if (signal_pending(current)) {
e9ffa5c2 6234 ret = -EINTR;
bda52162
JA
6235 break;
6236 }
6237 } while (1);
6238 finish_wait(&ctx->wait, &iowq.wq);
6239
e9ffa5c2 6240 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 6241
75b28aff 6242 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
6243}
6244
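io_cqring_wait() above is what backs io_uring_enter(..., IORING_ENTER_GETEVENTS) when the requested number of completions is not yet available; liburing's io_uring_wait_cqe() ends up on this path whenever the CQ ring is empty. A minimal consumer sketch (illustrative only):

#include <stdio.h>
#include <liburing.h>

/* Reap a single completion and mark it as seen so the CQ head advances. */
static int reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe(ring, &cqe);

	if (ret < 0)
		return ret;			/* e.g. -EINTR */
	if (cqe->res < 0)
		fprintf(stderr, "request failed: %d\n", cqe->res);
	io_uring_cqe_seen(ring, cqe);
	return 0;
}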
6b06314c
JA
6245static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6246{
6247#if defined(CONFIG_UNIX)
6248 if (ctx->ring_sock) {
6249 struct sock *sock = ctx->ring_sock->sk;
6250 struct sk_buff *skb;
6251
6252 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6253 kfree_skb(skb);
6254 }
6255#else
6256 int i;
6257
65e19f54
JA
6258 for (i = 0; i < ctx->nr_user_files; i++) {
6259 struct file *file;
6260
6261 file = io_file_from_index(ctx, i);
6262 if (file)
6263 fput(file);
6264 }
6b06314c
JA
6265#endif
6266}
6267
05f3fb3c
JA
6268static void io_file_ref_kill(struct percpu_ref *ref)
6269{
6270 struct fixed_file_data *data;
6271
6272 data = container_of(ref, struct fixed_file_data, refs);
6273 complete(&data->done);
6274}
6275
6b06314c
JA
6276static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6277{
05f3fb3c 6278 struct fixed_file_data *data = ctx->file_data;
05589553 6279 struct fixed_file_ref_node *ref_node = NULL;
65e19f54
JA
6280 unsigned nr_tables, i;
6281
05f3fb3c 6282 if (!data)
6b06314c
JA
6283 return -ENXIO;
6284
6a4d07cd 6285 spin_lock(&data->lock);
05589553
XW
6286 if (!list_empty(&data->ref_list))
6287 ref_node = list_first_entry(&data->ref_list,
6288 struct fixed_file_ref_node, node);
6a4d07cd 6289 spin_unlock(&data->lock);
05589553
XW
6290 if (ref_node)
6291 percpu_ref_kill(&ref_node->refs);
6292
6293 percpu_ref_kill(&data->refs);
6294
6295 /* wait for all refs nodes to complete */
4a38aed2 6296 flush_delayed_work(&ctx->file_put_work);
2faf852d 6297 wait_for_completion(&data->done);
05f3fb3c 6298
6b06314c 6299 __io_sqe_files_unregister(ctx);
65e19f54
JA
6300 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6301 for (i = 0; i < nr_tables; i++)
05f3fb3c
JA
6302 kfree(data->table[i].files);
6303 kfree(data->table);
05589553
XW
6304 percpu_ref_exit(&data->refs);
6305 kfree(data);
05f3fb3c 6306 ctx->file_data = NULL;
6b06314c
JA
6307 ctx->nr_user_files = 0;
6308 return 0;
6309}
6310
6c271ce2
JA
6311static void io_sq_thread_stop(struct io_ring_ctx *ctx)
6312{
6313 if (ctx->sqo_thread) {
0f158b4c 6314 wait_for_completion(&ctx->sq_thread_comp);
2bbcd6d3
RP
6315 /*
6316		 * The park is a bit of a work-around; without it we get
6317 * warning spews on shutdown with SQPOLL set and affinity
6318 * set to a single CPU.
6319 */
06058632 6320 kthread_park(ctx->sqo_thread);
6c271ce2
JA
6321 kthread_stop(ctx->sqo_thread);
6322 ctx->sqo_thread = NULL;
6323 }
6324}
6325
6b06314c
JA
6326static void io_finish_async(struct io_ring_ctx *ctx)
6327{
6c271ce2
JA
6328 io_sq_thread_stop(ctx);
6329
561fb04a
JA
6330 if (ctx->io_wq) {
6331 io_wq_destroy(ctx->io_wq);
6332 ctx->io_wq = NULL;
6b06314c
JA
6333 }
6334}
6335
6336#if defined(CONFIG_UNIX)
6b06314c
JA
6337/*
6338 * Ensure the UNIX gc is aware of our file set, so we are certain that
6339 * the io_uring can be safely unregistered on process exit, even if we have
6340 * loops in the file referencing.
6341 */
6342static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
6343{
6344 struct sock *sk = ctx->ring_sock->sk;
6345 struct scm_fp_list *fpl;
6346 struct sk_buff *skb;
08a45173 6347 int i, nr_files;
6b06314c 6348
6b06314c
JA
6349 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
6350 if (!fpl)
6351 return -ENOMEM;
6352
6353 skb = alloc_skb(0, GFP_KERNEL);
6354 if (!skb) {
6355 kfree(fpl);
6356 return -ENOMEM;
6357 }
6358
6359 skb->sk = sk;
6b06314c 6360
08a45173 6361 nr_files = 0;
6b06314c
JA
6362 fpl->user = get_uid(ctx->user);
6363 for (i = 0; i < nr; i++) {
65e19f54
JA
6364 struct file *file = io_file_from_index(ctx, i + offset);
6365
6366 if (!file)
08a45173 6367 continue;
65e19f54 6368 fpl->fp[nr_files] = get_file(file);
08a45173
JA
6369 unix_inflight(fpl->user, fpl->fp[nr_files]);
6370 nr_files++;
6b06314c
JA
6371 }
6372
08a45173
JA
6373 if (nr_files) {
6374 fpl->max = SCM_MAX_FD;
6375 fpl->count = nr_files;
6376 UNIXCB(skb).fp = fpl;
05f3fb3c 6377 skb->destructor = unix_destruct_scm;
08a45173
JA
6378 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6379 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 6380
08a45173
JA
6381 for (i = 0; i < nr_files; i++)
6382 fput(fpl->fp[i]);
6383 } else {
6384 kfree_skb(skb);
6385 kfree(fpl);
6386 }
6b06314c
JA
6387
6388 return 0;
6389}
6390
6391/*
6392 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
6393 * causes regular reference counting to break down. We rely on the UNIX
6394 * garbage collection to take care of this problem for us.
6395 */
6396static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6397{
6398 unsigned left, total;
6399 int ret = 0;
6400
6401 total = 0;
6402 left = ctx->nr_user_files;
6403 while (left) {
6404 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
6405
6406 ret = __io_sqe_files_scm(ctx, this_files, total);
6407 if (ret)
6408 break;
6409 left -= this_files;
6410 total += this_files;
6411 }
6412
6413 if (!ret)
6414 return 0;
6415
6416 while (total < ctx->nr_user_files) {
65e19f54
JA
6417 struct file *file = io_file_from_index(ctx, total);
6418
6419 if (file)
6420 fput(file);
6b06314c
JA
6421 total++;
6422 }
6423
6424 return ret;
6425}
6426#else
6427static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6428{
6429 return 0;
6430}
6431#endif
6432
65e19f54
JA
6433static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
6434 unsigned nr_files)
6435{
6436 int i;
6437
6438 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6439 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6440 unsigned this_files;
6441
6442 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
6443 table->files = kcalloc(this_files, sizeof(struct file *),
6444 GFP_KERNEL);
6445 if (!table->files)
6446 break;
6447 nr_files -= this_files;
6448 }
6449
6450 if (i == nr_tables)
6451 return 0;
6452
6453 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6454 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6455 kfree(table->files);
6456 }
6457 return 1;
6458}
6459
05f3fb3c
JA
6460static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
6461{
6462#if defined(CONFIG_UNIX)
6463 struct sock *sock = ctx->ring_sock->sk;
6464 struct sk_buff_head list, *head = &sock->sk_receive_queue;
6465 struct sk_buff *skb;
6466 int i;
6467
6468 __skb_queue_head_init(&list);
6469
6470 /*
6471 * Find the skb that holds this file in its SCM_RIGHTS. When found,
6472 * remove this entry and rearrange the file array.
6473 */
6474 skb = skb_dequeue(head);
6475 while (skb) {
6476 struct scm_fp_list *fp;
6477
6478 fp = UNIXCB(skb).fp;
6479 for (i = 0; i < fp->count; i++) {
6480 int left;
6481
6482 if (fp->fp[i] != file)
6483 continue;
6484
6485 unix_notinflight(fp->user, fp->fp[i]);
6486 left = fp->count - 1 - i;
6487 if (left) {
6488 memmove(&fp->fp[i], &fp->fp[i + 1],
6489 left * sizeof(struct file *));
6490 }
6491 fp->count--;
6492 if (!fp->count) {
6493 kfree_skb(skb);
6494 skb = NULL;
6495 } else {
6496 __skb_queue_tail(&list, skb);
6497 }
6498 fput(file);
6499 file = NULL;
6500 break;
6501 }
6502
6503 if (!file)
6504 break;
6505
6506 __skb_queue_tail(&list, skb);
6507
6508 skb = skb_dequeue(head);
6509 }
6510
6511 if (skb_peek(&list)) {
6512 spin_lock_irq(&head->lock);
6513 while ((skb = __skb_dequeue(&list)) != NULL)
6514 __skb_queue_tail(head, skb);
6515 spin_unlock_irq(&head->lock);
6516 }
6517#else
6518 fput(file);
6519#endif
6520}
6521
6522struct io_file_put {
05589553 6523 struct list_head list;
05f3fb3c 6524 struct file *file;
05f3fb3c
JA
6525};
6526
4a38aed2 6527static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
65e19f54 6528{
4a38aed2
JA
6529 struct fixed_file_data *file_data = ref_node->file_data;
6530 struct io_ring_ctx *ctx = file_data->ctx;
05f3fb3c 6531 struct io_file_put *pfile, *tmp;
65e19f54 6532
05589553 6533 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
6a4d07cd 6534 list_del(&pfile->list);
05589553
XW
6535 io_ring_file_put(ctx, pfile->file);
6536 kfree(pfile);
65e19f54 6537 }
05589553 6538
6a4d07cd
JA
6539 spin_lock(&file_data->lock);
6540 list_del(&ref_node->node);
6541 spin_unlock(&file_data->lock);
05589553
XW
6542
6543 percpu_ref_exit(&ref_node->refs);
6544 kfree(ref_node);
6545 percpu_ref_put(&file_data->refs);
2faf852d 6546}
65e19f54 6547
4a38aed2
JA
6548static void io_file_put_work(struct work_struct *work)
6549{
6550 struct io_ring_ctx *ctx;
6551 struct llist_node *node;
6552
6553 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
6554 node = llist_del_all(&ctx->file_put_llist);
6555
6556 while (node) {
6557 struct fixed_file_ref_node *ref_node;
6558 struct llist_node *next = node->next;
6559
6560 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
6561 __io_file_put_work(ref_node);
6562 node = next;
6563 }
6564}
6565
05589553 6566static void io_file_data_ref_zero(struct percpu_ref *ref)
2faf852d 6567{
05589553 6568 struct fixed_file_ref_node *ref_node;
4a38aed2
JA
6569 struct io_ring_ctx *ctx;
6570 bool first_add;
6571 int delay = HZ;
65e19f54 6572
05589553 6573 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
4a38aed2 6574 ctx = ref_node->file_data->ctx;
05589553 6575
4a38aed2
JA
6576 if (percpu_ref_is_dying(&ctx->file_data->refs))
6577 delay = 0;
6578
6579 first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
6580 if (!delay)
6581 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
6582 else if (first_add)
6583 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
05f3fb3c 6584}
65e19f54 6585
05589553
XW
6586static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
6587 struct io_ring_ctx *ctx)
05f3fb3c 6588{
05589553 6589 struct fixed_file_ref_node *ref_node;
05f3fb3c 6590
05589553
XW
6591 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
6592 if (!ref_node)
6593 return ERR_PTR(-ENOMEM);
05f3fb3c 6594
05589553
XW
6595 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
6596 0, GFP_KERNEL)) {
6597 kfree(ref_node);
6598 return ERR_PTR(-ENOMEM);
6599 }
6600 INIT_LIST_HEAD(&ref_node->node);
6601 INIT_LIST_HEAD(&ref_node->file_list);
05589553
XW
6602 ref_node->file_data = ctx->file_data;
6603 return ref_node;
05589553
XW
6604}
6605
6606static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
6607{
6608 percpu_ref_exit(&ref_node->refs);
6609 kfree(ref_node);
65e19f54
JA
6610}
6611
6b06314c
JA
6612static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6613 unsigned nr_args)
6614{
6615 __s32 __user *fds = (__s32 __user *) arg;
65e19f54 6616 unsigned nr_tables;
05f3fb3c 6617 struct file *file;
6b06314c
JA
6618 int fd, ret = 0;
6619 unsigned i;
05589553 6620 struct fixed_file_ref_node *ref_node;
6b06314c 6621
05f3fb3c 6622 if (ctx->file_data)
6b06314c
JA
6623 return -EBUSY;
6624 if (!nr_args)
6625 return -EINVAL;
6626 if (nr_args > IORING_MAX_FIXED_FILES)
6627 return -EMFILE;
6628
05f3fb3c
JA
6629 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
6630 if (!ctx->file_data)
6631 return -ENOMEM;
6632 ctx->file_data->ctx = ctx;
6633 init_completion(&ctx->file_data->done);
05589553 6634 INIT_LIST_HEAD(&ctx->file_data->ref_list);
f7fe9346 6635 spin_lock_init(&ctx->file_data->lock);
05f3fb3c 6636
65e19f54 6637 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
05f3fb3c
JA
6638 ctx->file_data->table = kcalloc(nr_tables,
6639 sizeof(struct fixed_file_table),
65e19f54 6640 GFP_KERNEL);
05f3fb3c
JA
6641 if (!ctx->file_data->table) {
6642 kfree(ctx->file_data);
6643 ctx->file_data = NULL;
6b06314c 6644 return -ENOMEM;
05f3fb3c
JA
6645 }
6646
05589553 6647 if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill,
05f3fb3c
JA
6648 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
6649 kfree(ctx->file_data->table);
6650 kfree(ctx->file_data);
6651 ctx->file_data = NULL;
6b06314c 6652 return -ENOMEM;
05f3fb3c 6653 }
6b06314c 6654
65e19f54 6655 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
05f3fb3c
JA
6656 percpu_ref_exit(&ctx->file_data->refs);
6657 kfree(ctx->file_data->table);
6658 kfree(ctx->file_data);
6659 ctx->file_data = NULL;
65e19f54
JA
6660 return -ENOMEM;
6661 }
6662
08a45173 6663 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
6664 struct fixed_file_table *table;
6665 unsigned index;
6666
6b06314c
JA
6667 ret = -EFAULT;
6668 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
6669 break;
08a45173
JA
6670 /* allow sparse sets */
6671 if (fd == -1) {
6672 ret = 0;
6673 continue;
6674 }
6b06314c 6675
05f3fb3c 6676 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54 6677 index = i & IORING_FILE_TABLE_MASK;
05f3fb3c 6678 file = fget(fd);
6b06314c
JA
6679
6680 ret = -EBADF;
05f3fb3c 6681 if (!file)
6b06314c 6682 break;
05f3fb3c 6683
6b06314c
JA
6684 /*
6685 * Don't allow io_uring instances to be registered. If UNIX
6686 * isn't enabled, then this causes a reference cycle and this
6687 * instance can never get freed. If UNIX is enabled we'll
6688 * handle it just fine, but there's still no point in allowing
6689 * a ring fd as it doesn't support regular read/write anyway.
6690 */
05f3fb3c
JA
6691 if (file->f_op == &io_uring_fops) {
6692 fput(file);
6b06314c
JA
6693 break;
6694 }
6b06314c 6695 ret = 0;
05f3fb3c 6696 table->files[index] = file;
6b06314c
JA
6697 }
6698
6699 if (ret) {
65e19f54 6700 for (i = 0; i < ctx->nr_user_files; i++) {
65e19f54
JA
6701 file = io_file_from_index(ctx, i);
6702 if (file)
6703 fput(file);
6704 }
6705 for (i = 0; i < nr_tables; i++)
05f3fb3c 6706 kfree(ctx->file_data->table[i].files);
6b06314c 6707
05f3fb3c
JA
6708 kfree(ctx->file_data->table);
6709 kfree(ctx->file_data);
6710 ctx->file_data = NULL;
6b06314c
JA
6711 ctx->nr_user_files = 0;
6712 return ret;
6713 }
6714
6715 ret = io_sqe_files_scm(ctx);
05589553 6716 if (ret) {
6b06314c 6717 io_sqe_files_unregister(ctx);
05589553
XW
6718 return ret;
6719 }
6b06314c 6720
05589553
XW
6721 ref_node = alloc_fixed_file_ref_node(ctx);
6722 if (IS_ERR(ref_node)) {
6723 io_sqe_files_unregister(ctx);
6724 return PTR_ERR(ref_node);
6725 }
6726
6727 ctx->file_data->cur_refs = &ref_node->refs;
6a4d07cd 6728 spin_lock(&ctx->file_data->lock);
05589553 6729 list_add(&ref_node->node, &ctx->file_data->ref_list);
6a4d07cd 6730 spin_unlock(&ctx->file_data->lock);
05589553 6731 percpu_ref_get(&ctx->file_data->refs);
6b06314c
JA
6732 return ret;
6733}
6734
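Once io_sqe_files_register() has built the fixed file table, requests can address entries by table index instead of by file descriptor by setting IOSQE_FIXED_FILE; -1 entries are allowed for sparse slots. A liburing sketch (assumes fd0/fd1 are open descriptors; error handling trimmed):

#include <liburing.h>

/* Register two descriptors as fixed files and read from slot 0. */
static int use_fixed_files(struct io_uring *ring, int fd0, int fd1,
			   void *buf, unsigned len)
{
	int fds[2] = { fd0, fd1 };
	struct io_uring_sqe *sqe;
	int ret;

	ret = io_uring_register_files(ring, fds, 2);
	if (ret < 0)
		return ret;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, 0 /* table index, not an fd */, buf, len, 0);
	sqe->flags |= IOSQE_FIXED_FILE;
	return io_uring_submit(ring);
}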
c3a31e60
JA
6735static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
6736 int index)
6737{
6738#if defined(CONFIG_UNIX)
6739 struct sock *sock = ctx->ring_sock->sk;
6740 struct sk_buff_head *head = &sock->sk_receive_queue;
6741 struct sk_buff *skb;
6742
6743 /*
6744 * See if we can merge this file into an existing skb SCM_RIGHTS
6745 * file set. If there's no room, fall back to allocating a new skb
6746 * and filling it in.
6747 */
6748 spin_lock_irq(&head->lock);
6749 skb = skb_peek(head);
6750 if (skb) {
6751 struct scm_fp_list *fpl = UNIXCB(skb).fp;
6752
6753 if (fpl->count < SCM_MAX_FD) {
6754 __skb_unlink(skb, head);
6755 spin_unlock_irq(&head->lock);
6756 fpl->fp[fpl->count] = get_file(file);
6757 unix_inflight(fpl->user, fpl->fp[fpl->count]);
6758 fpl->count++;
6759 spin_lock_irq(&head->lock);
6760 __skb_queue_head(head, skb);
6761 } else {
6762 skb = NULL;
6763 }
6764 }
6765 spin_unlock_irq(&head->lock);
6766
6767 if (skb) {
6768 fput(file);
6769 return 0;
6770 }
6771
6772 return __io_sqe_files_scm(ctx, 1, index);
6773#else
6774 return 0;
6775#endif
6776}
6777
a5318d3c 6778static int io_queue_file_removal(struct fixed_file_data *data,
05589553 6779 struct file *file)
05f3fb3c 6780{
a5318d3c 6781 struct io_file_put *pfile;
05589553
XW
6782 struct percpu_ref *refs = data->cur_refs;
6783 struct fixed_file_ref_node *ref_node;
05f3fb3c 6784
05f3fb3c 6785 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
a5318d3c
HD
6786 if (!pfile)
6787 return -ENOMEM;
05f3fb3c 6788
05589553 6789 ref_node = container_of(refs, struct fixed_file_ref_node, refs);
05f3fb3c 6790 pfile->file = file;
05589553
XW
6791 list_add(&pfile->list, &ref_node->file_list);
6792
a5318d3c 6793 return 0;
05f3fb3c
JA
6794}
6795
6796static int __io_sqe_files_update(struct io_ring_ctx *ctx,
6797 struct io_uring_files_update *up,
6798 unsigned nr_args)
6799{
6800 struct fixed_file_data *data = ctx->file_data;
05589553 6801 struct fixed_file_ref_node *ref_node;
05f3fb3c 6802 struct file *file;
c3a31e60
JA
6803 __s32 __user *fds;
6804 int fd, i, err;
6805 __u32 done;
05589553 6806 bool needs_switch = false;
c3a31e60 6807
05f3fb3c 6808 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
6809 return -EOVERFLOW;
6810 if (done > ctx->nr_user_files)
6811 return -EINVAL;
6812
05589553
XW
6813 ref_node = alloc_fixed_file_ref_node(ctx);
6814 if (IS_ERR(ref_node))
6815 return PTR_ERR(ref_node);
6816
c3a31e60 6817 done = 0;
05f3fb3c 6818 fds = u64_to_user_ptr(up->fds);
c3a31e60 6819 while (nr_args) {
65e19f54
JA
6820 struct fixed_file_table *table;
6821 unsigned index;
6822
c3a31e60
JA
6823 err = 0;
6824 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
6825 err = -EFAULT;
6826 break;
6827 }
05f3fb3c
JA
6828 i = array_index_nospec(up->offset, ctx->nr_user_files);
6829 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54
JA
6830 index = i & IORING_FILE_TABLE_MASK;
6831 if (table->files[index]) {
05f3fb3c 6832 file = io_file_from_index(ctx, index);
a5318d3c
HD
6833 err = io_queue_file_removal(data, file);
6834 if (err)
6835 break;
65e19f54 6836 table->files[index] = NULL;
05589553 6837 needs_switch = true;
c3a31e60
JA
6838 }
6839 if (fd != -1) {
c3a31e60
JA
6840 file = fget(fd);
6841 if (!file) {
6842 err = -EBADF;
6843 break;
6844 }
6845 /*
6846 * Don't allow io_uring instances to be registered. If
6847 * UNIX isn't enabled, then this causes a reference
6848 * cycle and this instance can never get freed. If UNIX
6849 * is enabled we'll handle it just fine, but there's
6850 * still no point in allowing a ring fd as it doesn't
6851 * support regular read/write anyway.
6852 */
6853 if (file->f_op == &io_uring_fops) {
6854 fput(file);
6855 err = -EBADF;
6856 break;
6857 }
65e19f54 6858 table->files[index] = file;
c3a31e60
JA
6859 err = io_sqe_file_register(ctx, file, i);
6860 if (err)
6861 break;
6862 }
6863 nr_args--;
6864 done++;
05f3fb3c
JA
6865 up->offset++;
6866 }
6867
05589553
XW
6868 if (needs_switch) {
6869 percpu_ref_kill(data->cur_refs);
6a4d07cd 6870 spin_lock(&data->lock);
05589553
XW
6871 list_add(&ref_node->node, &data->ref_list);
6872 data->cur_refs = &ref_node->refs;
6a4d07cd 6873 spin_unlock(&data->lock);
05589553
XW
6874 percpu_ref_get(&ctx->file_data->refs);
6875 } else
6876 destroy_fixed_file_ref_node(ref_node);
c3a31e60
JA
6877
6878 return done ? done : err;
6879}
05589553 6880
05f3fb3c
JA
6881static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
6882 unsigned nr_args)
6883{
6884 struct io_uring_files_update up;
6885
6886 if (!ctx->file_data)
6887 return -ENXIO;
6888 if (!nr_args)
6889 return -EINVAL;
6890 if (copy_from_user(&up, arg, sizeof(up)))
6891 return -EFAULT;
6892 if (up.resv)
6893 return -EINVAL;
6894
6895 return __io_sqe_files_update(ctx, &up, nr_args);
6896}
c3a31e60 6897
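__io_sqe_files_update() above services IORING_REGISTER_FILES_UPDATE: each incoming entry either clears a slot (fd == -1) or installs a new file at that offset, and a fresh ref node is switched in whenever anything was removed. A liburing sketch of the userspace side (slot numbers are illustrative):

#include <liburing.h>

/* Replace fixed-file slot 3 with a new descriptor and clear slot 4;
 * assumes the table was registered with enough slots. */
static int update_fixed_files(struct io_uring *ring, int new_fd)
{
	int fds[2] = { new_fd, -1 };	/* -1 removes the file in that slot */

	return io_uring_register_files_update(ring, 3, fds, 2);
}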
e9fd9396 6898static void io_free_work(struct io_wq_work *work)
7d723065
JA
6899{
6900 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6901
e9fd9396 6902 /* Consider that io_steal_work() relies on this ref */
7d723065
JA
6903 io_put_req(req);
6904}
6905
24369c2e
PB
6906static int io_init_wq_offload(struct io_ring_ctx *ctx,
6907 struct io_uring_params *p)
6908{
6909 struct io_wq_data data;
6910 struct fd f;
6911 struct io_ring_ctx *ctx_attach;
6912 unsigned int concurrency;
6913 int ret = 0;
6914
6915 data.user = ctx->user;
e9fd9396 6916 data.free_work = io_free_work;
24369c2e
PB
6917
6918 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
6919 /* Do QD, or 4 * CPUS, whatever is smallest */
6920 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
6921
6922 ctx->io_wq = io_wq_create(concurrency, &data);
6923 if (IS_ERR(ctx->io_wq)) {
6924 ret = PTR_ERR(ctx->io_wq);
6925 ctx->io_wq = NULL;
6926 }
6927 return ret;
6928 }
6929
6930 f = fdget(p->wq_fd);
6931 if (!f.file)
6932 return -EBADF;
6933
6934 if (f.file->f_op != &io_uring_fops) {
6935 ret = -EINVAL;
6936 goto out_fput;
6937 }
6938
6939 ctx_attach = f.file->private_data;
6940 /* @io_wq is protected by holding the fd */
6941 if (!io_wq_get(ctx_attach->io_wq, &data)) {
6942 ret = -EINVAL;
6943 goto out_fput;
6944 }
6945
6946 ctx->io_wq = ctx_attach->io_wq;
6947out_fput:
6948 fdput(f);
6949 return ret;
6950}
6951
6c271ce2
JA
6952static int io_sq_offload_start(struct io_ring_ctx *ctx,
6953 struct io_uring_params *p)
2b188cc1
JA
6954{
6955 int ret;
6956
6c271ce2 6957 init_waitqueue_head(&ctx->sqo_wait);
2b188cc1
JA
6958 mmgrab(current->mm);
6959 ctx->sqo_mm = current->mm;
6960
6c271ce2 6961 if (ctx->flags & IORING_SETUP_SQPOLL) {
3ec482d1
JA
6962 ret = -EPERM;
6963 if (!capable(CAP_SYS_ADMIN))
6964 goto err;
6965
917257da
JA
6966 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
6967 if (!ctx->sq_thread_idle)
6968 ctx->sq_thread_idle = HZ;
6969
6c271ce2 6970 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 6971 int cpu = p->sq_thread_cpu;
6c271ce2 6972
917257da 6973 ret = -EINVAL;
44a9bd18
JA
6974 if (cpu >= nr_cpu_ids)
6975 goto err;
7889f44d 6976 if (!cpu_online(cpu))
917257da
JA
6977 goto err;
6978
6c271ce2
JA
6979 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
6980 ctx, cpu,
6981 "io_uring-sq");
6982 } else {
6983 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
6984 "io_uring-sq");
6985 }
6986 if (IS_ERR(ctx->sqo_thread)) {
6987 ret = PTR_ERR(ctx->sqo_thread);
6988 ctx->sqo_thread = NULL;
6989 goto err;
6990 }
6991 wake_up_process(ctx->sqo_thread);
6992 } else if (p->flags & IORING_SETUP_SQ_AFF) {
6993 /* Can't have SQ_AFF without SQPOLL */
6994 ret = -EINVAL;
6995 goto err;
6996 }
6997
24369c2e
PB
6998 ret = io_init_wq_offload(ctx, p);
6999 if (ret)
2b188cc1 7000 goto err;
2b188cc1
JA
7001
7002 return 0;
7003err:
54a91f3b 7004 io_finish_async(ctx);
2b188cc1
JA
7005 mmdrop(ctx->sqo_mm);
7006 ctx->sqo_mm = NULL;
7007 return ret;
7008}
7009
7010static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
7011{
7012 atomic_long_sub(nr_pages, &user->locked_vm);
7013}
7014
7015static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
7016{
7017 unsigned long page_limit, cur_pages, new_pages;
7018
7019 /* Don't allow more pages than we can safely lock */
7020 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7021
7022 do {
7023 cur_pages = atomic_long_read(&user->locked_vm);
7024 new_pages = cur_pages + nr_pages;
7025 if (new_pages > page_limit)
7026 return -ENOMEM;
7027 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7028 new_pages) != cur_pages);
7029
7030 return 0;
7031}
7032
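io_account_mem() charges pinned ring and buffer pages against the caller's RLIMIT_MEMLOCK, so oversized ring setups or fixed-buffer registrations fail once the limit would be exceeded. Applications often inspect (or raise) that limit beforehand; a small userspace sketch:

#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

/* Report roughly how many pages the current RLIMIT_MEMLOCK allows the
 * ring to pin; larger fixed-buffer registrations will be refused. */
static void show_memlock_budget(void)
{
	struct rlimit rl;
	long page = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0 && rl.rlim_cur != RLIM_INFINITY)
		printf("may pin about %llu pages\n",
		       (unsigned long long)(rl.rlim_cur / page));
}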
7033static void io_mem_free(void *ptr)
7034{
52e04ef4
MR
7035 struct page *page;
7036
7037 if (!ptr)
7038 return;
2b188cc1 7039
52e04ef4 7040 page = virt_to_head_page(ptr);
2b188cc1
JA
7041 if (put_page_testzero(page))
7042 free_compound_page(page);
7043}
7044
7045static void *io_mem_alloc(size_t size)
7046{
7047 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
7048 __GFP_NORETRY;
7049
7050 return (void *) __get_free_pages(gfp_flags, get_order(size));
7051}
7052
75b28aff
HV
7053static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
7054 size_t *sq_offset)
7055{
7056 struct io_rings *rings;
7057 size_t off, sq_array_size;
7058
7059 off = struct_size(rings, cqes, cq_entries);
7060 if (off == SIZE_MAX)
7061 return SIZE_MAX;
7062
7063#ifdef CONFIG_SMP
7064 off = ALIGN(off, SMP_CACHE_BYTES);
7065 if (off == 0)
7066 return SIZE_MAX;
7067#endif
7068
7069 sq_array_size = array_size(sizeof(u32), sq_entries);
7070 if (sq_array_size == SIZE_MAX)
7071 return SIZE_MAX;
7072
7073 if (check_add_overflow(off, sq_array_size, &off))
7074 return SIZE_MAX;
7075
7076 if (sq_offset)
7077 *sq_offset = off;
7078
7079 return off;
7080}
7081
2b188cc1
JA
7082static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
7083{
75b28aff 7084 size_t pages;
2b188cc1 7085
75b28aff
HV
7086 pages = (size_t)1 << get_order(
7087 rings_size(sq_entries, cq_entries, NULL));
7088 pages += (size_t)1 << get_order(
7089 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 7090
75b28aff 7091 return pages;
2b188cc1
JA
7092}
7093
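rings_size() and ring_pages() size one allocation that holds the CQEs followed by the SQ index array; userspace maps it at IORING_OFF_SQ_RING and maps the SQE array separately at IORING_OFF_SQES, using the offsets returned in struct io_uring_params. A raw-syscall sketch without error handling (liburing does all of this inside io_uring_queue_init()):

#include <sys/mman.h>
#include <linux/io_uring.h>

/* Map the SQ ring and the SQE array after io_uring_setup(); the CQ ring
 * mapping is analogous at IORING_OFF_CQ_RING. */
static void map_rings(int fd, struct io_uring_params *p,
		      void **sq_ring, struct io_uring_sqe **sqes)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(__u32);

	*sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, IORING_OFF_SQ_RING);
	*sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		     PROT_READ | PROT_WRITE, MAP_SHARED,
		     fd, IORING_OFF_SQES);
}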
edafccee
JA
7094static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
7095{
7096 int i, j;
7097
7098 if (!ctx->user_bufs)
7099 return -ENXIO;
7100
7101 for (i = 0; i < ctx->nr_user_bufs; i++) {
7102 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7103
7104 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 7105 unpin_user_page(imu->bvec[j].bv_page);
edafccee
JA
7106
7107 if (ctx->account_mem)
7108 io_unaccount_mem(ctx->user, imu->nr_bvecs);
d4ef6475 7109 kvfree(imu->bvec);
edafccee
JA
7110 imu->nr_bvecs = 0;
7111 }
7112
7113 kfree(ctx->user_bufs);
7114 ctx->user_bufs = NULL;
7115 ctx->nr_user_bufs = 0;
7116 return 0;
7117}
7118
7119static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
7120 void __user *arg, unsigned index)
7121{
7122 struct iovec __user *src;
7123
7124#ifdef CONFIG_COMPAT
7125 if (ctx->compat) {
7126 struct compat_iovec __user *ciovs;
7127 struct compat_iovec ciov;
7128
7129 ciovs = (struct compat_iovec __user *) arg;
7130 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
7131 return -EFAULT;
7132
d55e5f5b 7133 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
7134 dst->iov_len = ciov.iov_len;
7135 return 0;
7136 }
7137#endif
7138 src = (struct iovec __user *) arg;
7139 if (copy_from_user(dst, &src[index], sizeof(*dst)))
7140 return -EFAULT;
7141 return 0;
7142}
7143
7144static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
7145 unsigned nr_args)
7146{
7147 struct vm_area_struct **vmas = NULL;
7148 struct page **pages = NULL;
7149 int i, j, got_pages = 0;
7150 int ret = -EINVAL;
7151
7152 if (ctx->user_bufs)
7153 return -EBUSY;
7154 if (!nr_args || nr_args > UIO_MAXIOV)
7155 return -EINVAL;
7156
7157 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
7158 GFP_KERNEL);
7159 if (!ctx->user_bufs)
7160 return -ENOMEM;
7161
7162 for (i = 0; i < nr_args; i++) {
7163 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7164 unsigned long off, start, end, ubuf;
7165 int pret, nr_pages;
7166 struct iovec iov;
7167 size_t size;
7168
7169 ret = io_copy_iov(ctx, &iov, arg, i);
7170 if (ret)
a278682d 7171 goto err;
edafccee
JA
7172
7173 /*
7174 * Don't impose further limits on the size and buffer
7175		 * constraints here; we'll return -EINVAL later when IO is
7176 * submitted if they are wrong.
7177 */
7178 ret = -EFAULT;
7179 if (!iov.iov_base || !iov.iov_len)
7180 goto err;
7181
7182 /* arbitrary limit, but we need something */
7183 if (iov.iov_len > SZ_1G)
7184 goto err;
7185
7186 ubuf = (unsigned long) iov.iov_base;
7187 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
7188 start = ubuf >> PAGE_SHIFT;
7189 nr_pages = end - start;
7190
7191 if (ctx->account_mem) {
7192 ret = io_account_mem(ctx->user, nr_pages);
7193 if (ret)
7194 goto err;
7195 }
7196
7197 ret = 0;
7198 if (!pages || nr_pages > got_pages) {
7199 kfree(vmas);
7200 kfree(pages);
d4ef6475 7201 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 7202 GFP_KERNEL);
d4ef6475 7203 vmas = kvmalloc_array(nr_pages,
edafccee
JA
7204 sizeof(struct vm_area_struct *),
7205 GFP_KERNEL);
7206 if (!pages || !vmas) {
7207 ret = -ENOMEM;
7208 if (ctx->account_mem)
7209 io_unaccount_mem(ctx->user, nr_pages);
7210 goto err;
7211 }
7212 got_pages = nr_pages;
7213 }
7214
d4ef6475 7215 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
7216 GFP_KERNEL);
7217 ret = -ENOMEM;
7218 if (!imu->bvec) {
7219 if (ctx->account_mem)
7220 io_unaccount_mem(ctx->user, nr_pages);
7221 goto err;
7222 }
7223
7224 ret = 0;
7225 down_read(&current->mm->mmap_sem);
2113b05d 7226 pret = pin_user_pages(ubuf, nr_pages,
932f4a63
IW
7227 FOLL_WRITE | FOLL_LONGTERM,
7228 pages, vmas);
edafccee
JA
7229 if (pret == nr_pages) {
7230 /* don't support file backed memory */
7231 for (j = 0; j < nr_pages; j++) {
7232 struct vm_area_struct *vma = vmas[j];
7233
7234 if (vma->vm_file &&
7235 !is_file_hugepages(vma->vm_file)) {
7236 ret = -EOPNOTSUPP;
7237 break;
7238 }
7239 }
7240 } else {
7241 ret = pret < 0 ? pret : -EFAULT;
7242 }
7243 up_read(&current->mm->mmap_sem);
7244 if (ret) {
7245 /*
7246 * if we did partial map, or found file backed vmas,
7247 * release any pages we did get
7248 */
27c4d3a3 7249 if (pret > 0)
f1f6a7dd 7250 unpin_user_pages(pages, pret);
edafccee
JA
7251 if (ctx->account_mem)
7252 io_unaccount_mem(ctx->user, nr_pages);
d4ef6475 7253 kvfree(imu->bvec);
edafccee
JA
7254 goto err;
7255 }
7256
7257 off = ubuf & ~PAGE_MASK;
7258 size = iov.iov_len;
7259 for (j = 0; j < nr_pages; j++) {
7260 size_t vec_len;
7261
7262 vec_len = min_t(size_t, size, PAGE_SIZE - off);
7263 imu->bvec[j].bv_page = pages[j];
7264 imu->bvec[j].bv_len = vec_len;
7265 imu->bvec[j].bv_offset = off;
7266 off = 0;
7267 size -= vec_len;
7268 }
7269 /* store original address for later verification */
7270 imu->ubuf = ubuf;
7271 imu->len = iov.iov_len;
7272 imu->nr_bvecs = nr_pages;
7273
7274 ctx->nr_user_bufs++;
7275 }
d4ef6475
MR
7276 kvfree(pages);
7277 kvfree(vmas);
edafccee
JA
7278 return 0;
7279err:
d4ef6475
MR
7280 kvfree(pages);
7281 kvfree(vmas);
edafccee
JA
7282 io_sqe_buffer_unregister(ctx);
7283 return ret;
7284}
7285
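io_sqe_buffer_register() pins each iovec's pages once, up front, so READ_FIXED/WRITE_FIXED requests can skip the per-I/O page pinning. A liburing sketch registering a single buffer and reading into it (sizes and names are illustrative, error handling trimmed):

#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

/* Register one 64 KiB fixed buffer and issue a READ_FIXED into it;
 * buf_index 0 refers to the iovec registered below. */
static int read_into_fixed(struct io_uring *ring, int fd)
{
	struct iovec iov;
	struct io_uring_sqe *sqe;
	int ret;

	iov.iov_len = 64 * 1024;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base)
		return -1;

	ret = io_uring_register_buffers(ring, &iov, 1);
	if (ret < 0)
		return ret;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	return io_uring_submit(ring);
}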
9b402849
JA
7286static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
7287{
7288 __s32 __user *fds = arg;
7289 int fd;
7290
7291 if (ctx->cq_ev_fd)
7292 return -EBUSY;
7293
7294 if (copy_from_user(&fd, fds, sizeof(*fds)))
7295 return -EFAULT;
7296
7297 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
7298 if (IS_ERR(ctx->cq_ev_fd)) {
7299 int ret = PTR_ERR(ctx->cq_ev_fd);
7300 ctx->cq_ev_fd = NULL;
7301 return ret;
7302 }
7303
7304 return 0;
7305}
7306
7307static int io_eventfd_unregister(struct io_ring_ctx *ctx)
7308{
7309 if (ctx->cq_ev_fd) {
7310 eventfd_ctx_put(ctx->cq_ev_fd);
7311 ctx->cq_ev_fd = NULL;
7312 return 0;
7313 }
7314
7315 return -ENXIO;
7316}
7317
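io_eventfd_register() attaches an eventfd that is signalled for every posted CQE, which is the usual way to watch a ring from an existing epoll or poll loop. A short sketch (error handling reduced):

#include <unistd.h>
#include <sys/eventfd.h>
#include <liburing.h>

/* Tie completions to an eventfd so the ring can be monitored alongside
 * other descriptors; returns the eventfd on success. */
static int attach_eventfd(struct io_uring *ring)
{
	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	if (efd < 0)
		return -1;
	if (io_uring_register_eventfd(ring, efd) < 0) {
		close(efd);
		return -1;
	}
	return efd;
}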
5a2e745d
JA
7318static int __io_destroy_buffers(int id, void *p, void *data)
7319{
7320 struct io_ring_ctx *ctx = data;
7321 struct io_buffer *buf = p;
7322
067524e9 7323 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
7324 return 0;
7325}
7326
7327static void io_destroy_buffers(struct io_ring_ctx *ctx)
7328{
7329 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
7330 idr_destroy(&ctx->io_buffer_idr);
7331}
7332
2b188cc1
JA
7333static void io_ring_ctx_free(struct io_ring_ctx *ctx)
7334{
6b06314c 7335 io_finish_async(ctx);
2b188cc1
JA
7336 if (ctx->sqo_mm)
7337 mmdrop(ctx->sqo_mm);
def596e9
JA
7338
7339 io_iopoll_reap_events(ctx);
edafccee 7340 io_sqe_buffer_unregister(ctx);
6b06314c 7341 io_sqe_files_unregister(ctx);
9b402849 7342 io_eventfd_unregister(ctx);
5a2e745d 7343 io_destroy_buffers(ctx);
41726c9a 7344 idr_destroy(&ctx->personality_idr);
def596e9 7345
2b188cc1 7346#if defined(CONFIG_UNIX)
355e8d26
EB
7347 if (ctx->ring_sock) {
7348 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 7349 sock_release(ctx->ring_sock);
355e8d26 7350 }
2b188cc1
JA
7351#endif
7352
75b28aff 7353 io_mem_free(ctx->rings);
2b188cc1 7354 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
7355
7356 percpu_ref_exit(&ctx->refs);
7357 if (ctx->account_mem)
7358 io_unaccount_mem(ctx->user,
7359 ring_pages(ctx->sq_entries, ctx->cq_entries));
7360 free_uid(ctx->user);
181e448d 7361 put_cred(ctx->creds);
78076bb6 7362 kfree(ctx->cancel_hash);
0ddf92e8 7363 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
7364 kfree(ctx);
7365}
7366
7367static __poll_t io_uring_poll(struct file *file, poll_table *wait)
7368{
7369 struct io_ring_ctx *ctx = file->private_data;
7370 __poll_t mask = 0;
7371
7372 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
7373 /*
7374 * synchronizes with barrier from wq_has_sleeper call in
7375 * io_commit_cqring
7376 */
2b188cc1 7377 smp_rmb();
75b28aff
HV
7378 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
7379 ctx->rings->sq_ring_entries)
2b188cc1 7380 mask |= EPOLLOUT | EPOLLWRNORM;
63e5d81f 7381 if (io_cqring_events(ctx, false))
2b188cc1
JA
7382 mask |= EPOLLIN | EPOLLRDNORM;
7383
7384 return mask;
7385}
7386
7387static int io_uring_fasync(int fd, struct file *file, int on)
7388{
7389 struct io_ring_ctx *ctx = file->private_data;
7390
7391 return fasync_helper(fd, file, on, &ctx->cq_fasync);
7392}
7393
071698e1
JA
7394static int io_remove_personalities(int id, void *p, void *data)
7395{
7396 struct io_ring_ctx *ctx = data;
7397 const struct cred *cred;
7398
7399 cred = idr_remove(&ctx->personality_idr, id);
7400 if (cred)
7401 put_cred(cred);
7402 return 0;
7403}
7404
85faa7b8
JA
7405static void io_ring_exit_work(struct work_struct *work)
7406{
7407 struct io_ring_ctx *ctx;
7408
7409 ctx = container_of(work, struct io_ring_ctx, exit_work);
7410 if (ctx->rings)
7411 io_cqring_overflow_flush(ctx, true);
7412
0f158b4c 7413 wait_for_completion(&ctx->ref_comp);
85faa7b8
JA
7414 io_ring_ctx_free(ctx);
7415}
7416
2b188cc1
JA
7417static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
7418{
7419 mutex_lock(&ctx->uring_lock);
7420 percpu_ref_kill(&ctx->refs);
7421 mutex_unlock(&ctx->uring_lock);
7422
5262f567 7423 io_kill_timeouts(ctx);
221c5eb2 7424 io_poll_remove_all(ctx);
561fb04a
JA
7425
7426 if (ctx->io_wq)
7427 io_wq_cancel_all(ctx->io_wq);
7428
def596e9 7429 io_iopoll_reap_events(ctx);
15dff286
JA
7430 /* if we failed setting up the ctx, we might not have any rings */
7431 if (ctx->rings)
7432 io_cqring_overflow_flush(ctx, true);
071698e1 7433 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
85faa7b8
JA
7434 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
7435 queue_work(system_wq, &ctx->exit_work);
2b188cc1
JA
7436}
7437
7438static int io_uring_release(struct inode *inode, struct file *file)
7439{
7440 struct io_ring_ctx *ctx = file->private_data;
7441
7442 file->private_data = NULL;
7443 io_ring_ctx_wait_and_kill(ctx);
7444 return 0;
7445}
7446
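/*
 * Cancel every inflight request that still references @files. Each pass
 * picks one matching request off ->inflight_list, drops its overflow
 * state if needed, asks io-wq to cancel it, and sleeps on
 * ->inflight_wait until it is gone; we loop until no matching request
 * remains.
 */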
fcb323cc
JA
7447static void io_uring_cancel_files(struct io_ring_ctx *ctx,
7448 struct files_struct *files)
7449{
fcb323cc 7450 while (!list_empty_careful(&ctx->inflight_list)) {
d8f1b971
XW
7451 struct io_kiocb *cancel_req = NULL, *req;
7452 DEFINE_WAIT(wait);
fcb323cc
JA
7453
7454 spin_lock_irq(&ctx->inflight_lock);
7455 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
768134d4
JA
7456 if (req->work.files != files)
7457 continue;
7458 /* req is being completed, ignore */
7459 if (!refcount_inc_not_zero(&req->refs))
7460 continue;
7461 cancel_req = req;
7462 break;
fcb323cc 7463 }
768134d4 7464 if (cancel_req)
fcb323cc 7465 prepare_to_wait(&ctx->inflight_wait, &wait,
768134d4 7466 TASK_UNINTERRUPTIBLE);
fcb323cc
JA
7467 spin_unlock_irq(&ctx->inflight_lock);
7468
768134d4
JA
7469	/* Keep going until no matching req remains */
7470 if (!cancel_req)
fcb323cc 7471 break;
2f6d9b9d 7472
2ca10259
JA
7473 if (cancel_req->flags & REQ_F_OVERFLOW) {
7474 spin_lock_irq(&ctx->completion_lock);
7475 list_del(&cancel_req->list);
7476 cancel_req->flags &= ~REQ_F_OVERFLOW;
7477 if (list_empty(&ctx->cq_overflow_list)) {
7478 clear_bit(0, &ctx->sq_check_overflow);
7479 clear_bit(0, &ctx->cq_check_overflow);
7480 }
7481 spin_unlock_irq(&ctx->completion_lock);
7482
7483 WRITE_ONCE(ctx->rings->cq_overflow,
7484 atomic_inc_return(&ctx->cached_cq_overflow));
7485
7486 /*
7487 * Put inflight ref and overflow ref. If that's
7488 * all we had, then we're done with this request.
7489 */
7490 if (refcount_sub_and_test(2, &cancel_req->refs)) {
4518a3cc 7491 io_free_req(cancel_req);
d8f1b971 7492 finish_wait(&ctx->inflight_wait, &wait);
2ca10259
JA
7493 continue;
7494 }
7495 }
7496
2f6d9b9d
BL
7497 io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
7498 io_put_req(cancel_req);
fcb323cc 7499 schedule();
d8f1b971 7500 finish_wait(&ctx->inflight_wait, &wait);
fcb323cc
JA
7501 }
7502}
7503
7504static int io_uring_flush(struct file *file, void *data)
7505{
7506 struct io_ring_ctx *ctx = file->private_data;
7507
7508 io_uring_cancel_files(ctx, data);
6ab23144
JA
7509
7510 /*
7511 * If the task is going away, cancel work it may have pending
7512 */
7513 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
7514 io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
7515
fcb323cc
JA
7516 return 0;
7517}
7518
6c5c240e
RP
7519static void *io_uring_validate_mmap_request(struct file *file,
7520 loff_t pgoff, size_t sz)
2b188cc1 7521{
2b188cc1 7522 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 7523 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
7524 struct page *page;
7525 void *ptr;
7526
7527 switch (offset) {
7528 case IORING_OFF_SQ_RING:
75b28aff
HV
7529 case IORING_OFF_CQ_RING:
7530 ptr = ctx->rings;
2b188cc1
JA
7531 break;
7532 case IORING_OFF_SQES:
7533 ptr = ctx->sq_sqes;
7534 break;
2b188cc1 7535 default:
6c5c240e 7536 return ERR_PTR(-EINVAL);
2b188cc1
JA
7537 }
7538
7539 page = virt_to_head_page(ptr);
a50b854e 7540 if (sz > page_size(page))
6c5c240e
RP
7541 return ERR_PTR(-EINVAL);
7542
7543 return ptr;
7544}
7545
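/*
 * These offsets correspond to the regions an application mmaps after
 * io_uring_setup(): IORING_OFF_SQ_RING and IORING_OFF_CQ_RING both map
 * the shared ring header allocation, IORING_OFF_SQES maps the SQE array.
 * A minimal userspace sketch, with sizes taken from the returned
 * io_uring_params (liburing's io_uring_queue_mmap() handles this):
 *
 *	sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		       ring_fd, IORING_OFF_SQ_RING);
 *	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		    ring_fd, IORING_OFF_SQES);
 */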
7546#ifdef CONFIG_MMU
7547
7548static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7549{
7550 size_t sz = vma->vm_end - vma->vm_start;
7551 unsigned long pfn;
7552 void *ptr;
7553
7554 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
7555 if (IS_ERR(ptr))
7556 return PTR_ERR(ptr);
2b188cc1
JA
7557
7558 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
7559 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
7560}
7561
6c5c240e
RP
7562#else /* !CONFIG_MMU */
7563
7564static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7565{
7566 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
7567}
7568
7569static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
7570{
7571 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
7572}
7573
7574static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
7575 unsigned long addr, unsigned long len,
7576 unsigned long pgoff, unsigned long flags)
7577{
7578 void *ptr;
7579
7580 ptr = io_uring_validate_mmap_request(file, pgoff, len);
7581 if (IS_ERR(ptr))
7582 return PTR_ERR(ptr);
7583
7584 return (unsigned long) ptr;
7585}
7586
7587#endif /* !CONFIG_MMU */
7588
2b188cc1
JA
7589SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
7590 u32, min_complete, u32, flags, const sigset_t __user *, sig,
7591 size_t, sigsz)
7592{
7593 struct io_ring_ctx *ctx;
7594 long ret = -EBADF;
7595 int submitted = 0;
7596 struct fd f;
7597
b41e9852
JA
7598 if (current->task_works)
7599 task_work_run();
7600
6c271ce2 7601 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
2b188cc1
JA
7602 return -EINVAL;
7603
7604 f = fdget(fd);
7605 if (!f.file)
7606 return -EBADF;
7607
7608 ret = -EOPNOTSUPP;
7609 if (f.file->f_op != &io_uring_fops)
7610 goto out_fput;
7611
7612 ret = -ENXIO;
7613 ctx = f.file->private_data;
7614 if (!percpu_ref_tryget(&ctx->refs))
7615 goto out_fput;
7616
6c271ce2
JA
7617 /*
7618 * For SQ polling, the thread will do all submissions and completions.
7619 * Just return the requested submit count, and wake the thread if
7620 * we were asked to.
7621 */
b2a9eada 7622 ret = 0;
6c271ce2 7623 if (ctx->flags & IORING_SETUP_SQPOLL) {
c1edbf5f
JA
7624 if (!list_empty_careful(&ctx->cq_overflow_list))
7625 io_cqring_overflow_flush(ctx, false);
6c271ce2
JA
7626 if (flags & IORING_ENTER_SQ_WAKEUP)
7627 wake_up(&ctx->sqo_wait);
7628 submitted = to_submit;
b2a9eada 7629 } else if (to_submit) {
2b188cc1 7630 mutex_lock(&ctx->uring_lock);
0cdaf760 7631 submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
2b188cc1 7632 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
7633
7634 if (submitted != to_submit)
7635 goto out;
2b188cc1
JA
7636 }
7637 if (flags & IORING_ENTER_GETEVENTS) {
def596e9
JA
7638 unsigned nr_events = 0;
7639
2b188cc1
JA
7640 min_complete = min(min_complete, ctx->cq_entries);
7641
32b2244a
XW
7642 /*
7643		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, userspace
7644		 * applications don't need to poll for completion events
7645		 * themselves; they can rely on io_sq_thread to do that polling,
7646		 * which reduces cpu usage and uring_lock contention.
7647 */
7648 if (ctx->flags & IORING_SETUP_IOPOLL &&
7649 !(ctx->flags & IORING_SETUP_SQPOLL)) {
def596e9 7650 ret = io_iopoll_check(ctx, &nr_events, min_complete);
def596e9
JA
7651 } else {
7652 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
7653 }
2b188cc1
JA
7654 }
7655
7c504e65 7656out:
6805b32e 7657 percpu_ref_put(&ctx->refs);
2b188cc1
JA
7658out_fput:
7659 fdput(f);
7660 return submitted ? submitted : ret;
7661}
7662
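/*
 * A minimal userspace sketch of the syscall above (liburing's
 * io_uring_submit_and_wait() wraps the same call): submit whatever is
 * queued in the SQ ring and block until at least one CQE is available.
 *
 *	ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *		      IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * With IORING_SETUP_SQPOLL the kernel thread handles submission, so the
 * call is only needed to wait for completions or to wake an idle sq
 * thread with IORING_ENTER_SQ_WAKEUP.
 */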
bebdb65e 7663#ifdef CONFIG_PROC_FS
87ce955b
JA
7664static int io_uring_show_cred(int id, void *p, void *data)
7665{
7666 const struct cred *cred = p;
7667 struct seq_file *m = data;
7668 struct user_namespace *uns = seq_user_ns(m);
7669 struct group_info *gi;
7670 kernel_cap_t cap;
7671 unsigned __capi;
7672 int g;
7673
7674 seq_printf(m, "%5d\n", id);
7675 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
7676 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
7677 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
7678 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
7679 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
7680 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
7681 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
7682 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
7683 seq_puts(m, "\n\tGroups:\t");
7684 gi = cred->group_info;
7685 for (g = 0; g < gi->ngroups; g++) {
7686 seq_put_decimal_ull(m, g ? " " : "",
7687 from_kgid_munged(uns, gi->gid[g]));
7688 }
7689 seq_puts(m, "\n\tCapEff:\t");
7690 cap = cred->cap_effective;
7691 CAP_FOR_EACH_U32(__capi)
7692 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
7693 seq_putc(m, '\n');
7694 return 0;
7695}
7696
7697static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
7698{
7699 int i;
7700
7701 mutex_lock(&ctx->uring_lock);
7702 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
7703 for (i = 0; i < ctx->nr_user_files; i++) {
7704 struct fixed_file_table *table;
7705 struct file *f;
7706
7707 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7708 f = table->files[i & IORING_FILE_TABLE_MASK];
7709 if (f)
7710 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
7711 else
7712 seq_printf(m, "%5u: <none>\n", i);
7713 }
7714 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
7715 for (i = 0; i < ctx->nr_user_bufs; i++) {
7716 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
7717
7718 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
7719 (unsigned int) buf->len);
7720 }
7721 if (!idr_is_empty(&ctx->personality_idr)) {
7722 seq_printf(m, "Personalities:\n");
7723 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
7724 }
d7718a9d
JA
7725 seq_printf(m, "PollList:\n");
7726 spin_lock_irq(&ctx->completion_lock);
7727 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7728 struct hlist_head *list = &ctx->cancel_hash[i];
7729 struct io_kiocb *req;
7730
7731 hlist_for_each_entry(req, list, hash_node)
7732 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
7733 req->task->task_works != NULL);
7734 }
7735 spin_unlock_irq(&ctx->completion_lock);
87ce955b
JA
7736 mutex_unlock(&ctx->uring_lock);
7737}
7738
7739static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
7740{
7741 struct io_ring_ctx *ctx = f->private_data;
7742
7743 if (percpu_ref_tryget(&ctx->refs)) {
7744 __io_uring_show_fdinfo(ctx, m);
7745 percpu_ref_put(&ctx->refs);
7746 }
7747}
bebdb65e 7748#endif
87ce955b 7749
2b188cc1
JA
7750static const struct file_operations io_uring_fops = {
7751 .release = io_uring_release,
fcb323cc 7752 .flush = io_uring_flush,
2b188cc1 7753 .mmap = io_uring_mmap,
6c5c240e
RP
7754#ifndef CONFIG_MMU
7755 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
7756 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
7757#endif
2b188cc1
JA
7758 .poll = io_uring_poll,
7759 .fasync = io_uring_fasync,
bebdb65e 7760#ifdef CONFIG_PROC_FS
87ce955b 7761 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 7762#endif
2b188cc1
JA
7763};
7764
7765static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
7766 struct io_uring_params *p)
7767{
75b28aff
HV
7768 struct io_rings *rings;
7769 size_t size, sq_array_offset;
2b188cc1 7770
75b28aff
HV
7771 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
7772 if (size == SIZE_MAX)
7773 return -EOVERFLOW;
7774
7775 rings = io_mem_alloc(size);
7776 if (!rings)
2b188cc1
JA
7777 return -ENOMEM;
7778
75b28aff
HV
7779 ctx->rings = rings;
7780 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
7781 rings->sq_ring_mask = p->sq_entries - 1;
7782 rings->cq_ring_mask = p->cq_entries - 1;
7783 rings->sq_ring_entries = p->sq_entries;
7784 rings->cq_ring_entries = p->cq_entries;
7785 ctx->sq_mask = rings->sq_ring_mask;
7786 ctx->cq_mask = rings->cq_ring_mask;
7787 ctx->sq_entries = rings->sq_ring_entries;
7788 ctx->cq_entries = rings->cq_ring_entries;
2b188cc1
JA
7789
7790 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
7791 if (size == SIZE_MAX) {
7792 io_mem_free(ctx->rings);
7793 ctx->rings = NULL;
2b188cc1 7794 return -EOVERFLOW;
eb065d30 7795 }
2b188cc1
JA
7796
7797 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
7798 if (!ctx->sq_sqes) {
7799 io_mem_free(ctx->rings);
7800 ctx->rings = NULL;
2b188cc1 7801 return -ENOMEM;
eb065d30 7802 }
2b188cc1 7803
2b188cc1
JA
7804 return 0;
7805}
7806
7807/*
7808 * Allocate an anonymous fd; this is what constitutes the application-
7809 * visible backing of an io_uring instance. The application mmaps this
7810 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
7811 * we have to tie this fd to a socket for file garbage collection purposes.
7812 */
7813static int io_uring_get_fd(struct io_ring_ctx *ctx)
7814{
7815 struct file *file;
7816 int ret;
7817
7818#if defined(CONFIG_UNIX)
7819 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
7820 &ctx->ring_sock);
7821 if (ret)
7822 return ret;
7823#endif
7824
7825 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
7826 if (ret < 0)
7827 goto err;
7828
7829 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
7830 O_RDWR | O_CLOEXEC);
7831 if (IS_ERR(file)) {
7832 put_unused_fd(ret);
7833 ret = PTR_ERR(file);
7834 goto err;
7835 }
7836
7837#if defined(CONFIG_UNIX)
7838 ctx->ring_sock->file = file;
7839#endif
7840 fd_install(ret, file);
7841 return ret;
7842err:
7843#if defined(CONFIG_UNIX)
7844 sock_release(ctx->ring_sock);
7845 ctx->ring_sock = NULL;
7846#endif
7847 return ret;
7848}
7849
7f13657d
XW
7850static int io_uring_create(unsigned entries, struct io_uring_params *p,
7851 struct io_uring_params __user *params)
2b188cc1
JA
7852{
7853 struct user_struct *user = NULL;
7854 struct io_ring_ctx *ctx;
7855 bool account_mem;
7856 int ret;
7857
8110c1a6 7858 if (!entries)
2b188cc1 7859 return -EINVAL;
8110c1a6
JA
7860 if (entries > IORING_MAX_ENTRIES) {
7861 if (!(p->flags & IORING_SETUP_CLAMP))
7862 return -EINVAL;
7863 entries = IORING_MAX_ENTRIES;
7864 }
2b188cc1
JA
7865
7866 /*
7867 * Use twice as many entries for the CQ ring. It's possible for the
7868 * application to drive a higher depth than the size of the SQ ring,
7869 * since the sqes are only used at submission time. This allows for
33a107f0
JA
7870 * some flexibility in overcommitting a bit. If the application has
7871 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
7872 * of CQ ring entries manually.
2b188cc1
JA
7873 */
7874 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
7875 if (p->flags & IORING_SETUP_CQSIZE) {
7876 /*
7877 * If IORING_SETUP_CQSIZE is set, we do the same roundup
7878 * to a power-of-two, if it isn't already. We do NOT impose
7879 * any cq vs sq ring sizing.
7880 */
8110c1a6 7881 if (p->cq_entries < p->sq_entries)
33a107f0 7882 return -EINVAL;
8110c1a6
JA
7883 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
7884 if (!(p->flags & IORING_SETUP_CLAMP))
7885 return -EINVAL;
7886 p->cq_entries = IORING_MAX_CQ_ENTRIES;
7887 }
33a107f0
JA
7888 p->cq_entries = roundup_pow_of_two(p->cq_entries);
7889 } else {
7890 p->cq_entries = 2 * p->sq_entries;
7891 }
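	/*
	 * Worked example: entries == 100 gives sq_entries == 128, and
	 * without IORING_SETUP_CQSIZE cq_entries == 256.
	 */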
2b188cc1
JA
7892
7893 user = get_uid(current_user());
7894 account_mem = !capable(CAP_IPC_LOCK);
7895
7896 if (account_mem) {
7897 ret = io_account_mem(user,
7898 ring_pages(p->sq_entries, p->cq_entries));
7899 if (ret) {
7900 free_uid(user);
7901 return ret;
7902 }
7903 }
7904
7905 ctx = io_ring_ctx_alloc(p);
7906 if (!ctx) {
7907 if (account_mem)
7908 io_unaccount_mem(user, ring_pages(p->sq_entries,
7909 p->cq_entries));
7910 free_uid(user);
7911 return -ENOMEM;
7912 }
7913 ctx->compat = in_compat_syscall();
7914 ctx->account_mem = account_mem;
7915 ctx->user = user;
0b8c0ec7 7916 ctx->creds = get_current_cred();
2b188cc1
JA
7917
7918 ret = io_allocate_scq_urings(ctx, p);
7919 if (ret)
7920 goto err;
7921
6c271ce2 7922 ret = io_sq_offload_start(ctx, p);
2b188cc1
JA
7923 if (ret)
7924 goto err;
7925
2b188cc1 7926 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
7927 p->sq_off.head = offsetof(struct io_rings, sq.head);
7928 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
7929 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
7930 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
7931 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
7932 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
7933 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
7934
7935 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
7936 p->cq_off.head = offsetof(struct io_rings, cq.head);
7937 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
7938 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
7939 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
7940 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
7941 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 7942 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 7943
7f13657d
XW
7944 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
7945 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
7946 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
7947
7948 if (copy_to_user(params, p, sizeof(*p))) {
7949 ret = -EFAULT;
7950 goto err;
7951 }
044c1ab3
JA
7952 /*
7953	 * Install the ring fd as the very last thing, so we don't risk someone
7954	 * having closed it before we finish setup.
7955 */
7956 ret = io_uring_get_fd(ctx);
7957 if (ret < 0)
7958 goto err;
7959
c826bd7a 7960 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
7961 return ret;
7962err:
7963 io_ring_ctx_wait_and_kill(ctx);
7964 return ret;
7965}
7966
7967/*
7968 * Sets up an io_uring context and returns the fd. The application asks for a
7969 * ring size; we return the actual sq/cq ring sizes (among other things) in the
7970 * params structure passed in.
7971 */
7972static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
7973{
7974 struct io_uring_params p;
2b188cc1
JA
7975 int i;
7976
7977 if (copy_from_user(&p, params, sizeof(p)))
7978 return -EFAULT;
7979 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
7980 if (p.resv[i])
7981 return -EINVAL;
7982 }
7983
6c271ce2 7984 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 7985 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
24369c2e 7986 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
2b188cc1
JA
7987 return -EINVAL;
7988
7f13657d 7989 return io_uring_create(entries, &p, params);
2b188cc1
JA
7990}
7991
7992SYSCALL_DEFINE2(io_uring_setup, u32, entries,
7993 struct io_uring_params __user *, params)
7994{
7995 return io_uring_setup(entries, params);
7996}
7997
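/*
 * A minimal userspace sketch of the setup call above (there is typically
 * no libc wrapper; liburing's io_uring_queue_init() is the usual entry
 * point): ask for a queue depth and read the actual ring sizes and
 * feature bits back out of the params struct.
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *	// on success p.sq_entries, p.cq_entries and p.features are filled in
 */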
66f4af93
JA
7998static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
7999{
8000 struct io_uring_probe *p;
8001 size_t size;
8002 int i, ret;
8003
8004 size = struct_size(p, ops, nr_args);
8005 if (size == SIZE_MAX)
8006 return -EOVERFLOW;
8007 p = kzalloc(size, GFP_KERNEL);
8008 if (!p)
8009 return -ENOMEM;
8010
8011 ret = -EFAULT;
8012 if (copy_from_user(p, arg, size))
8013 goto out;
8014 ret = -EINVAL;
8015 if (memchr_inv(p, 0, size))
8016 goto out;
8017
8018 p->last_op = IORING_OP_LAST - 1;
8019 if (nr_args > IORING_OP_LAST)
8020 nr_args = IORING_OP_LAST;
8021
8022 for (i = 0; i < nr_args; i++) {
8023 p->ops[i].op = i;
8024 if (!io_op_defs[i].not_supported)
8025 p->ops[i].flags = IO_URING_OP_SUPPORTED;
8026 }
8027 p->ops_len = i;
8028
8029 ret = 0;
8030 if (copy_to_user(arg, p, size))
8031 ret = -EFAULT;
8032out:
8033 kfree(p);
8034 return ret;
8035}
8036
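/*
 * A minimal sketch of driving the probe interface above from userspace
 * (liburing wraps it as io_uring_get_probe()): the buffer must start out
 * zeroed, and the kernel fills in one io_uring_probe_op per opcode,
 * setting IO_URING_OP_SUPPORTED for the ones this kernel implements.
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *	// check probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED
 */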
071698e1
JA
8037static int io_register_personality(struct io_ring_ctx *ctx)
8038{
8039 const struct cred *creds = get_current_cred();
8040 int id;
8041
8042 id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
8043 USHRT_MAX, GFP_KERNEL);
8044 if (id < 0)
8045 put_cred(creds);
8046 return id;
8047}
8048
8049static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
8050{
8051 const struct cred *old_creds;
8052
8053 old_creds = idr_remove(&ctx->personality_idr, id);
8054 if (old_creds) {
8055 put_cred(old_creds);
8056 return 0;
8057 }
8058
8059 return -EINVAL;
8060}
8061
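/*
 * Personalities let credentials be captured once and reused later:
 * IORING_REGISTER_PERSONALITY stashes the caller's current creds and
 * returns the id allocated above, and an SQE whose personality field is
 * set to that id runs with those creds rather than the submitter's.
 * IORING_UNREGISTER_PERSONALITY drops the stashed cred again.
 */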
8062static bool io_register_op_must_quiesce(int op)
8063{
8064 switch (op) {
8065 case IORING_UNREGISTER_FILES:
8066 case IORING_REGISTER_FILES_UPDATE:
8067 case IORING_REGISTER_PROBE:
8068 case IORING_REGISTER_PERSONALITY:
8069 case IORING_UNREGISTER_PERSONALITY:
8070 return false;
8071 default:
8072 return true;
8073 }
8074}
8075
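/*
 * Every opcode not listed above modifies state that in-flight requests
 * may be reading without further locking, so __io_uring_register()
 * quiesces the ring first: it kills the percpu ref, waits for it to
 * drain, and reinitialises it once the update is done.
 */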
edafccee
JA
8076static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
8077 void __user *arg, unsigned nr_args)
b19062a5
JA
8078 __releases(ctx->uring_lock)
8079 __acquires(ctx->uring_lock)
edafccee
JA
8080{
8081 int ret;
8082
35fa71a0
JA
8083 /*
8084	 * We're inside the ring mutex; if the ref is already dying, then
8085	 * someone else has killed the ctx or is already going through
8086 * io_uring_register().
8087 */
8088 if (percpu_ref_is_dying(&ctx->refs))
8089 return -ENXIO;
8090
071698e1 8091 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 8092 percpu_ref_kill(&ctx->refs);
b19062a5 8093
05f3fb3c
JA
8094 /*
8095 * Drop uring mutex before waiting for references to exit. If
8096 * another thread is currently inside io_uring_enter() it might
8097 * need to grab the uring_lock to make progress. If we hold it
8098 * here across the drain wait, then we can deadlock. It's safe
8099 * to drop the mutex here, since no new references will come in
8100 * after we've killed the percpu ref.
8101 */
8102 mutex_unlock(&ctx->uring_lock);
0f158b4c 8103 ret = wait_for_completion_interruptible(&ctx->ref_comp);
05f3fb3c 8104 mutex_lock(&ctx->uring_lock);
c150368b
JA
8105 if (ret) {
8106 percpu_ref_resurrect(&ctx->refs);
8107 ret = -EINTR;
8108 goto out;
8109 }
05f3fb3c 8110 }
edafccee
JA
8111
8112 switch (opcode) {
8113 case IORING_REGISTER_BUFFERS:
8114 ret = io_sqe_buffer_register(ctx, arg, nr_args);
8115 break;
8116 case IORING_UNREGISTER_BUFFERS:
8117 ret = -EINVAL;
8118 if (arg || nr_args)
8119 break;
8120 ret = io_sqe_buffer_unregister(ctx);
8121 break;
6b06314c
JA
8122 case IORING_REGISTER_FILES:
8123 ret = io_sqe_files_register(ctx, arg, nr_args);
8124 break;
8125 case IORING_UNREGISTER_FILES:
8126 ret = -EINVAL;
8127 if (arg || nr_args)
8128 break;
8129 ret = io_sqe_files_unregister(ctx);
8130 break;
c3a31e60
JA
8131 case IORING_REGISTER_FILES_UPDATE:
8132 ret = io_sqe_files_update(ctx, arg, nr_args);
8133 break;
9b402849 8134 case IORING_REGISTER_EVENTFD:
f2842ab5 8135 case IORING_REGISTER_EVENTFD_ASYNC:
9b402849
JA
8136 ret = -EINVAL;
8137 if (nr_args != 1)
8138 break;
8139 ret = io_eventfd_register(ctx, arg);
f2842ab5
JA
8140 if (ret)
8141 break;
8142 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
8143 ctx->eventfd_async = 1;
8144 else
8145 ctx->eventfd_async = 0;
9b402849
JA
8146 break;
8147 case IORING_UNREGISTER_EVENTFD:
8148 ret = -EINVAL;
8149 if (arg || nr_args)
8150 break;
8151 ret = io_eventfd_unregister(ctx);
8152 break;
66f4af93
JA
8153 case IORING_REGISTER_PROBE:
8154 ret = -EINVAL;
8155 if (!arg || nr_args > 256)
8156 break;
8157 ret = io_probe(ctx, arg, nr_args);
8158 break;
071698e1
JA
8159 case IORING_REGISTER_PERSONALITY:
8160 ret = -EINVAL;
8161 if (arg || nr_args)
8162 break;
8163 ret = io_register_personality(ctx);
8164 break;
8165 case IORING_UNREGISTER_PERSONALITY:
8166 ret = -EINVAL;
8167 if (arg)
8168 break;
8169 ret = io_unregister_personality(ctx, nr_args);
8170 break;
edafccee
JA
8171 default:
8172 ret = -EINVAL;
8173 break;
8174 }
8175
071698e1 8176 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 8177 /* bring the ctx back to life */
05f3fb3c 8178 percpu_ref_reinit(&ctx->refs);
c150368b 8179out:
0f158b4c 8180 reinit_completion(&ctx->ref_comp);
05f3fb3c 8181 }
edafccee
JA
8182 return ret;
8183}
8184
8185SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
8186 void __user *, arg, unsigned int, nr_args)
8187{
8188 struct io_ring_ctx *ctx;
8189 long ret = -EBADF;
8190 struct fd f;
8191
8192 f = fdget(fd);
8193 if (!f.file)
8194 return -EBADF;
8195
8196 ret = -EOPNOTSUPP;
8197 if (f.file->f_op != &io_uring_fops)
8198 goto out_fput;
8199
8200 ctx = f.file->private_data;
8201
8202 mutex_lock(&ctx->uring_lock);
8203 ret = __io_uring_register(ctx, opcode, arg, nr_args);
8204 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
8205 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
8206 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
8207out_fput:
8208 fdput(f);
8209 return ret;
8210}
8211
2b188cc1
JA
8212static int __init io_uring_init(void)
8213{
d7f62e82
SM
8214#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
8215 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
8216 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
8217} while (0)
8218
8219#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
8220 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
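	/*
	 * e.g. BUILD_BUG_SQE_ELEM(0, __u8, opcode) fails the build unless
	 * io_uring_sqe.opcode is a __u8 at offset 0, keeping the
	 * userspace-visible SQE layout from drifting.
	 */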
8221 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
8222 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
8223 BUILD_BUG_SQE_ELEM(1, __u8, flags);
8224 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
8225 BUILD_BUG_SQE_ELEM(4, __s32, fd);
8226 BUILD_BUG_SQE_ELEM(8, __u64, off);
8227 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
8228 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 8229 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
8230 BUILD_BUG_SQE_ELEM(24, __u32, len);
8231 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
8232 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
8233 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
8234 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
8235 BUILD_BUG_SQE_ELEM(28, __u16, poll_events);
8236 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
8237 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
8238 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
8239 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
8240 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
8241 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
8242 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
8243 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 8244 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
8245 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
8246 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
8247 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 8248 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
d7f62e82 8249
d3656344 8250 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
84557871 8251 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
2b188cc1
JA
8252 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
8253 return 0;
8254};
8255__initcall(io_uring_init);