// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
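
/*
 * Illustrative only, not part of the kernel logic: a minimal sketch of the
 * application-side CQ reaping loop that the rules above imply, assuming
 * liburing-style mmap'ed ring pointers (khead, ktail, mask and cqes are
 * hypothetical names for the mapped ring fields):
 *
 *	unsigned head = *khead;
 *
 *	while (head != smp_load_acquire(ktail)) {
 *		struct io_uring_cqe *cqe = &cqes[head++ & mask];
 *
 *		... consume cqe->user_data, cqe->res, cqe->flags ...
 *	}
 *	smp_store_release(khead, head);
 */
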
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

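/*
 * Illustrative only: the "compare to cached value" pattern described in the
 * comments above, e.g. for sq_dropped (app_cached_sq_dropped is a
 * hypothetical counter kept by the application):
 *
 *	u32 new_drops = READ_ONCE(rings->sq_dropped) - app_cached_sq_dropped;
 *	app_cached_sq_dropped += new_drops;
 */
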
struct io_mapped_ubuf {
	u64			ubuf;
	size_t			len;
	struct bio_vec		*bvec;
	unsigned int		nr_bvecs;
};

struct fixed_file_table {
	struct file		**files;
};

struct fixed_file_ref_node {
	struct percpu_ref	refs;
	struct list_head	node;
	struct list_head	file_list;
	struct fixed_file_data	*file_data;
	struct llist_node	llist;
};

struct fixed_file_data {
	struct fixed_file_table	*table;
	struct io_ring_ctx	*ctx;

	struct percpu_ref	*cur_refs;
	struct percpu_ref	refs;
	struct completion	done;
	struct list_head	ref_list;
	spinlock_t		lock;
};

struct io_buffer {
	struct list_head	list;
	__u64			addr;
	__s32			len;
	__u16			bid;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		limit_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		atomic_t		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		wait_queue_head_t	inflight_wait;
		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data	*file_data;
	unsigned		nr_user_files;
	int			ring_fd;
	struct file		*ring_file;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

	struct completion	ref_comp;
	struct completion	sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		io_buffer_idr;

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work	file_put_work;
	struct llist_head	file_put_llist;

	struct work_struct	exit_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	union {
		struct wait_queue_head	*head;
		u64			addr;
	};
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_close {
	struct file			*file;
	struct file			*put_file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u64				addr;
	int				flags;
	u32				off;
	u32				target_seq;
	struct list_head		list;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *umsg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_files_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	int				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	ssize_t				nr_segs;
	ssize_t				size;
	struct wait_page_queue		wpq;
};

struct io_async_ctx {
	union {
		struct io_async_rw	rw;
		struct io_async_msghdr	msg;
		struct io_async_connect	connect;
		struct io_timeout_data	timeout;
	};
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_HEAD_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_COMP_LOCKED_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_OVERFLOW_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_WORK_INITIALIZED_BIT,
	REQ_F_TASK_PINNED_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* head of a link */
	REQ_F_LINK_HEAD		= BIT(REQ_F_LINK_HEAD_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* completion under lock */
	REQ_F_COMP_LOCKED	= BIT(REQ_F_COMP_LOCKED_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* in overflow list */
	REQ_F_OVERFLOW		= BIT(REQ_F_OVERFLOW_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
	/* req->task is refcounted */
	REQ_F_TASK_PINNED	= BIT(REQ_F_TASK_PINNED_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
	struct io_wq_work	work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_files_update	files_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	struct io_async_ctx		*io;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	refcount_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct list_head		link_list;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;

	struct percpu_ref		*fixed_file_refs;

	union {
		/*
		 * Only commands that never go async can use the below fields,
		 * obviously. Right now only IORING_OP_POLL_ADD uses them, and
		 * async armed poll handlers for regular commands. The latter
		 * restore the work, if needed.
		 */
		struct {
			struct hlist_node	hash_node;
			struct async_poll	*apoll;
		};
		struct io_wq_work	work;
	};
	struct callback_head	task_work;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

#define IO_IOPOLL_BATCH			8

struct io_comp_state {
	unsigned int		nr;
	struct list_head	list;
	struct io_ring_ctx	*ctx;
};

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		ios_left;
};

struct io_op_def {
	/* needs req->io allocated for deferral/async */
	unsigned		async_ctx : 1;
	/* needs current->mm setup, does mm access */
	unsigned		needs_mm : 1;
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* don't fail if file grab fails */
	unsigned		needs_file_no_error : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* needs file table */
	unsigned		file_table : 1;
	/* needs ->fs */
	unsigned		needs_fs : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	unsigned		needs_fsize : 1;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_WRITEV] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_fsize		= 1,
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_fsize		= 1,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.needs_fs		= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECVMSG] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.needs_fs		= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_TIMEOUT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.file_table		= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
	},
	[IORING_OP_CONNECT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
		.needs_fsize		= 1,
	},
	[IORING_OP_OPENAT] = {
		.file_table		= 1,
		.needs_fs		= 1,
	},
	[IORING_OP_CLOSE] = {
		.needs_file		= 1,
		.needs_file_no_error	= 1,
		.file_table		= 1,
	},
	[IORING_OP_FILES_UPDATE] = {
		.needs_mm		= 1,
		.file_table		= 1,
	},
	[IORING_OP_STATX] = {
		.needs_mm		= 1,
		.needs_fs		= 1,
		.file_table		= 1,
	},
	[IORING_OP_READ] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_WRITE] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_fsize		= 1,
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {
		.needs_mm		= 1,
	},
	[IORING_OP_SEND] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
		.file_table		= 1,
		.needs_fs		= 1,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
		.file_table		= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
};

enum io_mem_account {
	ACCT_LOCKED,
	ACCT_PINNED,
};

static bool io_rw_reissue(struct io_kiocb *req, long res);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_double_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_complete_rw_common(struct kiocb *kiocb, long res,
				  struct io_comp_state *cs);
static void __io_clean_op(struct io_kiocb *req);
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
		       int fd, struct file **out_file, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe,
			   struct io_comp_state *cs);
static void io_file_put_work(struct work_struct *work);

static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter,
			       bool needs_lock);
static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
			     struct iovec *iovec, struct iovec *fast_iov,
			     struct iov_iter *iter);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_get_req_task(struct io_kiocb *req)
{
	if (req->flags & REQ_F_TASK_PINNED)
		return;
	get_task_struct(req->task);
	req->flags |= REQ_F_TASK_PINNED;
}

static inline void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
		__io_clean_op(req);
}

/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
static void __io_put_req_task(struct io_kiocb *req)
{
	if (req->flags & REQ_F_TASK_PINNED)
		put_task_struct(req->task);
}

static void io_sq_thread_drop_mm(void)
{
	struct mm_struct *mm = current->mm;

	if (mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}

static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
	if (!current->mm) {
		if (unlikely(!ctx->sqo_mm || !mmget_not_zero(ctx->sqo_mm)))
			return -EFAULT;
		kthread_use_mm(ctx->sqo_mm);
	}

	return 0;
}

static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].needs_mm)
		return 0;
	return __io_sq_thread_acquire_mm(ctx);
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}

/*
 * Note: must call io_req_init_async() for the first time you
 * touch any members of io_wq_work.
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;

	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}

static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_SQPOLL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_wait);
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	init_completion(&ctx->sq_thread_comp);
	idr_init(&ctx->io_buffer_idr);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
	init_llist_head(&ctx->file_put_llist);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

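/*
 * For a REQ_F_IO_DRAIN request: returns true if it still has to wait, i.e.
 * the completion count has not yet caught up with the request's sequence.
 */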
static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq != ctx->cached_cq_tail
				+ atomic_read(&ctx->cached_cq_overflow);
	}

	return false;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* order cqe stores with ring update */
	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

	if (wq_has_sleeper(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

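/* drop the mm, creds and fs references that were grabbed for async punt */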
static void io_req_clean_work(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_WORK_INITIALIZED))
		return;

	if (req->work.mm) {
		mmdrop(req->work.mm);
		req->work.mm = NULL;
	}
	if (req->work.creds) {
		put_cred(req->work.creds);
		req->work.creds = NULL;
	}
	if (req->work.fs) {
		struct fs_struct *fs = req->work.fs;

		spin_lock(&req->work.fs->lock);
		if (--fs->users)
			fs = NULL;
		spin_unlock(&req->work.fs->lock);
		if (fs)
			free_fs_struct(fs);
	}
}

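/*
 * Prepare a request for handoff to io-wq: record the mm, creds, fs and
 * RLIMIT_FSIZE the worker should use, and mark hashed/unbound work.
 */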
static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	io_req_init_async(req);

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file)
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}
	if (!req->work.mm && def->needs_mm) {
		mmgrab(current->mm);
		req->work.mm = current->mm;
	}
	if (!req->work.creds)
		req->work.creds = get_current_cred();
	if (!req->work.fs && def->needs_fs) {
		spin_lock(&current->fs->lock);
		if (!current->fs->in_exec) {
			req->work.fs = current->fs;
			req->work.fs->users++;
		} else {
			req->work.flags |= IO_WQ_WORK_CANCEL;
		}
		spin_unlock(&current->fs->lock);
	}
	if (def->needs_fsize)
		req->work.fsize = rlimit(RLIMIT_FSIZE);
	else
		req->work.fsize = RLIM_INFINITY;
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	io_prep_async_work(req);
	if (req->flags & REQ_F_LINK_HEAD)
		list_for_each_entry(cur, &req->link_list, link_list)
			io_prep_async_work(cur);
}

static void __io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link = io_prep_linked_timeout(req);

	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(ctx->io_wq, &req->work);

	if (link)
		io_queue_linked_timeout(link);
}

static void io_queue_async_work(struct io_kiocb *req)
{
	/* init ->work of the whole link before punting */
	io_prep_async_link(req);
	__io_queue_async_work(req);
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->timeout.list);
		req->flags |= REQ_F_COMP_LOCKED;
		io_cqring_fill_event(req, 0);
		io_put_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
	do {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		/* punt-init is done before queueing for defer */
		__io_queue_async_work(de->req);
		kfree(de);
	} while (!list_empty(&ctx->defer_list));
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->timeout_list)) {
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;
		if (req->timeout.target_seq != ctx->cached_cq_tail
					- atomic_read(&ctx->cq_timeouts))
			break;

		list_del_init(&req->timeout.list);
		io_kill_timeout(req);
	}
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	io_flush_timeouts(ctx);
	__io_commit_cqring(ctx);

	if (unlikely(!list_empty(&ctx->defer_list)))
		__io_queue_deferred(ctx);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	struct io_rings *rings = ctx->rings;
	struct io_uring_cqe *cqe;
	struct io_kiocb *req;
	unsigned long flags;
	LIST_HEAD(list);

	if (!force) {
		if (list_empty_careful(&ctx->cq_overflow_list))
			return true;
		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries))
			return false;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/* if force is set, the ring is going away. always drop after that */
	if (force)
		ctx->cq_overflow_flushed = 1;

	cqe = NULL;
	while (!list_empty(&ctx->cq_overflow_list)) {
		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
						compl.list);
		list_move(&req->compl.list, &list);
		req->flags &= ~REQ_F_OVERFLOW;
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->compl.cflags);
		} else {
			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
		}
	}

	io_commit_cqring(ctx);
	if (cqe) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
	}
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, compl.list);
		list_del(&req->compl.list);
		io_put_req(req);
	}

	return cqe != NULL;
}

static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed) {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
			ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
		}
		io_clean_op(req);
		req->flags |= REQ_F_OVERFLOW;
		req->result = res;
		req->compl.cflags = cflags;
		refcount_inc(&req->refs);
		list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static void io_submit_flush_completions(struct io_comp_state *cs)
{
	struct io_ring_ctx *ctx = cs->ctx;

	spin_lock_irq(&ctx->completion_lock);
	while (!list_empty(&cs->list)) {
		struct io_kiocb *req;

		req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
		list_del(&req->compl.list);
		__io_cqring_fill_event(req, req->result, req->compl.cflags);
		if (!(req->flags & REQ_F_LINK_HEAD)) {
			req->flags |= REQ_F_COMP_LOCKED;
			io_put_req(req);
		} else {
			spin_unlock_irq(&ctx->completion_lock);
			io_put_req(req);
			spin_lock_irq(&ctx->completion_lock);
		}
	}
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	cs->nr = 0;
}

static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
			      struct io_comp_state *cs)
{
	if (!cs) {
		io_cqring_add_event(req, res, cflags);
		io_put_req(req);
	} else {
		io_clean_op(req);
		req->result = res;
		req->compl.cflags = cflags;
		list_add_tail(&req->compl.list, &cs->list);
		if (++cs->nr >= 32)
			io_submit_flush_completions(cs);
	}
}

static void io_req_complete(struct io_kiocb *req, long res)
{
	__io_req_complete(req, res, 0, NULL);
}

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
			((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = ctx->fallback_req;
	if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
		return req;

	return NULL;
}

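/*
 * Allocate a request, bulk-refilling the submit-state cache when it runs
 * empty and falling back to the pre-allocated ctx->fallback_req as a last
 * resort.
 */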
static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
				     struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto fallback;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		req = state->reqs[ret - 1];
	} else {
		state->free_reqs--;
		req = state->reqs[state->free_reqs];
	}

	return req;
fallback:
	return io_get_fallback_req(ctx);
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			       bool fixed)
{
	if (fixed)
		percpu_ref_put(req->fixed_file_refs);
	else
		fput(file);
}

static void io_dismantle_req(struct io_kiocb *req)
{
	io_clean_op(req);

	if (req->io)
		kfree(req->io);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
	__io_put_req_task(req);
	io_req_clean_work(req);

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}
}

static void __io_free_req(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx;

	io_dismantle_req(req);
	ctx = req->ctx;
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	else
		clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
	percpu_ref_put(&ctx->refs);
}

static bool io_link_cancel_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
		req->flags &= ~REQ_F_LINK_HEAD;
		io_put_req(req);
		return true;
	}

	return false;
}

static bool __io_kill_linked_timeout(struct io_kiocb *req)
{
	struct io_kiocb *link;
	bool wake_ev;

	if (list_empty(&req->link_list))
		return false;
	link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
	if (link->opcode != IORING_OP_LINK_TIMEOUT)
		return false;

	list_del_init(&link->link_list);
	wake_ev = io_link_cancel_timeout(link);
	req->flags &= ~REQ_F_LINK_TIMEOUT;
	return wake_ev;
}

static void io_kill_linked_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool wake_ev;

	if (!(req->flags & REQ_F_COMP_LOCKED)) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->completion_lock, flags);
		wake_ev = __io_kill_linked_timeout(req);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		wake_ev = __io_kill_linked_timeout(req);
	}

	if (wake_ev)
		io_cqring_ev_posted(ctx);
}

static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * The list should never be empty when we are called here. But could
	 * potentially happen if the chain is messed up, check to be on the
	 * safe side.
	 */
	if (unlikely(list_empty(&req->link_list)))
		return NULL;

	nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
	list_del_init(&req->link_list);
	if (!list_empty(&nxt->link_list))
		nxt->flags |= REQ_F_LINK_HEAD;
	return nxt;
}

/*
 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
 */
static void __io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	while (!list_empty(&req->link_list)) {
		struct io_kiocb *link = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		list_del_init(&link->link_list);
		trace_io_uring_fail_link(req, link);

		io_cqring_fill_event(link, -ECANCELED);
		__io_double_put_req(link);
		req->flags &= ~REQ_F_LINK_TIMEOUT;
	}

	io_commit_cqring(ctx);
	io_cqring_ev_posted(ctx);
}

static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_COMP_LOCKED)) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->completion_lock, flags);
		__io_fail_links(req);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		__io_fail_links(req);
	}

	io_cqring_ev_posted(ctx);
}

static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
{
	req->flags &= ~REQ_F_LINK_HEAD;
	if (req->flags & REQ_F_LINK_TIMEOUT)
		io_kill_linked_timeout(req);

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (likely(!(req->flags & REQ_F_FAIL_LINK)))
		return io_req_link_next(req);
	io_fail_links(req);
	return NULL;
}

static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_LINK_HEAD)))
		return NULL;
	return __io_req_find_next(req);
}

static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
{
	struct task_struct *tsk = req->task;
	struct io_ring_ctx *ctx = req->ctx;
	int ret, notify = TWA_RESUME;

	/*
	 * SQPOLL kernel thread doesn't need notification, just a wakeup.
	 * If we're not using an eventfd, then TWA_RESUME is always fine,
	 * as we won't have dependencies between request completions for
	 * other kernel wait conditions.
	 */
	if (ctx->flags & IORING_SETUP_SQPOLL)
		notify = 0;
	else if (ctx->cq_ev_fd)
		notify = TWA_SIGNAL;

	ret = task_work_add(tsk, cb, notify);
	if (!ret)
		wake_up_process(tsk);
	return ret;
}

static void __io_req_task_cancel(struct io_kiocb *req, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->completion_lock);
	io_cqring_fill_event(req, error);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	req_set_fail_links(req);
	io_double_put_req(req);
}

static void io_req_task_cancel(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	__io_req_task_cancel(req, -ECANCELED);
}

static void __io_req_task_submit(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!__io_sq_thread_acquire_mm(ctx)) {
		mutex_lock(&ctx->uring_lock);
		__io_queue_sqe(req, NULL, NULL);
		mutex_unlock(&ctx->uring_lock);
	} else {
		__io_req_task_cancel(req, -EFAULT);
	}
}

static void io_req_task_submit(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	__io_req_task_submit(req);
}

static void io_req_task_queue(struct io_kiocb *req)
{
	int ret;

	init_task_work(&req->task_work, io_req_task_submit);

	ret = io_req_task_work_add(req, &req->task_work);
	if (unlikely(ret)) {
		struct task_struct *tsk;

		init_task_work(&req->task_work, io_req_task_cancel);
		tsk = io_wq_get_task(req->ctx->io_wq);
		task_work_add(tsk, &req->task_work, 0);
		wake_up_process(tsk);
	}
}

static void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

static void io_free_req(struct io_kiocb *req)
{
	io_queue_next(req);
	__io_free_req(req);
}

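/* batch up requests so the cache and ctx refs are released in bulk */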
struct req_batch {
	void *reqs[IO_IOPOLL_BATCH];
	int to_free;
};

static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
				      struct req_batch *rb)
{
	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
	percpu_ref_put_many(&ctx->refs, rb->to_free);
	rb->to_free = 0;
}

static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
				     struct req_batch *rb)
{
	if (rb->to_free)
		__io_req_free_batch_flush(ctx, rb);
}

static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
{
	if (unlikely(io_is_fallback_req(req))) {
		io_free_req(req);
		return;
	}
	if (req->flags & REQ_F_LINK_HEAD)
		io_queue_next(req);

	io_dismantle_req(req);
	rb->reqs[rb->to_free++] = req;
	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
		__io_req_free_batch_flush(req->ctx, rb);
}

ba816ad6
JA
1841/*
1842 * Drop reference to request, return next in chain (if there is one) if this
1843 * was the last reference to this request.
1844 */
9b5f7bd9 1845static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
e65ef56d 1846{
9b5f7bd9
PB
1847 struct io_kiocb *nxt = NULL;
1848
2a44f467 1849 if (refcount_dec_and_test(&req->refs)) {
9b5f7bd9 1850 nxt = io_req_find_next(req);
4d7dd462 1851 __io_free_req(req);
2a44f467 1852 }
9b5f7bd9 1853 return nxt;
2b188cc1
JA
1854}
1855
e65ef56d
JA
1856static void io_put_req(struct io_kiocb *req)
1857{
1858 if (refcount_dec_and_test(&req->refs))
1859 io_free_req(req);
2b188cc1
JA
1860}
1861
static struct io_wq_work *io_steal_work(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * A ref is owned by io-wq, in whose context we're running. So, if
	 * that's the last one, it's safe to steal the next work. False
	 * negatives are OK; the work will just be re-punted async in
	 * io_put_work().
	 */
	if (refcount_read(&req->refs) != 1)
		return NULL;

	nxt = io_req_find_next(req);
	return nxt ? &nxt->work : NULL;
}

/*
 * Must only be used if we don't need to care about links, usually from
 * within the completion handling itself.
 */
static void __io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
	struct io_rings *rings = ctx->rings;

	if (test_bit(0, &ctx->cq_check_overflow)) {
		/*
		 * noflush == true is from the waitqueue handler, just ensure
		 * we wake up the task, and the next invocation will flush the
		 * entries. We cannot safely do it from here.
		 */
		if (noflush && !list_empty(&ctx->cq_overflow_list))
			return -1U;

		io_cqring_overflow_flush(ctx, false);
	}

	/* See comment at the top of this file */
	smp_rmb();
	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
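
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * userspace producer side that the smp_load_acquire() above pairs with.
 * The SQ entry must be visible before the tail store that publishes it:
 *
 *	sqes[tail & sq_ring_mask] = sqe;		// fill the entry first
 *	smp_store_release(sq_tail_ptr, tail + 1);	// then publish it
 */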

static int io_put_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf;
	int cflags;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->rw.addr = 0;
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	kfree(kbuf);
	return cflags;
}
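
/*
 * Userspace sketch (an assumption): recovering the provided-buffer id
 * from the completion flags encoded above:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */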

static inline bool io_run_task_work(void)
{
	if (current->task_works) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return true;
	}

	return false;
}

static void io_iopoll_queue(struct list_head *again)
{
	struct io_kiocb *req;

	do {
		req = list_first_entry(again, struct io_kiocb, inflight_entry);
		list_del(&req->inflight_entry);
		if (!io_rw_reissue(req, -EAGAIN))
			io_complete_rw_common(&req->rw.kiocb, -EAGAIN, NULL);
	} while (!list_empty(again));
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	struct req_batch rb;
	struct io_kiocb *req;
	LIST_HEAD(again);

	/* order with ->result store in io_complete_rw_iopoll() */
	smp_rmb();

	rb.to_free = 0;
	while (!list_empty(done)) {
		int cflags = 0;

		req = list_first_entry(done, struct io_kiocb, inflight_entry);
		if (READ_ONCE(req->result) == -EAGAIN) {
			req->iopoll_completed = 0;
			list_move_tail(&req->inflight_entry, &again);
			continue;
		}
		list_del(&req->inflight_entry);

		if (req->flags & REQ_F_BUFFER_SELECTED)
			cflags = io_put_kbuf(req);

		__io_cqring_fill_event(req, req->result, cflags);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs))
			io_req_free_batch(&rb, req);
	}

	io_commit_cqring(ctx);
	if (ctx->flags & IORING_SETUP_SQPOLL)
		io_cqring_ev_posted(ctx);
	io_req_free_batch_finish(ctx, &rb);

	if (!list_empty(&again))
		io_iopoll_queue(&again);
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
		struct kiocb *kiocb = &req->rw.kiocb;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed)) {
			list_move_tail(&req->inflight_entry, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		/* iopoll may have completed current req */
		if (READ_ONCE(req->iopoll_completed))
			list_move_tail(&req->inflight_entry, &done);

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       long min)
{
	while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (*nr_events >= min)
			return 0;
	}

	return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->iopoll_list)) {
		unsigned int nr_events = 0;

		io_do_iopoll(ctx, &nr_events, 0);

		/* let it sleep and repeat later if it can't complete a request */
		if (nr_events == 0)
			break;
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. progress by releasing the mutex.
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	int iters = 0, ret = 0;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	do {
		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (io_cqring_events(ctx, false))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			io_run_task_work();
			mutex_lock(&ctx->uring_lock);
		}

		ret = io_iopoll_getevents(ctx, &nr_events, min);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !nr_events && !need_resched());

	mutex_unlock(&ctx->uring_lock);
	return ret;
}
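
/*
 * Note on the breather above (illustrative): "!(++iters & 7)" is the
 * usual power-of-two remainder trick, true on every 8th iteration and
 * equivalent to "++iters % 8 == 0" without the division:
 *
 *	iters = 1 ... 7		-> (iters & 7) != 0, keep polling
 *	iters = 8, 16, 24, ...	-> (iters & 7) == 0, drop the lock
 */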

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

static void io_complete_rw_common(struct kiocb *kiocb, long res,
				  struct io_comp_state *cs)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
	int cflags = 0;

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != req->result)
		req_set_fail_links(req);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_kbuf(req);
	__io_req_complete(req, res, cflags, cs);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req, int error)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	ssize_t ret = -ECANCELED;
	struct iov_iter iter;
	int rw;

	if (error) {
		ret = error;
		goto end_req;
	}

	switch (req->opcode) {
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		rw = READ;
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		rw = WRITE;
		break;
	default:
		printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
				req->opcode);
		goto end_req;
	}

	ret = io_import_iovec(rw, req, &iovec, &iter, false);
	if (ret < 0)
		goto end_req;
	ret = io_setup_async_rw(req, ret, iovec, inline_vecs, &iter);
	if (!ret)
		return true;
	kfree(iovec);
end_req:
	req_set_fail_links(req);
	io_req_complete(req, ret);
	return false;
}

static void io_rw_resubmit(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;
	int err;

	err = io_sq_thread_acquire_mm(ctx, req);

	if (io_resubmit_prep(req, err)) {
		refcount_inc(&req->refs);
		io_queue_async_work(req);
	}
}
#endif

static bool io_rw_reissue(struct io_kiocb *req, long res)
{
#ifdef CONFIG_BLOCK
	int ret;

	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
		return false;

	init_task_work(&req->task_work, io_rw_resubmit);
	ret = io_req_task_work_add(req, &req->task_work);
	if (!ret)
		return true;
#endif
	return false;
}

static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     struct io_comp_state *cs)
{
	if (!io_rw_reissue(req, res))
		io_complete_rw_common(&req->rw.kiocb, res, cs);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	__io_complete_rw(req, res, res2, NULL);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != -EAGAIN && res != req->result)
		req_set_fail_links(req);

	WRITE_ONCE(req->result, res);
	/* order with io_poll_complete() checking ->result */
	smp_wmb();
	WRITE_ONCE(req->iopoll_completed, 1);
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
						inflight_entry);
		if (list_req->file != req->file)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		list_add(&req->inflight_entry, &ctx->iopoll_list);
	else
		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);

	if ((ctx->flags & IORING_SETUP_SQPOLL) &&
	    wq_has_sleeper(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
}

static void __io_state_file_put(struct io_submit_state *state)
{
	if (state->has_refs)
		fput_many(state->file, state->has_refs);
	state->file = NULL;
}

static inline void io_state_file_put(struct io_submit_state *state)
{
	if (state->file)
		__io_state_file_put(state);
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *__io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file) {
		if (state->fd == fd) {
			state->has_refs--;
			state->ios_left--;
			return state->file;
		}
		__io_state_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (!state->file)
		return NULL;

	state->fd = fd;
	state->ios_left--;
	state->has_refs = state->ios_left;
	return state->file;
}
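
/*
 * Illustrative trace (an assumption): three submissions against the same
 * fd with state->ios_left == 3 touch the file refcount only once:
 *
 *	__io_file_get(state, fd);	// fget_many(fd, 3), has_refs = 2
 *	__io_file_get(state, fd);	// cached, has_refs 2 -> 1
 *	__io_file_get(state, fd);	// cached, has_refs 1 -> 0
 *
 * Each call hands one of the pre-grabbed references to a request, so no
 * further fget()/fput() pairs are needed for that file.
 */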

static bool io_bdev_nowait(struct block_device *bdev)
{
#ifdef CONFIG_BLOCK
	return !bdev || queue_is_mq(bdev_get_queue(bdev));
#else
	return true;
#endif
}

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file, int rw)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode)) {
		if (io_bdev_nowait(file->f_inode->i_bdev))
			return true;
		return false;
	}
	if (S_ISCHR(mode) || S_ISSOCK(mode))
		return true;
	if (S_ISREG(mode)) {
		if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
		    file->f_op != &io_uring_fops)
			return true;
		return false;
	}

	/* any ->read/write should understand O_NONBLOCK */
	if (file->f_flags & O_NONBLOCK)
		return true;

	if (!(file->f_mode & FMODE_NOWAIT))
		return false;

	if (rw == READ)
		return file->f_op->read_iter != NULL;

	return file->f_op->write_iter != NULL;
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      bool force_nonblock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw.kiocb;
	unsigned ioprio;
	int ret;

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	kiocb->ki_pos = READ_ONCE(sqe->off);
	if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		kiocb->ki_pos = req->file->f_pos;
	}
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	/* don't allow async punt if RWF_NOWAIT was requested */
	if (kiocb->ki_flags & IOCB_NOWAIT)
		req->flags |= REQ_F_NOWAIT;

	if (kiocb->ki_flags & IOCB_DIRECT)
		io_get_req_task(req);

	if (force_nonblock)
		kiocb->ki_flags |= IOCB_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
		io_get_req_task(req);
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	req->rw.addr = READ_ONCE(sqe->addr);
	req->rw.len = READ_ONCE(sqe->len);
	req->buf_index = READ_ONCE(sqe->buf_index);
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
		       struct io_comp_state *cs)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = kiocb->ki_pos;
	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
		__io_complete_rw(req, ret, 0, cs);
	else
		io_rw_done(kiocb, ret);
}

static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
			       struct iov_iter *iter)
{
	struct io_ring_ctx *ctx = req->ctx;
	size_t len = req->rw.len;
	struct io_mapped_ubuf *imu;
	u16 index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = req->buf_index;
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = req->rw.addr;

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or covers the whole
		 * first bvec), just use iov_iter_advance(). This makes it
		 * easier since we can just skip the first segment, which may
		 * not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return len;
}
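
/*
 * Worked example for the seg_skip math above (illustrative, assuming 4K
 * pages and a 4096-byte first bvec) with offset = 10000:
 *
 *	offset -= 4096;			// 5904 left after bvec 0
 *	seg_skip = 1 + (5904 >> 12);	// == 2: bvec 0 plus one full page
 *	iter->iov_offset = 5904 & ~PAGE_MASK;	// 1808 bytes into bvec 2
 */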

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}

static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
					  int bgid, struct io_buffer *kbuf,
					  bool needs_lock)
{
	struct io_buffer *head;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		return kbuf;

	io_ring_submit_lock(req->ctx, needs_lock);

	lockdep_assert_held(&req->ctx->uring_lock);

	head = idr_find(&req->ctx->io_buffer_idr, bgid);
	if (head) {
		if (!list_empty(&head->list)) {
			kbuf = list_last_entry(&head->list, struct io_buffer,
							list);
			list_del(&kbuf->list);
		} else {
			kbuf = head;
			idr_remove(&req->ctx->io_buffer_idr, bgid);
		}
		if (*len > kbuf->len)
			*len = kbuf->len;
	} else {
		kbuf = ERR_PTR(-ENOBUFS);
	}

	io_ring_submit_unlock(req->ctx, needs_lock);

	return kbuf;
}

static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
					bool needs_lock)
{
	struct io_buffer *kbuf;
	u16 bgid;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	bgid = req->buf_index;
	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;
	req->rw.addr = (u64) (unsigned long) kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return u64_to_user_ptr(kbuf->addr);
}

#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
				bool needs_lock)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;
	void __user *buf;
	ssize_t len;

	uiov = u64_to_user_ptr(req->rw.addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	len = clen;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = (compat_size_t) len;
	return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				      bool needs_lock)
{
	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
	void __user *buf;
	ssize_t len;

	if (copy_from_user(iov, uiov, sizeof(*uiov)))
		return -EFAULT;

	len = iov[0].iov_len;
	if (len < 0)
		return -EINVAL;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = len;
	return 0;
}

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				    bool needs_lock)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		struct io_buffer *kbuf;

		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		iov[0].iov_len = kbuf->len;
		return 0;
	}
	if (!req->rw.len)
		return 0;
	else if (req->rw.len > 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_compat_import(req, iov, needs_lock);
#endif

	return __io_iov_buffer_select(req, iov, needs_lock);
}

static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter,
			       bool needs_lock)
{
	void __user *buf = u64_to_user_ptr(req->rw.addr);
	size_t sqe_len = req->rw.len;
	ssize_t ret;
	u8 opcode;

	opcode = req->opcode;
	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		*iovec = NULL;
		return io_import_fixed(req, rw, iter);
	}

	/* buffer index only valid with fixed read/write, or buffer select */
	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		if (req->flags & REQ_F_BUFFER_SELECT) {
			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
			if (IS_ERR(buf)) {
				*iovec = NULL;
				return PTR_ERR(buf);
			}
			req->rw.len = sqe_len;
		}

		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
		*iovec = NULL;
		return ret < 0 ? ret : sqe_len;
	}

	if (req->io) {
		struct io_async_rw *iorw = &req->io->rw;

		iov_iter_init(iter, rw, iorw->iov, iorw->nr_segs, iorw->size);
		*iovec = NULL;
		return iorw->size;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select(req, *iovec, needs_lock);
		if (!ret) {
			ret = (*iovec)->iov_len;
			iov_iter_init(iter, rw, *iovec, 1, ret);
		}
		*iovec = NULL;
		return ret;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
						iovec, iter);
#endif

	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
			    struct iov_iter *iter)
{
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			/* fixed buffers import bvec */
			iovec.iov_base = kmap(iter->bvec->bv_page)
						+ iter->iov_offset;
			iovec.iov_len = min(iter->count,
					iter->bvec->bv_len - iter->iov_offset);
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, &kiocb->ki_pos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, &kiocb->ki_pos);
		}

		if (iov_iter_is_bvec(iter))
			kunmap(iter->bvec->bv_page);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}

static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
			  struct iovec *iovec, struct iovec *fast_iov,
			  struct iov_iter *iter)
{
	struct io_async_rw *rw = &req->io->rw;

	rw->nr_segs = iter->nr_segs;
	rw->size = io_size;
	if (!iovec) {
		rw->iov = rw->fast_iov;
		if (rw->iov != fast_iov)
			memcpy(rw->iov, fast_iov,
					sizeof(struct iovec) * iter->nr_segs);
	} else {
		rw->iov = iovec;
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static inline int __io_alloc_async_ctx(struct io_kiocb *req)
{
	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
	return req->io == NULL;
}

static int io_alloc_async_ctx(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].async_ctx)
		return 0;

	return __io_alloc_async_ctx(req);
}

static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
			     struct iovec *iovec, struct iovec *fast_iov,
			     struct iov_iter *iter)
{
	if (!io_op_defs[req->opcode].async_ctx)
		return 0;
	if (!req->io) {
		if (__io_alloc_async_ctx(req))
			return -ENOMEM;

		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
				   bool force_nonblock)
{
	struct io_async_ctx *io = req->io;
	struct iov_iter iter;
	ssize_t ret;

	io->rw.iov = io->rw.fast_iov;
	req->io = NULL;
	ret = io_import_iovec(rw, req, &io->rw.iov, &iter, !force_nonblock);
	req->io = io;
	if (unlikely(ret < 0))
		return ret;

	io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
	return 0;
}

static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			bool force_nonblock)
{
	ssize_t ret;

	ret = io_prep_rw(req, sqe, force_nonblock);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_READ)))
		return -EBADF;

	/* either don't need iovec imported or already have it */
	if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
		return 0;
	return io_rw_prep_async(req, READ, force_nonblock);
}

static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct wait_page_key *key = arg;
	int ret;

	wpq = container_of(wait, struct wait_page_queue, wait);

	ret = wake_page_match(wpq, key);
	if (ret != 1)
		return ret;

	list_del_init(&wait->entry);

	init_task_work(&req->task_work, io_req_task_submit);
	/* submit ref gets dropped, acquire a new one */
	refcount_inc(&req->refs);
	ret = io_req_task_work_add(req, &req->task_work);
	if (unlikely(ret)) {
		struct task_struct *tsk;

		/* queue just for cancelation */
		init_task_work(&req->task_work, io_req_task_cancel);
		tsk = io_wq_get_task(req->ctx->io_wq);
		task_work_add(tsk, &req->task_work, 0);
		wake_up_process(tsk);
	}
	return 1;
}

static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct kiocb *kiocb = &req->rw.kiocb;
	int ret;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* already tried, or we're doing O_DIRECT */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_WAITQ))
		return false;
	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	/*
	 * If request type doesn't require req->io to defer in general,
	 * we need to allocate it here
	 */
	if (!req->io && __io_alloc_async_ctx(req))
		return false;

	ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq,
						io_async_buf_func, req);
	if (!ret) {
		io_get_req_task(req);
		return true;
	}

	return false;
}

static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
{
	if (req->file->f_op->read_iter)
		return call_read_iter(req->file, &req->rw.kiocb, iter);
	return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
}

static int io_read(struct io_kiocb *req, bool force_nonblock,
		   struct io_comp_state *cs)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter iter;
	size_t iov_count;
	ssize_t io_size, ret;

	ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
	if (ret < 0)
		return ret;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;

	io_size = ret;
	req->result = io_size;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, READ))
		goto copy_iov;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		unsigned long nr_segs = iter.nr_segs;
		ssize_t ret2 = 0;

		ret2 = io_iter_do_read(req, &iter);

		/* Catch -EAGAIN return for forced non-blocking submission */
		if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
			kiocb_done(kiocb, ret2, cs);
		} else {
			iter.count = iov_count;
			iter.nr_segs = nr_segs;
copy_iov:
			ret = io_setup_async_rw(req, io_size, iovec,
						inline_vecs, &iter);
			if (ret)
				goto out_free;
			/* it's copied and will be cleaned with ->io */
			iovec = NULL;
			/* if we can retry, do so with the callbacks armed */
			if (io_rw_should_retry(req)) {
				ret2 = io_iter_do_read(req, &iter);
				if (ret2 == -EIOCBQUEUED) {
					goto out_free;
				} else if (ret2 != -EAGAIN) {
					kiocb_done(kiocb, ret2, cs);
					goto out_free;
				}
			}
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}
	}
out_free:
	if (iovec)
		kfree(iovec);
	return ret;
}

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			 bool force_nonblock)
{
	ssize_t ret;

	ret = io_prep_rw(req, sqe, force_nonblock);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;

	/* either don't need iovec imported or already have it */
	if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
		return 0;
	return io_rw_prep_async(req, WRITE, force_nonblock);
}

static int io_write(struct io_kiocb *req, bool force_nonblock,
		    struct io_comp_state *cs)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter iter;
	size_t iov_count;
	ssize_t ret, io_size;

	ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
	if (ret < 0)
		return ret;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;

	io_size = ret;
	req->result = io_size;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct_IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		unsigned long nr_segs = iter.nr_segs;
		ssize_t ret2;

		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * io_complete_rw(). Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		 */
		if (req->flags & REQ_F_ISREG) {
			__sb_start_write(file_inode(req->file)->i_sb,
						SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(req->file)->i_sb,
						SB_FREEZE_WRITE);
		}
		kiocb->ki_flags |= IOCB_WRITE;

		if (req->file->f_op->write_iter)
			ret2 = call_write_iter(req->file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);

		/*
		 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
		 * retry them without IOCB_NOWAIT.
		 */
		if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
			ret2 = -EAGAIN;
		if (!force_nonblock || ret2 != -EAGAIN) {
			kiocb_done(kiocb, ret2, cs);
		} else {
			iter.count = iov_count;
			iter.nr_segs = nr_segs;
copy_iov:
			ret = io_setup_async_rw(req, io_size, iovec,
						inline_vecs, &iter);
			if (ret)
				goto out_free;
			/* it's copied and will be cleaned with ->io */
			iovec = NULL;
			return -EAGAIN;
		}
	}
out_free:
	if (iovec)
		kfree(iovec);
	return ret;
}

static int __io_splice_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;
	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
	int ret;

	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sp->file_in = NULL;
	sp->len = READ_ONCE(sqe->len);
	sp->flags = READ_ONCE(sqe->splice_flags);

	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;

	ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
			  (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (ret)
		return ret;
	req->flags |= REQ_F_NEED_CLEANUP;

	if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
		/*
		 * The splice operation will be punted async, and here we need
		 * to modify io_wq_work.flags, so initialize io_wq_work first.
		 */
		io_req_init_async(req);
		req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	return 0;
}

static int io_tee_prep(struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
{
	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
		return -EINVAL;
	return __io_splice_prep(req, sqe);
}

static int io_tee(struct io_kiocb *req, bool force_nonblock)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	long ret = 0;

	if (force_nonblock)
		return -EAGAIN;
	if (sp->len)
		ret = do_tee(in, out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;

	sp->off_in = READ_ONCE(sqe->splice_off_in);
	sp->off_out = READ_ONCE(sqe->off);
	return __io_splice_prep(req, sqe);
}

static int io_splice(struct io_kiocb *req, bool force_nonblock)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	loff_t *poff_in, *poff_out;
	long ret = 0;

	if (force_nonblock)
		return -EAGAIN;

	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;

	if (sp->len)
		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
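
/*
 * Userspace sketch (an assumption, liburing-style): a pipe-to-file
 * splice; passing -1 as an offset maps to the NULL poff_in/poff_out
 * case above:
 *
 *	io_uring_prep_splice(sqe, pipe_fd, -1, file_fd, 0, len, 0);
 */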

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	__io_req_complete(req, 0, 0, cs);
	return 0;
}
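
/*
 * Userspace sketch (an assumption, liburing-style): NOP completes
 * immediately, which makes it handy for exercising ring plumbing:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_nop(sqe);
 *	io_uring_submit(&ring);
 */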

static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
	loff_t end = req->sync.off + req->sync.len;
	int ret;

	/* fsync always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
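
/*
 * Userspace sketch (an assumption, liburing-style): the only flag
 * accepted above is IORING_FSYNC_DATASYNC, giving fdatasync() semantics:
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 */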

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	return 0;
}

static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
{
	int ret;

	/* fallocate always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (unlikely(sqe->ioprio || sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should already be initialised */
	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}
	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	u64 flags, mode;

	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;
	mode = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->open_flags);
	req->open.how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	size_t len;
	int ret;

	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

static int io_openat2(struct io_kiocb *req, bool force_nonblock)
{
	struct open_flags op;
	struct file *file;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;

	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
	} else {
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
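
/*
 * Userspace sketch (an assumption): the SQE fields consumed by
 * io_openat2_prep() above mirror the openat2(2) calling convention:
 *
 *	struct open_how how = { .flags = O_RDONLY };
 *
 *	sqe->opcode = IORING_OP_OPENAT2;
 *	sqe->fd = AT_FDCWD;
 *	sqe->addr = (u64)(unsigned long) pathname;
 *	sqe->addr2 = (u64)(unsigned long) &how;
 *	sqe->len = sizeof(how);
 */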

static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
	return io_openat2(req, force_nonblock);
}

static int io_remove_buffers_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
			       int bgid, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	/* the head kbuf is the list itself */
	while (!list_empty(&buf->list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&buf->list, struct io_buffer, list);
		list_del(&nxt->list);
		kfree(nxt);
		if (++i == nbufs)
			return i;
	}
	i++;
	kfree(buf);
	idr_remove(&ctx->io_buffer_idr, bgid);

	return i;
}

static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
			     struct io_comp_state *cs)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head;
	int ret = 0;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	ret = -ENOENT;
	head = idr_find(&ctx->io_buffer_idr, p->bgid);
	if (head)
		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);

	io_ring_submit_unlock(ctx, !force_nonblock);
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

static int io_provide_buffers_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			break;

		buf->addr = addr;
		buf->len = pbuf->len;
		buf->bid = bid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
			INIT_LIST_HEAD(&buf->list);
			*head = buf;
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
	}

	return i ? i : -ENOMEM;
}
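
/*
 * Userspace sketch (an assumption): how the fields parsed in
 * io_provide_buffers_prep() map onto a PROVIDE_BUFFERS SQE; the loop
 * above then carves the region into nbufs buffers of len bytes each:
 *
 *	sqe->opcode = IORING_OP_PROVIDE_BUFFERS;
 *	sqe->fd = nbufs;			// number of buffers
 *	sqe->addr = (u64)(unsigned long) base;	// start of the region
 *	sqe->len = buf_len;			// size of each buffer
 *	sqe->buf_group = bgid;			// buffer group id
 *	sqe->off = first_bid;			// id of the first buffer
 */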

static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
			      struct io_comp_state *cs)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head, *list;
	int ret = 0;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	list = head = idr_find(&ctx->io_buffer_idr, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret < 0)
		goto out;

	if (!list) {
		ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
					GFP_KERNEL);
		if (ret < 0) {
			__io_remove_buffers(ctx, head, p->bgid, -1U);
			goto out;
		}
	}
out:
	io_ring_submit_unlock(ctx, !force_nonblock);
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
}

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
			struct io_comp_state *cs)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, ret, 0, cs);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
3657
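/*
 * Userspace sketch (assumes liburing's io_uring_prep_epoll_ctl()
 * helper): an async epoll_ctl(2). The epfd/op/fd/event layout matches
 * the sqe fields read in io_epoll_ctl_prep() above.
 *
 *   struct epoll_event ev = { .events = EPOLLIN, .data.fd = sockfd };
 *
 *   io_uring_prep_epoll_ctl(sqe, epfd, sockfd, EPOLL_CTL_ADD, &ev);
 *   io_uring_submit(&ring);
 */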
c1ca757b
JA
3658static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3659{
3660#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3661 if (sqe->ioprio || sqe->buf_index || sqe->off)
3662 return -EINVAL;
3232dd02
PB
3663 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3664 return -EINVAL;
c1ca757b
JA
3665
3666 req->madvise.addr = READ_ONCE(sqe->addr);
3667 req->madvise.len = READ_ONCE(sqe->len);
3668 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
3669 return 0;
3670#else
3671 return -EOPNOTSUPP;
3672#endif
3673}
3674
014db007 3675static int io_madvise(struct io_kiocb *req, bool force_nonblock)
c1ca757b
JA
3676{
3677#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3678 struct io_madvise *ma = &req->madvise;
3679 int ret;
3680
3681 if (force_nonblock)
3682 return -EAGAIN;
3683
3684 ret = do_madvise(ma->addr, ma->len, ma->advice);
3685 if (ret < 0)
3686 req_set_fail_links(req);
e1e16097 3687 io_req_complete(req, ret);
c1ca757b
JA
3688 return 0;
3689#else
3690 return -EOPNOTSUPP;
3691#endif
3692}
3693
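/*
 * Userspace sketch (assumes liburing's io_uring_prep_madvise()
 * helper). The advice value travels in sqe->fadvise_advice, as read by
 * io_madvise_prep() above; io_madvise() always runs from a blocking
 * context.
 *
 *   io_uring_prep_madvise(sqe, addr, length, MADV_DONTNEED);
 */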
4840e418
JA
3694static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3695{
3696 if (sqe->ioprio || sqe->buf_index || sqe->addr)
3697 return -EINVAL;
3232dd02
PB
3698 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3699 return -EINVAL;
4840e418
JA
3700
3701 req->fadvise.offset = READ_ONCE(sqe->off);
3702 req->fadvise.len = READ_ONCE(sqe->len);
3703 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
3704 return 0;
3705}
3706
014db007 3707static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
4840e418
JA
3708{
3709 struct io_fadvise *fa = &req->fadvise;
3710 int ret;
3711
3e69426d
JA
3712 if (force_nonblock) {
3713 switch (fa->advice) {
3714 case POSIX_FADV_NORMAL:
3715 case POSIX_FADV_RANDOM:
3716 case POSIX_FADV_SEQUENTIAL:
3717 break;
3718 default:
3719 return -EAGAIN;
3720 }
3721 }
4840e418
JA
3722
3723 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
3724 if (ret < 0)
3725 req_set_fail_links(req);
e1e16097 3726 io_req_complete(req, ret);
4840e418
JA
3727 return 0;
3728}
3729
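/*
 * Userspace sketch (assumes liburing's io_uring_prep_fadvise()
 * helper). Per io_fadvise() above, NORMAL/RANDOM/SEQUENTIAL complete
 * inline even when nonblocking; other advice values get punted to a
 * blocking context.
 *
 *   io_uring_prep_fadvise(sqe, fd, 0, 0, POSIX_FADV_SEQUENTIAL);
 */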
eddc7ef5
JA
3730static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3731{
3232dd02
PB
3732 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3733 return -EINVAL;
eddc7ef5
JA
3734 if (sqe->ioprio || sqe->buf_index)
3735 return -EINVAL;
9c280f90 3736 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3737 return -EBADF;
eddc7ef5 3738
1d9e1288
BM
3739 req->statx.dfd = READ_ONCE(sqe->fd);
3740 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 3741 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
3742 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3743 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5
JA
3744
3745 return 0;
3746}
3747
014db007 3748static int io_statx(struct io_kiocb *req, bool force_nonblock)
eddc7ef5 3749{
1d9e1288 3750 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
3751 int ret;
3752
5b0bbee4
JA
3753 if (force_nonblock) {
3754 /* only need file table for an actual valid fd */
3755 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
3756 req->flags |= REQ_F_NO_FILE_TABLE;
eddc7ef5 3757 return -EAGAIN;
5b0bbee4 3758 }
eddc7ef5 3759
e62753e4
BM
3760 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
3761 ctx->buffer);
eddc7ef5 3762
eddc7ef5
JA
3763 if (ret < 0)
3764 req_set_fail_links(req);
e1e16097 3765 io_req_complete(req, ret);
eddc7ef5
JA
3766 return 0;
3767}
3768
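/*
 * Userspace sketch (assumes liburing's io_uring_prep_statx() helper,
 * with a hypothetical path): the dfd/path/flags/mask/buffer tuple
 * mirrors statx(2) and the sqe fields decoded in io_statx_prep() above.
 *
 *   struct statx stx;
 *
 *   io_uring_prep_statx(sqe, AT_FDCWD, "file.txt", 0, STATX_SIZE, &stx);
 */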
b5dba59e
JA
3769static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3770{
3771 /*
3772 * If we queue this for async, it must not be cancellable. That would
7cdaf587
XW
3773 * leave the 'file' in an indeterminate state. We need to modify
3774 * io_wq_work.flags here, so initialize io_wq_work first.
b5dba59e 3775 */
7cdaf587 3776 io_req_init_async(req);
b5dba59e
JA
3777 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
3778
3232dd02
PB
3779 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3780 return -EINVAL;
b5dba59e
JA
3781 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
3782 sqe->rw_flags || sqe->buf_index)
3783 return -EINVAL;
9c280f90 3784 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3785 return -EBADF;
b5dba59e
JA
3786
3787 req->close.fd = READ_ONCE(sqe->fd);
fd2206e4
JA
3788 if ((req->file && req->file->f_op == &io_uring_fops) ||
3789 req->close.fd == req->ctx->ring_fd)
3790 return -EBADF;
b5dba59e 3791
3af73b28 3792 req->close.put_file = NULL;
b5dba59e 3793 return 0;
b5dba59e
JA
3794}
3795
229a7b63
JA
3796static int io_close(struct io_kiocb *req, bool force_nonblock,
3797 struct io_comp_state *cs)
b5dba59e 3798{
3af73b28 3799 struct io_close *close = &req->close;
b5dba59e
JA
3800 int ret;
3801
3af73b28
PB
3802 /* may already have been done during nonblock submission */
3803 if (!close->put_file) {
3804 ret = __close_fd_get_file(close->fd, &close->put_file);
3805 if (ret < 0)
3806 return (ret == -ENOENT) ? -EBADF : ret;
3807 }
b5dba59e
JA
3808
3809 /* if the file has a flush method, be safe and punt to async */
3af73b28 3810 if (close->put_file->f_op->flush && force_nonblock) {
24c74678
PB
3811 /* was never set, but play safe */
3812 req->flags &= ~REQ_F_NOWAIT;
0bf0eefd 3813 /* avoid grabbing the file table - we don't need it */
24c74678 3814 req->flags |= REQ_F_NO_FILE_TABLE;
0bf0eefd 3815 return -EAGAIN;
a2100672 3816 }
b5dba59e 3817
3af73b28
PB
3818 /* No ->flush() or already async, safely close from here */
3819 ret = filp_close(close->put_file, req->work.files);
3820 if (ret < 0)
3821 req_set_fail_links(req);
3af73b28
PB
3822 fput(close->put_file);
3823 close->put_file = NULL;
229a7b63 3824 __io_req_complete(req, ret, 0, cs);
1a417f4e 3825 return 0;
b5dba59e
JA
3826}
3827
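/*
 * Userspace sketch (assumes liburing's io_uring_prep_close() helper).
 * As io_close() above shows, files with a ->flush() method are punted,
 * so the completion may be posted from async context.
 *
 *   io_uring_prep_close(sqe, fd);
 */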
3529d8c2 3828static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
3829{
3830 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4
JA
3831
3832 if (!req->file)
3833 return -EBADF;
5d17b4a4
JA
3834
3835 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3836 return -EINVAL;
3837 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3838 return -EINVAL;
3839
8ed8d3c3
JA
3840 req->sync.off = READ_ONCE(sqe->off);
3841 req->sync.len = READ_ONCE(sqe->len);
3842 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
3843 return 0;
3844}
3845
ac45abc0 3846static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 3847{
8ed8d3c3
JA
3848 int ret;
3849
ac45abc0
PB
3850 /* sync_file_range always requires a blocking context */
3851 if (force_nonblock)
3852 return -EAGAIN;
3853
9adbd45d 3854 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
3855 req->sync.flags);
3856 if (ret < 0)
3857 req_set_fail_links(req);
e1e16097 3858 io_req_complete(req, ret);
5d17b4a4
JA
3859 return 0;
3860}
3861
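/*
 * Userspace sketch (assumes liburing's io_uring_prep_sync_file_range()
 * helper); io_sync_file_range() above always requires a blocking
 * context.
 *
 *   io_uring_prep_sync_file_range(sqe, fd, 64 * 1024, 0,
 *                                 SYNC_FILE_RANGE_WRITE);
 */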
469956e8 3862#if defined(CONFIG_NET)
02d27d89
PB
3863static int io_setup_async_msg(struct io_kiocb *req,
3864 struct io_async_msghdr *kmsg)
3865{
3866 if (req->io)
3867 return -EAGAIN;
3868 if (io_alloc_async_ctx(req)) {
3869 if (kmsg->iov != kmsg->fast_iov)
3870 kfree(kmsg->iov);
3871 return -ENOMEM;
3872 }
3873 req->flags |= REQ_F_NEED_CLEANUP;
3874 memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
3875 return -EAGAIN;
3876}
3877
2ae523ed
PB
3878static int io_sendmsg_copy_hdr(struct io_kiocb *req,
3879 struct io_async_msghdr *iomsg)
3880{
3881 iomsg->iov = iomsg->fast_iov;
3882 iomsg->msg.msg_name = &iomsg->addr;
3883 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
3884 req->sr_msg.msg_flags, &iomsg->iov);
3885}
3886
3529d8c2 3887static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 3888{
e47293fd 3889 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3890 struct io_async_ctx *io = req->io;
99bc4c38 3891 int ret;
03b1230c 3892
d2b6f48b
PB
3893 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3894 return -EINVAL;
3895
e47293fd 3896 sr->msg_flags = READ_ONCE(sqe->msg_flags);
270a5940 3897 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 3898 sr->len = READ_ONCE(sqe->len);
3529d8c2 3899
d8768362
JA
3900#ifdef CONFIG_COMPAT
3901 if (req->ctx->compat)
3902 sr->msg_flags |= MSG_CMSG_COMPAT;
3903#endif
3904
fddaface 3905 if (!io || req->opcode == IORING_OP_SEND)
3529d8c2 3906 return 0;
5f798bea
PB
3907 /* iovec is already imported */
3908 if (req->flags & REQ_F_NEED_CLEANUP)
3909 return 0;
3529d8c2 3910
2ae523ed 3911 ret = io_sendmsg_copy_hdr(req, &io->msg);
99bc4c38
PB
3912 if (!ret)
3913 req->flags |= REQ_F_NEED_CLEANUP;
3914 return ret;
03b1230c
JA
3915}
3916
229a7b63
JA
3917static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
3918 struct io_comp_state *cs)
aa1fa28f 3919{
6b754c8b 3920 struct io_async_msghdr iomsg, *kmsg;
0fa03c62 3921 struct socket *sock;
7a7cacba 3922 unsigned flags;
0fa03c62
JA
3923 int ret;
3924
0fa03c62 3925 sock = sock_from_file(req->file, &ret);
7a7cacba
PB
3926 if (unlikely(!sock))
3927 return ret;
0fa03c62 3928
7a7cacba
PB
3929 if (req->io) {
3930 kmsg = &req->io->msg;
3931 kmsg->msg.msg_name = &req->io->msg.addr;
3932 /* if iov is set, it's allocated already */
3933 if (!kmsg->iov)
3934 kmsg->iov = kmsg->fast_iov;
3935 kmsg->msg.msg_iter.iov = kmsg->iov;
3936 } else {
3937 ret = io_sendmsg_copy_hdr(req, &iomsg);
3938 if (ret)
3939 return ret;
3940 kmsg = &iomsg;
0fa03c62
JA
3941 }
3942
7a7cacba
PB
3943 flags = req->sr_msg.msg_flags;
3944 if (flags & MSG_DONTWAIT)
3945 req->flags |= REQ_F_NOWAIT;
3946 else if (force_nonblock)
3947 flags |= MSG_DONTWAIT;
3948
3949 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
3950 if (force_nonblock && ret == -EAGAIN)
3951 return io_setup_async_msg(req, kmsg);
3952 if (ret == -ERESTARTSYS)
3953 ret = -EINTR;
3954
6b754c8b 3955 if (kmsg->iov != kmsg->fast_iov)
0b416c3e 3956 kfree(kmsg->iov);
99bc4c38 3957 req->flags &= ~REQ_F_NEED_CLEANUP;
4e88d6e7
JA
3958 if (ret < 0)
3959 req_set_fail_links(req);
229a7b63 3960 __io_req_complete(req, ret, 0, cs);
5d17b4a4 3961 return 0;
03b1230c 3962}
aa1fa28f 3963
229a7b63
JA
3964static int io_send(struct io_kiocb *req, bool force_nonblock,
3965 struct io_comp_state *cs)
fddaface 3966{
7a7cacba
PB
3967 struct io_sr_msg *sr = &req->sr_msg;
3968 struct msghdr msg;
3969 struct iovec iov;
fddaface 3970 struct socket *sock;
7a7cacba 3971 unsigned flags;
fddaface
JA
3972 int ret;
3973
fddaface 3974 sock = sock_from_file(req->file, &ret);
7a7cacba
PB
3975 if (unlikely(!sock))
3976 return ret;
fddaface 3977
7a7cacba
PB
3978 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
3979 if (unlikely(ret))
14c32eee 3980 return ret;
fddaface 3981
7a7cacba
PB
3982 msg.msg_name = NULL;
3983 msg.msg_control = NULL;
3984 msg.msg_controllen = 0;
3985 msg.msg_namelen = 0;
fddaface 3986
7a7cacba
PB
3987 flags = req->sr_msg.msg_flags;
3988 if (flags & MSG_DONTWAIT)
3989 req->flags |= REQ_F_NOWAIT;
3990 else if (force_nonblock)
3991 flags |= MSG_DONTWAIT;
3992
3993 msg.msg_flags = flags;
3994 ret = sock_sendmsg(sock, &msg);
3995 if (force_nonblock && ret == -EAGAIN)
3996 return -EAGAIN;
3997 if (ret == -ERESTARTSYS)
3998 ret = -EINTR;
fddaface 3999
fddaface
JA
4000 if (ret < 0)
4001 req_set_fail_links(req);
229a7b63 4002 __io_req_complete(req, ret, 0, cs);
fddaface 4003 return 0;
fddaface
JA
4004}
4005
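/*
 * Userspace sketch (assumes liburing helpers): IORING_OP_SEND carries a
 * single buffer (io_send() above), while IORING_OP_SENDMSG takes a full
 * struct msghdr whose iovec may be copied into async context by
 * io_setup_async_msg().
 *
 *   io_uring_prep_send(sqe, sockfd, buf, buflen, 0);
 *   // or: io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
 */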
1400e697
PB
4006static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4007 struct io_async_msghdr *iomsg)
52de1fe1
JA
4008{
4009 struct io_sr_msg *sr = &req->sr_msg;
4010 struct iovec __user *uiov;
4011 size_t iov_len;
4012 int ret;
4013
1400e697
PB
4014 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4015 &iomsg->uaddr, &uiov, &iov_len);
52de1fe1
JA
4016 if (ret)
4017 return ret;
4018
4019 if (req->flags & REQ_F_BUFFER_SELECT) {
4020 if (iov_len > 1)
4021 return -EINVAL;
1400e697 4022 if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
52de1fe1 4023 return -EFAULT;
1400e697
PB
4024 sr->len = iomsg->iov[0].iov_len;
4025 iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
52de1fe1 4026 sr->len);
1400e697 4027 iomsg->iov = NULL;
52de1fe1
JA
4028 } else {
4029 ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
1400e697 4030 &iomsg->iov, &iomsg->msg.msg_iter);
52de1fe1
JA
4031 if (ret > 0)
4032 ret = 0;
4033 }
4034
4035 return ret;
4036}
4037
4038#ifdef CONFIG_COMPAT
4039static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
1400e697 4040 struct io_async_msghdr *iomsg)
52de1fe1
JA
4041{
4042 struct compat_msghdr __user *msg_compat;
4043 struct io_sr_msg *sr = &req->sr_msg;
4044 struct compat_iovec __user *uiov;
4045 compat_uptr_t ptr;
4046 compat_size_t len;
4047 int ret;
4048
270a5940 4049 msg_compat = (struct compat_msghdr __user *) sr->umsg;
1400e697 4050 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
52de1fe1
JA
4051 &ptr, &len);
4052 if (ret)
4053 return ret;
4054
4055 uiov = compat_ptr(ptr);
4056 if (req->flags & REQ_F_BUFFER_SELECT) {
4057 compat_ssize_t clen;
4058
4059 if (len > 1)
4060 return -EINVAL;
4061 if (!access_ok(uiov, sizeof(*uiov)))
4062 return -EFAULT;
4063 if (__get_user(clen, &uiov->iov_len))
4064 return -EFAULT;
4065 if (clen < 0)
4066 return -EINVAL;
1400e697
PB
4067 sr->len = clen;
4068 iomsg->iov = NULL;
52de1fe1
JA
4069 } else {
4070 ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
1400e697
PB
4071 &iomsg->iov,
4072 &iomsg->msg.msg_iter);
52de1fe1
JA
4073 if (ret < 0)
4074 return ret;
4075 }
4076
4077 return 0;
4078}
4079#endif
4080
1400e697
PB
4081static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4082 struct io_async_msghdr *iomsg)
52de1fe1 4083{
1400e697
PB
4084 iomsg->msg.msg_name = &iomsg->addr;
4085 iomsg->iov = iomsg->fast_iov;
52de1fe1
JA
4086
4087#ifdef CONFIG_COMPAT
4088 if (req->ctx->compat)
1400e697 4089 return __io_compat_recvmsg_copy_hdr(req, iomsg);
fddaface 4090#endif
52de1fe1 4091
1400e697 4092 return __io_recvmsg_copy_hdr(req, iomsg);
52de1fe1
JA
4093}
4094
bcda7baa
JA
4095static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4096 int *cflags, bool needs_lock)
4097{
4098 struct io_sr_msg *sr = &req->sr_msg;
4099 struct io_buffer *kbuf;
4100
bcda7baa
JA
4101 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4102 if (IS_ERR(kbuf))
4103 return kbuf;
4104
4105 sr->kbuf = kbuf;
4106 req->flags |= REQ_F_BUFFER_SELECTED;
4107
4108 *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
4109 *cflags |= IORING_CQE_F_BUFFER;
4110 return kbuf;
fddaface
JA
4111}
4112
3529d8c2
JA
4113static int io_recvmsg_prep(struct io_kiocb *req,
4114 const struct io_uring_sqe *sqe)
aa1fa28f 4115{
e47293fd 4116 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 4117 struct io_async_ctx *io = req->io;
99bc4c38 4118 int ret;
3529d8c2 4119
d2b6f48b
PB
4120 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4121 return -EINVAL;
4122
3529d8c2 4123 sr->msg_flags = READ_ONCE(sqe->msg_flags);
270a5940 4124 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 4125 sr->len = READ_ONCE(sqe->len);
bcda7baa 4126 sr->bgid = READ_ONCE(sqe->buf_group);
06b76d44 4127
d8768362
JA
4128#ifdef CONFIG_COMPAT
4129 if (req->ctx->compat)
4130 sr->msg_flags |= MSG_CMSG_COMPAT;
4131#endif
4132
fddaface 4133 if (!io || req->opcode == IORING_OP_RECV)
06b76d44 4134 return 0;
5f798bea
PB
4135 /* iovec is already imported */
4136 if (req->flags & REQ_F_NEED_CLEANUP)
4137 return 0;
03b1230c 4138
1400e697 4139 ret = io_recvmsg_copy_hdr(req, &io->msg);
99bc4c38
PB
4140 if (!ret)
4141 req->flags |= REQ_F_NEED_CLEANUP;
4142 return ret;
aa1fa28f
JA
4143}
4144
229a7b63
JA
4145static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4146 struct io_comp_state *cs)
aa1fa28f 4147{
6b754c8b 4148 struct io_async_msghdr iomsg, *kmsg;
03b1230c 4149 struct socket *sock;
bc02ef33 4150 struct io_buffer *kbuf = NULL;
7a7cacba 4151 unsigned flags;
52de1fe1 4152 int ret, cflags = 0;
03b1230c 4153
03b1230c 4154 sock = sock_from_file(req->file, &ret);
7a7cacba
PB
4155 if (unlikely(!sock))
4156 return ret;
52de1fe1 4157
7a7cacba
PB
4158 if (req->io) {
4159 kmsg = &req->io->msg;
4160 kmsg->msg.msg_name = &req->io->msg.addr;
4161 /* if iov is set, it's allocated already */
4162 if (!kmsg->iov)
4163 kmsg->iov = kmsg->fast_iov;
4164 kmsg->msg.msg_iter.iov = kmsg->iov;
4165 } else {
4166 ret = io_recvmsg_copy_hdr(req, &iomsg);
4167 if (ret)
681fda8d 4168 return ret;
7a7cacba
PB
4169 kmsg = &iomsg;
4170 }
4171
bc02ef33
PB
4172 if (req->flags & REQ_F_BUFFER_SELECT) {
4173 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
4174 if (IS_ERR(kbuf))
4175 return PTR_ERR(kbuf);
7a7cacba
PB
4176 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4177 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
4178 1, req->sr_msg.len);
4179 }
4180
4181 flags = req->sr_msg.msg_flags;
4182 if (flags & MSG_DONTWAIT)
4183 req->flags |= REQ_F_NOWAIT;
4184 else if (force_nonblock)
4185 flags |= MSG_DONTWAIT;
4186
4187 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4188 kmsg->uaddr, flags);
0e1b6fe3
PB
4189 if (force_nonblock && ret == -EAGAIN)
4190 return io_setup_async_msg(req, kmsg);
7a7cacba
PB
4191 if (ret == -ERESTARTSYS)
4192 ret = -EINTR;
0e1b6fe3 4193
7a7cacba
PB
4194 if (kbuf)
4195 kfree(kbuf);
6b754c8b 4196 if (kmsg->iov != kmsg->fast_iov)
0b416c3e 4197 kfree(kmsg->iov);
0e1b6fe3 4198 req->flags &= ~(REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED);
7a7cacba 4199
4e88d6e7
JA
4200 if (ret < 0)
4201 req_set_fail_links(req);
229a7b63 4202 __io_req_complete(req, ret, cflags, cs);
03b1230c 4203 return 0;
0fa03c62 4204}
5d17b4a4 4205
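/*
 * Userspace sketch of a buffer-selected receive (assumes liburing
 * helpers): with IOSQE_BUFFER_SELECT set and no buffer supplied, the
 * kernel picks one from the group via io_recv_buffer_select() and
 * reports its bid in cqe->flags >> IORING_CQE_BUFFER_SHIFT.
 *
 *   io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *   sqe->flags |= IOSQE_BUFFER_SELECT;
 *   sqe->buf_group = 1;    // bgid provided earlier
 *   io_uring_submit(&ring);
 */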
229a7b63
JA
4206static int io_recv(struct io_kiocb *req, bool force_nonblock,
4207 struct io_comp_state *cs)
fddaface 4208{
6b754c8b 4209 struct io_buffer *kbuf = NULL;
7a7cacba
PB
4210 struct io_sr_msg *sr = &req->sr_msg;
4211 struct msghdr msg;
4212 void __user *buf = sr->buf;
fddaface 4213 struct socket *sock;
7a7cacba
PB
4214 struct iovec iov;
4215 unsigned flags;
bcda7baa 4216 int ret, cflags = 0;
fddaface 4217
fddaface 4218 sock = sock_from_file(req->file, &ret);
7a7cacba
PB
4219 if (unlikely(!sock))
4220 return ret;
fddaface 4221
bc02ef33
PB
4222 if (req->flags & REQ_F_BUFFER_SELECT) {
4223 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
4224 if (IS_ERR(kbuf))
4225 return PTR_ERR(kbuf);
7a7cacba 4226 buf = u64_to_user_ptr(kbuf->addr);
bc02ef33 4227 }
7a7cacba
PB
4228
4229 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
14c32eee
PB
4230 if (unlikely(ret))
4231 goto out_free;
fddaface 4232
7a7cacba
PB
4233 msg.msg_name = NULL;
4234 msg.msg_control = NULL;
4235 msg.msg_controllen = 0;
4236 msg.msg_namelen = 0;
4237 msg.msg_iocb = NULL;
4238 msg.msg_flags = 0;
4239
4240 flags = req->sr_msg.msg_flags;
4241 if (flags & MSG_DONTWAIT)
4242 req->flags |= REQ_F_NOWAIT;
4243 else if (force_nonblock)
4244 flags |= MSG_DONTWAIT;
4245
4246 ret = sock_recvmsg(sock, &msg, flags);
4247 if (force_nonblock && ret == -EAGAIN)
4248 return -EAGAIN;
4249 if (ret == -ERESTARTSYS)
4250 ret = -EINTR;
14c32eee 4251out_free:
0e1b6fe3
PB
4252 if (kbuf)
4253 kfree(kbuf);
bcda7baa 4254 req->flags &= ~(REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED);
fddaface
JA
4255 if (ret < 0)
4256 req_set_fail_links(req);
229a7b63 4257 __io_req_complete(req, ret, cflags, cs);
fddaface 4258 return 0;
fddaface
JA
4259}
4260
3529d8c2 4261static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 4262{
8ed8d3c3
JA
4263 struct io_accept *accept = &req->accept;
4264
17f2fe35
JA
4265 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4266 return -EINVAL;
8042d6ce 4267 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
4268 return -EINVAL;
4269
d55e5f5b
JA
4270 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4271 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 4272 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 4273 accept->nofile = rlimit(RLIMIT_NOFILE);
8ed8d3c3 4274 return 0;
8ed8d3c3 4275}
17f2fe35 4276
229a7b63
JA
4277static int io_accept(struct io_kiocb *req, bool force_nonblock,
4278 struct io_comp_state *cs)
8ed8d3c3
JA
4279{
4280 struct io_accept *accept = &req->accept;
ac45abc0 4281 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
8ed8d3c3
JA
4282 int ret;
4283
e697deed
JX
4284 if (req->file->f_flags & O_NONBLOCK)
4285 req->flags |= REQ_F_NOWAIT;
4286
8ed8d3c3 4287 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
09952e3e
JA
4288 accept->addr_len, accept->flags,
4289 accept->nofile);
8ed8d3c3 4290 if (ret == -EAGAIN && force_nonblock)
17f2fe35 4291 return -EAGAIN;
ac45abc0
PB
4292 if (ret < 0) {
4293 if (ret == -ERESTARTSYS)
4294 ret = -EINTR;
4e88d6e7 4295 req_set_fail_links(req);
ac45abc0 4296 }
229a7b63 4297 __io_req_complete(req, ret, 0, cs);
17f2fe35 4298 return 0;
8ed8d3c3
JA
4299}
4300
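/*
 * Userspace sketch (assumes liburing's io_uring_prep_accept() helper):
 * addr and addrlen land in sqe->addr/addr2 as io_accept_prep() expects.
 *
 *   struct sockaddr_in peer;
 *   socklen_t peer_len = sizeof(peer);
 *
 *   io_uring_prep_accept(sqe, listenfd, (struct sockaddr *)&peer,
 *                        &peer_len, 0);
 */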
3529d8c2 4301static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 4302{
3529d8c2
JA
4303 struct io_connect *conn = &req->connect;
4304 struct io_async_ctx *io = req->io;
f499a021 4305
3fbb51c1
JA
4306 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4307 return -EINVAL;
4308 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4309 return -EINVAL;
4310
3529d8c2
JA
4311 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4312 conn->addr_len = READ_ONCE(sqe->addr2);
4313
4314 if (!io)
4315 return 0;
4316
4317 return move_addr_to_kernel(conn->addr, conn->addr_len,
3fbb51c1 4318 &io->connect.address);
f499a021
JA
4319}
4320
229a7b63
JA
4321static int io_connect(struct io_kiocb *req, bool force_nonblock,
4322 struct io_comp_state *cs)
f8e85cf2 4323{
f499a021 4324 struct io_async_ctx __io, *io;
f8e85cf2 4325 unsigned file_flags;
3fbb51c1 4326 int ret;
f8e85cf2 4327
f499a021
JA
4328 if (req->io) {
4329 io = req->io;
4330 } else {
3529d8c2
JA
4331 ret = move_addr_to_kernel(req->connect.addr,
4332 req->connect.addr_len,
4333 &__io.connect.address);
f499a021
JA
4334 if (ret)
4335 goto out;
4336 io = &__io;
4337 }
4338
3fbb51c1
JA
4339 file_flags = force_nonblock ? O_NONBLOCK : 0;
4340
4341 ret = __sys_connect_file(req->file, &io->connect.address,
4342 req->connect.addr_len, file_flags);
87f80d62 4343 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
b7bb4f7d
JA
4344 if (req->io)
4345 return -EAGAIN;
4346 if (io_alloc_async_ctx(req)) {
f499a021
JA
4347 ret = -ENOMEM;
4348 goto out;
4349 }
b7bb4f7d 4350 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
f8e85cf2 4351 return -EAGAIN;
f499a021 4352 }
f8e85cf2
JA
4353 if (ret == -ERESTARTSYS)
4354 ret = -EINTR;
f499a021 4355out:
4e88d6e7
JA
4356 if (ret < 0)
4357 req_set_fail_links(req);
229a7b63 4358 __io_req_complete(req, ret, 0, cs);
f8e85cf2 4359 return 0;
469956e8
Y
4360}
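/*
 * Userspace sketch (assumes liburing's io_uring_prep_connect() helper).
 * io_connect() above copies the address into kernel memory first, so a
 * retry after -EINPROGRESS never touches user memory again.
 *
 *   io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&dst,
 *                         sizeof(dst));
 */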
4361#else /* !CONFIG_NET */
4362static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4363{
f8e85cf2 4364 return -EOPNOTSUPP;
f8e85cf2
JA
4365}
4366
1e16c2f9
RD
4367static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4368 struct io_comp_state *cs)
469956e8
Y
4369{
4370 return -EOPNOTSUPP;
4371}
4372
1e16c2f9
RD
4373static int io_send(struct io_kiocb *req, bool force_nonblock,
4374 struct io_comp_state *cs)
469956e8
Y
4375{
4376 return -EOPNOTSUPP;
4377}
4378
4379static int io_recvmsg_prep(struct io_kiocb *req,
4380 const struct io_uring_sqe *sqe)
4381{
4382 return -EOPNOTSUPP;
4383}
4384
1e16c2f9
RD
4385static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4386 struct io_comp_state *cs)
469956e8
Y
4387{
4388 return -EOPNOTSUPP;
4389}
4390
1e16c2f9
RD
4391static int io_recv(struct io_kiocb *req, bool force_nonblock,
4392 struct io_comp_state *cs)
469956e8
Y
4393{
4394 return -EOPNOTSUPP;
4395}
4396
4397static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4398{
4399 return -EOPNOTSUPP;
4400}
4401
1e16c2f9
RD
4402static int io_accept(struct io_kiocb *req, bool force_nonblock,
4403 struct io_comp_state *cs)
469956e8
Y
4404{
4405 return -EOPNOTSUPP;
4406}
4407
4408static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4409{
4410 return -EOPNOTSUPP;
4411}
4412
1e16c2f9
RD
4413static int io_connect(struct io_kiocb *req, bool force_nonblock,
4414 struct io_comp_state *cs)
469956e8 4415{
f8e85cf2 4416 return -EOPNOTSUPP;
f8e85cf2 4417}
469956e8 4418#endif /* CONFIG_NET */
f8e85cf2 4419
d7718a9d
JA
4420struct io_poll_table {
4421 struct poll_table_struct pt;
4422 struct io_kiocb *req;
4423 int error;
4424};
4425
d7718a9d
JA
4426static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4427 __poll_t mask, task_work_func_t func)
4428{
aa96bf8a 4429 int ret;
d7718a9d
JA
4430
4431 /* for instances that support it, check for an event match first: */
4432 if (mask && !(mask & poll->events))
4433 return 0;
4434
4435 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4436
4437 list_del_init(&poll->wait.entry);
4438
d7718a9d
JA
4439 req->result = mask;
4440 init_task_work(&req->task_work, func);
4441 /*
e3aabf95
JA
4442 * If this fails, then the task is exiting. When a task exits, the
4443 * work gets canceled, so just cancel this request as well instead
4444 * of executing it. We can't safely execute it anyway, as we may not
4445 * have the state needed to run it.
d7718a9d 4446 */
b7db41c9 4447 ret = io_req_task_work_add(req, &req->task_work);
aa96bf8a 4448 if (unlikely(ret)) {
c2c4c83c
JA
4449 struct task_struct *tsk;
4450
e3aabf95 4451 WRITE_ONCE(poll->canceled, true);
aa96bf8a 4452 tsk = io_wq_get_task(req->ctx->io_wq);
ce593a6c
JA
4453 task_work_add(tsk, &req->task_work, 0);
4454 wake_up_process(tsk);
aa96bf8a 4455 }
d7718a9d
JA
4456 return 1;
4457}
4458
74ce6ce4
JA
4459static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4460 __acquires(&req->ctx->completion_lock)
4461{
4462 struct io_ring_ctx *ctx = req->ctx;
4463
4464 if (!req->result && !READ_ONCE(poll->canceled)) {
4465 struct poll_table_struct pt = { ._key = poll->events };
4466
4467 req->result = vfs_poll(req->file, &pt) & poll->events;
4468 }
4469
4470 spin_lock_irq(&ctx->completion_lock);
4471 if (!req->result && !READ_ONCE(poll->canceled)) {
4472 add_wait_queue(poll->head, &poll->wait);
4473 return true;
4474 }
4475
4476 return false;
4477}
4478
807abcb0 4479static void io_poll_remove_double(struct io_kiocb *req, void *data)
18bceab1 4480{
807abcb0 4481 struct io_poll_iocb *poll = data;
18bceab1
JA
4482
4483 lockdep_assert_held(&req->ctx->completion_lock);
4484
4485 if (poll && poll->head) {
4486 struct wait_queue_head *head = poll->head;
4487
4488 spin_lock(&head->lock);
4489 list_del_init(&poll->wait.entry);
4490 if (poll->wait.private)
4491 refcount_dec(&req->refs);
4492 poll->head = NULL;
4493 spin_unlock(&head->lock);
4494 }
4495}
4496
4497static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4498{
4499 struct io_ring_ctx *ctx = req->ctx;
4500
807abcb0 4501 io_poll_remove_double(req, req->io);
18bceab1
JA
4502 req->poll.done = true;
4503 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4504 io_commit_cqring(ctx);
4505}
4506
4507static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4508{
4509 struct io_ring_ctx *ctx = req->ctx;
4510
4511 if (io_poll_rewait(req, &req->poll)) {
4512 spin_unlock_irq(&ctx->completion_lock);
4513 return;
4514 }
4515
4516 hash_del(&req->hash_node);
4517 io_poll_complete(req, req->result, 0);
4518 req->flags |= REQ_F_COMP_LOCKED;
9b5f7bd9 4519 *nxt = io_put_req_find_next(req);
18bceab1
JA
4520 spin_unlock_irq(&ctx->completion_lock);
4521
4522 io_cqring_ev_posted(ctx);
4523}
4524
4525static void io_poll_task_func(struct callback_head *cb)
4526{
4527 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4528 struct io_kiocb *nxt = NULL;
4529
4530 io_poll_task_handler(req, &nxt);
ea1164e5
PB
4531 if (nxt)
4532 __io_req_task_submit(nxt);
18bceab1
JA
4533}
4534
4535static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4536 int sync, void *key)
4537{
4538 struct io_kiocb *req = wait->private;
807abcb0 4539 struct io_poll_iocb *poll = req->apoll->double_poll;
18bceab1
JA
4540 __poll_t mask = key_to_poll(key);
4541
4542 /* for instances that support it, check for an event match first: */
4543 if (mask && !(mask & poll->events))
4544 return 0;
4545
807abcb0 4546 if (poll && poll->head) {
18bceab1
JA
4547 bool done;
4548
807abcb0
JA
4549 spin_lock(&poll->head->lock);
4550 done = list_empty(&poll->wait.entry);
18bceab1 4551 if (!done)
807abcb0
JA
4552 list_del_init(&poll->wait.entry);
4553 spin_unlock(&poll->head->lock);
18bceab1
JA
4554 if (!done)
4555 __io_async_wake(req, poll, mask, io_poll_task_func);
4556 }
4557 refcount_dec(&req->refs);
4558 return 1;
4559}
4560
4561static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4562 wait_queue_func_t wake_func)
4563{
4564 poll->head = NULL;
4565 poll->done = false;
4566 poll->canceled = false;
4567 poll->events = events;
4568 INIT_LIST_HEAD(&poll->wait.entry);
4569 init_waitqueue_func_entry(&poll->wait, wake_func);
4570}
4571
4572static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
807abcb0
JA
4573 struct wait_queue_head *head,
4574 struct io_poll_iocb **poll_ptr)
18bceab1
JA
4575{
4576 struct io_kiocb *req = pt->req;
4577
4578 /*
4579 * If poll->head is already set, it's because the file being polled
4580 * uses multiple waitqueues for poll handling (eg one for read, one
4581 * for write). Set up a separate io_poll_iocb if this happens.
4582 */
4583 if (unlikely(poll->head)) {
4584 /* already have a 2nd entry, fail a third attempt */
807abcb0 4585 if (*poll_ptr) {
18bceab1
JA
4586 pt->error = -EINVAL;
4587 return;
4588 }
4589 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4590 if (!poll) {
4591 pt->error = -ENOMEM;
4592 return;
4593 }
4594 io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
4595 refcount_inc(&req->refs);
4596 poll->wait.private = req;
807abcb0 4597 *poll_ptr = poll;
18bceab1
JA
4598 }
4599
4600 pt->error = 0;
4601 poll->head = head;
a31eb4a2
JX
4602
4603 if (poll->events & EPOLLEXCLUSIVE)
4604 add_wait_queue_exclusive(head, &poll->wait);
4605 else
4606 add_wait_queue(head, &poll->wait);
18bceab1
JA
4607}
4608
4609static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4610 struct poll_table_struct *p)
4611{
4612 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
807abcb0 4613 struct async_poll *apoll = pt->req->apoll;
18bceab1 4614
807abcb0 4615 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
18bceab1
JA
4616}
4617
d7718a9d
JA
4618static void io_async_task_func(struct callback_head *cb)
4619{
4620 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4621 struct async_poll *apoll = req->apoll;
4622 struct io_ring_ctx *ctx = req->ctx;
4623
4624 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4625
74ce6ce4 4626 if (io_poll_rewait(req, &apoll->poll)) {
d7718a9d 4627 spin_unlock_irq(&ctx->completion_lock);
74ce6ce4 4628 return;
d7718a9d
JA
4629 }
4630
31067255 4631 /* If req is still hashed, it cannot have been canceled. Don't check. */
0be0b0e3 4632 if (hash_hashed(&req->hash_node))
74ce6ce4 4633 hash_del(&req->hash_node);
2bae047e 4634
807abcb0 4635 io_poll_remove_double(req, apoll->double_poll);
74ce6ce4
JA
4636 spin_unlock_irq(&ctx->completion_lock);
4637
44575a67 4638 /* restore ->work in case we need to retry again */
405a5d2b
XW
4639 if (req->flags & REQ_F_WORK_INITIALIZED)
4640 memcpy(&req->work, &apoll->work, sizeof(req->work));
44575a67 4641
0be0b0e3
PB
4642 if (!READ_ONCE(apoll->poll.canceled))
4643 __io_req_task_submit(req);
4644 else
4645 __io_req_task_cancel(req, -ECANCELED);
aa340845 4646
807abcb0 4647 kfree(apoll->double_poll);
aa340845 4648 kfree(apoll);
d7718a9d
JA
4649}
4650
4651static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4652 void *key)
4653{
4654 struct io_kiocb *req = wait->private;
4655 struct io_poll_iocb *poll = &req->apoll->poll;
4656
4657 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
4658 key_to_poll(key));
4659
4660 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
4661}
4662
4663static void io_poll_req_insert(struct io_kiocb *req)
4664{
4665 struct io_ring_ctx *ctx = req->ctx;
4666 struct hlist_head *list;
4667
4668 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
4669 hlist_add_head(&req->hash_node, list);
4670}
4671
4672static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
4673 struct io_poll_iocb *poll,
4674 struct io_poll_table *ipt, __poll_t mask,
4675 wait_queue_func_t wake_func)
4676 __acquires(&ctx->completion_lock)
4677{
4678 struct io_ring_ctx *ctx = req->ctx;
4679 bool cancel = false;
4680
18bceab1 4681 io_init_poll_iocb(poll, mask, wake_func);
b90cd197 4682 poll->file = req->file;
18bceab1 4683 poll->wait.private = req;
d7718a9d
JA
4684
4685 ipt->pt._key = mask;
4686 ipt->req = req;
4687 ipt->error = -EINVAL;
4688
d7718a9d
JA
4689 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
4690
4691 spin_lock_irq(&ctx->completion_lock);
4692 if (likely(poll->head)) {
4693 spin_lock(&poll->head->lock);
4694 if (unlikely(list_empty(&poll->wait.entry))) {
4695 if (ipt->error)
4696 cancel = true;
4697 ipt->error = 0;
4698 mask = 0;
4699 }
4700 if (mask || ipt->error)
4701 list_del_init(&poll->wait.entry);
4702 else if (cancel)
4703 WRITE_ONCE(poll->canceled, true);
4704 else if (!poll->done) /* actually waiting for an event */
4705 io_poll_req_insert(req);
4706 spin_unlock(&poll->head->lock);
4707 }
4708
4709 return mask;
4710}
4711
4712static bool io_arm_poll_handler(struct io_kiocb *req)
4713{
4714 const struct io_op_def *def = &io_op_defs[req->opcode];
4715 struct io_ring_ctx *ctx = req->ctx;
4716 struct async_poll *apoll;
4717 struct io_poll_table ipt;
4718 __poll_t mask, ret;
4719
4720 if (!req->file || !file_can_poll(req->file))
4721 return false;
24c74678 4722 if (req->flags & REQ_F_POLLED)
d7718a9d
JA
4723 return false;
4724 if (!def->pollin && !def->pollout)
4725 return false;
4726
4727 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
4728 if (unlikely(!apoll))
4729 return false;
807abcb0 4730 apoll->double_poll = NULL;
d7718a9d
JA
4731
4732 req->flags |= REQ_F_POLLED;
405a5d2b
XW
4733 if (req->flags & REQ_F_WORK_INITIALIZED)
4734 memcpy(&apoll->work, &req->work, sizeof(req->work));
d7718a9d 4735
4dd2824d 4736 io_get_req_task(req);
d7718a9d
JA
4737 req->apoll = apoll;
4738 INIT_HLIST_NODE(&req->hash_node);
4739
8755d97a 4740 mask = 0;
d7718a9d 4741 if (def->pollin)
8755d97a 4742 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
4743 if (def->pollout)
4744 mask |= POLLOUT | POLLWRNORM;
4745 mask |= POLLERR | POLLPRI;
4746
4747 ipt.pt._qproc = io_async_queue_proc;
4748
4749 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
4750 io_async_wake);
4751 if (ret) {
807abcb0 4752 io_poll_remove_double(req, apoll->double_poll);
d7718a9d 4753 spin_unlock_irq(&ctx->completion_lock);
405a5d2b
XW
4754 if (req->flags & REQ_F_WORK_INITIALIZED)
4755 memcpy(&req->work, &apoll->work, sizeof(req->work));
807abcb0 4756 kfree(apoll->double_poll);
d7718a9d
JA
4757 kfree(apoll);
4758 return false;
4759 }
4760 spin_unlock_irq(&ctx->completion_lock);
4761 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4762 apoll->poll.events);
4763 return true;
4764}
4765
4766static bool __io_poll_remove_one(struct io_kiocb *req,
4767 struct io_poll_iocb *poll)
221c5eb2 4768{
b41e9852 4769 bool do_complete = false;
221c5eb2
JA
4770
4771 spin_lock(&poll->head->lock);
4772 WRITE_ONCE(poll->canceled, true);
392edb45
JA
4773 if (!list_empty(&poll->wait.entry)) {
4774 list_del_init(&poll->wait.entry);
b41e9852 4775 do_complete = true;
221c5eb2
JA
4776 }
4777 spin_unlock(&poll->head->lock);
3bfa5bcb 4778 hash_del(&req->hash_node);
d7718a9d
JA
4779 return do_complete;
4780}
4781
4782static bool io_poll_remove_one(struct io_kiocb *req)
4783{
4784 bool do_complete;
4785
4786 if (req->opcode == IORING_OP_POLL_ADD) {
807abcb0 4787 io_poll_remove_double(req, req->io);
d7718a9d
JA
4788 do_complete = __io_poll_remove_one(req, &req->poll);
4789 } else {
3bfa5bcb
JA
4790 struct async_poll *apoll = req->apoll;
4791
807abcb0
JA
4792 io_poll_remove_double(req, apoll->double_poll);
4793
d7718a9d 4794 /* non-poll requests have submit ref still */
3bfa5bcb
JA
4795 do_complete = __io_poll_remove_one(req, &apoll->poll);
4796 if (do_complete) {
d7718a9d 4797 io_put_req(req);
3bfa5bcb
JA
4798 /*
4799 * restore ->work because we will call
dca9cf8b 4800 * io_req_clean_work below when dropping the
3bfa5bcb
JA
4801 * final reference.
4802 */
405a5d2b
XW
4803 if (req->flags & REQ_F_WORK_INITIALIZED)
4804 memcpy(&req->work, &apoll->work,
4805 sizeof(req->work));
807abcb0 4806 kfree(apoll->double_poll);
3bfa5bcb
JA
4807 kfree(apoll);
4808 }
b1f573bd
XW
4809 }
4810
b41e9852
JA
4811 if (do_complete) {
4812 io_cqring_fill_event(req, -ECANCELED);
4813 io_commit_cqring(req->ctx);
4814 req->flags |= REQ_F_COMP_LOCKED;
4815 io_put_req(req);
4816 }
4817
4818 return do_complete;
221c5eb2
JA
4819}
4820
4821static void io_poll_remove_all(struct io_ring_ctx *ctx)
4822{
78076bb6 4823 struct hlist_node *tmp;
221c5eb2 4824 struct io_kiocb *req;
8e2e1faf 4825 int posted = 0, i;
221c5eb2
JA
4826
4827 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
4828 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4829 struct hlist_head *list;
4830
4831 list = &ctx->cancel_hash[i];
4832 hlist_for_each_entry_safe(req, tmp, list, hash_node)
8e2e1faf 4833 posted += io_poll_remove_one(req);
221c5eb2
JA
4834 }
4835 spin_unlock_irq(&ctx->completion_lock);
b41e9852 4836
8e2e1faf
JA
4837 if (posted)
4838 io_cqring_ev_posted(ctx);
221c5eb2
JA
4839}
4840
47f46768
JA
4841static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4842{
78076bb6 4843 struct hlist_head *list;
47f46768
JA
4844 struct io_kiocb *req;
4845
78076bb6
JA
4846 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4847 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
4848 if (sqe_addr != req->user_data)
4849 continue;
4850 if (io_poll_remove_one(req))
eac406c6 4851 return 0;
b41e9852 4852 return -EALREADY;
47f46768
JA
4853 }
4854
4855 return -ENOENT;
4856}
4857
3529d8c2
JA
4858static int io_poll_remove_prep(struct io_kiocb *req,
4859 const struct io_uring_sqe *sqe)
0969e783 4860{
0969e783
JA
4861 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4862 return -EINVAL;
4863 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4864 sqe->poll_events)
4865 return -EINVAL;
4866
4867 req->poll.addr = READ_ONCE(sqe->addr);
0969e783
JA
4868 return 0;
4869}
4870
221c5eb2
JA
4871/*
4872 * Find a running poll command that matches one specified in sqe->addr,
4873 * and remove it if found.
4874 */
fc4df999 4875static int io_poll_remove(struct io_kiocb *req)
221c5eb2
JA
4876{
4877 struct io_ring_ctx *ctx = req->ctx;
0969e783 4878 u64 addr;
47f46768 4879 int ret;
221c5eb2 4880
0969e783 4881 addr = req->poll.addr;
221c5eb2 4882 spin_lock_irq(&ctx->completion_lock);
0969e783 4883 ret = io_poll_cancel(ctx, addr);
221c5eb2
JA
4884 spin_unlock_irq(&ctx->completion_lock);
4885
4e88d6e7
JA
4886 if (ret < 0)
4887 req_set_fail_links(req);
e1e16097 4888 io_req_complete(req, ret);
221c5eb2
JA
4889 return 0;
4890}
4891
221c5eb2
JA
4892static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4893 void *key)
4894{
c2f2eb7d
JA
4895 struct io_kiocb *req = wait->private;
4896 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4897
d7718a9d 4898 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
4899}
4900
221c5eb2
JA
4901static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4902 struct poll_table_struct *p)
4903{
4904 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4905
807abcb0 4906 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
eac406c6
JA
4907}
4908
3529d8c2 4909static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
4910{
4911 struct io_poll_iocb *poll = &req->poll;
5769a351 4912 u32 events;
221c5eb2
JA
4913
4914 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4915 return -EINVAL;
4916 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4917 return -EINVAL;
09bb8394
JA
4918 if (!poll->file)
4919 return -EBADF;
221c5eb2 4920
5769a351
JX
4921 events = READ_ONCE(sqe->poll32_events);
4922#ifdef __BIG_ENDIAN
4923 events = swahw32(events);
4924#endif
a31eb4a2
JX
4925 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
4926 (events & EPOLLEXCLUSIVE);
b41e9852 4927
4dd2824d 4928 io_get_req_task(req);
0969e783
JA
4929 return 0;
4930}
4931
014db007 4932static int io_poll_add(struct io_kiocb *req)
0969e783
JA
4933{
4934 struct io_poll_iocb *poll = &req->poll;
4935 struct io_ring_ctx *ctx = req->ctx;
4936 struct io_poll_table ipt;
0969e783 4937 __poll_t mask;
0969e783 4938
d5e16d8e 4939 /* ->work is in union with hash_node and others */
dca9cf8b 4940 io_req_clean_work(req);
d5e16d8e
PB
4941 req->flags &= ~REQ_F_WORK_INITIALIZED;
4942
78076bb6 4943 INIT_HLIST_NODE(&req->hash_node);
d7718a9d 4944 ipt.pt._qproc = io_poll_queue_proc;
36703247 4945
d7718a9d
JA
4946 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4947 io_poll_wake);
221c5eb2 4948
8c838788 4949 if (mask) { /* no async, we'd stolen it */
221c5eb2 4950 ipt.error = 0;
b0dd8a41 4951 io_poll_complete(req, mask, 0);
221c5eb2 4952 }
221c5eb2
JA
4953 spin_unlock_irq(&ctx->completion_lock);
4954
8c838788
JA
4955 if (mask) {
4956 io_cqring_ev_posted(ctx);
014db007 4957 io_put_req(req);
221c5eb2 4958 }
8c838788 4959 return ipt.error;
221c5eb2
JA
4960}
4961
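/*
 * Userspace sketch (assumes liburing's io_uring_prep_poll_add()
 * helper): a one-shot poll; the CQE res carries the mangled poll mask
 * filled in by io_poll_complete() above.
 *
 *   io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 */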
5262f567
JA
4962static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4963{
ad8a48ac
JA
4964 struct io_timeout_data *data = container_of(timer,
4965 struct io_timeout_data, timer);
4966 struct io_kiocb *req = data->req;
4967 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
4968 unsigned long flags;
4969
5262f567
JA
4970 atomic_inc(&ctx->cq_timeouts);
4971
4972 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 4973 /*
11365043
JA
4974 * We could be racing with timeout deletion. If the list is empty,
4975 * then timeout lookup already found it and will be handling it.
ef03681a 4976 */
135fcde8
PB
4977 if (!list_empty(&req->timeout.list))
4978 list_del_init(&req->timeout.list);
5262f567 4979
78e19bbe 4980 io_cqring_fill_event(req, -ETIME);
5262f567
JA
4981 io_commit_cqring(ctx);
4982 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4983
4984 io_cqring_ev_posted(ctx);
4e88d6e7 4985 req_set_fail_links(req);
5262f567
JA
4986 io_put_req(req);
4987 return HRTIMER_NORESTART;
4988}
4989
47f46768
JA
4990static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
4991{
4992 struct io_kiocb *req;
4993 int ret = -ENOENT;
4994
135fcde8 4995 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
47f46768 4996 if (user_data == req->user_data) {
135fcde8 4997 list_del_init(&req->timeout.list);
47f46768
JA
4998 ret = 0;
4999 break;
5000 }
5001 }
5002
5003 if (ret == -ENOENT)
5004 return ret;
5005
2d28390a 5006 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
47f46768
JA
5007 if (ret == -1)
5008 return -EALREADY;
5009
4e88d6e7 5010 req_set_fail_links(req);
47f46768
JA
5011 io_cqring_fill_event(req, -ECANCELED);
5012 io_put_req(req);
5013 return 0;
5014}
5015
3529d8c2
JA
5016static int io_timeout_remove_prep(struct io_kiocb *req,
5017 const struct io_uring_sqe *sqe)
b29472ee 5018{
b29472ee
JA
5019 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5020 return -EINVAL;
61710e43
DA
5021 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5022 return -EINVAL;
5023 if (sqe->ioprio || sqe->buf_index || sqe->len)
b29472ee
JA
5024 return -EINVAL;
5025
5026 req->timeout.addr = READ_ONCE(sqe->addr);
5027 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
5028 if (req->timeout.flags)
5029 return -EINVAL;
5030
b29472ee
JA
5031 return 0;
5032}
5033
11365043
JA
5034/*
5035 * Remove or update an existing timeout command
5036 */
fc4df999 5037static int io_timeout_remove(struct io_kiocb *req)
11365043
JA
5038{
5039 struct io_ring_ctx *ctx = req->ctx;
47f46768 5040 int ret;
11365043 5041
11365043 5042 spin_lock_irq(&ctx->completion_lock);
b29472ee 5043 ret = io_timeout_cancel(ctx, req->timeout.addr);
11365043 5044
47f46768 5045 io_cqring_fill_event(req, ret);
11365043
JA
5046 io_commit_cqring(ctx);
5047 spin_unlock_irq(&ctx->completion_lock);
5262f567 5048 io_cqring_ev_posted(ctx);
4e88d6e7
JA
5049 if (ret < 0)
5050 req_set_fail_links(req);
ec9c02ad 5051 io_put_req(req);
11365043 5052 return 0;
5262f567
JA
5053}
5054
3529d8c2 5055static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 5056 bool is_timeout_link)
5262f567 5057{
ad8a48ac 5058 struct io_timeout_data *data;
a41525ab 5059 unsigned flags;
56080b02 5060 u32 off = READ_ONCE(sqe->off);
5262f567 5061
ad8a48ac 5062 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 5063 return -EINVAL;
ad8a48ac 5064 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 5065 return -EINVAL;
56080b02 5066 if (off && is_timeout_link)
2d28390a 5067 return -EINVAL;
a41525ab
JA
5068 flags = READ_ONCE(sqe->timeout_flags);
5069 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 5070 return -EINVAL;
bdf20073 5071
bfe68a22 5072 req->timeout.off = off;
26a61679 5073
3529d8c2 5074 if (!req->io && io_alloc_async_ctx(req))
26a61679
JA
5075 return -ENOMEM;
5076
5077 data = &req->io->timeout;
ad8a48ac 5078 data->req = req;
ad8a48ac
JA
5079
5080 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
5081 return -EFAULT;
5082
11365043 5083 if (flags & IORING_TIMEOUT_ABS)
ad8a48ac 5084 data->mode = HRTIMER_MODE_ABS;
11365043 5085 else
ad8a48ac 5086 data->mode = HRTIMER_MODE_REL;
11365043 5087
ad8a48ac
JA
5088 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5089 return 0;
5090}
5091
fc4df999 5092static int io_timeout(struct io_kiocb *req)
ad8a48ac 5093{
ad8a48ac 5094 struct io_ring_ctx *ctx = req->ctx;
bfe68a22 5095 struct io_timeout_data *data = &req->io->timeout;
ad8a48ac 5096 struct list_head *entry;
bfe68a22 5097 u32 tail, off = req->timeout.off;
ad8a48ac 5098
733f5c95 5099 spin_lock_irq(&ctx->completion_lock);
93bd25bb 5100
5262f567
JA
5101 /*
5102 * sqe->off holds how many events need to occur for this
93bd25bb
JA
5103 * timeout event to be satisfied. If it isn't set, then this is
5104 * a pure timeout request, sequence isn't used.
5262f567 5105 */
8eb7e2d0 5106 if (io_is_timeout_noseq(req)) {
93bd25bb
JA
5107 entry = ctx->timeout_list.prev;
5108 goto add;
5109 }
5262f567 5110
bfe68a22
PB
5111 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5112 req->timeout.target_seq = tail + off;
5262f567
JA
5113
5114 /*
5115 * Insertion sort, ensuring the first entry in the list is always
5116 * the one we need first.
5117 */
5262f567 5118 list_for_each_prev(entry, &ctx->timeout_list) {
135fcde8
PB
5119 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5120 timeout.list);
5262f567 5121
8eb7e2d0 5122 if (io_is_timeout_noseq(nxt))
93bd25bb 5123 continue;
bfe68a22
PB
5124 /* nxt.seq is behind @tail, otherwise would've been completed */
5125 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
5126 break;
5127 }
93bd25bb 5128add:
135fcde8 5129 list_add(&req->timeout.list, entry);
ad8a48ac
JA
5130 data->timer.function = io_timeout_fn;
5131 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 5132 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
5133 return 0;
5134}
5262f567 5135
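/*
 * Userspace sketch (assumes liburing's io_uring_prep_timeout() helper):
 * count == 0 makes this a pure timeout (the "noseq" case in io_timeout()
 * above), completing with -ETIME after one second.
 *
 *   struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *   io_uring_prep_timeout(sqe, &ts, 0, 0);
 */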
62755e35
JA
5136static bool io_cancel_cb(struct io_wq_work *work, void *data)
5137{
5138 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5139
5140 return req->user_data == (unsigned long) data;
5141}
5142
e977d6d3 5143static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 5144{
62755e35 5145 enum io_wq_cancel cancel_ret;
62755e35
JA
5146 int ret = 0;
5147
4f26bda1 5148 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
62755e35
JA
5149 switch (cancel_ret) {
5150 case IO_WQ_CANCEL_OK:
5151 ret = 0;
5152 break;
5153 case IO_WQ_CANCEL_RUNNING:
5154 ret = -EALREADY;
5155 break;
5156 case IO_WQ_CANCEL_NOTFOUND:
5157 ret = -ENOENT;
5158 break;
5159 }
5160
e977d6d3
JA
5161 return ret;
5162}
5163
47f46768
JA
5164static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5165 struct io_kiocb *req, __u64 sqe_addr,
014db007 5166 int success_ret)
47f46768
JA
5167{
5168 unsigned long flags;
5169 int ret;
5170
5171 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5172 if (ret != -ENOENT) {
5173 spin_lock_irqsave(&ctx->completion_lock, flags);
5174 goto done;
5175 }
5176
5177 spin_lock_irqsave(&ctx->completion_lock, flags);
5178 ret = io_timeout_cancel(ctx, sqe_addr);
5179 if (ret != -ENOENT)
5180 goto done;
5181 ret = io_poll_cancel(ctx, sqe_addr);
5182done:
b0dd8a41
JA
5183 if (!ret)
5184 ret = success_ret;
47f46768
JA
5185 io_cqring_fill_event(req, ret);
5186 io_commit_cqring(ctx);
5187 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5188 io_cqring_ev_posted(ctx);
5189
4e88d6e7
JA
5190 if (ret < 0)
5191 req_set_fail_links(req);
014db007 5192 io_put_req(req);
47f46768
JA
5193}
5194
3529d8c2
JA
5195static int io_async_cancel_prep(struct io_kiocb *req,
5196 const struct io_uring_sqe *sqe)
e977d6d3 5197{
fbf23849 5198 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3 5199 return -EINVAL;
61710e43
DA
5200 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5201 return -EINVAL;
5202 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
e977d6d3
JA
5203 return -EINVAL;
5204
fbf23849
JA
5205 req->cancel.addr = READ_ONCE(sqe->addr);
5206 return 0;
5207}
5208
014db007 5209static int io_async_cancel(struct io_kiocb *req)
fbf23849
JA
5210{
5211 struct io_ring_ctx *ctx = req->ctx;
fbf23849 5212
014db007 5213 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
5214 return 0;
5215}
5216
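/*
 * Userspace sketch (assumes liburing's io_uring_prep_cancel() helper;
 * some_tag is a hypothetical token): the target request is identified
 * by its user_data, matching the sqe->addr lookup in
 * io_async_cancel_prep() above.
 *
 *   io_uring_sqe_set_data(sqe, some_tag);          // original request
 *   ...
 *   io_uring_prep_cancel(cancel_sqe, some_tag, 0);
 */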
05f3fb3c
JA
5217static int io_files_update_prep(struct io_kiocb *req,
5218 const struct io_uring_sqe *sqe)
5219{
61710e43
DA
5220 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5221 return -EINVAL;
5222 if (sqe->ioprio || sqe->rw_flags)
05f3fb3c
JA
5223 return -EINVAL;
5224
5225 req->files_update.offset = READ_ONCE(sqe->off);
5226 req->files_update.nr_args = READ_ONCE(sqe->len);
5227 if (!req->files_update.nr_args)
5228 return -EINVAL;
5229 req->files_update.arg = READ_ONCE(sqe->addr);
5230 return 0;
5231}
5232
229a7b63
JA
5233static int io_files_update(struct io_kiocb *req, bool force_nonblock,
5234 struct io_comp_state *cs)
fbf23849
JA
5235{
5236 struct io_ring_ctx *ctx = req->ctx;
05f3fb3c
JA
5237 struct io_uring_files_update up;
5238 int ret;
fbf23849 5239
f86cd20c 5240 if (force_nonblock)
05f3fb3c 5241 return -EAGAIN;
05f3fb3c
JA
5242
5243 up.offset = req->files_update.offset;
5244 up.fds = req->files_update.arg;
5245
5246 mutex_lock(&ctx->uring_lock);
5247 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
5248 mutex_unlock(&ctx->uring_lock);
5249
5250 if (ret < 0)
5251 req_set_fail_links(req);
229a7b63 5252 __io_req_complete(req, ret, 0, cs);
5262f567
JA
5253 return 0;
5254}
5255
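/*
 * Userspace sketch (assumes liburing's io_uring_prep_files_update()
 * helper): updates slots in a previously registered fixed-file table;
 * an fd of -1 clears a slot. Per io_files_update() above, this always
 * runs from a blocking context.
 *
 *   int fds[2] = { newfd, -1 };
 *
 *   io_uring_prep_files_update(sqe, fds, 2, 10);   // slots 10 and 11
 */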
3529d8c2 5256static int io_req_defer_prep(struct io_kiocb *req,
debb85f4 5257 const struct io_uring_sqe *sqe)
f67676d1 5258{
e781573e 5259 ssize_t ret = 0;
f67676d1 5260
f1d96a8f
PB
5261 if (!sqe)
5262 return 0;
5263
327d6d96
PB
5264 if (io_alloc_async_ctx(req))
5265 return -EAGAIN;
5266
710c2bfb 5267 if (io_op_defs[req->opcode].file_table) {
c40f6379 5268 io_req_init_async(req);
710c2bfb
PB
5269 ret = io_grab_files(req);
5270 if (unlikely(ret))
5271 return ret;
5272 }
7cdaf587 5273
d625c6ee 5274 switch (req->opcode) {
e781573e
JA
5275 case IORING_OP_NOP:
5276 break;
f67676d1
JA
5277 case IORING_OP_READV:
5278 case IORING_OP_READ_FIXED:
3a6820f2 5279 case IORING_OP_READ:
3529d8c2 5280 ret = io_read_prep(req, sqe, true);
f67676d1
JA
5281 break;
5282 case IORING_OP_WRITEV:
5283 case IORING_OP_WRITE_FIXED:
3a6820f2 5284 case IORING_OP_WRITE:
3529d8c2 5285 ret = io_write_prep(req, sqe, true);
f67676d1 5286 break;
0969e783 5287 case IORING_OP_POLL_ADD:
3529d8c2 5288 ret = io_poll_add_prep(req, sqe);
0969e783
JA
5289 break;
5290 case IORING_OP_POLL_REMOVE:
3529d8c2 5291 ret = io_poll_remove_prep(req, sqe);
0969e783 5292 break;
8ed8d3c3 5293 case IORING_OP_FSYNC:
3529d8c2 5294 ret = io_prep_fsync(req, sqe);
8ed8d3c3
JA
5295 break;
5296 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2 5297 ret = io_prep_sfr(req, sqe);
8ed8d3c3 5298 break;
03b1230c 5299 case IORING_OP_SENDMSG:
fddaface 5300 case IORING_OP_SEND:
3529d8c2 5301 ret = io_sendmsg_prep(req, sqe);
03b1230c
JA
5302 break;
5303 case IORING_OP_RECVMSG:
fddaface 5304 case IORING_OP_RECV:
3529d8c2 5305 ret = io_recvmsg_prep(req, sqe);
03b1230c 5306 break;
f499a021 5307 case IORING_OP_CONNECT:
3529d8c2 5308 ret = io_connect_prep(req, sqe);
f499a021 5309 break;
2d28390a 5310 case IORING_OP_TIMEOUT:
3529d8c2 5311 ret = io_timeout_prep(req, sqe, false);
b7bb4f7d 5312 break;
b29472ee 5313 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2 5314 ret = io_timeout_remove_prep(req, sqe);
b29472ee 5315 break;
fbf23849 5316 case IORING_OP_ASYNC_CANCEL:
3529d8c2 5317 ret = io_async_cancel_prep(req, sqe);
fbf23849 5318 break;
2d28390a 5319 case IORING_OP_LINK_TIMEOUT:
3529d8c2 5320 ret = io_timeout_prep(req, sqe, true);
b7bb4f7d 5321 break;
8ed8d3c3 5322 case IORING_OP_ACCEPT:
3529d8c2 5323 ret = io_accept_prep(req, sqe);
8ed8d3c3 5324 break;
d63d1b5e
JA
5325 case IORING_OP_FALLOCATE:
5326 ret = io_fallocate_prep(req, sqe);
5327 break;
15b71abe
JA
5328 case IORING_OP_OPENAT:
5329 ret = io_openat_prep(req, sqe);
5330 break;
b5dba59e
JA
5331 case IORING_OP_CLOSE:
5332 ret = io_close_prep(req, sqe);
5333 break;
05f3fb3c
JA
5334 case IORING_OP_FILES_UPDATE:
5335 ret = io_files_update_prep(req, sqe);
5336 break;
eddc7ef5
JA
5337 case IORING_OP_STATX:
5338 ret = io_statx_prep(req, sqe);
5339 break;
4840e418
JA
5340 case IORING_OP_FADVISE:
5341 ret = io_fadvise_prep(req, sqe);
5342 break;
c1ca757b
JA
5343 case IORING_OP_MADVISE:
5344 ret = io_madvise_prep(req, sqe);
5345 break;
cebdb986
JA
5346 case IORING_OP_OPENAT2:
5347 ret = io_openat2_prep(req, sqe);
5348 break;
3e4827b0
JA
5349 case IORING_OP_EPOLL_CTL:
5350 ret = io_epoll_ctl_prep(req, sqe);
5351 break;
7d67af2c
PB
5352 case IORING_OP_SPLICE:
5353 ret = io_splice_prep(req, sqe);
5354 break;
ddf0322d
JA
5355 case IORING_OP_PROVIDE_BUFFERS:
5356 ret = io_provide_buffers_prep(req, sqe);
5357 break;
067524e9
JA
5358 case IORING_OP_REMOVE_BUFFERS:
5359 ret = io_remove_buffers_prep(req, sqe);
5360 break;
f2a8d5c7
PB
5361 case IORING_OP_TEE:
5362 ret = io_tee_prep(req, sqe);
5363 break;
f67676d1 5364 default:
e781573e
JA
5365 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5366 req->opcode);
5367 ret = -EINVAL;
b7bb4f7d 5368 break;
f67676d1
JA
5369 }
5370
b7bb4f7d 5371 return ret;
f67676d1
JA
5372}
5373
9cf7c104
PB
5374static u32 io_get_sequence(struct io_kiocb *req)
5375{
5376 struct io_kiocb *pos;
5377 struct io_ring_ctx *ctx = req->ctx;
5378 u32 total_submitted, nr_reqs = 1;
5379
5380 if (req->flags & REQ_F_LINK_HEAD)
5381 list_for_each_entry(pos, &req->link_list, link_list)
5382 nr_reqs++;
5383
5384 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5385 return total_submitted - nr_reqs;
5386}
5387
3529d8c2 5388static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
de0617e4 5389{
a197f664 5390 struct io_ring_ctx *ctx = req->ctx;
27dc8338 5391 struct io_defer_entry *de;
f67676d1 5392 int ret;
9cf7c104 5393 u32 seq;
de0617e4 5394
9d858b21 5395 /* Still need defer if there is a pending req in the defer list. */
9cf7c104
PB
5396 if (likely(list_empty_careful(&ctx->defer_list) &&
5397 !(req->flags & REQ_F_IO_DRAIN)))
5398 return 0;
5399
5400 seq = io_get_sequence(req);
5401 /* Still a chance to pass the sequence check */
5402 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
de0617e4
JA
5403 return 0;
5404
650b5481 5405 if (!req->io) {
debb85f4 5406 ret = io_req_defer_prep(req, sqe);
327d6d96 5407 if (ret)
650b5481
PB
5408 return ret;
5409 }
cbdcb435 5410 io_prep_async_link(req);
27dc8338
PB
5411 de = kmalloc(sizeof(*de), GFP_KERNEL);
5412 if (!de)
5413 return -ENOMEM;
2d28390a 5414
de0617e4 5415 spin_lock_irq(&ctx->completion_lock);
9cf7c104 5416 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
de0617e4 5417 spin_unlock_irq(&ctx->completion_lock);
27dc8338 5418 kfree(de);
de0617e4
JA
5419 return 0;
5420 }
5421
915967f6 5422 trace_io_uring_defer(ctx, req, req->user_data);
27dc8338 5423 de->req = req;
9cf7c104 5424 de->seq = seq;
27dc8338 5425 list_add_tail(&de->list, &ctx->defer_list);
de0617e4
JA
5426 spin_unlock_irq(&ctx->completion_lock);
5427 return -EIOCBQUEUED;
5428}
5429
3ca405eb 5430static void __io_clean_op(struct io_kiocb *req)
99bc4c38
PB
5431{
5432 struct io_async_ctx *io = req->io;
5433
0e1b6fe3
PB
5434 if (req->flags & REQ_F_BUFFER_SELECTED) {
5435 switch (req->opcode) {
5436 case IORING_OP_READV:
5437 case IORING_OP_READ_FIXED:
5438 case IORING_OP_READ:
bcda7baa 5439 kfree((void *)(unsigned long)req->rw.addr);
0e1b6fe3
PB
5440 break;
5441 case IORING_OP_RECVMSG:
5442 case IORING_OP_RECV:
bcda7baa 5443 kfree(req->sr_msg.kbuf);
0e1b6fe3
PB
5444 break;
5445 }
5446 req->flags &= ~REQ_F_BUFFER_SELECTED;
99bc4c38
PB
5447 }
5448
0e1b6fe3
PB
5449 if (req->flags & REQ_F_NEED_CLEANUP) {
5450 switch (req->opcode) {
5451 case IORING_OP_READV:
5452 case IORING_OP_READ_FIXED:
5453 case IORING_OP_READ:
5454 case IORING_OP_WRITEV:
5455 case IORING_OP_WRITE_FIXED:
5456 case IORING_OP_WRITE:
5457 if (io->rw.iov != io->rw.fast_iov)
5458 kfree(io->rw.iov);
5459 break;
5460 case IORING_OP_RECVMSG:
5461 case IORING_OP_SENDMSG:
5462 if (io->msg.iov != io->msg.fast_iov)
5463 kfree(io->msg.iov);
5464 break;
5465 case IORING_OP_SPLICE:
5466 case IORING_OP_TEE:
5467 io_put_file(req, req->splice.file_in,
5468 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5469 break;
5470 }
5471 req->flags &= ~REQ_F_NEED_CLEANUP;
5472 }
99bc4c38
PB
5473}
5474
3529d8c2 5475static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
f13fad7b 5476 bool force_nonblock, struct io_comp_state *cs)
2b188cc1 5477{
a197f664 5478 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 5479 int ret;
2b188cc1 5480
d625c6ee 5481 switch (req->opcode) {
2b188cc1 5482 case IORING_OP_NOP:
229a7b63 5483 ret = io_nop(req, cs);
2b188cc1
JA
5484 break;
5485 case IORING_OP_READV:
edafccee 5486 case IORING_OP_READ_FIXED:
3a6820f2 5487 case IORING_OP_READ:
3529d8c2
JA
5488 if (sqe) {
5489 ret = io_read_prep(req, sqe, force_nonblock);
5490 if (ret < 0)
5491 break;
5492 }
a1d7c393 5493 ret = io_read(req, force_nonblock, cs);
edafccee 5494 break;
3529d8c2 5495 case IORING_OP_WRITEV:
edafccee 5496 case IORING_OP_WRITE_FIXED:
3a6820f2 5497 case IORING_OP_WRITE:
3529d8c2
JA
5498 if (sqe) {
5499 ret = io_write_prep(req, sqe, force_nonblock);
5500 if (ret < 0)
5501 break;
5502 }
a1d7c393 5503 ret = io_write(req, force_nonblock, cs);
2b188cc1 5504 break;
c992fe29 5505 case IORING_OP_FSYNC:
3529d8c2
JA
5506 if (sqe) {
5507 ret = io_prep_fsync(req, sqe);
5508 if (ret < 0)
5509 break;
5510 }
014db007 5511 ret = io_fsync(req, force_nonblock);
c992fe29 5512 break;
221c5eb2 5513 case IORING_OP_POLL_ADD:
3529d8c2
JA
5514 if (sqe) {
5515 ret = io_poll_add_prep(req, sqe);
5516 if (ret)
5517 break;
5518 }
014db007 5519 ret = io_poll_add(req);
221c5eb2
JA
5520 break;
5521 case IORING_OP_POLL_REMOVE:
3529d8c2
JA
5522 if (sqe) {
5523 ret = io_poll_remove_prep(req, sqe);
5524 if (ret < 0)
5525 break;
5526 }
fc4df999 5527 ret = io_poll_remove(req);
221c5eb2 5528 break;
5d17b4a4 5529 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2
JA
5530 if (sqe) {
5531 ret = io_prep_sfr(req, sqe);
5532 if (ret < 0)
5533 break;
5534 }
014db007 5535 ret = io_sync_file_range(req, force_nonblock);
5d17b4a4 5536 break;
0fa03c62 5537 case IORING_OP_SENDMSG:
fddaface 5538 case IORING_OP_SEND:
3529d8c2
JA
5539 if (sqe) {
5540 ret = io_sendmsg_prep(req, sqe);
5541 if (ret < 0)
5542 break;
5543 }
fddaface 5544 if (req->opcode == IORING_OP_SENDMSG)
229a7b63 5545 ret = io_sendmsg(req, force_nonblock, cs);
fddaface 5546 else
229a7b63 5547 ret = io_send(req, force_nonblock, cs);
0fa03c62 5548 break;
aa1fa28f 5549 case IORING_OP_RECVMSG:
fddaface 5550 case IORING_OP_RECV:
3529d8c2
JA
5551 if (sqe) {
5552 ret = io_recvmsg_prep(req, sqe);
5553 if (ret)
5554 break;
5555 }
fddaface 5556 if (req->opcode == IORING_OP_RECVMSG)
229a7b63 5557 ret = io_recvmsg(req, force_nonblock, cs);
fddaface 5558 else
229a7b63 5559 ret = io_recv(req, force_nonblock, cs);
aa1fa28f 5560 break;
5262f567 5561 case IORING_OP_TIMEOUT:
3529d8c2
JA
5562 if (sqe) {
5563 ret = io_timeout_prep(req, sqe, false);
5564 if (ret)
5565 break;
5566 }
fc4df999 5567 ret = io_timeout(req);
5262f567 5568 break;
11365043 5569 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2
JA
5570 if (sqe) {
5571 ret = io_timeout_remove_prep(req, sqe);
5572 if (ret)
5573 break;
5574 }
fc4df999 5575 ret = io_timeout_remove(req);
11365043 5576 break;
17f2fe35 5577 case IORING_OP_ACCEPT:
3529d8c2
JA
5578 if (sqe) {
5579 ret = io_accept_prep(req, sqe);
5580 if (ret)
5581 break;
5582 }
229a7b63 5583 ret = io_accept(req, force_nonblock, cs);
17f2fe35 5584 break;
f8e85cf2 5585 case IORING_OP_CONNECT:
3529d8c2
JA
5586 if (sqe) {
5587 ret = io_connect_prep(req, sqe);
5588 if (ret)
5589 break;
5590 }
229a7b63 5591 ret = io_connect(req, force_nonblock, cs);
f8e85cf2 5592 break;
62755e35 5593 case IORING_OP_ASYNC_CANCEL:
3529d8c2
JA
5594 if (sqe) {
5595 ret = io_async_cancel_prep(req, sqe);
5596 if (ret)
5597 break;
5598 }
014db007 5599 ret = io_async_cancel(req);
62755e35 5600 break;
d63d1b5e
JA
5601 case IORING_OP_FALLOCATE:
5602 if (sqe) {
5603 ret = io_fallocate_prep(req, sqe);
5604 if (ret)
5605 break;
5606 }
014db007 5607 ret = io_fallocate(req, force_nonblock);
d63d1b5e 5608 break;
15b71abe
JA
5609 case IORING_OP_OPENAT:
5610 if (sqe) {
5611 ret = io_openat_prep(req, sqe);
5612 if (ret)
5613 break;
5614 }
014db007 5615 ret = io_openat(req, force_nonblock);
15b71abe 5616 break;
b5dba59e
JA
5617 case IORING_OP_CLOSE:
5618 if (sqe) {
5619 ret = io_close_prep(req, sqe);
5620 if (ret)
5621 break;
5622 }
229a7b63 5623 ret = io_close(req, force_nonblock, cs);
b5dba59e 5624 break;
05f3fb3c
JA
5625 case IORING_OP_FILES_UPDATE:
5626 if (sqe) {
5627 ret = io_files_update_prep(req, sqe);
5628 if (ret)
5629 break;
5630 }
229a7b63 5631 ret = io_files_update(req, force_nonblock, cs);
05f3fb3c 5632 break;
eddc7ef5
JA
5633 case IORING_OP_STATX:
5634 if (sqe) {
5635 ret = io_statx_prep(req, sqe);
5636 if (ret)
5637 break;
5638 }
014db007 5639 ret = io_statx(req, force_nonblock);
eddc7ef5 5640 break;
4840e418
JA
5641 case IORING_OP_FADVISE:
5642 if (sqe) {
5643 ret = io_fadvise_prep(req, sqe);
5644 if (ret)
5645 break;
5646 }
014db007 5647 ret = io_fadvise(req, force_nonblock);
4840e418 5648 break;
c1ca757b
JA
5649 case IORING_OP_MADVISE:
5650 if (sqe) {
5651 ret = io_madvise_prep(req, sqe);
5652 if (ret)
5653 break;
5654 }
014db007 5655 ret = io_madvise(req, force_nonblock);
c1ca757b 5656 break;
cebdb986
JA
5657 case IORING_OP_OPENAT2:
5658 if (sqe) {
5659 ret = io_openat2_prep(req, sqe);
5660 if (ret)
5661 break;
5662 }
014db007 5663 ret = io_openat2(req, force_nonblock);
cebdb986 5664 break;
3e4827b0
JA
5665 case IORING_OP_EPOLL_CTL:
5666 if (sqe) {
5667 ret = io_epoll_ctl_prep(req, sqe);
5668 if (ret)
5669 break;
5670 }
229a7b63 5671 ret = io_epoll_ctl(req, force_nonblock, cs);
3e4827b0 5672 break;
7d67af2c
PB
5673 case IORING_OP_SPLICE:
5674 if (sqe) {
5675 ret = io_splice_prep(req, sqe);
5676 if (ret < 0)
5677 break;
5678 }
014db007 5679 ret = io_splice(req, force_nonblock);
7d67af2c 5680 break;
ddf0322d
JA
5681 case IORING_OP_PROVIDE_BUFFERS:
5682 if (sqe) {
5683 ret = io_provide_buffers_prep(req, sqe);
5684 if (ret)
5685 break;
5686 }
229a7b63 5687 ret = io_provide_buffers(req, force_nonblock, cs);
ddf0322d 5688 break;
067524e9
JA
5689 case IORING_OP_REMOVE_BUFFERS:
5690 if (sqe) {
5691 ret = io_remove_buffers_prep(req, sqe);
5692 if (ret)
5693 break;
5694 }
229a7b63 5695 ret = io_remove_buffers(req, force_nonblock, cs);
3e4827b0 5696 break;
f2a8d5c7
PB
5697 case IORING_OP_TEE:
5698 if (sqe) {
5699 ret = io_tee_prep(req, sqe);
5700 if (ret < 0)
5701 break;
5702 }
5703 ret = io_tee(req, force_nonblock);
5704 break;
2b188cc1
JA
5705 default:
5706 ret = -EINVAL;
5707 break;
5708 }
5709
def596e9
JA
5710 if (ret)
5711 return ret;
5712
b532576e
JA
5713 /* If the op doesn't have a file, we're not polling for it */
5714 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
11ba820b
JA
5715 const bool in_async = io_wq_current_is_worker();
5716
11ba820b
JA
5717 /* workqueue context doesn't hold uring_lock, grab it now */
5718 if (in_async)
5719 mutex_lock(&ctx->uring_lock);
5720
def596e9 5721 io_iopoll_req_issued(req);
11ba820b
JA
5722
5723 if (in_async)
5724 mutex_unlock(&ctx->uring_lock);
def596e9
JA
5725 }
5726
5727 return 0;
2b188cc1
JA
5728}
5729
f4db7182 5730static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
2b188cc1
JA
5731{
5732 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6df1db6b 5733 struct io_kiocb *timeout;
561fb04a 5734 int ret = 0;
2b188cc1 5735
6df1db6b
PB
5736 timeout = io_prep_linked_timeout(req);
5737 if (timeout)
5738 io_queue_linked_timeout(timeout);
d4c81f38 5739
0c9d5ccd
JA
5740 /* if NO_CANCEL is set, we must still run the work */
5741 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
5742 IO_WQ_WORK_CANCEL) {
561fb04a 5743 ret = -ECANCELED;
0c9d5ccd 5744 }
31b51510 5745
561fb04a 5746 if (!ret) {
561fb04a 5747 do {
f13fad7b 5748 ret = io_issue_sqe(req, NULL, false, NULL);
561fb04a
JA
5749 /*
5750 * We can get EAGAIN for polled IO even though we're
5751 * forcing a sync submission from here, since we can't
5752 * wait for request slots on the block side.
5753 */
5754 if (ret != -EAGAIN)
5755 break;
5756 cond_resched();
5757 } while (1);
5758 }
31b51510 5759
561fb04a 5760 if (ret) {
4e88d6e7 5761 req_set_fail_links(req);
e1e16097 5762 io_req_complete(req, ret);
edafccee 5763 }
2b188cc1 5764
f4db7182 5765 return io_steal_work(req);
2b188cc1
JA
5766}
5767
65e19f54
JA
5768static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5769 int index)
5770{
5771 struct fixed_file_table *table;
5772
05f3fb3c 5773 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
84695089 5774 return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
5775}
5776
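/*
 * Illustrative worked example, not part of the original source, assuming
 * IORING_FILE_TABLE_SHIFT is 9 as defined earlier in this file: a fixed
 * file index of 1000 resolves to table[1000 >> 9] == table[1], slot
 * 1000 & 511 == 488. The two-level table keeps each allocation at one
 * page (512 pointers) even for large registered file sets.
 */
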
8da11c19
PB
5777static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5778 int fd, struct file **out_file, bool fixed)
09bb8394 5779{
a197f664 5780 struct io_ring_ctx *ctx = req->ctx;
8da11c19 5781 struct file *file;
09bb8394 5782
8da11c19 5783 if (fixed) {
05f3fb3c 5784 if (unlikely(!ctx->file_data ||
09bb8394
JA
5785 (unsigned) fd >= ctx->nr_user_files))
5786 return -EBADF;
b7620121 5787 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19 5788 file = io_file_from_index(ctx, fd);
fd2206e4
JA
5789 if (file) {
5790 req->fixed_file_refs = ctx->file_data->cur_refs;
5791 percpu_ref_get(req->fixed_file_refs);
5792 }
09bb8394 5793 } else {
c826bd7a 5794 trace_io_uring_file_get(ctx, fd);
8da11c19 5795 file = __io_file_get(state, fd);
09bb8394
JA
5796 }
5797
fd2206e4
JA
5798 if (file || io_op_defs[req->opcode].needs_file_no_error) {
5799 *out_file = file;
5800 return 0;
5801 }
5802 return -EBADF;
09bb8394
JA
5803}
5804
8da11c19 5805static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
63ff8223 5806 int fd)
8da11c19 5807{
8da11c19
PB
5808 bool fixed;
5809
63ff8223 5810 fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
0cdaf760 5811 if (unlikely(!fixed && io_async_submit(req->ctx)))
8da11c19
PB
5812 return -EBADF;
5813
5814 return io_file_get(state, req, fd, &req->file, fixed);
5815}
5816
a197f664 5817static int io_grab_files(struct io_kiocb *req)
fcb323cc
JA
5818{
5819 int ret = -EBADF;
a197f664 5820 struct io_ring_ctx *ctx = req->ctx;
fcb323cc 5821
5b0bbee4 5822 if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
f86cd20c 5823 return 0;
b14cca0c 5824 if (!ctx->ring_file)
b5dba59e
JA
5825 return -EBADF;
5826
fcb323cc
JA
5827 rcu_read_lock();
5828 spin_lock_irq(&ctx->inflight_lock);
5829 /*
5830 * We use the f_ops->flush() handler to ensure that we can flush
5831 * out work accessing these files if the fd is closed. Check if
5832 * the fd has changed since we started down this path, and disallow
5833 * this operation if it has.
5834 */
b14cca0c 5835 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
fcb323cc
JA
5836 list_add(&req->inflight_entry, &ctx->inflight_list);
5837 req->flags |= REQ_F_INFLIGHT;
5838 req->work.files = current->files;
5839 ret = 0;
5840 }
5841 spin_unlock_irq(&ctx->inflight_lock);
5842 rcu_read_unlock();
5843
5844 return ret;
5845}
5846
2665abfd 5847static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 5848{
ad8a48ac
JA
5849 struct io_timeout_data *data = container_of(timer,
5850 struct io_timeout_data, timer);
5851 struct io_kiocb *req = data->req;
2665abfd
JA
5852 struct io_ring_ctx *ctx = req->ctx;
5853 struct io_kiocb *prev = NULL;
5854 unsigned long flags;
2665abfd
JA
5855
5856 spin_lock_irqsave(&ctx->completion_lock, flags);
5857
5858 /*
5859 * We don't expect the list to be empty; that will only happen if we
5860 * race with the completion of the linked work.
5861 */
4493233e
PB
5862 if (!list_empty(&req->link_list)) {
5863 prev = list_entry(req->link_list.prev, struct io_kiocb,
5864 link_list);
5d960724 5865 if (refcount_inc_not_zero(&prev->refs)) {
4493233e 5866 list_del_init(&req->link_list);
5d960724
JA
5867 prev->flags &= ~REQ_F_LINK_TIMEOUT;
5868 } else
76a46e06 5869 prev = NULL;
2665abfd
JA
5870 }
5871
5872 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5873
5874 if (prev) {
4e88d6e7 5875 req_set_fail_links(prev);
014db007 5876 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
76a46e06 5877 io_put_req(prev);
47f46768 5878 } else {
e1e16097 5879 io_req_complete(req, -ETIME);
2665abfd 5880 }
2665abfd
JA
5881 return HRTIMER_NORESTART;
5882}
5883
ad8a48ac 5884static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 5885{
76a46e06 5886 struct io_ring_ctx *ctx = req->ctx;
2665abfd 5887
76a46e06
JA
5888 /*
5889 * If the list is now empty, then our linked request finished before
5890 * we got a chance to set up the timer
5891 */
5892 spin_lock_irq(&ctx->completion_lock);
4493233e 5893 if (!list_empty(&req->link_list)) {
2d28390a 5894 struct io_timeout_data *data = &req->io->timeout;
94ae5e77 5895
ad8a48ac
JA
5896 data->timer.function = io_link_timeout_fn;
5897 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5898 data->mode);
2665abfd 5899 }
76a46e06 5900 spin_unlock_irq(&ctx->completion_lock);
2665abfd 5901
2665abfd 5902 /* drop submission reference */
76a46e06
JA
5903 io_put_req(req);
5904}
2665abfd 5905
ad8a48ac 5906static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd
JA
5907{
5908 struct io_kiocb *nxt;
5909
dea3b49c 5910 if (!(req->flags & REQ_F_LINK_HEAD))
2665abfd 5911 return NULL;
6df1db6b 5912 if (req->flags & REQ_F_LINK_TIMEOUT)
d7718a9d 5913 return NULL;
2665abfd 5914
4493233e
PB
5915 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5916 link_list);
d625c6ee 5917 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 5918 return NULL;
2665abfd 5919
76a46e06 5920 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 5921 return nxt;
2665abfd
JA
5922}
5923
f13fad7b
JA
5924static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5925 struct io_comp_state *cs)
2b188cc1 5926{
4a0a7a18 5927 struct io_kiocb *linked_timeout;
4bc4494e 5928 struct io_kiocb *nxt;
193155c8 5929 const struct cred *old_creds = NULL;
e0c5c576 5930 int ret;
2b188cc1 5931
4a0a7a18
JA
5932again:
5933 linked_timeout = io_prep_linked_timeout(req);
5934
7cdaf587
XW
5935 if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
5936 req->work.creds != current_cred()) {
193155c8
JA
5937 if (old_creds)
5938 revert_creds(old_creds);
5939 if (old_creds == req->work.creds)
5940 old_creds = NULL; /* restored original creds */
5941 else
5942 old_creds = override_creds(req->work.creds);
5943 }
5944
f13fad7b 5945 ret = io_issue_sqe(req, sqe, true, cs);
491381ce
JA
5946
5947 /*
5948 * We async punt it if the file wasn't marked NOWAIT, or if the file
5949 * doesn't support non-blocking read/write attempts
5950 */
24c74678 5951 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
d7718a9d
JA
5952 if (io_arm_poll_handler(req)) {
5953 if (linked_timeout)
5954 io_queue_linked_timeout(linked_timeout);
4bc4494e 5955 goto exit;
d7718a9d 5956 }
86a761f8 5957punt:
7cdaf587
XW
5958 io_req_init_async(req);
5959
f86cd20c 5960 if (io_op_defs[req->opcode].file_table) {
bbad27b2
PB
5961 ret = io_grab_files(req);
5962 if (ret)
5963 goto err;
2b188cc1 5964 }
bbad27b2
PB
5965
5966 /*
5967 * Queued up for async execution; the worker will release
5968 * the submit reference when the iocb is actually submitted.
5969 */
5970 io_queue_async_work(req);
4bc4494e 5971 goto exit;
2b188cc1 5972 }
e65ef56d 5973
652532ad 5974 if (unlikely(ret)) {
fcb323cc 5975err:
652532ad
PB
5976 /* un-prep timeout, so it'll be killed like any other linked req */
5977 req->flags &= ~REQ_F_LINK_TIMEOUT;
4e88d6e7 5978 req_set_fail_links(req);
652532ad 5979 io_put_req(req);
e1e16097 5980 io_req_complete(req, ret);
652532ad 5981 goto exit;
9e645e11 5982 }
652532ad
PB
5983
5984 /* drop submission reference */
5985 nxt = io_put_req_find_next(req);
5986 if (linked_timeout)
5987 io_queue_linked_timeout(linked_timeout);
5988
4a0a7a18
JA
5989 if (nxt) {
5990 req = nxt;
86a761f8
PB
5991
5992 if (req->flags & REQ_F_FORCE_ASYNC)
5993 goto punt;
4a0a7a18
JA
5994 goto again;
5995 }
4bc4494e 5996exit:
193155c8
JA
5997 if (old_creds)
5998 revert_creds(old_creds);
2b188cc1
JA
5999}
6000
f13fad7b
JA
6001static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6002 struct io_comp_state *cs)
4fe2c963
JL
6003{
6004 int ret;
6005
3529d8c2 6006 ret = io_req_defer(req, sqe);
4fe2c963
JL
6007 if (ret) {
6008 if (ret != -EIOCBQUEUED) {
1118591a 6009fail_req:
4e88d6e7 6010 req_set_fail_links(req);
e1e16097
JA
6011 io_put_req(req);
6012 io_req_complete(req, ret);
4fe2c963 6013 }
2550878f 6014 } else if (req->flags & REQ_F_FORCE_ASYNC) {
bd2ab18a 6015 if (!req->io) {
debb85f4 6016 ret = io_req_defer_prep(req, sqe);
327d6d96 6017 if (unlikely(ret))
bd2ab18a
PB
6018 goto fail_req;
6019 }
6020
ce35a47a
JA
6021 /*
6022 * Never try inline submit if IOSQE_ASYNC is set, go straight
6023 * to async execution.
6024 */
3e863ea3 6025 io_req_init_async(req);
ce35a47a
JA
6026 req->work.flags |= IO_WQ_WORK_CONCURRENT;
6027 io_queue_async_work(req);
6028 } else {
f13fad7b 6029 __io_queue_sqe(req, sqe, cs);
ce35a47a 6030 }
4fe2c963
JL
6031}
6032
f13fad7b
JA
6033static inline void io_queue_link_head(struct io_kiocb *req,
6034 struct io_comp_state *cs)
4fe2c963 6035{
94ae5e77 6036 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
e1e16097
JA
6037 io_put_req(req);
6038 io_req_complete(req, -ECANCELED);
1b4a51b6 6039 } else
f13fad7b 6040 io_queue_sqe(req, NULL, cs);
4fe2c963
JL
6041}
6042
1d4240cc 6043static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
f13fad7b 6044 struct io_kiocb **link, struct io_comp_state *cs)
9e645e11 6045{
a197f664 6046 struct io_ring_ctx *ctx = req->ctx;
ef4ff581 6047 int ret;
9e645e11 6048
9e645e11
JA
6049 /*
6050 * If we already have a head request, queue this one for async
6051 * submittal once the head completes. If we don't have a head but
6052 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6053 * submitted sync once the chain is complete. If none of those
6054 * conditions are true (normal request), then just queue it.
6055 */
6056 if (*link) {
9d76377f 6057 struct io_kiocb *head = *link;
4e88d6e7 6058
8cdf2193
PB
6059 /*
6060 * Since requests in a link execute sequentially, draining both
6061 * sides of the link also fulfills IOSQE_IO_DRAIN semantics for all
6062 * requests in the link. So, it drains the head and the
6063 * next after the link request. The last one is done via the
6064 * drain_next flag to persist the effect across calls.
6065 */
ef4ff581 6066 if (req->flags & REQ_F_IO_DRAIN) {
711be031
PB
6067 head->flags |= REQ_F_IO_DRAIN;
6068 ctx->drain_next = 1;
6069 }
debb85f4 6070 ret = io_req_defer_prep(req, sqe);
327d6d96 6071 if (unlikely(ret)) {
4e88d6e7 6072 /* fail even hard links since we don't submit */
9d76377f 6073 head->flags |= REQ_F_FAIL_LINK;
1d4240cc 6074 return ret;
2d28390a 6075 }
9d76377f 6076 trace_io_uring_link(ctx, req, head);
c40f6379 6077 io_get_req_task(req);
9d76377f 6078 list_add_tail(&req->link_list, &head->link_list);
32fe525b
PB
6079
6080 /* last request of a link, enqueue the link */
ef4ff581 6081 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
f13fad7b 6082 io_queue_link_head(head, cs);
32fe525b
PB
6083 *link = NULL;
6084 }
9e645e11 6085 } else {
711be031
PB
6086 if (unlikely(ctx->drain_next)) {
6087 req->flags |= REQ_F_IO_DRAIN;
ef4ff581 6088 ctx->drain_next = 0;
711be031 6089 }
ef4ff581 6090 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
dea3b49c 6091 req->flags |= REQ_F_LINK_HEAD;
711be031 6092 INIT_LIST_HEAD(&req->link_list);
f1d96a8f 6093
debb85f4 6094 ret = io_req_defer_prep(req, sqe);
327d6d96 6095 if (unlikely(ret))
711be031
PB
6096 req->flags |= REQ_F_FAIL_LINK;
6097 *link = req;
6098 } else {
f13fad7b 6099 io_queue_sqe(req, sqe, cs);
711be031 6100 }
9e645e11 6101 }
2e6e1fde 6102
1d4240cc 6103 return 0;
9e645e11
JA
6104}
6105
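/*
 * Illustrative userspace sketch, not part of this file: chaining two SQEs
 * with IOSQE_IO_LINK via liburing so the write is only issued once the
 * read completes. 'ring', 'fd_in', 'fd_out' and 'buf' are assumed to be
 * set up elsewhere. A failed head cancels the rest of the chain unless
 * IOSQE_IO_HARDLINK is set.
 *
 *	#include <liburing.h>
 *
 *	struct io_uring_sqe *sqe;
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd_in, &iov, 1, 0);
 *	sqe->flags |= IOSQE_IO_LINK;	// next SQE waits on this one
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_writev(sqe, fd_out, &iov, 1, 0);
 *
 *	io_uring_submit(&ring);
 */
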
9a56a232
JA
6106/*
6107 * Batched submission is done, ensure local IO is flushed out.
6108 */
6109static void io_submit_state_end(struct io_submit_state *state)
6110{
f13fad7b
JA
6111 if (!list_empty(&state->comp.list))
6112 io_submit_flush_completions(&state->comp);
9a56a232 6113 blk_finish_plug(&state->plug);
9f13c35b 6114 io_state_file_put(state);
2579f913 6115 if (state->free_reqs)
6c8a3134 6116 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9a56a232
JA
6117}
6118
6119/*
6120 * Start submission side cache.
6121 */
6122static void io_submit_state_start(struct io_submit_state *state,
013538bd 6123 struct io_ring_ctx *ctx, unsigned int max_ios)
9a56a232
JA
6124{
6125 blk_start_plug(&state->plug);
b63534c4
JA
6126#ifdef CONFIG_BLOCK
6127 state->plug.nowait = true;
6128#endif
013538bd
JA
6129 state->comp.nr = 0;
6130 INIT_LIST_HEAD(&state->comp.list);
6131 state->comp.ctx = ctx;
2579f913 6132 state->free_reqs = 0;
9a56a232
JA
6133 state->file = NULL;
6134 state->ios_left = max_ios;
6135}
6136
2b188cc1
JA
6137static void io_commit_sqring(struct io_ring_ctx *ctx)
6138{
75b28aff 6139 struct io_rings *rings = ctx->rings;
2b188cc1 6140
caf582c6
PB
6141 /*
6142 * Ensure any loads from the SQEs are done at this point,
6143 * since once we write the new head, the application could
6144 * write new data to them.
6145 */
6146 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
6147}
6148
2b188cc1 6149/*
3529d8c2 6150 * Fetch an sqe, if one is available. Note that the returned sqe will point to memory
2b188cc1
JA
6151 * that is mapped by userspace. This means that care needs to be taken to
6152 * ensure that reads are stable, as we cannot rely on userspace always
6153 * being a good citizen. If members of the sqe are validated and then later
6154 * used, it's important that those reads are done through READ_ONCE() to
6155 * prevent a re-load down the line.
6156 */
709b302f 6157static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 6158{
75b28aff 6159 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
6160 unsigned head;
6161
6162 /*
6163 * The cached sq head (or cq tail) serves two purposes:
6164 *
6165 * 1) allows us to batch the user-visible head updates,
6166 * amortizing their cost.
6167 * 2) allows the kernel side to track the head on its own, even
6168 * though the application is the one updating it.
6169 */
ee7d46d9 6170 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
709b302f
PB
6171 if (likely(head < ctx->sq_entries))
6172 return &ctx->sq_sqes[head];
2b188cc1
JA
6173
6174 /* drop invalid entries */
498ccd9e 6175 ctx->cached_sq_dropped++;
ee7d46d9 6176 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
709b302f
PB
6177 return NULL;
6178}
6179
6180static inline void io_consume_sqe(struct io_ring_ctx *ctx)
6181{
6182 ctx->cached_sq_head++;
2b188cc1
JA
6183}
6184
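/*
 * Illustrative worked example, not part of the original source: with
 * sq_entries == 8 (sq_mask == 7) and cached_sq_head == 10, the head slot
 * is sq_array[10 & 7] == sq_array[2]; the value the application stored
 * there is in turn the index into ctx->sq_sqes. This indirection is what
 * lets userspace fill SQE slots in any order within the ring.
 */
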
ef4ff581
PB
6185#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
6186 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
6187 IOSQE_BUFFER_SELECT)
6188
6189static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6190 const struct io_uring_sqe *sqe,
0cdaf760 6191 struct io_submit_state *state)
0553b8bd 6192{
ef4ff581 6193 unsigned int sqe_flags;
63ff8223 6194 int id;
ef4ff581 6195
0553b8bd
PB
6196 req->opcode = READ_ONCE(sqe->opcode);
6197 req->user_data = READ_ONCE(sqe->user_data);
6198 req->io = NULL;
6199 req->file = NULL;
6200 req->ctx = ctx;
6201 req->flags = 0;
6202 /* one is dropped after submission, the other at completion */
6203 refcount_set(&req->refs, 2);
4dd2824d 6204 req->task = current;
0553b8bd 6205 req->result = 0;
ef4ff581
PB
6206
6207 if (unlikely(req->opcode >= IORING_OP_LAST))
6208 return -EINVAL;
6209
9d8426a0
JA
6210 if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
6211 return -EFAULT;
ef4ff581
PB
6212
6213 sqe_flags = READ_ONCE(sqe->flags);
6214 /* enforce forwards compatibility on users */
6215 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6216 return -EINVAL;
6217
6218 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6219 !io_op_defs[req->opcode].buffer_select)
6220 return -EOPNOTSUPP;
6221
6222 id = READ_ONCE(sqe->personality);
6223 if (id) {
7cdaf587 6224 io_req_init_async(req);
ef4ff581
PB
6225 req->work.creds = idr_find(&ctx->personality_idr, id);
6226 if (unlikely(!req->work.creds))
6227 return -EINVAL;
6228 get_cred(req->work.creds);
6229 }
6230
6231 /* same numerical values as the corresponding REQ_F_*, safe to copy */
c11368a5 6232 req->flags |= sqe_flags;
ef4ff581 6233
63ff8223
JA
6234 if (!io_op_defs[req->opcode].needs_file)
6235 return 0;
6236
6237 return io_req_set_file(state, req, READ_ONCE(sqe->fd));
0553b8bd
PB
6238}
6239
fb5ccc98 6240static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
0cdaf760 6241 struct file *ring_file, int ring_fd)
6c271ce2 6242{
ac8691c4 6243 struct io_submit_state state;
9e645e11 6244 struct io_kiocb *link = NULL;
9e645e11 6245 int i, submitted = 0;
6c271ce2 6246
c4a2ed72 6247 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8
JA
6248 if (test_bit(0, &ctx->sq_check_overflow)) {
6249 if (!list_empty(&ctx->cq_overflow_list) &&
6250 !io_cqring_overflow_flush(ctx, false))
6251 return -EBUSY;
6252 }
6c271ce2 6253
ee7d46d9
PB
6254 /* make sure SQ entry isn't read before tail */
6255 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 6256
2b85edfc
PB
6257 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6258 return -EAGAIN;
6c271ce2 6259
013538bd 6260 io_submit_state_start(&state, ctx, nr);
6c271ce2 6261
b14cca0c
PB
6262 ctx->ring_fd = ring_fd;
6263 ctx->ring_file = ring_file;
6264
6c271ce2 6265 for (i = 0; i < nr; i++) {
3529d8c2 6266 const struct io_uring_sqe *sqe;
196be95c 6267 struct io_kiocb *req;
1cb1edb2 6268 int err;
fb5ccc98 6269
b1e50e54
PB
6270 sqe = io_get_sqe(ctx);
6271 if (unlikely(!sqe)) {
6272 io_consume_sqe(ctx);
6273 break;
6274 }
ac8691c4 6275 req = io_alloc_req(ctx, &state);
196be95c
PB
6276 if (unlikely(!req)) {
6277 if (!submitted)
6278 submitted = -EAGAIN;
fb5ccc98 6279 break;
196be95c 6280 }
fb5ccc98 6281
ac8691c4 6282 err = io_init_req(ctx, req, sqe, &state);
709b302f 6283 io_consume_sqe(ctx);
d3656344
JA
6284 /* will complete beyond this point, count as submitted */
6285 submitted++;
6286
ef4ff581 6287 if (unlikely(err)) {
1cb1edb2 6288fail_req:
e1e16097
JA
6289 io_put_req(req);
6290 io_req_complete(req, err);
196be95c
PB
6291 break;
6292 }
fb5ccc98 6293
354420f7 6294 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
0cdaf760 6295 true, io_async_submit(ctx));
f13fad7b 6296 err = io_submit_sqe(req, sqe, &link, &state.comp);
1d4240cc
PB
6297 if (err)
6298 goto fail_req;
6c271ce2
JA
6299 }
6300
9466f437
PB
6301 if (unlikely(submitted != nr)) {
6302 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
6303
6304 percpu_ref_put_many(&ctx->refs, nr - ref_used);
6305 }
9e645e11 6306 if (link)
f13fad7b 6307 io_queue_link_head(link, &state.comp);
ac8691c4 6308 io_submit_state_end(&state);
6c271ce2 6309
ae9428ca
PB
6310 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6311 io_commit_sqring(ctx);
6312
6c271ce2
JA
6313 return submitted;
6314}
6315
6316static int io_sq_thread(void *data)
6317{
6c271ce2 6318 struct io_ring_ctx *ctx = data;
181e448d 6319 const struct cred *old_cred;
6c271ce2 6320 DEFINE_WAIT(wait);
6c271ce2 6321 unsigned long timeout;
bdcd3eab 6322 int ret = 0;
6c271ce2 6323
0f158b4c 6324 complete(&ctx->sq_thread_comp);
a4c0b3de 6325
181e448d 6326 old_cred = override_creds(ctx->creds);
6c271ce2 6327
bdcd3eab 6328 timeout = jiffies + ctx->sq_thread_idle;
2bbcd6d3 6329 while (!kthread_should_park()) {
fb5ccc98 6330 unsigned int to_submit;
6c271ce2 6331
540e32a0 6332 if (!list_empty(&ctx->iopoll_list)) {
6c271ce2
JA
6333 unsigned nr_events = 0;
6334
bdcd3eab 6335 mutex_lock(&ctx->uring_lock);
540e32a0 6336 if (!list_empty(&ctx->iopoll_list) && !need_resched())
9dedd563 6337 io_do_iopoll(ctx, &nr_events, 0);
bdcd3eab 6338 else
6c271ce2 6339 timeout = jiffies + ctx->sq_thread_idle;
bdcd3eab 6340 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
6341 }
6342
fb5ccc98 6343 to_submit = io_sqring_entries(ctx);
c1edbf5f
JA
6344
6345 /*
6346 * If submit got -EBUSY, flag us as needing the application
6347 * to enter the kernel to reap and flush events.
6348 */
b772f07a 6349 if (!to_submit || ret == -EBUSY || need_resched()) {
7143b5ac
SG
6350 /*
6351 * Drop cur_mm before scheduling; we can't hold it for
6352 * long periods (or over schedule()). Do this before
6353 * adding ourselves to the waitqueue, as the unuse/drop
6354 * may sleep.
6355 */
4349f30e 6356 io_sq_thread_drop_mm();
7143b5ac 6357
6c271ce2
JA
6358 /*
6359 * We're polling. If we're within the defined idle
6360 * period, then let us spin without work before going
c1edbf5f
JA
6361 * to sleep. The exception is if we got EBUSY doing
6362 * more IO; in that case we should wait for the
6363 * application to reap events and wake us up.
6c271ce2 6364 */
540e32a0 6365 if (!list_empty(&ctx->iopoll_list) || need_resched() ||
df069d80
JA
6366 (!time_after(jiffies, timeout) && ret != -EBUSY &&
6367 !percpu_ref_is_dying(&ctx->refs))) {
4c6e277c 6368 io_run_task_work();
9831a90c 6369 cond_resched();
6c271ce2
JA
6370 continue;
6371 }
6372
6c271ce2
JA
6373 prepare_to_wait(&ctx->sqo_wait, &wait,
6374 TASK_INTERRUPTIBLE);
6375
bdcd3eab
XW
6376 /*
6377 * While doing polled IO, before going to sleep, we need
540e32a0
PB
6378 * to check if there are new reqs added to iopoll_list;
6379 * reqs may have been punted to an io worker and will be
6380 * added to iopoll_list in the meantime, hence check
6381 * the iopoll_list again.
bdcd3eab
XW
6382 */
6383 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
540e32a0 6384 !list_empty_careful(&ctx->iopoll_list)) {
bdcd3eab
XW
6385 finish_wait(&ctx->sqo_wait, &wait);
6386 continue;
6387 }
6388
6c271ce2 6389 /* Tell userspace we may need a wakeup call */
6d5f9049 6390 spin_lock_irq(&ctx->completion_lock);
75b28aff 6391 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6d5f9049 6392 spin_unlock_irq(&ctx->completion_lock);
6c271ce2 6393
fb5ccc98 6394 to_submit = io_sqring_entries(ctx);
c1edbf5f 6395 if (!to_submit || ret == -EBUSY) {
2bbcd6d3 6396 if (kthread_should_park()) {
6c271ce2
JA
6397 finish_wait(&ctx->sqo_wait, &wait);
6398 break;
6399 }
4c6e277c 6400 if (io_run_task_work()) {
10bea96d 6401 finish_wait(&ctx->sqo_wait, &wait);
b41e9852
JA
6402 continue;
6403 }
6c271ce2
JA
6404 if (signal_pending(current))
6405 flush_signals(current);
6406 schedule();
6407 finish_wait(&ctx->sqo_wait, &wait);
6408
6d5f9049 6409 spin_lock_irq(&ctx->completion_lock);
75b28aff 6410 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6d5f9049 6411 spin_unlock_irq(&ctx->completion_lock);
d4ae271d 6412 ret = 0;
6c271ce2
JA
6413 continue;
6414 }
6415 finish_wait(&ctx->sqo_wait, &wait);
6416
6d5f9049 6417 spin_lock_irq(&ctx->completion_lock);
75b28aff 6418 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6d5f9049 6419 spin_unlock_irq(&ctx->completion_lock);
6c271ce2
JA
6420 }
6421
8a4955ff 6422 mutex_lock(&ctx->uring_lock);
6b668c9b
XW
6423 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6424 ret = io_submit_sqes(ctx, to_submit, NULL, -1);
8a4955ff 6425 mutex_unlock(&ctx->uring_lock);
bdcd3eab 6426 timeout = jiffies + ctx->sq_thread_idle;
6c271ce2
JA
6427 }
6428
4c6e277c 6429 io_run_task_work();
b41e9852 6430
4349f30e 6431 io_sq_thread_drop_mm();
181e448d 6432 revert_creds(old_cred);
06058632 6433
2bbcd6d3 6434 kthread_parkme();
06058632 6435
6c271ce2
JA
6436 return 0;
6437}
6438
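/*
 * Illustrative userspace sketch, not part of this file: the flag check
 * that pairs with the IORING_SQ_NEED_WAKEUP handling above. 'sq_flags'
 * (a pointer into the mmap'ed SQ ring) and 'ring_fd' are assumptions.
 *
 *	#include <linux/io_uring.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// after publishing a new SQ tail (with the required barrier):
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */
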
bda52162
JA
6439struct io_wait_queue {
6440 struct wait_queue_entry wq;
6441 struct io_ring_ctx *ctx;
6442 unsigned to_wait;
6443 unsigned nr_timeouts;
6444};
6445
1d7bb1d5 6446static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
6447{
6448 struct io_ring_ctx *ctx = iowq->ctx;
6449
6450 /*
d195a66e 6451 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
6452 * started waiting. For timeouts, we always want to return to userspace,
6453 * regardless of event count.
6454 */
1d7bb1d5 6455 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
6456 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6457}
6458
6459static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6460 int wake_flags, void *key)
6461{
6462 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6463 wq);
6464
1d7bb1d5
JA
6465 /* use noflush == true, as we can't safely rely on locking context */
6466 if (!io_should_wake(iowq, true))
bda52162
JA
6467 return -1;
6468
6469 return autoremove_wake_function(curr, mode, wake_flags, key);
6470}
6471
2b188cc1
JA
6472/*
6473 * Wait until events become available, if we don't already have some. The
6474 * application must reap them itself, as they reside on the shared cq ring.
6475 */
6476static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6477 const sigset_t __user *sig, size_t sigsz)
6478{
bda52162
JA
6479 struct io_wait_queue iowq = {
6480 .wq = {
6481 .private = current,
6482 .func = io_wake_function,
6483 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6484 },
6485 .ctx = ctx,
6486 .to_wait = min_events,
6487 };
75b28aff 6488 struct io_rings *rings = ctx->rings;
e9ffa5c2 6489 int ret = 0;
2b188cc1 6490
b41e9852
JA
6491 do {
6492 if (io_cqring_events(ctx, false) >= min_events)
6493 return 0;
4c6e277c 6494 if (!io_run_task_work())
b41e9852 6495 break;
b41e9852 6496 } while (1);
2b188cc1
JA
6497
6498 if (sig) {
9e75ad5d
AB
6499#ifdef CONFIG_COMPAT
6500 if (in_compat_syscall())
6501 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 6502 sigsz);
9e75ad5d
AB
6503 else
6504#endif
b772434b 6505 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 6506
2b188cc1
JA
6507 if (ret)
6508 return ret;
6509 }
6510
bda52162 6511 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 6512 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
6513 do {
6514 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6515 TASK_INTERRUPTIBLE);
ce593a6c 6516 /* make sure we run task_work before checking for signals */
4c6e277c
JA
6517 if (io_run_task_work())
6518 continue;
bda52162 6519 if (signal_pending(current)) {
b7db41c9
JA
6520 if (current->jobctl & JOBCTL_TASK_WORK) {
6521 spin_lock_irq(&current->sighand->siglock);
6522 current->jobctl &= ~JOBCTL_TASK_WORK;
6523 recalc_sigpending();
6524 spin_unlock_irq(&current->sighand->siglock);
6525 continue;
6526 }
e9ffa5c2 6527 ret = -EINTR;
bda52162
JA
6528 break;
6529 }
ce593a6c
JA
6530 if (io_should_wake(&iowq, false))
6531 break;
6532 schedule();
bda52162
JA
6533 } while (1);
6534 finish_wait(&ctx->wait, &iowq.wq);
6535
e9ffa5c2 6536 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 6537
75b28aff 6538 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
6539}
6540
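/*
 * Illustrative userspace sketch, not part of this file, assuming liburing
 * and an initialized 'ring': a blocking wait like this reaches
 * io_cqring_wait() via io_uring_enter() with IORING_ENTER_GETEVENTS.
 *
 *	#include <liburing.h>
 *
 *	struct io_uring_cqe *cqe;
 *
 *	int ret = io_uring_wait_cqe(&ring, &cqe);
 *	if (ret == 0) {
 *		// inspect cqe->res and cqe->user_data here
 *		io_uring_cqe_seen(&ring, cqe);	// advance the CQ head
 *	}
 */
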
6b06314c
JA
6541static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6542{
6543#if defined(CONFIG_UNIX)
6544 if (ctx->ring_sock) {
6545 struct sock *sock = ctx->ring_sock->sk;
6546 struct sk_buff *skb;
6547
6548 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6549 kfree_skb(skb);
6550 }
6551#else
6552 int i;
6553
65e19f54
JA
6554 for (i = 0; i < ctx->nr_user_files; i++) {
6555 struct file *file;
6556
6557 file = io_file_from_index(ctx, i);
6558 if (file)
6559 fput(file);
6560 }
6b06314c
JA
6561#endif
6562}
6563
05f3fb3c
JA
6564static void io_file_ref_kill(struct percpu_ref *ref)
6565{
6566 struct fixed_file_data *data;
6567
6568 data = container_of(ref, struct fixed_file_data, refs);
6569 complete(&data->done);
6570}
6571
6b06314c
JA
6572static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6573{
05f3fb3c 6574 struct fixed_file_data *data = ctx->file_data;
05589553 6575 struct fixed_file_ref_node *ref_node = NULL;
65e19f54
JA
6576 unsigned nr_tables, i;
6577
05f3fb3c 6578 if (!data)
6b06314c
JA
6579 return -ENXIO;
6580
6a4d07cd 6581 spin_lock(&data->lock);
05589553
XW
6582 if (!list_empty(&data->ref_list))
6583 ref_node = list_first_entry(&data->ref_list,
6584 struct fixed_file_ref_node, node);
6a4d07cd 6585 spin_unlock(&data->lock);
05589553
XW
6586 if (ref_node)
6587 percpu_ref_kill(&ref_node->refs);
6588
6589 percpu_ref_kill(&data->refs);
6590
6591 /* wait for all refs nodes to complete */
4a38aed2 6592 flush_delayed_work(&ctx->file_put_work);
2faf852d 6593 wait_for_completion(&data->done);
05f3fb3c 6594
6b06314c 6595 __io_sqe_files_unregister(ctx);
65e19f54
JA
6596 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6597 for (i = 0; i < nr_tables; i++)
05f3fb3c
JA
6598 kfree(data->table[i].files);
6599 kfree(data->table);
05589553
XW
6600 percpu_ref_exit(&data->refs);
6601 kfree(data);
05f3fb3c 6602 ctx->file_data = NULL;
6b06314c
JA
6603 ctx->nr_user_files = 0;
6604 return 0;
6605}
6606
6c271ce2
JA
6607static void io_sq_thread_stop(struct io_ring_ctx *ctx)
6608{
6609 if (ctx->sqo_thread) {
0f158b4c 6610 wait_for_completion(&ctx->sq_thread_comp);
2bbcd6d3
RP
6611 /*
6612 * The park is a bit of a work-around; without it we get
6613 * warning spews on shutdown with SQPOLL set and affinity
6614 * set to a single CPU.
6615 */
06058632 6616 kthread_park(ctx->sqo_thread);
6c271ce2
JA
6617 kthread_stop(ctx->sqo_thread);
6618 ctx->sqo_thread = NULL;
6619 }
6620}
6621
6b06314c
JA
6622static void io_finish_async(struct io_ring_ctx *ctx)
6623{
6c271ce2
JA
6624 io_sq_thread_stop(ctx);
6625
561fb04a
JA
6626 if (ctx->io_wq) {
6627 io_wq_destroy(ctx->io_wq);
6628 ctx->io_wq = NULL;
6b06314c
JA
6629 }
6630}
6631
6632#if defined(CONFIG_UNIX)
6b06314c
JA
6633/*
6634 * Ensure the UNIX gc is aware of our file set, so we are certain that
6635 * the io_uring can be safely unregistered on process exit, even if we have
6636 * loops in the file referencing.
6637 */
6638static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
6639{
6640 struct sock *sk = ctx->ring_sock->sk;
6641 struct scm_fp_list *fpl;
6642 struct sk_buff *skb;
08a45173 6643 int i, nr_files;
6b06314c 6644
6b06314c
JA
6645 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
6646 if (!fpl)
6647 return -ENOMEM;
6648
6649 skb = alloc_skb(0, GFP_KERNEL);
6650 if (!skb) {
6651 kfree(fpl);
6652 return -ENOMEM;
6653 }
6654
6655 skb->sk = sk;
6b06314c 6656
08a45173 6657 nr_files = 0;
6b06314c
JA
6658 fpl->user = get_uid(ctx->user);
6659 for (i = 0; i < nr; i++) {
65e19f54
JA
6660 struct file *file = io_file_from_index(ctx, i + offset);
6661
6662 if (!file)
08a45173 6663 continue;
65e19f54 6664 fpl->fp[nr_files] = get_file(file);
08a45173
JA
6665 unix_inflight(fpl->user, fpl->fp[nr_files]);
6666 nr_files++;
6b06314c
JA
6667 }
6668
08a45173
JA
6669 if (nr_files) {
6670 fpl->max = SCM_MAX_FD;
6671 fpl->count = nr_files;
6672 UNIXCB(skb).fp = fpl;
05f3fb3c 6673 skb->destructor = unix_destruct_scm;
08a45173
JA
6674 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6675 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 6676
08a45173
JA
6677 for (i = 0; i < nr_files; i++)
6678 fput(fpl->fp[i]);
6679 } else {
6680 kfree_skb(skb);
6681 kfree(fpl);
6682 }
6b06314c
JA
6683
6684 return 0;
6685}
6686
6687/*
6688 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
6689 * causes regular reference counting to break down. We rely on the UNIX
6690 * garbage collection to take care of this problem for us.
6691 */
6692static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6693{
6694 unsigned left, total;
6695 int ret = 0;
6696
6697 total = 0;
6698 left = ctx->nr_user_files;
6699 while (left) {
6700 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
6701
6702 ret = __io_sqe_files_scm(ctx, this_files, total);
6703 if (ret)
6704 break;
6705 left -= this_files;
6706 total += this_files;
6707 }
6708
6709 if (!ret)
6710 return 0;
6711
6712 while (total < ctx->nr_user_files) {
65e19f54
JA
6713 struct file *file = io_file_from_index(ctx, total);
6714
6715 if (file)
6716 fput(file);
6b06314c
JA
6717 total++;
6718 }
6719
6720 return ret;
6721}
6722#else
6723static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6724{
6725 return 0;
6726}
6727#endif
6728
65e19f54
JA
6729static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
6730 unsigned nr_files)
6731{
6732 int i;
6733
6734 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6735 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6736 unsigned this_files;
6737
6738 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
6739 table->files = kcalloc(this_files, sizeof(struct file *),
6740 GFP_KERNEL);
6741 if (!table->files)
6742 break;
6743 nr_files -= this_files;
6744 }
6745
6746 if (i == nr_tables)
6747 return 0;
6748
6749 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6750 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6751 kfree(table->files);
6752 }
6753 return 1;
6754}
6755
05f3fb3c
JA
6756static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
6757{
6758#if defined(CONFIG_UNIX)
6759 struct sock *sock = ctx->ring_sock->sk;
6760 struct sk_buff_head list, *head = &sock->sk_receive_queue;
6761 struct sk_buff *skb;
6762 int i;
6763
6764 __skb_queue_head_init(&list);
6765
6766 /*
6767 * Find the skb that holds this file in its SCM_RIGHTS. When found,
6768 * remove this entry and rearrange the file array.
6769 */
6770 skb = skb_dequeue(head);
6771 while (skb) {
6772 struct scm_fp_list *fp;
6773
6774 fp = UNIXCB(skb).fp;
6775 for (i = 0; i < fp->count; i++) {
6776 int left;
6777
6778 if (fp->fp[i] != file)
6779 continue;
6780
6781 unix_notinflight(fp->user, fp->fp[i]);
6782 left = fp->count - 1 - i;
6783 if (left) {
6784 memmove(&fp->fp[i], &fp->fp[i + 1],
6785 left * sizeof(struct file *));
6786 }
6787 fp->count--;
6788 if (!fp->count) {
6789 kfree_skb(skb);
6790 skb = NULL;
6791 } else {
6792 __skb_queue_tail(&list, skb);
6793 }
6794 fput(file);
6795 file = NULL;
6796 break;
6797 }
6798
6799 if (!file)
6800 break;
6801
6802 __skb_queue_tail(&list, skb);
6803
6804 skb = skb_dequeue(head);
6805 }
6806
6807 if (skb_peek(&list)) {
6808 spin_lock_irq(&head->lock);
6809 while ((skb = __skb_dequeue(&list)) != NULL)
6810 __skb_queue_tail(head, skb);
6811 spin_unlock_irq(&head->lock);
6812 }
6813#else
6814 fput(file);
6815#endif
6816}
6817
6818struct io_file_put {
05589553 6819 struct list_head list;
05f3fb3c 6820 struct file *file;
05f3fb3c
JA
6821};
6822
4a38aed2 6823static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
65e19f54 6824{
4a38aed2
JA
6825 struct fixed_file_data *file_data = ref_node->file_data;
6826 struct io_ring_ctx *ctx = file_data->ctx;
05f3fb3c 6827 struct io_file_put *pfile, *tmp;
05589553
XW
6828
6829 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
6a4d07cd 6830 list_del(&pfile->list);
05589553
XW
6831 io_ring_file_put(ctx, pfile->file);
6832 kfree(pfile);
65e19f54 6833 }
05589553 6834
6a4d07cd
JA
6835 spin_lock(&file_data->lock);
6836 list_del(&ref_node->node);
6837 spin_unlock(&file_data->lock);
05589553
XW
6838
6839 percpu_ref_exit(&ref_node->refs);
6840 kfree(ref_node);
6841 percpu_ref_put(&file_data->refs);
2faf852d 6842}
65e19f54 6843
4a38aed2
JA
6844static void io_file_put_work(struct work_struct *work)
6845{
6846 struct io_ring_ctx *ctx;
6847 struct llist_node *node;
6848
6849 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
6850 node = llist_del_all(&ctx->file_put_llist);
6851
6852 while (node) {
6853 struct fixed_file_ref_node *ref_node;
6854 struct llist_node *next = node->next;
6855
6856 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
6857 __io_file_put_work(ref_node);
6858 node = next;
6859 }
6860}
6861
05589553 6862static void io_file_data_ref_zero(struct percpu_ref *ref)
2faf852d 6863{
05589553 6864 struct fixed_file_ref_node *ref_node;
4a38aed2
JA
6865 struct io_ring_ctx *ctx;
6866 bool first_add;
6867 int delay = HZ;
65e19f54 6868
05589553 6869 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
4a38aed2 6870 ctx = ref_node->file_data->ctx;
05589553 6871
4a38aed2
JA
6872 if (percpu_ref_is_dying(&ctx->file_data->refs))
6873 delay = 0;
05589553 6874
4a38aed2
JA
6875 first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
6876 if (!delay)
6877 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
6878 else if (first_add)
6879 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
05f3fb3c 6880}
65e19f54 6881
05589553
XW
6882static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
6883 struct io_ring_ctx *ctx)
05f3fb3c 6884{
05589553 6885 struct fixed_file_ref_node *ref_node;
05f3fb3c 6886
05589553
XW
6887 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
6888 if (!ref_node)
6889 return ERR_PTR(-ENOMEM);
05f3fb3c 6890
05589553
XW
6891 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
6892 0, GFP_KERNEL)) {
6893 kfree(ref_node);
6894 return ERR_PTR(-ENOMEM);
6895 }
6896 INIT_LIST_HEAD(&ref_node->node);
6897 INIT_LIST_HEAD(&ref_node->file_list);
05589553
XW
6898 ref_node->file_data = ctx->file_data;
6899 return ref_node;
05589553
XW
6900}
6901
6902static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
6903{
6904 percpu_ref_exit(&ref_node->refs);
6905 kfree(ref_node);
65e19f54
JA
6906}
6907
6b06314c
JA
6908static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6909 unsigned nr_args)
6910{
6911 __s32 __user *fds = (__s32 __user *) arg;
65e19f54 6912 unsigned nr_tables;
05f3fb3c 6913 struct file *file;
6b06314c
JA
6914 int fd, ret = 0;
6915 unsigned i;
05589553 6916 struct fixed_file_ref_node *ref_node;
6b06314c 6917
05f3fb3c 6918 if (ctx->file_data)
6b06314c
JA
6919 return -EBUSY;
6920 if (!nr_args)
6921 return -EINVAL;
6922 if (nr_args > IORING_MAX_FIXED_FILES)
6923 return -EMFILE;
6924
05f3fb3c
JA
6925 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
6926 if (!ctx->file_data)
6927 return -ENOMEM;
6928 ctx->file_data->ctx = ctx;
6929 init_completion(&ctx->file_data->done);
05589553 6930 INIT_LIST_HEAD(&ctx->file_data->ref_list);
f7fe9346 6931 spin_lock_init(&ctx->file_data->lock);
05f3fb3c 6932
65e19f54 6933 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
05f3fb3c
JA
6934 ctx->file_data->table = kcalloc(nr_tables,
6935 sizeof(struct fixed_file_table),
65e19f54 6936 GFP_KERNEL);
05f3fb3c
JA
6937 if (!ctx->file_data->table) {
6938 kfree(ctx->file_data);
6939 ctx->file_data = NULL;
6b06314c 6940 return -ENOMEM;
05f3fb3c
JA
6941 }
6942
05589553 6943 if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill,
05f3fb3c
JA
6944 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
6945 kfree(ctx->file_data->table);
6946 kfree(ctx->file_data);
6947 ctx->file_data = NULL;
6b06314c 6948 return -ENOMEM;
05f3fb3c 6949 }
6b06314c 6950
65e19f54 6951 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
05f3fb3c
JA
6952 percpu_ref_exit(&ctx->file_data->refs);
6953 kfree(ctx->file_data->table);
6954 kfree(ctx->file_data);
6955 ctx->file_data = NULL;
65e19f54
JA
6956 return -ENOMEM;
6957 }
6958
08a45173 6959 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
6960 struct fixed_file_table *table;
6961 unsigned index;
6962
6b06314c
JA
6963 ret = -EFAULT;
6964 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
6965 break;
08a45173
JA
6966 /* allow sparse sets */
6967 if (fd == -1) {
6968 ret = 0;
6969 continue;
6970 }
6b06314c 6971
05f3fb3c 6972 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54 6973 index = i & IORING_FILE_TABLE_MASK;
05f3fb3c 6974 file = fget(fd);
6b06314c
JA
6975
6976 ret = -EBADF;
05f3fb3c 6977 if (!file)
6b06314c 6978 break;
05f3fb3c 6979
6b06314c
JA
6980 /*
6981 * Don't allow io_uring instances to be registered. If UNIX
6982 * isn't enabled, then this causes a reference cycle and this
6983 * instance can never get freed. If UNIX is enabled we'll
6984 * handle it just fine, but there's still no point in allowing
6985 * a ring fd as it doesn't support regular read/write anyway.
6986 */
05f3fb3c
JA
6987 if (file->f_op == &io_uring_fops) {
6988 fput(file);
6b06314c
JA
6989 break;
6990 }
6b06314c 6991 ret = 0;
05f3fb3c 6992 table->files[index] = file;
6b06314c
JA
6993 }
6994
6995 if (ret) {
65e19f54 6996 for (i = 0; i < ctx->nr_user_files; i++) {
65e19f54
JA
6997 file = io_file_from_index(ctx, i);
6998 if (file)
6999 fput(file);
7000 }
7001 for (i = 0; i < nr_tables; i++)
05f3fb3c 7002 kfree(ctx->file_data->table[i].files);
6b06314c 7003
667e57da 7004 percpu_ref_exit(&ctx->file_data->refs);
05f3fb3c
JA
7005 kfree(ctx->file_data->table);
7006 kfree(ctx->file_data);
7007 ctx->file_data = NULL;
6b06314c
JA
7008 ctx->nr_user_files = 0;
7009 return ret;
7010 }
7011
7012 ret = io_sqe_files_scm(ctx);
05589553 7013 if (ret) {
6b06314c 7014 io_sqe_files_unregister(ctx);
05589553
XW
7015 return ret;
7016 }
6b06314c 7017
05589553
XW
7018 ref_node = alloc_fixed_file_ref_node(ctx);
7019 if (IS_ERR(ref_node)) {
7020 io_sqe_files_unregister(ctx);
7021 return PTR_ERR(ref_node);
7022 }
7023
7024 ctx->file_data->cur_refs = &ref_node->refs;
6a4d07cd 7025 spin_lock(&ctx->file_data->lock);
05589553 7026 list_add(&ref_node->node, &ctx->file_data->ref_list);
6a4d07cd 7027 spin_unlock(&ctx->file_data->lock);
05589553 7028 percpu_ref_get(&ctx->file_data->refs);
6b06314c
JA
7029 return ret;
7030}
7031
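/*
 * Illustrative userspace sketch, not part of this file, assuming liburing
 * and an initialized 'ring': registering a fixed file set, where a -1
 * entry leaves a sparse slot, matching the "allow sparse sets" branch
 * above. 'fd_a' and 'fd_b' are assumptions.
 *
 *	int fds[3] = { fd_a, -1, fd_b };
 *
 *	int ret = io_uring_register_files(&ring, fds, 3);
 *	// SQEs can now set IOSQE_FIXED_FILE and pass the slot index as fd
 */
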
c3a31e60
JA
7032static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7033 int index)
7034{
7035#if defined(CONFIG_UNIX)
7036 struct sock *sock = ctx->ring_sock->sk;
7037 struct sk_buff_head *head = &sock->sk_receive_queue;
7038 struct sk_buff *skb;
7039
7040 /*
7041 * See if we can merge this file into an existing skb SCM_RIGHTS
7042 * file set. If there's no room, fall back to allocating a new skb
7043 * and filling it in.
7044 */
7045 spin_lock_irq(&head->lock);
7046 skb = skb_peek(head);
7047 if (skb) {
7048 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7049
7050 if (fpl->count < SCM_MAX_FD) {
7051 __skb_unlink(skb, head);
7052 spin_unlock_irq(&head->lock);
7053 fpl->fp[fpl->count] = get_file(file);
7054 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7055 fpl->count++;
7056 spin_lock_irq(&head->lock);
7057 __skb_queue_head(head, skb);
7058 } else {
7059 skb = NULL;
7060 }
7061 }
7062 spin_unlock_irq(&head->lock);
7063
7064 if (skb) {
7065 fput(file);
7066 return 0;
7067 }
7068
7069 return __io_sqe_files_scm(ctx, 1, index);
7070#else
7071 return 0;
7072#endif
7073}
7074
a5318d3c 7075static int io_queue_file_removal(struct fixed_file_data *data,
05589553 7076 struct file *file)
05f3fb3c 7077{
a5318d3c 7078 struct io_file_put *pfile;
05589553
XW
7079 struct percpu_ref *refs = data->cur_refs;
7080 struct fixed_file_ref_node *ref_node;
05f3fb3c 7081
05f3fb3c 7082 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
a5318d3c
HD
7083 if (!pfile)
7084 return -ENOMEM;
05f3fb3c 7085
05589553 7086 ref_node = container_of(refs, struct fixed_file_ref_node, refs);
05f3fb3c 7087 pfile->file = file;
05589553
XW
7088 list_add(&pfile->list, &ref_node->file_list);
7089
a5318d3c 7090 return 0;
05f3fb3c
JA
7091}
7092
7093static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7094 struct io_uring_files_update *up,
7095 unsigned nr_args)
7096{
7097 struct fixed_file_data *data = ctx->file_data;
05589553 7098 struct fixed_file_ref_node *ref_node;
05f3fb3c 7099 struct file *file;
c3a31e60
JA
7100 __s32 __user *fds;
7101 int fd, i, err;
7102 __u32 done;
05589553 7103 bool needs_switch = false;
c3a31e60 7104
05f3fb3c 7105 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
7106 return -EOVERFLOW;
7107 if (done > ctx->nr_user_files)
7108 return -EINVAL;
7109
05589553
XW
7110 ref_node = alloc_fixed_file_ref_node(ctx);
7111 if (IS_ERR(ref_node))
7112 return PTR_ERR(ref_node);
7113
c3a31e60 7114 done = 0;
05f3fb3c 7115 fds = u64_to_user_ptr(up->fds);
c3a31e60 7116 while (nr_args) {
65e19f54
JA
7117 struct fixed_file_table *table;
7118 unsigned index;
7119
c3a31e60
JA
7120 err = 0;
7121 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7122 err = -EFAULT;
7123 break;
7124 }
05f3fb3c
JA
7125 i = array_index_nospec(up->offset, ctx->nr_user_files);
7126 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54
JA
7127 index = i & IORING_FILE_TABLE_MASK;
7128 if (table->files[index]) {
05f3fb3c 7129 file = io_file_from_index(ctx, index);
a5318d3c
HD
7130 err = io_queue_file_removal(data, file);
7131 if (err)
7132 break;
65e19f54 7133 table->files[index] = NULL;
05589553 7134 needs_switch = true;
c3a31e60
JA
7135 }
7136 if (fd != -1) {
c3a31e60
JA
7137 file = fget(fd);
7138 if (!file) {
7139 err = -EBADF;
7140 break;
7141 }
7142 /*
7143 * Don't allow io_uring instances to be registered. If
7144 * UNIX isn't enabled, then this causes a reference
7145 * cycle and this instance can never get freed. If UNIX
7146 * is enabled we'll handle it just fine, but there's
7147 * still no point in allowing a ring fd as it doesn't
7148 * support regular read/write anyway.
7149 */
7150 if (file->f_op == &io_uring_fops) {
7151 fput(file);
7152 err = -EBADF;
7153 break;
7154 }
65e19f54 7155 table->files[index] = file;
c3a31e60 7156 err = io_sqe_file_register(ctx, file, i);
f3bd9dae
YY
7157 if (err) {
7158 fput(file);
c3a31e60 7159 break;
f3bd9dae 7160 }
c3a31e60
JA
7161 }
7162 nr_args--;
7163 done++;
05f3fb3c
JA
7164 up->offset++;
7165 }
7166
05589553
XW
7167 if (needs_switch) {
7168 percpu_ref_kill(data->cur_refs);
6a4d07cd 7169 spin_lock(&data->lock);
05589553
XW
7170 list_add(&ref_node->node, &data->ref_list);
7171 data->cur_refs = &ref_node->refs;
6a4d07cd 7172 spin_unlock(&data->lock);
05589553
XW
7173 percpu_ref_get(&ctx->file_data->refs);
7174 } else
7175 destroy_fixed_file_ref_node(ref_node);
c3a31e60
JA
7176
7177 return done ? done : err;
7178}
05589553 7179
05f3fb3c
JA
7180static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7181 unsigned nr_args)
7182{
7183 struct io_uring_files_update up;
7184
7185 if (!ctx->file_data)
7186 return -ENXIO;
7187 if (!nr_args)
7188 return -EINVAL;
7189 if (copy_from_user(&up, arg, sizeof(up)))
7190 return -EFAULT;
7191 if (up.resv)
7192 return -EINVAL;
7193
7194 return __io_sqe_files_update(ctx, &up, nr_args);
7195}
c3a31e60 7196
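/*
 * Illustrative userspace sketch, not part of this file, assuming liburing
 * and a ring with files already registered: updating slots in place via
 * IORING_REGISTER_FILES_UPDATE, where -1 clears a slot. 'new_fd' is an
 * assumption.
 *
 *	int fds[2] = { new_fd, -1 };	// replace slot 2, clear slot 3
 *
 *	int ret = io_uring_register_files_update(&ring, 2, fds, 2);
 *	// on success, returns the number of slots processed (2 here)
 */
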
e9fd9396 7197static void io_free_work(struct io_wq_work *work)
7d723065
JA
7198{
7199 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7200
e9fd9396 7201 /* Consider that io_steal_work() relies on this ref */
7d723065
JA
7202 io_put_req(req);
7203}
7204
24369c2e
PB
7205static int io_init_wq_offload(struct io_ring_ctx *ctx,
7206 struct io_uring_params *p)
7207{
7208 struct io_wq_data data;
7209 struct fd f;
7210 struct io_ring_ctx *ctx_attach;
7211 unsigned int concurrency;
7212 int ret = 0;
7213
7214 data.user = ctx->user;
e9fd9396 7215 data.free_work = io_free_work;
f5fa38c5 7216 data.do_work = io_wq_submit_work;
24369c2e
PB
7217
7218 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
7219 /* Do QD, or 4 * CPUS, whichever is smaller */
7220 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
7221
7222 ctx->io_wq = io_wq_create(concurrency, &data);
7223 if (IS_ERR(ctx->io_wq)) {
7224 ret = PTR_ERR(ctx->io_wq);
7225 ctx->io_wq = NULL;
7226 }
7227 return ret;
7228 }
7229
7230 f = fdget(p->wq_fd);
7231 if (!f.file)
7232 return -EBADF;
7233
7234 if (f.file->f_op != &io_uring_fops) {
7235 ret = -EINVAL;
7236 goto out_fput;
7237 }
7238
7239 ctx_attach = f.file->private_data;
7240 /* @io_wq is protected by holding the fd */
7241 if (!io_wq_get(ctx_attach->io_wq, &data)) {
7242 ret = -EINVAL;
7243 goto out_fput;
7244 }
7245
7246 ctx->io_wq = ctx_attach->io_wq;
7247out_fput:
7248 fdput(f);
7249 return ret;
7250}
7251
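/*
 * Illustrative userspace sketch, not part of this file, assuming liburing:
 * sharing one io-wq backend between two rings, which is the p->wq_fd path
 * handled above. 'first_ring' is assumed to be an already initialized ring.
 *
 *	struct io_uring second_ring;
 *	struct io_uring_params p = { 0 };
 *
 *	p.flags = IORING_SETUP_ATTACH_WQ;
 *	p.wq_fd = first_ring.ring_fd;
 *	int ret = io_uring_queue_init_params(64, &second_ring, &p);
 */
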
6c271ce2
JA
7252static int io_sq_offload_start(struct io_ring_ctx *ctx,
7253 struct io_uring_params *p)
2b188cc1
JA
7254{
7255 int ret;
7256
6c271ce2 7257 if (ctx->flags & IORING_SETUP_SQPOLL) {
8eb06d7e
PB
7258 mmgrab(current->mm);
7259 ctx->sqo_mm = current->mm;
7260
3ec482d1
JA
7261 ret = -EPERM;
7262 if (!capable(CAP_SYS_ADMIN))
7263 goto err;
7264
917257da
JA
7265 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7266 if (!ctx->sq_thread_idle)
7267 ctx->sq_thread_idle = HZ;
7268
6c271ce2 7269 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 7270 int cpu = p->sq_thread_cpu;
6c271ce2 7271
917257da 7272 ret = -EINVAL;
44a9bd18
JA
7273 if (cpu >= nr_cpu_ids)
7274 goto err;
7889f44d 7275 if (!cpu_online(cpu))
917257da
JA
7276 goto err;
7277
6c271ce2
JA
7278 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
7279 ctx, cpu,
7280 "io_uring-sq");
7281 } else {
7282 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
7283 "io_uring-sq");
7284 }
7285 if (IS_ERR(ctx->sqo_thread)) {
7286 ret = PTR_ERR(ctx->sqo_thread);
7287 ctx->sqo_thread = NULL;
7288 goto err;
7289 }
7290 wake_up_process(ctx->sqo_thread);
7291 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7292 /* Can't have SQ_AFF without SQPOLL */
7293 ret = -EINVAL;
7294 goto err;
7295 }
7296
24369c2e
PB
7297 ret = io_init_wq_offload(ctx, p);
7298 if (ret)
2b188cc1 7299 goto err;
2b188cc1
JA
7300
7301 return 0;
7302err:
54a91f3b 7303 io_finish_async(ctx);
8eb06d7e
PB
7304 if (ctx->sqo_mm) {
7305 mmdrop(ctx->sqo_mm);
7306 ctx->sqo_mm = NULL;
7307 }
2b188cc1
JA
7308 return ret;
7309}
7310
a087e2b5
BM
7311static inline void __io_unaccount_mem(struct user_struct *user,
7312 unsigned long nr_pages)
2b188cc1
JA
7313{
7314 atomic_long_sub(nr_pages, &user->locked_vm);
7315}
7316
a087e2b5
BM
7317static inline int __io_account_mem(struct user_struct *user,
7318 unsigned long nr_pages)
2b188cc1
JA
7319{
7320 unsigned long page_limit, cur_pages, new_pages;
7321
7322 /* Don't allow more pages than we can safely lock */
7323 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7324
7325 do {
7326 cur_pages = atomic_long_read(&user->locked_vm);
7327 new_pages = cur_pages + nr_pages;
7328 if (new_pages > page_limit)
7329 return -ENOMEM;
7330 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7331 new_pages) != cur_pages);
7332
7333 return 0;
7334}
7335
2e0464d4
BM
7336static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
7337 enum io_mem_account acct)
a087e2b5 7338{
aad5d8da 7339 if (ctx->limit_mem)
a087e2b5 7340 __io_unaccount_mem(ctx->user, nr_pages);
30975825 7341
2e0464d4
BM
7342 if (ctx->sqo_mm) {
7343 if (acct == ACCT_LOCKED)
7344 ctx->sqo_mm->locked_vm -= nr_pages;
7345 else if (acct == ACCT_PINNED)
7346 atomic64_sub(nr_pages, &ctx->sqo_mm->pinned_vm);
7347 }
a087e2b5
BM
7348}
7349
2e0464d4
BM
7350static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
7351 enum io_mem_account acct)
a087e2b5 7352{
30975825
BM
7353 int ret;
7354
7355 if (ctx->limit_mem) {
7356 ret = __io_account_mem(ctx->user, nr_pages);
7357 if (ret)
7358 return ret;
7359 }
7360
2e0464d4
BM
7361 if (ctx->sqo_mm) {
7362 if (acct == ACCT_LOCKED)
7363 ctx->sqo_mm->locked_vm += nr_pages;
7364 else if (acct == ACCT_PINNED)
7365 atomic64_add(nr_pages, &ctx->sqo_mm->pinned_vm);
7366 }
a087e2b5
BM
7367
7368 return 0;
7369}
7370
2b188cc1
JA
7371static void io_mem_free(void *ptr)
7372{
52e04ef4
MR
7373 struct page *page;
7374
7375 if (!ptr)
7376 return;
2b188cc1 7377
52e04ef4 7378 page = virt_to_head_page(ptr);
2b188cc1
JA
7379 if (put_page_testzero(page))
7380 free_compound_page(page);
7381}
7382
7383static void *io_mem_alloc(size_t size)
7384{
7385 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
7386 __GFP_NORETRY;
7387
7388 return (void *) __get_free_pages(gfp_flags, get_order(size));
7389}
7390
75b28aff
HV
7391static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
7392 size_t *sq_offset)
7393{
7394 struct io_rings *rings;
7395 size_t off, sq_array_size;
7396
7397 off = struct_size(rings, cqes, cq_entries);
7398 if (off == SIZE_MAX)
7399 return SIZE_MAX;
7400
7401#ifdef CONFIG_SMP
7402 off = ALIGN(off, SMP_CACHE_BYTES);
7403 if (off == 0)
7404 return SIZE_MAX;
7405#endif
7406
b36200f5
DV
7407 if (sq_offset)
7408 *sq_offset = off;
7409
75b28aff
HV
7410 sq_array_size = array_size(sizeof(u32), sq_entries);
7411 if (sq_array_size == SIZE_MAX)
7412 return SIZE_MAX;
7413
7414 if (check_add_overflow(off, sq_array_size, &off))
7415 return SIZE_MAX;
7416
75b28aff
HV
7417 return off;
7418}
7419
2b188cc1
JA
7420static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
7421{
75b28aff 7422 size_t pages;
2b188cc1 7423
75b28aff
HV
7424 pages = (size_t)1 << get_order(
7425 rings_size(sq_entries, cq_entries, NULL));
7426 pages += (size_t)1 << get_order(
7427 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 7428
75b28aff 7429 return pages;
2b188cc1
JA
7430}
7431
edafccee
JA
7432static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
7433{
7434 int i, j;
7435
7436 if (!ctx->user_bufs)
7437 return -ENXIO;
7438
7439 for (i = 0; i < ctx->nr_user_bufs; i++) {
7440 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7441
7442 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 7443 unpin_user_page(imu->bvec[j].bv_page);
edafccee 7444
2e0464d4 7445 io_unaccount_mem(ctx, imu->nr_bvecs, ACCT_PINNED);
d4ef6475 7446 kvfree(imu->bvec);
edafccee
JA
7447 imu->nr_bvecs = 0;
7448 }
7449
7450 kfree(ctx->user_bufs);
7451 ctx->user_bufs = NULL;
7452 ctx->nr_user_bufs = 0;
7453 return 0;
7454}
7455
7456static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
7457 void __user *arg, unsigned index)
7458{
7459 struct iovec __user *src;
7460
7461#ifdef CONFIG_COMPAT
7462 if (ctx->compat) {
7463 struct compat_iovec __user *ciovs;
7464 struct compat_iovec ciov;
7465
7466 ciovs = (struct compat_iovec __user *) arg;
7467 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
7468 return -EFAULT;
7469
d55e5f5b 7470 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
7471 dst->iov_len = ciov.iov_len;
7472 return 0;
7473 }
7474#endif
7475 src = (struct iovec __user *) arg;
7476 if (copy_from_user(dst, &src[index], sizeof(*dst)))
7477 return -EFAULT;
7478 return 0;
7479}
7480
7481static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
7482 unsigned nr_args)
7483{
7484 struct vm_area_struct **vmas = NULL;
7485 struct page **pages = NULL;
7486 int i, j, got_pages = 0;
7487 int ret = -EINVAL;
7488
7489 if (ctx->user_bufs)
7490 return -EBUSY;
7491 if (!nr_args || nr_args > UIO_MAXIOV)
7492 return -EINVAL;
7493
7494 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
7495 GFP_KERNEL);
7496 if (!ctx->user_bufs)
7497 return -ENOMEM;
7498
7499 for (i = 0; i < nr_args; i++) {
7500 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7501 unsigned long off, start, end, ubuf;
7502 int pret, nr_pages;
7503 struct iovec iov;
7504 size_t size;
7505
7506 ret = io_copy_iov(ctx, &iov, arg, i);
7507 if (ret)
a278682d 7508 goto err;
edafccee
JA
7509
7510 /*
7511		 * Don't impose further limits on the size and buffer
7512		 * constraints here; we'll return -EINVAL at submission
7513		 * time if they are wrong.
7514 */
7515 ret = -EFAULT;
7516 if (!iov.iov_base || !iov.iov_len)
7517 goto err;
7518
7519 /* arbitrary limit, but we need something */
7520 if (iov.iov_len > SZ_1G)
7521 goto err;
7522
7523 ubuf = (unsigned long) iov.iov_base;
7524 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
7525 start = ubuf >> PAGE_SHIFT;
7526 nr_pages = end - start;
7527
2e0464d4 7528 ret = io_account_mem(ctx, nr_pages, ACCT_PINNED);
a087e2b5
BM
7529 if (ret)
7530 goto err;
edafccee
JA
7531
7532 ret = 0;
7533 if (!pages || nr_pages > got_pages) {
a8c73c1a
DE
7534 kvfree(vmas);
7535 kvfree(pages);
d4ef6475 7536 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 7537 GFP_KERNEL);
d4ef6475 7538 vmas = kvmalloc_array(nr_pages,
edafccee
JA
7539 sizeof(struct vm_area_struct *),
7540 GFP_KERNEL);
7541 if (!pages || !vmas) {
7542 ret = -ENOMEM;
2e0464d4 7543 io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
edafccee
JA
7544 goto err;
7545 }
7546 got_pages = nr_pages;
7547 }
7548
d4ef6475 7549 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
7550 GFP_KERNEL);
7551 ret = -ENOMEM;
7552 if (!imu->bvec) {
2e0464d4 7553 io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
edafccee
JA
7554 goto err;
7555 }
7556
7557 ret = 0;
d8ed45c5 7558 mmap_read_lock(current->mm);
2113b05d 7559 pret = pin_user_pages(ubuf, nr_pages,
932f4a63
IW
7560 FOLL_WRITE | FOLL_LONGTERM,
7561 pages, vmas);
edafccee
JA
7562 if (pret == nr_pages) {
7563 /* don't support file backed memory */
7564 for (j = 0; j < nr_pages; j++) {
7565 struct vm_area_struct *vma = vmas[j];
7566
7567 if (vma->vm_file &&
7568 !is_file_hugepages(vma->vm_file)) {
7569 ret = -EOPNOTSUPP;
7570 break;
7571 }
7572 }
7573 } else {
7574 ret = pret < 0 ? pret : -EFAULT;
7575 }
d8ed45c5 7576 mmap_read_unlock(current->mm);
edafccee
JA
7577 if (ret) {
7578 /*
7579 * if we did partial map, or found file backed vmas,
7580 * release any pages we did get
7581 */
27c4d3a3 7582 if (pret > 0)
f1f6a7dd 7583 unpin_user_pages(pages, pret);
2e0464d4 7584 io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
d4ef6475 7585 kvfree(imu->bvec);
edafccee
JA
7586 goto err;
7587 }
7588
7589 off = ubuf & ~PAGE_MASK;
7590 size = iov.iov_len;
7591 for (j = 0; j < nr_pages; j++) {
7592 size_t vec_len;
7593
7594 vec_len = min_t(size_t, size, PAGE_SIZE - off);
7595 imu->bvec[j].bv_page = pages[j];
7596 imu->bvec[j].bv_len = vec_len;
7597 imu->bvec[j].bv_offset = off;
7598 off = 0;
7599 size -= vec_len;
7600 }
7601 /* store original address for later verification */
7602 imu->ubuf = ubuf;
7603 imu->len = iov.iov_len;
7604 imu->nr_bvecs = nr_pages;
7605
7606 ctx->nr_user_bufs++;
7607 }
d4ef6475
MR
7608 kvfree(pages);
7609 kvfree(vmas);
edafccee
JA
7610 return 0;
7611err:
d4ef6475
MR
7612 kvfree(pages);
7613 kvfree(vmas);
edafccee
JA
7614 io_sqe_buffer_unregister(ctx);
7615 return ret;
7616}
7617
9b402849
JA
7618static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
7619{
7620 __s32 __user *fds = arg;
7621 int fd;
7622
7623 if (ctx->cq_ev_fd)
7624 return -EBUSY;
7625
7626 if (copy_from_user(&fd, fds, sizeof(*fds)))
7627 return -EFAULT;
7628
7629 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
7630 if (IS_ERR(ctx->cq_ev_fd)) {
7631 int ret = PTR_ERR(ctx->cq_ev_fd);
7632 ctx->cq_ev_fd = NULL;
7633 return ret;
7634 }
7635
7636 return 0;
7637}
7638
7639static int io_eventfd_unregister(struct io_ring_ctx *ctx)
7640{
7641 if (ctx->cq_ev_fd) {
7642 eventfd_ctx_put(ctx->cq_ev_fd);
7643 ctx->cq_ev_fd = NULL;
7644 return 0;
7645 }
7646
7647 return -ENXIO;
7648}
7649
5a2e745d
JA
7650static int __io_destroy_buffers(int id, void *p, void *data)
7651{
7652 struct io_ring_ctx *ctx = data;
7653 struct io_buffer *buf = p;
7654
067524e9 7655 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
7656 return 0;
7657}
7658
7659static void io_destroy_buffers(struct io_ring_ctx *ctx)
7660{
7661 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
7662 idr_destroy(&ctx->io_buffer_idr);
7663}
7664
2b188cc1
JA
7665static void io_ring_ctx_free(struct io_ring_ctx *ctx)
7666{
6b06314c 7667 io_finish_async(ctx);
30975825 7668 if (ctx->sqo_mm) {
2b188cc1 7669 mmdrop(ctx->sqo_mm);
30975825
BM
7670 ctx->sqo_mm = NULL;
7671 }
def596e9 7672
edafccee 7673 io_sqe_buffer_unregister(ctx);
6b06314c 7674 io_sqe_files_unregister(ctx);
9b402849 7675 io_eventfd_unregister(ctx);
5a2e745d 7676 io_destroy_buffers(ctx);
41726c9a 7677 idr_destroy(&ctx->personality_idr);
def596e9 7678
2b188cc1 7679#if defined(CONFIG_UNIX)
355e8d26
EB
7680 if (ctx->ring_sock) {
7681 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 7682 sock_release(ctx->ring_sock);
355e8d26 7683 }
2b188cc1
JA
7684#endif
7685
75b28aff 7686 io_mem_free(ctx->rings);
2b188cc1 7687 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
7688
7689 percpu_ref_exit(&ctx->refs);
2b188cc1 7690 free_uid(ctx->user);
181e448d 7691 put_cred(ctx->creds);
78076bb6 7692 kfree(ctx->cancel_hash);
0ddf92e8 7693 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
7694 kfree(ctx);
7695}
7696
7697static __poll_t io_uring_poll(struct file *file, poll_table *wait)
7698{
7699 struct io_ring_ctx *ctx = file->private_data;
7700 __poll_t mask = 0;
7701
7702 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
7703 /*
7704 * synchronizes with barrier from wq_has_sleeper call in
7705 * io_commit_cqring
7706 */
2b188cc1 7707 smp_rmb();
75b28aff
HV
7708 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
7709 ctx->rings->sq_ring_entries)
2b188cc1 7710 mask |= EPOLLOUT | EPOLLWRNORM;
63e5d81f 7711 if (io_cqring_events(ctx, false))
2b188cc1
JA
7712 mask |= EPOLLIN | EPOLLRDNORM;
7713
7714 return mask;
7715}
7716
7717static int io_uring_fasync(int fd, struct file *file, int on)
7718{
7719 struct io_ring_ctx *ctx = file->private_data;
7720
7721 return fasync_helper(fd, file, on, &ctx->cq_fasync);
7722}
7723
071698e1
JA
7724static int io_remove_personalities(int id, void *p, void *data)
7725{
7726 struct io_ring_ctx *ctx = data;
7727 const struct cred *cred;
7728
7729 cred = idr_remove(&ctx->personality_idr, id);
7730 if (cred)
7731 put_cred(cred);
7732 return 0;
7733}
7734
85faa7b8
JA
7735static void io_ring_exit_work(struct work_struct *work)
7736{
b2edc0a7
PB
7737 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
7738 exit_work);
85faa7b8 7739
56952e91
JA
7740 /*
7741 * If we're doing polled IO and end up having requests being
7742 * submitted async (out-of-line), then completions can come in while
7743 * we're waiting for refs to drop. We need to reap these manually,
7744 * as nobody else will be looking for them.
7745 */
b2edc0a7 7746 do {
56952e91
JA
7747 if (ctx->rings)
7748 io_cqring_overflow_flush(ctx, true);
b2edc0a7
PB
7749 io_iopoll_try_reap_events(ctx);
7750 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
85faa7b8
JA
7751 io_ring_ctx_free(ctx);
7752}
7753
2b188cc1
JA
7754static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
7755{
7756 mutex_lock(&ctx->uring_lock);
7757 percpu_ref_kill(&ctx->refs);
7758 mutex_unlock(&ctx->uring_lock);
7759
5262f567 7760 io_kill_timeouts(ctx);
221c5eb2 7761 io_poll_remove_all(ctx);
561fb04a
JA
7762
7763 if (ctx->io_wq)
7764 io_wq_cancel_all(ctx->io_wq);
7765
15dff286
JA
7766 /* if we failed setting up the ctx, we might not have any rings */
7767 if (ctx->rings)
7768 io_cqring_overflow_flush(ctx, true);
b2edc0a7 7769 io_iopoll_try_reap_events(ctx);
071698e1 7770 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
309fc03a
JA
7771
7772 /*
7773 * Do this upfront, so we won't have a grace period where the ring
7774 * is closed but resources aren't reaped yet. This can cause
7775 * spurious failure in setting up a new ring.
7776 */
760618f7
JA
7777 io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
7778 ACCT_LOCKED);
309fc03a 7779
85faa7b8
JA
7780 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
7781 queue_work(system_wq, &ctx->exit_work);
2b188cc1
JA
7782}
7783
7784static int io_uring_release(struct inode *inode, struct file *file)
7785{
7786 struct io_ring_ctx *ctx = file->private_data;
7787
7788 file->private_data = NULL;
7789 io_ring_ctx_wait_and_kill(ctx);
7790 return 0;
7791}
7792
67c4d9e6
PB
7793static bool io_wq_files_match(struct io_wq_work *work, void *data)
7794{
7795 struct files_struct *files = data;
7796
7797 return work->files == files;
7798}
7799
fcb323cc
JA
7800static void io_uring_cancel_files(struct io_ring_ctx *ctx,
7801 struct files_struct *files)
7802{
67c4d9e6
PB
7803 if (list_empty_careful(&ctx->inflight_list))
7804 return;
7805
7806	/* cancel all at once, should be faster than doing it one by one */
7807 io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
7808
fcb323cc 7809 while (!list_empty_careful(&ctx->inflight_list)) {
d8f1b971
XW
7810 struct io_kiocb *cancel_req = NULL, *req;
7811 DEFINE_WAIT(wait);
fcb323cc
JA
7812
7813 spin_lock_irq(&ctx->inflight_lock);
7814 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
768134d4
JA
7815 if (req->work.files != files)
7816 continue;
7817 /* req is being completed, ignore */
7818 if (!refcount_inc_not_zero(&req->refs))
7819 continue;
7820 cancel_req = req;
7821 break;
fcb323cc 7822 }
768134d4 7823 if (cancel_req)
fcb323cc 7824 prepare_to_wait(&ctx->inflight_wait, &wait,
768134d4 7825 TASK_UNINTERRUPTIBLE);
fcb323cc
JA
7826 spin_unlock_irq(&ctx->inflight_lock);
7827
768134d4
JA
7828 /* We need to keep going until we don't find a matching req */
7829 if (!cancel_req)
fcb323cc 7830 break;
2f6d9b9d 7831
2ca10259
JA
7832 if (cancel_req->flags & REQ_F_OVERFLOW) {
7833 spin_lock_irq(&ctx->completion_lock);
40d8ddd4 7834 list_del(&cancel_req->compl.list);
2ca10259
JA
7835 cancel_req->flags &= ~REQ_F_OVERFLOW;
7836 if (list_empty(&ctx->cq_overflow_list)) {
7837 clear_bit(0, &ctx->sq_check_overflow);
7838 clear_bit(0, &ctx->cq_check_overflow);
6d5f9049 7839 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
2ca10259
JA
7840 }
7841 spin_unlock_irq(&ctx->completion_lock);
7842
7843 WRITE_ONCE(ctx->rings->cq_overflow,
7844 atomic_inc_return(&ctx->cached_cq_overflow));
7845
7846 /*
7847 * Put inflight ref and overflow ref. If that's
7848 * all we had, then we're done with this request.
7849 */
7850 if (refcount_sub_and_test(2, &cancel_req->refs)) {
4518a3cc 7851 io_free_req(cancel_req);
d8f1b971 7852 finish_wait(&ctx->inflight_wait, &wait);
2ca10259
JA
7853 continue;
7854 }
7b53d598
PB
7855 } else {
7856 io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
7857 io_put_req(cancel_req);
2ca10259
JA
7858 }
7859
fcb323cc 7860 schedule();
d8f1b971 7861 finish_wait(&ctx->inflight_wait, &wait);
fcb323cc
JA
7862 }
7863}
7864
801dd57b 7865static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
44e728b8 7866{
801dd57b
PB
7867 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7868 struct task_struct *task = data;
44e728b8 7869
801dd57b 7870 return req->task == task;
44e728b8
PB
7871}
7872
fcb323cc
JA
7873static int io_uring_flush(struct file *file, void *data)
7874{
7875 struct io_ring_ctx *ctx = file->private_data;
7876
7877 io_uring_cancel_files(ctx, data);
6ab23144
JA
7878
7879 /*
7880 * If the task is going away, cancel work it may have pending
7881 */
801dd57b
PB
7882 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
7883 io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, current, true);
6ab23144 7884
fcb323cc
JA
7885 return 0;
7886}
7887
6c5c240e
RP
7888static void *io_uring_validate_mmap_request(struct file *file,
7889 loff_t pgoff, size_t sz)
2b188cc1 7890{
2b188cc1 7891 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 7892 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
7893 struct page *page;
7894 void *ptr;
7895
7896 switch (offset) {
7897 case IORING_OFF_SQ_RING:
75b28aff
HV
7898 case IORING_OFF_CQ_RING:
7899 ptr = ctx->rings;
2b188cc1
JA
7900 break;
7901 case IORING_OFF_SQES:
7902 ptr = ctx->sq_sqes;
7903 break;
2b188cc1 7904 default:
6c5c240e 7905 return ERR_PTR(-EINVAL);
2b188cc1
JA
7906 }
7907
7908 page = virt_to_head_page(ptr);
a50b854e 7909 if (sz > page_size(page))
6c5c240e
RP
7910 return ERR_PTR(-EINVAL);
7911
7912 return ptr;
7913}
7914
7915#ifdef CONFIG_MMU
7916
7917static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7918{
7919 size_t sz = vma->vm_end - vma->vm_start;
7920 unsigned long pfn;
7921 void *ptr;
7922
7923 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
7924 if (IS_ERR(ptr))
7925 return PTR_ERR(ptr);
2b188cc1
JA
7926
7927 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
7928 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
7929}
7930
6c5c240e
RP
7931#else /* !CONFIG_MMU */
7932
7933static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7934{
7935 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
7936}
7937
7938static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
7939{
7940 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
7941}
7942
7943static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
7944 unsigned long addr, unsigned long len,
7945 unsigned long pgoff, unsigned long flags)
7946{
7947 void *ptr;
7948
7949 ptr = io_uring_validate_mmap_request(file, pgoff, len);
7950 if (IS_ERR(ptr))
7951 return PTR_ERR(ptr);
7952
7953 return (unsigned long) ptr;
7954}
7955
7956#endif /* !CONFIG_MMU */
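
/*
 * Illustrative userspace sketch: mapping the SQ ring region that
 * io_uring_validate_mmap_request() vets above.  The size comes from the
 * offsets io_uring_setup() returned; with IORING_FEAT_SINGLE_MMAP the
 * SQ and CQ rings share one allocation, so this single mapping covers
 * both.  The helper name is hypothetical.
 */
#include <linux/io_uring.h>
#include <stddef.h>
#include <sys/mman.h>

static void *map_sq_ring(int ring_fd, const struct io_uring_params *p)
{
	size_t sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);

	return mmap(NULL, sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
}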
7957
2b188cc1
JA
7958SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
7959 u32, min_complete, u32, flags, const sigset_t __user *, sig,
7960 size_t, sigsz)
7961{
7962 struct io_ring_ctx *ctx;
7963 long ret = -EBADF;
7964 int submitted = 0;
7965 struct fd f;
7966
4c6e277c 7967 io_run_task_work();
b41e9852 7968
6c271ce2 7969 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
2b188cc1
JA
7970 return -EINVAL;
7971
7972 f = fdget(fd);
7973 if (!f.file)
7974 return -EBADF;
7975
7976 ret = -EOPNOTSUPP;
7977 if (f.file->f_op != &io_uring_fops)
7978 goto out_fput;
7979
7980 ret = -ENXIO;
7981 ctx = f.file->private_data;
7982 if (!percpu_ref_tryget(&ctx->refs))
7983 goto out_fput;
7984
6c271ce2
JA
7985 /*
7986 * For SQ polling, the thread will do all submissions and completions.
7987 * Just return the requested submit count, and wake the thread if
7988 * we were asked to.
7989 */
b2a9eada 7990 ret = 0;
6c271ce2 7991 if (ctx->flags & IORING_SETUP_SQPOLL) {
c1edbf5f
JA
7992 if (!list_empty_careful(&ctx->cq_overflow_list))
7993 io_cqring_overflow_flush(ctx, false);
6c271ce2
JA
7994 if (flags & IORING_ENTER_SQ_WAKEUP)
7995 wake_up(&ctx->sqo_wait);
7996 submitted = to_submit;
b2a9eada 7997 } else if (to_submit) {
2b188cc1 7998 mutex_lock(&ctx->uring_lock);
0cdaf760 7999 submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
2b188cc1 8000 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
8001
8002 if (submitted != to_submit)
8003 goto out;
2b188cc1
JA
8004 }
8005 if (flags & IORING_ENTER_GETEVENTS) {
8006 min_complete = min(min_complete, ctx->cq_entries);
8007
32b2244a
XW
8008 /*
8009		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
8010		 * space applications don't need to poll for completion events
8011		 * themselves; they can rely on io_sq_thread to do that polling,
8012		 * which reduces cpu usage and uring_lock contention.
8013 */
8014 if (ctx->flags & IORING_SETUP_IOPOLL &&
8015 !(ctx->flags & IORING_SETUP_SQPOLL)) {
7668b92a 8016 ret = io_iopoll_check(ctx, min_complete);
def596e9
JA
8017 } else {
8018 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
8019 }
2b188cc1
JA
8020 }
8021
7c504e65 8022out:
6805b32e 8023 percpu_ref_put(&ctx->refs);
2b188cc1
JA
8024out_fput:
8025 fdput(f);
8026 return submitted ? submitted : ret;
8027}
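
/*
 * Illustrative userspace sketch: the common submit-and-wait pattern
 * against the syscall above.  On a non-SQPOLL ring this submits up to
 * to_submit SQEs inline and, with IORING_ENTER_GETEVENTS, blocks until
 * min_complete completions are reaped.  The helper name is hypothetical.
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static long submit_and_wait(int ring_fd, unsigned to_submit,
			    unsigned min_complete)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, min_complete,
		       IORING_ENTER_GETEVENTS, NULL, 0);
}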
8028
bebdb65e 8029#ifdef CONFIG_PROC_FS
87ce955b
JA
8030static int io_uring_show_cred(int id, void *p, void *data)
8031{
8032 const struct cred *cred = p;
8033 struct seq_file *m = data;
8034 struct user_namespace *uns = seq_user_ns(m);
8035 struct group_info *gi;
8036 kernel_cap_t cap;
8037 unsigned __capi;
8038 int g;
8039
8040 seq_printf(m, "%5d\n", id);
8041 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
8042 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
8043 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
8044 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
8045 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
8046 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
8047 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
8048 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
8049 seq_puts(m, "\n\tGroups:\t");
8050 gi = cred->group_info;
8051 for (g = 0; g < gi->ngroups; g++) {
8052 seq_put_decimal_ull(m, g ? " " : "",
8053 from_kgid_munged(uns, gi->gid[g]));
8054 }
8055 seq_puts(m, "\n\tCapEff:\t");
8056 cap = cred->cap_effective;
8057 CAP_FOR_EACH_U32(__capi)
8058 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
8059 seq_putc(m, '\n');
8060 return 0;
8061}
8062
8063static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
8064{
8065 int i;
8066
8067 mutex_lock(&ctx->uring_lock);
8068 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
8069 for (i = 0; i < ctx->nr_user_files; i++) {
8070 struct fixed_file_table *table;
8071 struct file *f;
8072
8073 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
8074 f = table->files[i & IORING_FILE_TABLE_MASK];
8075 if (f)
8076 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
8077 else
8078 seq_printf(m, "%5u: <none>\n", i);
8079 }
8080 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
8081 for (i = 0; i < ctx->nr_user_bufs; i++) {
8082 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
8083
8084 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
8085 (unsigned int) buf->len);
8086 }
8087 if (!idr_is_empty(&ctx->personality_idr)) {
8088		seq_puts(m, "Personalities:\n");
8089 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
8090 }
d7718a9d
JA
8091	seq_puts(m, "PollList:\n");
8092 spin_lock_irq(&ctx->completion_lock);
8093 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
8094 struct hlist_head *list = &ctx->cancel_hash[i];
8095 struct io_kiocb *req;
8096
8097 hlist_for_each_entry(req, list, hash_node)
8098 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
8099 req->task->task_works != NULL);
8100 }
8101 spin_unlock_irq(&ctx->completion_lock);
87ce955b
JA
8102 mutex_unlock(&ctx->uring_lock);
8103}
8104
8105static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
8106{
8107 struct io_ring_ctx *ctx = f->private_data;
8108
8109 if (percpu_ref_tryget(&ctx->refs)) {
8110 __io_uring_show_fdinfo(ctx, m);
8111 percpu_ref_put(&ctx->refs);
8112 }
8113}
bebdb65e 8114#endif
87ce955b 8115
2b188cc1
JA
8116static const struct file_operations io_uring_fops = {
8117 .release = io_uring_release,
fcb323cc 8118 .flush = io_uring_flush,
2b188cc1 8119 .mmap = io_uring_mmap,
6c5c240e
RP
8120#ifndef CONFIG_MMU
8121 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
8122 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
8123#endif
2b188cc1
JA
8124 .poll = io_uring_poll,
8125 .fasync = io_uring_fasync,
bebdb65e 8126#ifdef CONFIG_PROC_FS
87ce955b 8127 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 8128#endif
2b188cc1
JA
8129};
8130
8131static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
8132 struct io_uring_params *p)
8133{
75b28aff
HV
8134 struct io_rings *rings;
8135 size_t size, sq_array_offset;
2b188cc1 8136
75b28aff
HV
8137 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
8138 if (size == SIZE_MAX)
8139 return -EOVERFLOW;
8140
8141 rings = io_mem_alloc(size);
8142 if (!rings)
2b188cc1
JA
8143 return -ENOMEM;
8144
75b28aff
HV
8145 ctx->rings = rings;
8146 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
8147 rings->sq_ring_mask = p->sq_entries - 1;
8148 rings->cq_ring_mask = p->cq_entries - 1;
8149 rings->sq_ring_entries = p->sq_entries;
8150 rings->cq_ring_entries = p->cq_entries;
8151 ctx->sq_mask = rings->sq_ring_mask;
8152 ctx->cq_mask = rings->cq_ring_mask;
8153 ctx->sq_entries = rings->sq_ring_entries;
8154 ctx->cq_entries = rings->cq_ring_entries;
2b188cc1
JA
8155
8156 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
8157 if (size == SIZE_MAX) {
8158 io_mem_free(ctx->rings);
8159 ctx->rings = NULL;
2b188cc1 8160 return -EOVERFLOW;
eb065d30 8161 }
2b188cc1
JA
8162
8163 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
8164 if (!ctx->sq_sqes) {
8165 io_mem_free(ctx->rings);
8166 ctx->rings = NULL;
2b188cc1 8167 return -ENOMEM;
eb065d30 8168 }
2b188cc1 8169
2b188cc1
JA
8170 return 0;
8171}
8172
8173/*
8174 * Allocate an anonymous fd, this is what constitutes the application
8175 * visible backing of an io_uring instance. The application mmaps this
8176 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
8177 * we have to tie this fd to a socket for file garbage collection purposes.
8178 */
8179static int io_uring_get_fd(struct io_ring_ctx *ctx)
8180{
8181 struct file *file;
8182 int ret;
8183
8184#if defined(CONFIG_UNIX)
8185 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
8186 &ctx->ring_sock);
8187 if (ret)
8188 return ret;
8189#endif
8190
8191 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
8192 if (ret < 0)
8193 goto err;
8194
8195 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
8196 O_RDWR | O_CLOEXEC);
8197 if (IS_ERR(file)) {
8198 put_unused_fd(ret);
8199 ret = PTR_ERR(file);
8200 goto err;
8201 }
8202
8203#if defined(CONFIG_UNIX)
8204 ctx->ring_sock->file = file;
8205#endif
8206 fd_install(ret, file);
8207 return ret;
8208err:
8209#if defined(CONFIG_UNIX)
8210 sock_release(ctx->ring_sock);
8211 ctx->ring_sock = NULL;
8212#endif
8213 return ret;
8214}
8215
7f13657d
XW
8216static int io_uring_create(unsigned entries, struct io_uring_params *p,
8217 struct io_uring_params __user *params)
2b188cc1
JA
8218{
8219 struct user_struct *user = NULL;
8220 struct io_ring_ctx *ctx;
aad5d8da 8221 bool limit_mem;
2b188cc1
JA
8222 int ret;
8223
8110c1a6 8224 if (!entries)
2b188cc1 8225 return -EINVAL;
8110c1a6
JA
8226 if (entries > IORING_MAX_ENTRIES) {
8227 if (!(p->flags & IORING_SETUP_CLAMP))
8228 return -EINVAL;
8229 entries = IORING_MAX_ENTRIES;
8230 }
2b188cc1
JA
8231
8232 /*
8233 * Use twice as many entries for the CQ ring. It's possible for the
8234 * application to drive a higher depth than the size of the SQ ring,
8235 * since the sqes are only used at submission time. This allows for
33a107f0
JA
8236 * some flexibility in overcommitting a bit. If the application has
8237 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
8238 * of CQ ring entries manually.
2b188cc1
JA
8239 */
8240 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
8241 if (p->flags & IORING_SETUP_CQSIZE) {
8242 /*
8243 * If IORING_SETUP_CQSIZE is set, we do the same roundup
8244 * to a power-of-two, if it isn't already. We do NOT impose
8245 * any cq vs sq ring sizing.
8246 */
8110c1a6 8247 if (p->cq_entries < p->sq_entries)
33a107f0 8248 return -EINVAL;
8110c1a6
JA
8249 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
8250 if (!(p->flags & IORING_SETUP_CLAMP))
8251 return -EINVAL;
8252 p->cq_entries = IORING_MAX_CQ_ENTRIES;
8253 }
33a107f0
JA
8254 p->cq_entries = roundup_pow_of_two(p->cq_entries);
8255 } else {
8256 p->cq_entries = 2 * p->sq_entries;
8257 }
2b188cc1
JA
8258
8259 user = get_uid(current_user());
aad5d8da 8260 limit_mem = !capable(CAP_IPC_LOCK);
2b188cc1 8261
aad5d8da 8262 if (limit_mem) {
a087e2b5 8263 ret = __io_account_mem(user,
2b188cc1
JA
8264 ring_pages(p->sq_entries, p->cq_entries));
8265 if (ret) {
8266 free_uid(user);
8267 return ret;
8268 }
8269 }
8270
8271 ctx = io_ring_ctx_alloc(p);
8272 if (!ctx) {
aad5d8da 8273 if (limit_mem)
a087e2b5 8274 __io_unaccount_mem(user, ring_pages(p->sq_entries,
2b188cc1
JA
8275 p->cq_entries));
8276 free_uid(user);
8277 return -ENOMEM;
8278 }
8279 ctx->compat = in_compat_syscall();
2b188cc1 8280 ctx->user = user;
0b8c0ec7 8281 ctx->creds = get_current_cred();
2b188cc1
JA
8282
8283 ret = io_allocate_scq_urings(ctx, p);
8284 if (ret)
8285 goto err;
8286
6c271ce2 8287 ret = io_sq_offload_start(ctx, p);
2b188cc1
JA
8288 if (ret)
8289 goto err;
8290
2b188cc1 8291 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
8292 p->sq_off.head = offsetof(struct io_rings, sq.head);
8293 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
8294 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
8295 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
8296 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
8297 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
8298 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
8299
8300 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
8301 p->cq_off.head = offsetof(struct io_rings, cq.head);
8302 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
8303 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
8304 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
8305 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
8306 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 8307 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 8308
7f13657d
XW
8309 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
8310 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
5769a351
JX
8311 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
8312 IORING_FEAT_POLL_32BITS;
7f13657d
XW
8313
8314 if (copy_to_user(params, p, sizeof(*p))) {
8315 ret = -EFAULT;
8316 goto err;
8317 }
044c1ab3
JA
8318 /*
8319 * Install ring fd as the very last thing, so we don't risk someone
8320 * having closed it before we finish setup
8321 */
8322 ret = io_uring_get_fd(ctx);
8323 if (ret < 0)
8324 goto err;
8325
c826bd7a 8326 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2e0464d4
BM
8327 io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
8328 ACCT_LOCKED);
30975825 8329 ctx->limit_mem = limit_mem;
2b188cc1
JA
8330 return ret;
8331err:
8332 io_ring_ctx_wait_and_kill(ctx);
8333 return ret;
8334}
8335
8336/*
8337 * Sets up an io_uring context, and returns the fd. The application asks for a
8338 * ring size; we return the actual sq/cq ring sizes (among other things) in the
8339 * params structure passed in.
8340 */
8341static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
8342{
8343 struct io_uring_params p;
2b188cc1
JA
8344 int i;
8345
8346 if (copy_from_user(&p, params, sizeof(p)))
8347 return -EFAULT;
8348 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
8349 if (p.resv[i])
8350 return -EINVAL;
8351 }
8352
6c271ce2 8353 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 8354 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
24369c2e 8355 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
2b188cc1
JA
8356 return -EINVAL;
8357
7f13657d 8358 return io_uring_create(entries, &p, params);
2b188cc1
JA
8359}
8360
8361SYSCALL_DEFINE2(io_uring_setup, u32, entries,
8362 struct io_uring_params __user *, params)
8363{
8364 return io_uring_setup(entries, params);
8365}
8366
66f4af93
JA
8367static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
8368{
8369 struct io_uring_probe *p;
8370 size_t size;
8371 int i, ret;
8372
8373 size = struct_size(p, ops, nr_args);
8374 if (size == SIZE_MAX)
8375 return -EOVERFLOW;
8376 p = kzalloc(size, GFP_KERNEL);
8377 if (!p)
8378 return -ENOMEM;
8379
8380 ret = -EFAULT;
8381 if (copy_from_user(p, arg, size))
8382 goto out;
8383 ret = -EINVAL;
8384 if (memchr_inv(p, 0, size))
8385 goto out;
8386
8387 p->last_op = IORING_OP_LAST - 1;
8388 if (nr_args > IORING_OP_LAST)
8389 nr_args = IORING_OP_LAST;
8390
8391 for (i = 0; i < nr_args; i++) {
8392 p->ops[i].op = i;
8393 if (!io_op_defs[i].not_supported)
8394 p->ops[i].flags = IO_URING_OP_SUPPORTED;
8395 }
8396 p->ops_len = i;
8397
8398 ret = 0;
8399 if (copy_to_user(arg, p, size))
8400 ret = -EFAULT;
8401out:
8402 kfree(p);
8403 return ret;
8404}
8405
071698e1
JA
8406static int io_register_personality(struct io_ring_ctx *ctx)
8407{
8408 const struct cred *creds = get_current_cred();
8409 int id;
8410
8411 id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
8412 USHRT_MAX, GFP_KERNEL);
8413 if (id < 0)
8414 put_cred(creds);
8415 return id;
8416}
8417
8418static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
8419{
8420 const struct cred *old_creds;
8421
8422 old_creds = idr_remove(&ctx->personality_idr, id);
8423 if (old_creds) {
8424 put_cred(old_creds);
8425 return 0;
8426 }
8427
8428 return -EINVAL;
8429}
8430
8431static bool io_register_op_must_quiesce(int op)
8432{
8433 switch (op) {
8434 case IORING_UNREGISTER_FILES:
8435 case IORING_REGISTER_FILES_UPDATE:
8436 case IORING_REGISTER_PROBE:
8437 case IORING_REGISTER_PERSONALITY:
8438 case IORING_UNREGISTER_PERSONALITY:
8439 return false;
8440 default:
8441 return true;
8442 }
8443}
8444
edafccee
JA
8445static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
8446 void __user *arg, unsigned nr_args)
b19062a5
JA
8447 __releases(ctx->uring_lock)
8448 __acquires(ctx->uring_lock)
edafccee
JA
8449{
8450 int ret;
8451
35fa71a0
JA
8452 /*
8453 * We're inside the ring mutex, if the ref is already dying, then
8454 * someone else killed the ctx or is already going through
8455 * io_uring_register().
8456 */
8457 if (percpu_ref_is_dying(&ctx->refs))
8458 return -ENXIO;
8459
071698e1 8460 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 8461 percpu_ref_kill(&ctx->refs);
b19062a5 8462
05f3fb3c
JA
8463 /*
8464 * Drop uring mutex before waiting for references to exit. If
8465 * another thread is currently inside io_uring_enter() it might
8466 * need to grab the uring_lock to make progress. If we hold it
8467 * here across the drain wait, then we can deadlock. It's safe
8468 * to drop the mutex here, since no new references will come in
8469 * after we've killed the percpu ref.
8470 */
8471 mutex_unlock(&ctx->uring_lock);
0f158b4c 8472 ret = wait_for_completion_interruptible(&ctx->ref_comp);
05f3fb3c 8473 mutex_lock(&ctx->uring_lock);
c150368b
JA
8474 if (ret) {
8475 percpu_ref_resurrect(&ctx->refs);
8476 ret = -EINTR;
8477 goto out;
8478 }
05f3fb3c 8479 }
edafccee
JA
8480
8481 switch (opcode) {
8482 case IORING_REGISTER_BUFFERS:
8483 ret = io_sqe_buffer_register(ctx, arg, nr_args);
8484 break;
8485 case IORING_UNREGISTER_BUFFERS:
8486 ret = -EINVAL;
8487 if (arg || nr_args)
8488 break;
8489 ret = io_sqe_buffer_unregister(ctx);
8490 break;
6b06314c
JA
8491 case IORING_REGISTER_FILES:
8492 ret = io_sqe_files_register(ctx, arg, nr_args);
8493 break;
8494 case IORING_UNREGISTER_FILES:
8495 ret = -EINVAL;
8496 if (arg || nr_args)
8497 break;
8498 ret = io_sqe_files_unregister(ctx);
8499 break;
c3a31e60
JA
8500 case IORING_REGISTER_FILES_UPDATE:
8501 ret = io_sqe_files_update(ctx, arg, nr_args);
8502 break;
9b402849 8503 case IORING_REGISTER_EVENTFD:
f2842ab5 8504 case IORING_REGISTER_EVENTFD_ASYNC:
9b402849
JA
8505 ret = -EINVAL;
8506 if (nr_args != 1)
8507 break;
8508 ret = io_eventfd_register(ctx, arg);
f2842ab5
JA
8509 if (ret)
8510 break;
8511 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
8512 ctx->eventfd_async = 1;
8513 else
8514 ctx->eventfd_async = 0;
9b402849
JA
8515 break;
8516 case IORING_UNREGISTER_EVENTFD:
8517 ret = -EINVAL;
8518 if (arg || nr_args)
8519 break;
8520 ret = io_eventfd_unregister(ctx);
8521 break;
66f4af93
JA
8522 case IORING_REGISTER_PROBE:
8523 ret = -EINVAL;
8524 if (!arg || nr_args > 256)
8525 break;
8526 ret = io_probe(ctx, arg, nr_args);
8527 break;
071698e1
JA
8528 case IORING_REGISTER_PERSONALITY:
8529 ret = -EINVAL;
8530 if (arg || nr_args)
8531 break;
8532 ret = io_register_personality(ctx);
8533 break;
8534 case IORING_UNREGISTER_PERSONALITY:
8535 ret = -EINVAL;
8536 if (arg)
8537 break;
8538 ret = io_unregister_personality(ctx, nr_args);
8539 break;
edafccee
JA
8540 default:
8541 ret = -EINVAL;
8542 break;
8543 }
8544
071698e1 8545 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 8546 /* bring the ctx back to life */
05f3fb3c 8547 percpu_ref_reinit(&ctx->refs);
c150368b 8548out:
0f158b4c 8549 reinit_completion(&ctx->ref_comp);
05f3fb3c 8550 }
edafccee
JA
8551 return ret;
8552}
8553
8554SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
8555 void __user *, arg, unsigned int, nr_args)
8556{
8557 struct io_ring_ctx *ctx;
8558 long ret = -EBADF;
8559 struct fd f;
8560
8561 f = fdget(fd);
8562 if (!f.file)
8563 return -EBADF;
8564
8565 ret = -EOPNOTSUPP;
8566 if (f.file->f_op != &io_uring_fops)
8567 goto out_fput;
8568
8569 ctx = f.file->private_data;
8570
8571 mutex_lock(&ctx->uring_lock);
8572 ret = __io_uring_register(ctx, opcode, arg, nr_args);
8573 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
8574 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
8575 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
8576out_fput:
8577 fdput(f);
8578 return ret;
8579}
8580
2b188cc1
JA
8581static int __init io_uring_init(void)
8582{
d7f62e82
SM
8583#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
8584 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
8585 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
8586} while (0)
8587
8588#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
8589 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
8590 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
8591 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
8592 BUILD_BUG_SQE_ELEM(1, __u8, flags);
8593 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
8594 BUILD_BUG_SQE_ELEM(4, __s32, fd);
8595 BUILD_BUG_SQE_ELEM(8, __u64, off);
8596 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
8597 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 8598 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
8599 BUILD_BUG_SQE_ELEM(24, __u32, len);
8600 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
8601 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
8602 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
8603 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
5769a351
JX
8604 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
8605 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
d7f62e82
SM
8606 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
8607 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
8608 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
8609 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
8610 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
8611 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
8612 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
8613 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 8614 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
8615 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
8616 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
8617 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 8618 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
d7f62e82 8619
d3656344 8620 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
84557871 8621 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
2b188cc1
JA
8622 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
8623 return 0;
8624};
8625__initcall(io_uring_init);