// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
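
/*
 * Illustrative sketch, not part of the kernel source: a minimal userspace
 * CQE reap loop following the rules above (liburing implements the same
 * pattern). The cq->khead/cq->ktail/cq->kring_mask pointers and
 * handle_cqe() are hypothetical names; the pointers would come from the
 * IORING_OFF_CQ_RING mmap plus struct io_cqring_offsets.
 *
 *	unsigned head = *cq->khead;
 *
 *	while (head != smp_load_acquire(cq->ktail)) {	/- acquire pairs with
 *		struct io_uring_cqe *cqe;		   the kernel tail store -/
 *
 *		cqe = &cq->cqes[head & *cq->kring_mask];
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq->khead, head);	/- orders the entry loads
 *						   before the head store -/
 */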
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32 sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};

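/*
 * Illustrative sketch, not part of the kernel source: how an application
 * typically maps struct io_rings, using the offsets published by
 * io_uring_setup(2). "p" is the struct io_uring_params filled in by the
 * syscall; error handling is omitted.
 *
 *	void *sq_ptr = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQ_RING);
 *	__u32 *sq_khead = sq_ptr + p.sq_off.head;
 *	__u32 *sq_ktail = sq_ptr + p.sq_off.tail;
 *	__u32 *sq_array = sq_ptr + p.sq_off.array;
 */
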
struct io_mapped_ubuf {
	u64 ubuf;
	size_t len;
	struct bio_vec *bvec;
	unsigned int nr_bvecs;
};

struct fixed_file_table {
	struct file **files;
};

struct fixed_file_ref_node {
	struct percpu_ref refs;
	struct list_head node;
	struct list_head file_list;
	struct fixed_file_data *file_data;
	struct llist_node llist;
};

struct fixed_file_data {
	struct fixed_file_table *table;
	struct io_ring_ctx *ctx;

	struct percpu_ref *cur_refs;
	struct percpu_ref refs;
	struct completion done;
	struct list_head ref_list;
	spinlock_t lock;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int flags;
		unsigned int compat: 1;
		unsigned int account_mem: 1;
		unsigned int cq_overflow_flushed: 1;
		unsigned int drain_next: 1;
		unsigned int eventfd_async: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		unsigned cached_sq_head;
		unsigned sq_entries;
		unsigned sq_mask;
		unsigned sq_thread_idle;
		unsigned cached_sq_dropped;
		atomic_t cached_cq_overflow;
		unsigned long sq_check_overflow;

		struct list_head defer_list;
		struct list_head timeout_list;
		struct list_head cq_overflow_list;

		wait_queue_head_t inflight_wait;
		struct io_uring_sqe *sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings *rings;

	/* IO offload */
	struct io_wq *io_wq;
	struct task_struct *sqo_thread;	/* if using sq thread polling */
	struct mm_struct *sqo_mm;
	wait_queue_head_t sqo_wait;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data *file_data;
	unsigned nr_user_files;
	int ring_fd;
	struct file *ring_file;

	/* if used, fixed mapped user buffers */
	unsigned nr_user_bufs;
	struct io_mapped_ubuf *user_bufs;

	struct user_struct *user;

	const struct cred *creds;

	struct completion ref_comp;
	struct completion sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb *fallback_req;

#if defined(CONFIG_UNIX)
	struct socket *ring_sock;
#endif

	struct idr io_buffer_idr;

	struct idr personality_idr;

	struct {
		unsigned cached_cq_tail;
		unsigned cq_entries;
		unsigned cq_mask;
		atomic_t cq_timeouts;
		unsigned long cq_check_overflow;
		struct wait_queue_head cq_wait;
		struct fasync_struct *cq_fasync;
		struct eventfd_ctx *cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex uring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t completion_lock;

		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head poll_list;
		struct hlist_head *cancel_hash;
		unsigned cancel_hash_bits;
		bool poll_multi_file;

		spinlock_t inflight_lock;
		struct list_head inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work file_put_work;
	struct llist_head file_put_llist;

	struct work_struct exit_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file *file;
	union {
		struct wait_queue_head *head;
		u64 addr;
	};
	__poll_t events;
	bool done;
	bool canceled;
	struct wait_queue_entry wait;
};

struct io_close {
	struct file *file;
	struct file *put_file;
	int fd;
};

struct io_timeout_data {
	struct io_kiocb *req;
	struct hrtimer timer;
	struct timespec64 ts;
	enum hrtimer_mode mode;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	unsigned long nofile;
};

struct io_sync {
	struct file *file;
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

struct io_cancel {
	struct file *file;
	u64 addr;
};

struct io_timeout {
	struct file *file;
	u64 addr;
	int flags;
	u32 off;
	u32 target_seq;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb kiocb;
	u64 addr;
	u64 len;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct user_msghdr __user *msg;
		void __user *buf;
	};
	int msg_flags;
	int bgid;
	size_t len;
	struct io_buffer *kbuf;
};

struct io_open {
	struct file *file;
	int dfd;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_files_update {
	struct file *file;
	u64 arg;
	u32 nr_args;
	u32 offset;
};

struct io_fadvise {
	struct file *file;
	u64 offset;
	u32 len;
	u32 advice;
};

struct io_madvise {
	struct file *file;
	u64 addr;
	u32 len;
	u32 advice;
};

struct io_epoll {
	struct file *file;
	int epfd;
	int op;
	int fd;
	struct epoll_event event;
};

struct io_splice {
	struct file *file_out;
	struct file *file_in;
	loff_t off_out;
	loff_t off_in;
	u64 len;
	unsigned int flags;
};

struct io_provide_buf {
	struct file *file;
	__u64 addr;
	__s32 len;
	__u32 bgid;
	__u16 nbufs;
	__u16 bid;
};

struct io_statx {
	struct file *file;
	int dfd;
	unsigned int mask;
	unsigned int flags;
	const char __user *filename;
	struct statx __user *buffer;
};

struct io_async_connect {
	struct sockaddr_storage address;
};

struct io_async_msghdr {
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov;
	struct sockaddr __user *uaddr;
	struct msghdr msg;
	struct sockaddr_storage addr;
};

struct io_async_rw {
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov;
	ssize_t nr_segs;
	ssize_t size;
};

struct io_async_ctx {
	union {
		struct io_async_rw rw;
		struct io_async_msghdr msg;
		struct io_async_connect connect;
		struct io_timeout_data timeout;
	};
};

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_HEAD_BIT,
	REQ_F_LINK_NEXT_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_IOPOLL_COMPLETED_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_MUST_PUNT_BIT,
	REQ_F_TIMEOUT_NOSEQ_BIT,
	REQ_F_COMP_LOCKED_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_OVERFLOW_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_QUEUE_TIMEOUT_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),

	/* head of a link */
	REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
	/* already grabbed next link */
	REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
	/* polled IO has completed */
	REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
	/* has linked timeout */
	REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* timeout request */
	REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
	/* must be punted even for NONBLOCK */
	REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
	/* no timeout sequence */
	REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
	/* completion under lock */
	REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
	/* in overflow list */
	REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* needs to queue linked timeout */
	REQ_F_QUEUE_TIMEOUT = BIT(REQ_F_QUEUE_TIMEOUT_BIT),
};

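/*
 * Note (illustrative, not upstream comment text): because the first six
 * REQ_F_* bits above are defined to the matching IOSQE_* bit numbers,
 * REQ_F_FIXED_FILE == IOSQE_FIXED_FILE and so on, which lets the
 * submission path copy validated low sqe->flags bits straight into
 * req->flags, e.g.:
 *
 *	req->flags |= READ_ONCE(sqe->flags) & IOSQE_BUFFER_SELECT;
 */
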
struct async_poll {
	struct io_poll_iocb poll;
	struct io_wq_work work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file *file;
		struct io_rw rw;
		struct io_poll_iocb poll;
		struct io_accept accept;
		struct io_sync sync;
		struct io_cancel cancel;
		struct io_timeout timeout;
		struct io_connect connect;
		struct io_sr_msg sr_msg;
		struct io_open open;
		struct io_close close;
		struct io_files_update files_update;
		struct io_fadvise fadvise;
		struct io_madvise madvise;
		struct io_epoll epoll;
		struct io_splice splice;
		struct io_provide_buf pbuf;
		struct io_statx statx;
	};

	struct io_async_ctx *io;
	int cflags;
	u8 opcode;

	u16 buf_index;

	struct io_ring_ctx *ctx;
	struct list_head list;
	unsigned int flags;
	refcount_t refs;
	struct task_struct *task;
	unsigned long fsize;
	u64 user_data;
	u32 result;
	u32 sequence;

	struct list_head link_list;

	struct list_head inflight_entry;

	struct percpu_ref *fixed_file_refs;

	union {
		/*
		 * Only commands that never go async can use the below fields,
		 * obviously. Right now only IORING_OP_POLL_ADD uses them, and
		 * async armed poll handlers for regular commands. The latter
		 * restore the work, if needed.
		 */
		struct {
			struct callback_head task_work;
			struct hlist_node hash_node;
			struct async_poll *apoll;
		};
		struct io_wq_work work;
	};
};

#define IO_PLUG_THRESHOLD	2
#define IO_IOPOLL_BATCH		8

struct io_submit_state {
	struct blk_plug plug;

	/*
	 * io_kiocb alloc cache
	 */
	void *reqs[IO_IOPOLL_BATCH];
	unsigned int free_reqs;

	/*
	 * File reference cache
	 */
	struct file *file;
	unsigned int fd;
	unsigned int has_refs;
	unsigned int used_refs;
	unsigned int ios_left;
};

struct io_op_def {
	/* needs req->io allocated for deferral/async */
	unsigned async_ctx : 1;
	/* needs current->mm setup, does mm access */
	unsigned needs_mm : 1;
	/* needs req->file assigned */
	unsigned needs_file : 1;
	/* don't fail if file grab fails */
	unsigned needs_file_no_error : 1;
	/* hash wq insertion if file is a regular file */
	unsigned hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned not_supported : 1;
	/* needs file table */
	unsigned file_table : 1;
	/* needs ->fs */
	unsigned needs_fs : 1;
	/* set if opcode supports polled "wait" */
	unsigned pollin : 1;
	unsigned pollout : 1;
	/* op supports buffer selection */
	unsigned buffer_select : 1;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
	},
	[IORING_OP_WRITEV] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_FSYNC] = {
		.needs_file = 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file = 1,
	},
	[IORING_OP_SENDMSG] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.needs_fs = 1,
		.pollout = 1,
	},
	[IORING_OP_RECVMSG] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.needs_fs = 1,
		.pollin = 1,
		.buffer_select = 1,
	},
	[IORING_OP_TIMEOUT] = {
		.async_ctx = 1,
		.needs_mm = 1,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.file_table = 1,
		.pollin = 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_ctx = 1,
		.needs_mm = 1,
	},
	[IORING_OP_CONNECT] = {
		.async_ctx = 1,
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file = 1,
	},
	[IORING_OP_OPENAT] = {
		.file_table = 1,
		.needs_fs = 1,
	},
	[IORING_OP_CLOSE] = {
		.needs_file = 1,
		.needs_file_no_error = 1,
		.file_table = 1,
	},
	[IORING_OP_FILES_UPDATE] = {
		.needs_mm = 1,
		.file_table = 1,
	},
	[IORING_OP_STATX] = {
		.needs_mm = 1,
		.needs_fs = 1,
		.file_table = 1,
	},
	[IORING_OP_READ] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
	},
	[IORING_OP_WRITE] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_FADVISE] = {
		.needs_file = 1,
	},
	[IORING_OP_MADVISE] = {
		.needs_mm = 1,
	},
	[IORING_OP_SEND] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollout = 1,
	},
	[IORING_OP_RECV] = {
		.needs_mm = 1,
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
	},
	[IORING_OP_OPENAT2] = {
		.file_table = 1,
		.needs_fs = 1,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file = 1,
		.file_table = 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file = 1,
		.hash_reg_file = 1,
		.unbound_nonreg_file = 1,
	},
};

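/*
 * Illustrative example of how the table above is consulted; the prep and
 * issue paths in this file do this same kind of lookup:
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_file && !req->file)
 *		return -EBADF;
 */
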
static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_cleanup_req(struct io_kiocb *req);
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
		       int fd, struct file **out_file, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_file_put_work(struct work_struct *work);

static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_SQPOLL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries; that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
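	/*
	 * Worked example (illustrative): for p->cq_entries == 4096,
	 * ilog2(4096) == 12, so hash_bits == 7 and we allocate 128 hash
	 * buckets -- roughly 32 entries per list when completely full.
	 */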
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_wait);
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	init_completion(&ctx->sq_thread_comp);
	idr_init(&ctx->io_buffer_idr);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
	init_llist_head(&ctx->file_put_llist);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static inline bool __req_need_defer(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	return req->sequence != ctx->cached_cq_tail
				+ atomic_read(&ctx->cached_cq_overflow);
}

static inline bool req_need_defer(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN))
		return __req_need_defer(req);

	return false;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* order cqe stores with ring update */
	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

	if (wq_has_sleeper(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static inline void io_req_work_grab_env(struct io_kiocb *req,
					const struct io_op_def *def)
{
	if (!req->work.mm && def->needs_mm) {
		mmgrab(current->mm);
		req->work.mm = current->mm;
	}
	if (!req->work.creds)
		req->work.creds = get_current_cred();
	if (!req->work.fs && def->needs_fs) {
		spin_lock(&current->fs->lock);
		if (!current->fs->in_exec) {
			req->work.fs = current->fs;
			req->work.fs->users++;
		} else {
			req->work.flags |= IO_WQ_WORK_CANCEL;
		}
		spin_unlock(&current->fs->lock);
	}
	if (!req->work.task_pid)
		req->work.task_pid = task_pid_vnr(current);
}

static inline void io_req_work_drop_env(struct io_kiocb *req)
{
	if (req->work.mm) {
		mmdrop(req->work.mm);
		req->work.mm = NULL;
	}
	if (req->work.creds) {
		put_cred(req->work.creds);
		req->work.creds = NULL;
	}
	if (req->work.fs) {
		struct fs_struct *fs = req->work.fs;

		spin_lock(&req->work.fs->lock);
		if (--fs->users)
			fs = NULL;
		spin_unlock(&req->work.fs->lock);
		if (fs)
			free_fs_struct(fs);
	}
}

static inline void io_prep_async_work(struct io_kiocb *req,
				      struct io_kiocb **link)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file)
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	io_req_work_grab_env(req, def);

	*link = io_prep_linked_timeout(req);
}

static inline void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link;

	io_prep_async_work(req, &link);

	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(ctx->io_wq, &req->work);

	if (link)
		io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);
		req->flags |= REQ_F_COMP_LOCKED;
		io_cqring_fill_event(req, 0);
		io_put_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
	do {
		struct io_kiocb *req = list_first_entry(&ctx->defer_list,
							struct io_kiocb, list);

		if (req_need_defer(req))
			break;
		list_del_init(&req->list);
		io_queue_async_work(req);
	} while (!list_empty(&ctx->defer_list));
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->timeout_list)) {
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
							struct io_kiocb, list);

		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
			break;
		if (req->timeout.target_seq != ctx->cached_cq_tail
					- atomic_read(&ctx->cq_timeouts))
			break;

		list_del_init(&req->list);
		io_kill_timeout(req);
	}
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	io_flush_timeouts(ctx);
	__io_commit_cqring(ctx);

	if (unlikely(!list_empty(&ctx->defer_list)))
		__io_queue_deferred(ctx);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	struct io_rings *rings = ctx->rings;
	struct io_uring_cqe *cqe;
	struct io_kiocb *req;
	unsigned long flags;
	LIST_HEAD(list);

	if (!force) {
		if (list_empty_careful(&ctx->cq_overflow_list))
			return true;
		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries))
			return false;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/* if force is set, the ring is going away. always drop after that */
	if (force)
		ctx->cq_overflow_flushed = 1;

	cqe = NULL;
	while (!list_empty(&ctx->cq_overflow_list)) {
		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
						list);
		list_move(&req->list, &list);
		req->flags &= ~REQ_F_OVERFLOW;
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->cflags);
		} else {
			WRITE_ONCE(ctx->rings->cq_overflow,
				   atomic_inc_return(&ctx->cached_cq_overflow));
		}
	}

	io_commit_cqring(ctx);
	if (cqe) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
	}
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, list);
		list_del(&req->list);
		io_put_req(req);
	}

	return cqe != NULL;
}

static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed) {
		WRITE_ONCE(ctx->rings->cq_overflow,
			   atomic_inc_return(&ctx->cached_cq_overflow));
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
		}
		req->flags |= REQ_F_OVERFLOW;
		refcount_inc(&req->refs);
		req->result = res;
		req->cflags = cflags;
		list_add_tail(&req->list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
	__io_cqring_add_event(req, res, 0);
}

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
			((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = ctx->fallback_req;
	if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
		return req;

	return NULL;
}

static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
				     struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto fallback;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto fallback;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		req = state->reqs[ret - 1];
	} else {
		state->free_reqs--;
		req = state->reqs[state->free_reqs];
	}

	return req;
fallback:
	return io_get_fallback_req(ctx);
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			       bool fixed)
{
	if (fixed)
		percpu_ref_put(req->fixed_file_refs);
	else
		fput(file);
}

static void __io_req_aux_free(struct io_kiocb *req)
{
	if (req->flags & REQ_F_NEED_CLEANUP)
		io_cleanup_req(req);

	kfree(req->io);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
	if (req->task)
		put_task_struct(req->task);

	io_req_work_drop_env(req);
}

static void __io_free_req(struct io_kiocb *req)
{
	__io_req_aux_free(req);

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}

	percpu_ref_put(&req->ctx->refs);
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	else
		clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
}

struct req_batch {
	void *reqs[IO_IOPOLL_BATCH];
	int to_free;
	int need_iter;
};

static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
	if (!rb->to_free)
		return;
	if (rb->need_iter) {
		int i, inflight = 0;
		unsigned long flags;

		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT)
				inflight++;
			__io_req_aux_free(req);
		}
		if (!inflight)
			goto do_free;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT) {
				list_del(&req->inflight_entry);
				if (!--inflight)
					break;
			}
		}
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);

		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
	}
do_free:
	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
	percpu_ref_put_many(&ctx->refs, rb->to_free);
	rb->to_free = rb->need_iter = 0;
}

static bool io_link_cancel_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
		req->flags &= ~REQ_F_LINK_HEAD;
		io_put_req(req);
		return true;
	}

	return false;
}

static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool wake_ev = false;

	/* Already got next link */
	if (req->flags & REQ_F_LINK_NEXT)
		return;

	/*
	 * The list should never be empty when we are called here. But could
	 * potentially happen if the chain is messed up, check to be on the
	 * safe side.
	 */
	while (!list_empty(&req->link_list)) {
		struct io_kiocb *nxt = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
			     (nxt->flags & REQ_F_TIMEOUT))) {
			list_del_init(&nxt->link_list);
			wake_ev |= io_link_cancel_timeout(nxt);
			req->flags &= ~REQ_F_LINK_TIMEOUT;
			continue;
		}

		list_del_init(&req->link_list);
		if (!list_empty(&nxt->link_list))
			nxt->flags |= REQ_F_LINK_HEAD;
		*nxtptr = nxt;
		break;
	}

	req->flags |= REQ_F_LINK_NEXT;
	if (wake_ev)
		io_cqring_ev_posted(ctx);
}

/*
 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	while (!list_empty(&req->link_list)) {
		struct io_kiocb *link = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		list_del_init(&link->link_list);
		trace_io_uring_fail_link(req, link);

		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
		    link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_link_cancel_timeout(link);
		} else {
			io_cqring_fill_event(link, -ECANCELED);
			__io_double_put_req(link);
		}
		req->flags &= ~REQ_F_LINK_TIMEOUT;
	}

	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}

static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
	if (likely(!(req->flags & REQ_F_LINK_HEAD)))
		return;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_FAIL_LINK) {
		io_fail_links(req);
	} else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
			REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		/*
		 * If this is a timeout link, we could be racing with the
		 * timeout timer. Grab the completion lock for this case to
		 * protect against that.
		 */
		spin_lock_irqsave(&ctx->completion_lock, flags);
		io_req_link_next(req, nxt);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		io_req_link_next(req, nxt);
	}
}

static void io_free_req(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	io_req_find_next(req, &nxt);
	__io_free_req(req);

	if (nxt)
		io_queue_async_work(nxt);
}

static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
	struct io_kiocb *link;
	const struct io_op_def *def = &io_op_defs[nxt->opcode];

	if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
		io_wq_hash_work(&nxt->work, file_inode(nxt->file));

	*workptr = &nxt->work;
	link = io_prep_linked_timeout(nxt);
	if (link)
		nxt->flags |= REQ_F_QUEUE_TIMEOUT;
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	if (refcount_dec_and_test(&req->refs)) {
		io_req_find_next(req, nxtptr);
		__io_free_req(req);
	}
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static void io_steal_work(struct io_kiocb *req,
			  struct io_wq_work **workptr)
{
	/*
	 * It's in an io-wq worker, so there always should be at least
	 * one reference, which will be dropped in io_put_work() just
	 * after the current handler returns.
	 *
	 * It also means that if the counter dropped to 1, then there are
	 * no asynchronous users left, so it's safe to steal the next work.
	 */
	if (refcount_read(&req->refs) == 1) {
		struct io_kiocb *nxt = NULL;

		io_req_find_next(req, &nxt);
		if (nxt)
			io_wq_assign_next(workptr, nxt);
	}
}

/*
 * Must only be used if we don't need to care about links, usually from
 * within the completion handling itself.
 */
static void __io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
	struct io_rings *rings = ctx->rings;

	if (test_bit(0, &ctx->cq_check_overflow)) {
		/*
		 * noflush == true is from the waitqueue handler, just ensure
		 * we wake up the task, and the next invocation will flush the
		 * entries. We cannot safely do it from here.
		 */
		if (noflush && !list_empty(&ctx->cq_overflow_list))
			return -1U;

		io_cqring_overflow_flush(ctx, false);
	}

	/* See comment at the top of this file */
	smp_rmb();
	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
{
	if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
		return false;

	if (req->file || req->io)
		rb->need_iter++;

	rb->reqs[rb->to_free++] = req;
	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
		io_free_req_many(req->ctx, rb);
	return true;
}

static int io_put_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf;
	int cflags;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->rw.addr = 0;
	kfree(kbuf);
	return cflags;
}

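/*
 * Illustrative note: with the encoding io_put_kbuf() produces above, a
 * userspace consumer that sees IORING_CQE_F_BUFFER set in cqe->flags can
 * recover the buffer ID as:
 *
 *	buf_id = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */
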
def596e9
JA
1713/*
1714 * Find and free completed poll iocbs
1715 */
1716static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
1717 struct list_head *done)
1718{
8237e045 1719 struct req_batch rb;
def596e9 1720 struct io_kiocb *req;
def596e9 1721
c6ca97b3 1722 rb.to_free = rb.need_iter = 0;
def596e9 1723 while (!list_empty(done)) {
bcda7baa
JA
1724 int cflags = 0;
1725
def596e9
JA
1726 req = list_first_entry(done, struct io_kiocb, list);
1727 list_del(&req->list);
1728
bcda7baa
JA
1729 if (req->flags & REQ_F_BUFFER_SELECTED)
1730 cflags = io_put_kbuf(req);
1731
1732 __io_cqring_fill_event(req, req->result, cflags);
def596e9
JA
1733 (*nr_events)++;
1734
8237e045
JA
1735 if (refcount_dec_and_test(&req->refs) &&
1736 !io_req_multi_free(&rb, req))
1737 io_free_req(req);
def596e9 1738 }
def596e9 1739
09bb8394 1740 io_commit_cqring(ctx);
32b2244a
XW
1741 if (ctx->flags & IORING_SETUP_SQPOLL)
1742 io_cqring_ev_posted(ctx);
8237e045 1743 io_free_req_many(ctx, &rb);
def596e9
JA
1744}
1745
581f9810
BM
1746static void io_iopoll_queue(struct list_head *again)
1747{
1748 struct io_kiocb *req;
1749
1750 do {
1751 req = list_first_entry(again, struct io_kiocb, list);
1752 list_del(&req->list);
1753 refcount_inc(&req->refs);
1754 io_queue_async_work(req);
1755 } while (!list_empty(again));
1756}
1757
def596e9
JA
1758static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
1759 long min)
1760{
1761 struct io_kiocb *req, *tmp;
1762 LIST_HEAD(done);
581f9810 1763 LIST_HEAD(again);
def596e9
JA
1764 bool spin;
1765 int ret;
1766
1767 /*
1768 * Only spin for completions if we don't have multiple devices hanging
1769 * off our complete list, and we're under the requested amount.
1770 */
1771 spin = !ctx->poll_multi_file && *nr_events < min;
1772
1773 ret = 0;
1774 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
9adbd45d 1775 struct kiocb *kiocb = &req->rw.kiocb;
def596e9
JA
1776
1777 /*
581f9810
BM
1778 * Move completed and retryable entries to our local lists.
1779 * If we find a request that requires polling, break out
1780 * and complete those lists first, if we have entries there.
def596e9
JA
1781 */
1782 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
1783 list_move_tail(&req->list, &done);
1784 continue;
1785 }
1786 if (!list_empty(&done))
1787 break;
1788
581f9810
BM
1789 if (req->result == -EAGAIN) {
1790 list_move_tail(&req->list, &again);
1791 continue;
1792 }
1793 if (!list_empty(&again))
1794 break;
1795
def596e9
JA
1796 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
1797 if (ret < 0)
1798 break;
1799
1800 if (ret && spin)
1801 spin = false;
1802 ret = 0;
1803 }
1804
1805 if (!list_empty(&done))
1806 io_iopoll_complete(ctx, nr_events, &done);
1807
581f9810
BM
1808 if (!list_empty(&again))
1809 io_iopoll_queue(&again);
1810
def596e9
JA
1811 return ret;
1812}
1813
1814/*
d195a66e 1815 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
def596e9
JA
1816 * non-spinning poll check - we'll still enter the driver poll loop, but only
1817 * as a non-spinning completion check.
1818 */
1819static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
1820 long min)
1821{
08f5439f 1822 while (!list_empty(&ctx->poll_list) && !need_resched()) {
def596e9
JA
1823 int ret;
1824
1825 ret = io_do_iopoll(ctx, nr_events, min);
1826 if (ret < 0)
1827 return ret;
1828 if (!min || *nr_events >= min)
1829 return 0;
1830 }
1831
1832 return 1;
1833}
1834
1835/*
1836 * We can't just wait for polled events to come to us; we have to actively
1837 * find and complete them.
1838 */
1839static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
1840{
1841 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1842 return;
1843
1844 mutex_lock(&ctx->uring_lock);
1845 while (!list_empty(&ctx->poll_list)) {
1846 unsigned int nr_events = 0;
1847
1848 io_iopoll_getevents(ctx, &nr_events, 1);
08f5439f
JA
1849
1850 /*
1851 * Ensure we allow local-to-the-cpu processing to take place;
1852 * in this case we need to ensure that we reap all events.
1853 */
1854 cond_resched();
def596e9
JA
1855 }
1856 mutex_unlock(&ctx->uring_lock);
1857}
1858
c7849be9
XW
1859static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1860 long min)
def596e9 1861{
2b2ed975 1862 int iters = 0, ret = 0;
500f9fba 1863
c7849be9
XW
1864 /*
1865 * We disallow the app entering submit/complete with polling, but we
1866 * still need to lock the ring to prevent racing with polled issue
1867 * that got punted to a workqueue.
1868 */
1869 mutex_lock(&ctx->uring_lock);
def596e9
JA
1870 do {
1871 int tmin = 0;
1872
a3a0e43f
JA
1873 /*
1874 * Don't enter the poll loop if we already have events pending.
1875 * If we do, we can potentially be spinning for commands that
1876 * already triggered a CQE (e.g. in error).
1877 */
1d7bb1d5 1878 if (io_cqring_events(ctx, false))
a3a0e43f
JA
1879 break;
1880
500f9fba
JA
1881 /*
1882 * If a submit got punted to a workqueue, we can have the
1883 * application entering polling for a command before it gets
1884 * issued. That app will hold the uring_lock for the duration
1885 * of the poll right here, so we need to take a breather every
1886 * now and then to ensure that the issue has a chance to add
1887 * the poll to the issued list. Otherwise we can spin here
1888 * forever, while the workqueue is stuck trying to acquire the
1889 * very same mutex.
1890 */
1891 if (!(++iters & 7)) {
1892 mutex_unlock(&ctx->uring_lock);
1893 mutex_lock(&ctx->uring_lock);
1894 }
1895
def596e9
JA
1896 if (*nr_events < min)
1897 tmin = min - *nr_events;
1898
1899 ret = io_iopoll_getevents(ctx, nr_events, tmin);
1900 if (ret <= 0)
1901 break;
1902 ret = 0;
1903 } while (min && !*nr_events && !need_resched());
1904
500f9fba 1905 mutex_unlock(&ctx->uring_lock);
def596e9
JA
1906 return ret;
1907}
1908
491381ce 1909static void kiocb_end_write(struct io_kiocb *req)
2b188cc1 1910{
491381ce
JA
1911 /*
1912 * Tell lockdep we inherited freeze protection from the submission
1913 * thread.
1914 */
1915 if (req->flags & REQ_F_ISREG) {
1916 struct inode *inode = file_inode(req->file);
2b188cc1 1917
491381ce 1918 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2b188cc1 1919 }
491381ce 1920 file_end_write(req->file);
2b188cc1
JA
1921}
1922
4e88d6e7
JA
1923static inline void req_set_fail_links(struct io_kiocb *req)
1924{
1925 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1926 req->flags |= REQ_F_FAIL_LINK;
1927}
1928
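/*
 * Common completion for buffered read/write: drop write freeze protection,
 * flag failed links on unexpected results and post the CQE, including
 * buffer flags if a provided buffer was consumed.
 */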
ba816ad6 1929static void io_complete_rw_common(struct kiocb *kiocb, long res)
2b188cc1 1930{
9adbd45d 1931 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
bcda7baa 1932 int cflags = 0;
2b188cc1 1933
491381ce
JA
1934 if (kiocb->ki_flags & IOCB_WRITE)
1935 kiocb_end_write(req);
2b188cc1 1936
4e88d6e7
JA
1937 if (res != req->result)
1938 req_set_fail_links(req);
bcda7baa
JA
1939 if (req->flags & REQ_F_BUFFER_SELECTED)
1940 cflags = io_put_kbuf(req);
1941 __io_cqring_add_event(req, res, cflags);
ba816ad6
JA
1942}
1943
1944static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
1945{
9adbd45d 1946 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
ba816ad6
JA
1947
1948 io_complete_rw_common(kiocb, res);
e65ef56d 1949 io_put_req(req);
2b188cc1
JA
1950}
1951
def596e9
JA
1952static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
1953{
9adbd45d 1954 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
def596e9 1955
491381ce
JA
1956 if (kiocb->ki_flags & IOCB_WRITE)
1957 kiocb_end_write(req);
def596e9 1958
4e88d6e7
JA
1959 if (res != req->result)
1960 req_set_fail_links(req);
9e645e11 1961 req->result = res;
def596e9
JA
1962 if (res != -EAGAIN)
1963 req->flags |= REQ_F_IOPOLL_COMPLETED;
1964}
1965
1966/*
1967 * After the iocb has been issued, it's safe to be found on the poll list.
1968 * Adding the kiocb to the list AFTER submission ensures that we don't
1969 * find it from an io_iopoll_getevents() thread before the issuer is done
1970 * accessing the kiocb cookie.
1971 */
1972static void io_iopoll_req_issued(struct io_kiocb *req)
1973{
1974 struct io_ring_ctx *ctx = req->ctx;
1975
1976 /*
1977 * Track whether we have multiple files in our lists. This will impact
1978 * how we do polling eventually, not spinning if we're on potentially
1979 * different devices.
1980 */
1981 if (list_empty(&ctx->poll_list)) {
1982 ctx->poll_multi_file = false;
1983 } else if (!ctx->poll_multi_file) {
1984 struct io_kiocb *list_req;
1985
1986 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
1987 list);
9adbd45d 1988 if (list_req->file != req->file)
def596e9
JA
1989 ctx->poll_multi_file = true;
1990 }
1991
1992 /*
1993 * For fast devices, IO may have already completed. If it has, add
1994 * it to the front so we find it first.
1995 */
1996 if (req->flags & REQ_F_IOPOLL_COMPLETED)
1997 list_add(&req->list, &ctx->poll_list);
1998 else
1999 list_add_tail(&req->list, &ctx->poll_list);
bdcd3eab
XW
2000
2001 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2002 wq_has_sleeper(&ctx->sqo_wait))
2003 wake_up(&ctx->sqo_wait);
def596e9
JA
2004}
2005
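/*
 * Drop the file references the submit state acquired up front but never
 * used for an actual submission.
 */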
9f13c35b 2006static void __io_state_file_put(struct io_submit_state *state)
9a56a232 2007{
9f13c35b 2008 int diff = state->has_refs - state->used_refs;
9a56a232 2009
9f13c35b
PB
2010 if (diff)
2011 fput_many(state->file, diff);
2012 state->file = NULL;
2013}
2014
2015static inline void io_state_file_put(struct io_submit_state *state)
2016{
2017 if (state->file)
2018 __io_state_file_put(state);
9a56a232
JA
2019}
2020
2021/*
2022 * Get as many references to a file as we have IOs left in this submission,
2023 * assuming most submissions are for one file, or at least that each file
2024 * has more than one submission.
2025 */
8da11c19 2026static struct file *__io_file_get(struct io_submit_state *state, int fd)
9a56a232
JA
2027{
2028 if (!state)
2029 return fget(fd);
2030
2031 if (state->file) {
2032 if (state->fd == fd) {
2033 state->used_refs++;
2034 state->ios_left--;
2035 return state->file;
2036 }
9f13c35b 2037 __io_state_file_put(state);
9a56a232
JA
2038 }
2039 state->file = fget_many(fd, state->ios_left);
2040 if (!state->file)
2041 return NULL;
2042
2043 state->fd = fd;
2044 state->has_refs = state->ios_left;
2045 state->used_refs = 1;
2046 state->ios_left--;
2047 return state->file;
2048}
2049
2b188cc1
JA
2050/*
2051 * If we tracked the file through the SCM inflight mechanism, we could support
2052 * any file. For now, just ensure that anything potentially problematic is done
2053 * inline.
2054 */
af197f50 2055static bool io_file_supports_async(struct file *file, int rw)
2b188cc1
JA
2056{
2057 umode_t mode = file_inode(file)->i_mode;
2058
10d59345 2059 if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
2b188cc1
JA
2060 return true;
2061 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
2062 return true;
2063
c5b85625
JA
2064 /* any ->read/write should understand O_NONBLOCK */
2065 if (file->f_flags & O_NONBLOCK)
2066 return true;
2067
af197f50
JA
2068 if (!(file->f_mode & FMODE_NOWAIT))
2069 return false;
2070
2071 if (rw == READ)
2072 return file->f_op->read_iter != NULL;
2073
2074 return file->f_op->write_iter != NULL;
2b188cc1
JA
2075}
2076
3529d8c2
JA
2077static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2078 bool force_nonblock)
2b188cc1 2079{
def596e9 2080 struct io_ring_ctx *ctx = req->ctx;
9adbd45d 2081 struct kiocb *kiocb = &req->rw.kiocb;
09bb8394
JA
2082 unsigned ioprio;
2083 int ret;
2b188cc1 2084
491381ce
JA
2085 if (S_ISREG(file_inode(req->file)->i_mode))
2086 req->flags |= REQ_F_ISREG;
2087
2b188cc1 2088 kiocb->ki_pos = READ_ONCE(sqe->off);
ba04291e
JA
2089 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2090 req->flags |= REQ_F_CUR_POS;
2091 kiocb->ki_pos = req->file->f_pos;
2092 }
2b188cc1 2093 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3e577dcd
PB
2094 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2095 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2096 if (unlikely(ret))
2097 return ret;
2b188cc1
JA
2098
2099 ioprio = READ_ONCE(sqe->ioprio);
2100 if (ioprio) {
2101 ret = ioprio_check_cap(ioprio);
2102 if (ret)
09bb8394 2103 return ret;
2b188cc1
JA
2104
2105 kiocb->ki_ioprio = ioprio;
2106 } else
2107 kiocb->ki_ioprio = get_current_ioprio();
2108
8449eeda 2109 /* don't allow async punt if RWF_NOWAIT was requested */
c5b85625 2110 if (kiocb->ki_flags & IOCB_NOWAIT)
8449eeda
SB
2111 req->flags |= REQ_F_NOWAIT;
2112
2113 if (force_nonblock)
2b188cc1 2114 kiocb->ki_flags |= IOCB_NOWAIT;
8449eeda 2115
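 /*
  * For IOPOLL rings, completions are reaped by polling the driver, so the
  * request must use O_DIRECT on a file that implements ->iopoll().
  */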
def596e9 2116 if (ctx->flags & IORING_SETUP_IOPOLL) {
def596e9
JA
2117 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2118 !kiocb->ki_filp->f_op->iopoll)
09bb8394 2119 return -EOPNOTSUPP;
2b188cc1 2120
def596e9
JA
2121 kiocb->ki_flags |= IOCB_HIPRI;
2122 kiocb->ki_complete = io_complete_rw_iopoll;
6873e0bd 2123 req->result = 0;
def596e9 2124 } else {
09bb8394
JA
2125 if (kiocb->ki_flags & IOCB_HIPRI)
2126 return -EINVAL;
def596e9
JA
2127 kiocb->ki_complete = io_complete_rw;
2128 }
9adbd45d 2129
3529d8c2
JA
2130 req->rw.addr = READ_ONCE(sqe->addr);
2131 req->rw.len = READ_ONCE(sqe->len);
4f4eeba8 2132 req->buf_index = READ_ONCE(sqe->buf_index);
2b188cc1 2133 return 0;
2b188cc1
JA
2134}
2135
2136static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2137{
2138 switch (ret) {
2139 case -EIOCBQUEUED:
2140 break;
2141 case -ERESTARTSYS:
2142 case -ERESTARTNOINTR:
2143 case -ERESTARTNOHAND:
2144 case -ERESTART_RESTARTBLOCK:
2145 /*
2146 * We can't just restart the syscall, since previously
2147 * submitted sqes may already be in progress. Just fail this
2148 * IO with EINTR.
2149 */
2150 ret = -EINTR;
2151 /* fall through */
2152 default:
2153 kiocb->ki_complete(kiocb, ret, 0);
2154 }
2155}
2156
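/*
 * Finish a read/write kiocb issued inline: propagate the updated file
 * position for REQ_F_CUR_POS requests, then complete directly or hand
 * off to io_rw_done().
 */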
014db007 2157static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
ba816ad6 2158{
ba04291e
JA
2159 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2160
2161 if (req->flags & REQ_F_CUR_POS)
2162 req->file->f_pos = kiocb->ki_pos;
bcaec089 2163 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
014db007 2164 io_complete_rw(kiocb, ret, 0);
ba816ad6
JA
2165 else
2166 io_rw_done(kiocb, ret);
2167}
2168
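/*
 * Set up a bvec iterator over a registered (fixed) buffer, after checking
 * that the requested address range lies inside the registered mapping.
 */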
9adbd45d 2169static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
7d009165 2170 struct iov_iter *iter)
edafccee 2171{
9adbd45d
JA
2172 struct io_ring_ctx *ctx = req->ctx;
2173 size_t len = req->rw.len;
edafccee 2174 struct io_mapped_ubuf *imu;
4f4eeba8 2175 u16 index, buf_index;
edafccee
JA
2176 size_t offset;
2177 u64 buf_addr;
2178
2179 /* attempt to use fixed buffers without having provided iovecs */
2180 if (unlikely(!ctx->user_bufs))
2181 return -EFAULT;
2182
4f4eeba8 2183 buf_index = req->buf_index;
edafccee
JA
2184 if (unlikely(buf_index >= ctx->nr_user_bufs))
2185 return -EFAULT;
2186
2187 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2188 imu = &ctx->user_bufs[index];
9adbd45d 2189 buf_addr = req->rw.addr;
edafccee
JA
2190
2191 /* overflow */
2192 if (buf_addr + len < buf_addr)
2193 return -EFAULT;
2194 /* not inside the mapped region */
2195 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2196 return -EFAULT;
2197
2198 /*
2199 * May not be the start of the buffer; set the size appropriately
2200 * and advance to the beginning.
2201 */
2202 offset = buf_addr - imu->ubuf;
2203 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bd11b3a3
JA
2204
2205 if (offset) {
2206 /*
2207 * Don't use iov_iter_advance() here, as it's really slow for
2208 * using the latter parts of a big fixed buffer - it iterates
2209 * over each segment manually. We can cheat a bit here, because
2210 * we know that:
2211 *
2212 * 1) it's a BVEC iter, we set it up
2213 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2214 * first and last bvec
2215 *
2216 * So just find our index, and adjust the iterator afterwards.
2217 * If the offset is within the first bvec (or the whole first
2218 * bvec), just use iov_iter_advance(). This makes it easier
2219 * since we can just skip the first segment, which may not
2220 * be PAGE_SIZE aligned.
2221 */
2222 const struct bio_vec *bvec = imu->bvec;
2223
2224 if (offset <= bvec->bv_len) {
2225 iov_iter_advance(iter, offset);
2226 } else {
2227 unsigned long seg_skip;
2228
2229 /* skip first vec */
2230 offset -= bvec->bv_len;
2231 seg_skip = 1 + (offset >> PAGE_SHIFT);
2232
2233 iter->bvec = bvec + seg_skip;
2234 iter->nr_segs -= seg_skip;
99c79f66 2235 iter->count -= bvec->bv_len + offset;
bd11b3a3 2236 iter->iov_offset = offset & ~PAGE_MASK;
bd11b3a3
JA
2237 }
2238 }
2239
5e559561 2240 return len;
edafccee
JA
2241}
2242
bcda7baa
JA
2243static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2244{
2245 if (needs_lock)
2246 mutex_unlock(&ctx->uring_lock);
2247}
2248
2249static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2250{
2251 /*
2252 * "Normal" inline submissions always hold the uring_lock, since we
2253 * grab it from the system call. Same is true for the SQPOLL offload.
2254 * The only exception is when we've detached the request and issue it
2255 * from an async worker thread; grab the lock in that case.
2256 */
2257 if (needs_lock)
2258 mutex_lock(&ctx->uring_lock);
2259}
2260
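/*
 * Pick a buffer from the provided-buffer group 'bgid' for a request using
 * buffer selection, clamping *len to the buffer size; returns
 * ERR_PTR(-ENOBUFS) if the group is missing or empty.
 */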
2261static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2262 int bgid, struct io_buffer *kbuf,
2263 bool needs_lock)
2264{
2265 struct io_buffer *head;
2266
2267 if (req->flags & REQ_F_BUFFER_SELECTED)
2268 return kbuf;
2269
2270 io_ring_submit_lock(req->ctx, needs_lock);
2271
2272 lockdep_assert_held(&req->ctx->uring_lock);
2273
2274 head = idr_find(&req->ctx->io_buffer_idr, bgid);
2275 if (head) {
2276 if (!list_empty(&head->list)) {
2277 kbuf = list_last_entry(&head->list, struct io_buffer,
2278 list);
2279 list_del(&kbuf->list);
2280 } else {
2281 kbuf = head;
2282 idr_remove(&req->ctx->io_buffer_idr, bgid);
2283 }
2284 if (*len > kbuf->len)
2285 *len = kbuf->len;
2286 } else {
2287 kbuf = ERR_PTR(-ENOBUFS);
2288 }
2289
2290 io_ring_submit_unlock(req->ctx, needs_lock);
2291
2292 return kbuf;
2293}
2294
4d954c25
JA
2295static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2296 bool needs_lock)
2297{
2298 struct io_buffer *kbuf;
4f4eeba8 2299 u16 bgid;
4d954c25
JA
2300
2301 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
4f4eeba8 2302 bgid = req->buf_index;
4d954c25
JA
2303 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2304 if (IS_ERR(kbuf))
2305 return kbuf;
2306 req->rw.addr = (u64) (unsigned long) kbuf;
2307 req->flags |= REQ_F_BUFFER_SELECTED;
2308 return u64_to_user_ptr(kbuf->addr);
2309}
2310
2311#ifdef CONFIG_COMPAT
2312static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2313 bool needs_lock)
2314{
2315 struct compat_iovec __user *uiov;
2316 compat_ssize_t clen;
2317 void __user *buf;
2318 ssize_t len;
2319
2320 uiov = u64_to_user_ptr(req->rw.addr);
2321 if (!access_ok(uiov, sizeof(*uiov)))
2322 return -EFAULT;
2323 if (__get_user(clen, &uiov->iov_len))
2324 return -EFAULT;
2325 if (clen < 0)
2326 return -EINVAL;
2327
2328 len = clen;
2329 buf = io_rw_buffer_select(req, &len, needs_lock);
2330 if (IS_ERR(buf))
2331 return PTR_ERR(buf);
2332 iov[0].iov_base = buf;
2333 iov[0].iov_len = (compat_size_t) len;
2334 return 0;
2335}
2336#endif
2337
2338static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2339 bool needs_lock)
2340{
2341 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2342 void __user *buf;
2343 ssize_t len;
2344
2345 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2346 return -EFAULT;
2347
2348 len = iov[0].iov_len;
2349 if (len < 0)
2350 return -EINVAL;
2351 buf = io_rw_buffer_select(req, &len, needs_lock);
2352 if (IS_ERR(buf))
2353 return PTR_ERR(buf);
2354 iov[0].iov_base = buf;
2355 iov[0].iov_len = len;
2356 return 0;
2357}
2358
2359static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2360 bool needs_lock)
2361{
dddb3e26
JA
2362 if (req->flags & REQ_F_BUFFER_SELECTED) {
2363 struct io_buffer *kbuf;
2364
2365 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2366 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2367 iov[0].iov_len = kbuf->len;
4d954c25 2368 return 0;
dddb3e26 2369 }
4d954c25
JA
2370 if (!req->rw.len)
2371 return 0;
2372 else if (req->rw.len > 1)
2373 return -EINVAL;
2374
2375#ifdef CONFIG_COMPAT
2376 if (req->ctx->compat)
2377 return io_compat_import(req, iov, needs_lock);
2378#endif
2379
2380 return __io_iov_buffer_select(req, iov, needs_lock);
2381}
2382
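/*
 * Build the iov_iter for a read/write request, covering fixed buffers,
 * provided buffers, the non-vectored READ/WRITE opcodes and any iovec
 * already imported into the async context.
 */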
cf6fd4bd 2383static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
bcda7baa
JA
2384 struct iovec **iovec, struct iov_iter *iter,
2385 bool needs_lock)
2b188cc1 2386{
9adbd45d
JA
2387 void __user *buf = u64_to_user_ptr(req->rw.addr);
2388 size_t sqe_len = req->rw.len;
4d954c25 2389 ssize_t ret;
edafccee
JA
2390 u8 opcode;
2391
d625c6ee 2392 opcode = req->opcode;
7d009165 2393 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
edafccee 2394 *iovec = NULL;
9adbd45d 2395 return io_import_fixed(req, rw, iter);
edafccee 2396 }
2b188cc1 2397
bcda7baa 2398 /* buffer index only valid with fixed read/write, or buffer select */
4f4eeba8 2399 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
9adbd45d
JA
2400 return -EINVAL;
2401
3a6820f2 2402 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
bcda7baa 2403 if (req->flags & REQ_F_BUFFER_SELECT) {
4d954c25
JA
2404 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
2405 if (IS_ERR(buf)) {
bcda7baa 2406 *iovec = NULL;
4d954c25 2407 return PTR_ERR(buf);
bcda7baa 2408 }
3f9d6441 2409 req->rw.len = sqe_len;
bcda7baa
JA
2410 }
2411
3a6820f2
JA
2412 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2413 *iovec = NULL;
3a901598 2414 return ret < 0 ? ret : sqe_len;
3a6820f2
JA
2415 }
2416
f67676d1
JA
2417 if (req->io) {
2418 struct io_async_rw *iorw = &req->io->rw;
2419
2420 *iovec = iorw->iov;
2421 iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
2422 if (iorw->iov == iorw->fast_iov)
2423 *iovec = NULL;
2424 return iorw->size;
2425 }
2426
4d954c25
JA
2427 if (req->flags & REQ_F_BUFFER_SELECT) {
2428 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3f9d6441
JA
2429 if (!ret) {
2430 ret = (*iovec)->iov_len;
2431 iov_iter_init(iter, rw, *iovec, 1, ret);
2432 }
4d954c25
JA
2433 *iovec = NULL;
2434 return ret;
2435 }
2436
2b188cc1 2437#ifdef CONFIG_COMPAT
cf6fd4bd 2438 if (req->ctx->compat)
2b188cc1
JA
2439 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
2440 iovec, iter);
2441#endif
2442
2443 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
2444}
2445
31b51510 2446/*
32960613
JA
2447 * For files that don't have ->read_iter() and ->write_iter(), handle them
2448 * by looping over ->read() or ->write() manually.
31b51510 2449 */
32960613
JA
2450static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2451 struct iov_iter *iter)
2452{
2453 ssize_t ret = 0;
2454
2455 /*
2456 * We don't support polled IO through this interface, and we can't
2457 * support non-blocking either. For the latter, this just causes
2458 * the kiocb to be handled from an async context.
2459 */
2460 if (kiocb->ki_flags & IOCB_HIPRI)
2461 return -EOPNOTSUPP;
2462 if (kiocb->ki_flags & IOCB_NOWAIT)
2463 return -EAGAIN;
2464
2465 while (iov_iter_count(iter)) {
311ae9e1 2466 struct iovec iovec;
32960613
JA
2467 ssize_t nr;
2468
311ae9e1
PB
2469 if (!iov_iter_is_bvec(iter)) {
2470 iovec = iov_iter_iovec(iter);
2471 } else {
2472 /* fixed buffers import bvec */
2473 iovec.iov_base = kmap(iter->bvec->bv_page)
2474 + iter->iov_offset;
2475 iovec.iov_len = min(iter->count,
2476 iter->bvec->bv_len - iter->iov_offset);
2477 }
2478
32960613
JA
2479 if (rw == READ) {
2480 nr = file->f_op->read(file, iovec.iov_base,
2481 iovec.iov_len, &kiocb->ki_pos);
2482 } else {
2483 nr = file->f_op->write(file, iovec.iov_base,
2484 iovec.iov_len, &kiocb->ki_pos);
2485 }
2486
311ae9e1
PB
2487 if (iov_iter_is_bvec(iter))
2488 kunmap(iter->bvec->bv_page);
2489
32960613
JA
2490 if (nr < 0) {
2491 if (!ret)
2492 ret = nr;
2493 break;
2494 }
2495 ret += nr;
2496 if (nr != iovec.iov_len)
2497 break;
2498 iov_iter_advance(iter, nr);
2499 }
2500
2501 return ret;
2502}
2503
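/*
 * Stash the iovec and iterator state in the request's async context so
 * the operation can later be retried from a blocking context.
 */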
b7bb4f7d 2504static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
f67676d1
JA
2505 struct iovec *iovec, struct iovec *fast_iov,
2506 struct iov_iter *iter)
2507{
2508 req->io->rw.nr_segs = iter->nr_segs;
2509 req->io->rw.size = io_size;
2510 req->io->rw.iov = iovec;
2511 if (!req->io->rw.iov) {
2512 req->io->rw.iov = req->io->rw.fast_iov;
45097dae
XW
2513 if (req->io->rw.iov != fast_iov)
2514 memcpy(req->io->rw.iov, fast_iov,
2515 sizeof(struct iovec) * iter->nr_segs);
99bc4c38
PB
2516 } else {
2517 req->flags |= REQ_F_NEED_CLEANUP;
f67676d1
JA
2518 }
2519}
2520
3d9932a8
XW
2521static inline int __io_alloc_async_ctx(struct io_kiocb *req)
2522{
2523 req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
2524 return req->io == NULL;
2525}
2526
b7bb4f7d 2527static int io_alloc_async_ctx(struct io_kiocb *req)
f67676d1 2528{
d3656344
JA
2529 if (!io_op_defs[req->opcode].async_ctx)
2530 return 0;
3d9932a8
XW
2531
2532 return __io_alloc_async_ctx(req);
b7bb4f7d
JA
2533}
2534
b7bb4f7d
JA
2535static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2536 struct iovec *iovec, struct iovec *fast_iov,
2537 struct iov_iter *iter)
2538{
980ad263 2539 if (!io_op_defs[req->opcode].async_ctx)
74566df3 2540 return 0;
5d204bcf 2541 if (!req->io) {
3d9932a8 2542 if (__io_alloc_async_ctx(req))
5d204bcf 2543 return -ENOMEM;
b7bb4f7d 2544
5d204bcf
JA
2545 io_req_map_rw(req, io_size, iovec, fast_iov, iter);
2546 }
b7bb4f7d 2547 return 0;
f67676d1
JA
2548}
2549
3529d8c2
JA
2550static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2551 bool force_nonblock)
f67676d1 2552{
3529d8c2
JA
2553 struct io_async_ctx *io;
2554 struct iov_iter iter;
f67676d1
JA
2555 ssize_t ret;
2556
3529d8c2
JA
2557 ret = io_prep_rw(req, sqe, force_nonblock);
2558 if (ret)
2559 return ret;
f67676d1 2560
3529d8c2
JA
2561 if (unlikely(!(req->file->f_mode & FMODE_READ)))
2562 return -EBADF;
f67676d1 2563
5f798bea
PB
2564 /* either don't need iovec imported or already have it */
2565 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2
JA
2566 return 0;
2567
2568 io = req->io;
2569 io->rw.iov = io->rw.fast_iov;
2570 req->io = NULL;
bcda7baa 2571 ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
3529d8c2
JA
2572 req->io = io;
2573 if (ret < 0)
2574 return ret;
2575
2576 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2577 return 0;
f67676d1
JA
2578}
2579
014db007 2580static int io_read(struct io_kiocb *req, bool force_nonblock)
2b188cc1
JA
2581{
2582 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 2583 struct kiocb *kiocb = &req->rw.kiocb;
2b188cc1 2584 struct iov_iter iter;
31b51510 2585 size_t iov_count;
f67676d1 2586 ssize_t io_size, ret;
2b188cc1 2587
bcda7baa 2588 ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
06b76d44
JA
2589 if (ret < 0)
2590 return ret;
2b188cc1 2591
fd6c2e4c
JA
2592 /* Ensure we clear previously set non-block flag */
2593 if (!force_nonblock)
29de5f6a 2594 kiocb->ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 2595
797f3f53 2596 req->result = 0;
f67676d1 2597 io_size = ret;
dea3b49c 2598 if (req->flags & REQ_F_LINK_HEAD)
f67676d1
JA
2599 req->result = io_size;
2600
2601 /*
2602 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2603 * we know to async punt it even if it was opened O_NONBLOCK
2604 */
af197f50 2605 if (force_nonblock && !io_file_supports_async(req->file, READ))
f67676d1 2606 goto copy_iov;
9e645e11 2607
31b51510 2608 iov_count = iov_iter_count(&iter);
9adbd45d 2609 ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
2b188cc1
JA
2610 if (!ret) {
2611 ssize_t ret2;
2612
9adbd45d
JA
2613 if (req->file->f_op->read_iter)
2614 ret2 = call_read_iter(req->file, kiocb, &iter);
32960613 2615 else
9adbd45d 2616 ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
32960613 2617
9d93a3f5 2618 /* Catch -EAGAIN return for forced non-blocking submission */
f67676d1 2619 if (!force_nonblock || ret2 != -EAGAIN) {
014db007 2620 kiocb_done(kiocb, ret2);
f67676d1
JA
2621 } else {
2622copy_iov:
b7bb4f7d 2623 ret = io_setup_async_rw(req, io_size, iovec,
f67676d1
JA
2624 inline_vecs, &iter);
2625 if (ret)
2626 goto out_free;
29de5f6a 2627 /* any defer from here is final; it must be retried from blocking context */
490e8967
JA
2628 if (!(req->flags & REQ_F_NOWAIT) &&
2629 !file_can_poll(req->file))
29de5f6a 2630 req->flags |= REQ_F_MUST_PUNT;
f67676d1
JA
2631 return -EAGAIN;
2632 }
2b188cc1 2633 }
f67676d1 2634out_free:
1e95081c 2635 kfree(iovec);
99bc4c38 2636 req->flags &= ~REQ_F_NEED_CLEANUP;
2b188cc1
JA
2637 return ret;
2638}
2639
3529d8c2
JA
2640static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2641 bool force_nonblock)
f67676d1 2642{
3529d8c2
JA
2643 struct io_async_ctx *io;
2644 struct iov_iter iter;
f67676d1
JA
2645 ssize_t ret;
2646
3529d8c2
JA
2647 ret = io_prep_rw(req, sqe, force_nonblock);
2648 if (ret)
2649 return ret;
f67676d1 2650
3529d8c2
JA
2651 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
2652 return -EBADF;
f67676d1 2653
4ed734b0
JA
2654 req->fsize = rlimit(RLIMIT_FSIZE);
2655
5f798bea
PB
2656 /* either don't need iovec imported or already have it */
2657 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3529d8c2
JA
2658 return 0;
2659
2660 io = req->io;
2661 io->rw.iov = io->rw.fast_iov;
2662 req->io = NULL;
bcda7baa 2663 ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
3529d8c2
JA
2664 req->io = io;
2665 if (ret < 0)
2666 return ret;
2667
2668 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2669 return 0;
f67676d1
JA
2670}
2671
014db007 2672static int io_write(struct io_kiocb *req, bool force_nonblock)
2b188cc1
JA
2673{
2674 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
9adbd45d 2675 struct kiocb *kiocb = &req->rw.kiocb;
2b188cc1 2676 struct iov_iter iter;
31b51510 2677 size_t iov_count;
f67676d1 2678 ssize_t ret, io_size;
2b188cc1 2679
bcda7baa 2680 ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
06b76d44
JA
2681 if (ret < 0)
2682 return ret;
2b188cc1 2683
fd6c2e4c
JA
2684 /* Ensure we clear previously set non-block flag */
2685 if (!force_nonblock)
9adbd45d 2686 req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
fd6c2e4c 2687
797f3f53 2688 req->result = 0;
f67676d1 2689 io_size = ret;
dea3b49c 2690 if (req->flags & REQ_F_LINK_HEAD)
f67676d1 2691 req->result = io_size;
9e645e11 2692
f67676d1
JA
2693 /*
2694 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2695 * we know to async punt it even if it was opened O_NONBLOCK
2696 */
af197f50 2697 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
f67676d1 2698 goto copy_iov;
31b51510 2699
10d59345
JA
2700 /* the file write path doesn't support NOWAIT for non-direct IO */
2701 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
2702 (req->flags & REQ_F_ISREG))
f67676d1 2703 goto copy_iov;
31b51510 2704
f67676d1 2705 iov_count = iov_iter_count(&iter);
9adbd45d 2706 ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
2b188cc1 2707 if (!ret) {
9bf7933f
RP
2708 ssize_t ret2;
2709
2b188cc1
JA
2710 /*
2711 * Open-code file_start_write here to grab freeze protection,
2712 * which will be released by another thread in
2713 * io_complete_rw(). Fool lockdep by telling it the lock got
2714 * released so that it doesn't complain about the held lock when
2715 * we return to userspace.
2716 */
491381ce 2717 if (req->flags & REQ_F_ISREG) {
9adbd45d 2718 __sb_start_write(file_inode(req->file)->i_sb,
2b188cc1 2719 SB_FREEZE_WRITE, true);
9adbd45d 2720 __sb_writers_release(file_inode(req->file)->i_sb,
2b188cc1
JA
2721 SB_FREEZE_WRITE);
2722 }
2723 kiocb->ki_flags |= IOCB_WRITE;
9bf7933f 2724
4ed734b0
JA
2725 if (!force_nonblock)
2726 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2727
9adbd45d
JA
2728 if (req->file->f_op->write_iter)
2729 ret2 = call_write_iter(req->file, kiocb, &iter);
32960613 2730 else
9adbd45d 2731 ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
4ed734b0
JA
2732
2733 if (!force_nonblock)
2734 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2735
faac996c 2736 /*
bff6035d 2737 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
faac996c
JA
2738 * retry them without IOCB_NOWAIT.
2739 */
2740 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
2741 ret2 = -EAGAIN;
f67676d1 2742 if (!force_nonblock || ret2 != -EAGAIN) {
014db007 2743 kiocb_done(kiocb, ret2);
f67676d1
JA
2744 } else {
2745copy_iov:
b7bb4f7d 2746 ret = io_setup_async_rw(req, io_size, iovec,
f67676d1
JA
2747 inline_vecs, &iter);
2748 if (ret)
2749 goto out_free;
29de5f6a 2750 /* any defer from here is final; it must be retried from blocking context */
c5b85625
JA
2751 if (!(req->flags & REQ_F_NOWAIT) &&
2752 !file_can_poll(req->file))
490e8967 2753 req->flags |= REQ_F_MUST_PUNT;
f67676d1
JA
2754 return -EAGAIN;
2755 }
2b188cc1 2756 }
31b51510 2757out_free:
99bc4c38 2758 req->flags &= ~REQ_F_NEED_CLEANUP;
1e95081c 2759 kfree(iovec);
2b188cc1
JA
2760 return ret;
2761}
2762
f2a8d5c7
PB
2763static int __io_splice_prep(struct io_kiocb *req,
2764 const struct io_uring_sqe *sqe)
7d67af2c
PB
2765{
2766 struct io_splice *sp = &req->splice;
2767 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
2768 int ret;
2769
2770 if (req->flags & REQ_F_NEED_CLEANUP)
2771 return 0;
3232dd02
PB
2772 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2773 return -EINVAL;
7d67af2c
PB
2774
2775 sp->file_in = NULL;
7d67af2c
PB
2776 sp->len = READ_ONCE(sqe->len);
2777 sp->flags = READ_ONCE(sqe->splice_flags);
2778
2779 if (unlikely(sp->flags & ~valid_flags))
2780 return -EINVAL;
2781
2782 ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
2783 (sp->flags & SPLICE_F_FD_IN_FIXED));
2784 if (ret)
2785 return ret;
2786 req->flags |= REQ_F_NEED_CLEANUP;
2787
2788 if (!S_ISREG(file_inode(sp->file_in)->i_mode))
2789 req->work.flags |= IO_WQ_WORK_UNBOUND;
2790
2791 return 0;
2792}
2793
f2a8d5c7
PB
2794static int io_tee_prep(struct io_kiocb *req,
2795 const struct io_uring_sqe *sqe)
2796{
2797 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
2798 return -EINVAL;
2799 return __io_splice_prep(req, sqe);
2800}
2801
2802static int io_tee(struct io_kiocb *req, bool force_nonblock)
2803{
2804 struct io_splice *sp = &req->splice;
2805 struct file *in = sp->file_in;
2806 struct file *out = sp->file_out;
2807 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2808 long ret = 0;
2809
2810 if (force_nonblock)
2811 return -EAGAIN;
2812 if (sp->len)
2813 ret = do_tee(in, out, sp->len, flags);
2814
2815 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2816 req->flags &= ~REQ_F_NEED_CLEANUP;
2817
2818 io_cqring_add_event(req, ret);
2819 if (ret != sp->len)
2820 req_set_fail_links(req);
2821 io_put_req(req);
2822 return 0;
2823}
2824
2825static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2826{
2827 struct io_splice *sp = &req->splice;
2828
2829 sp->off_in = READ_ONCE(sqe->splice_off_in);
2830 sp->off_out = READ_ONCE(sqe->off);
2831 return __io_splice_prep(req, sqe);
2832}
2833
014db007 2834static int io_splice(struct io_kiocb *req, bool force_nonblock)
7d67af2c
PB
2835{
2836 struct io_splice *sp = &req->splice;
2837 struct file *in = sp->file_in;
2838 struct file *out = sp->file_out;
2839 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
2840 loff_t *poff_in, *poff_out;
c9687426 2841 long ret = 0;
7d67af2c 2842
2fb3e822
PB
2843 if (force_nonblock)
2844 return -EAGAIN;
7d67af2c
PB
2845
2846 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
2847 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
c9687426 2848
948a7749 2849 if (sp->len)
c9687426 2850 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
7d67af2c
PB
2851
2852 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
2853 req->flags &= ~REQ_F_NEED_CLEANUP;
2854
2855 io_cqring_add_event(req, ret);
2856 if (ret != sp->len)
2857 req_set_fail_links(req);
014db007 2858 io_put_req(req);
7d67af2c
PB
2859 return 0;
2860}
2861
2b188cc1
JA
2862/*
2863 * IORING_OP_NOP just posts a completion event, nothing else.
2864 */
78e19bbe 2865static int io_nop(struct io_kiocb *req)
2b188cc1
JA
2866{
2867 struct io_ring_ctx *ctx = req->ctx;
2b188cc1 2868
def596e9
JA
2869 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2870 return -EINVAL;
2871
78e19bbe 2872 io_cqring_add_event(req, 0);
e65ef56d 2873 io_put_req(req);
2b188cc1
JA
2874 return 0;
2875}
2876
3529d8c2 2877static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
c992fe29 2878{
6b06314c 2879 struct io_ring_ctx *ctx = req->ctx;
c992fe29 2880
09bb8394
JA
2881 if (!req->file)
2882 return -EBADF;
c992fe29 2883
6b06314c 2884 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
def596e9 2885 return -EINVAL;
edafccee 2886 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
c992fe29
CH
2887 return -EINVAL;
2888
8ed8d3c3
JA
2889 req->sync.flags = READ_ONCE(sqe->fsync_flags);
2890 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
2891 return -EINVAL;
2892
2893 req->sync.off = READ_ONCE(sqe->off);
2894 req->sync.len = READ_ONCE(sqe->len);
c992fe29
CH
2895 return 0;
2896}
2897
ac45abc0 2898static int io_fsync(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 2899{
8ed8d3c3 2900 loff_t end = req->sync.off + req->sync.len;
8ed8d3c3
JA
2901 int ret;
2902
ac45abc0
PB
2903 /* fsync always requires a blocking context */
2904 if (force_nonblock)
2905 return -EAGAIN;
2906
9adbd45d 2907 ret = vfs_fsync_range(req->file, req->sync.off,
8ed8d3c3
JA
2908 end > 0 ? end : LLONG_MAX,
2909 req->sync.flags & IORING_FSYNC_DATASYNC);
2910 if (ret < 0)
2911 req_set_fail_links(req);
2912 io_cqring_add_event(req, ret);
014db007 2913 io_put_req(req);
c992fe29
CH
2914 return 0;
2915}
2916
d63d1b5e
JA
2917static int io_fallocate_prep(struct io_kiocb *req,
2918 const struct io_uring_sqe *sqe)
2919{
2920 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
2921 return -EINVAL;
3232dd02
PB
2922 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2923 return -EINVAL;
d63d1b5e
JA
2924
2925 req->sync.off = READ_ONCE(sqe->off);
2926 req->sync.len = READ_ONCE(sqe->addr);
2927 req->sync.mode = READ_ONCE(sqe->len);
4ed734b0 2928 req->fsize = rlimit(RLIMIT_FSIZE);
d63d1b5e
JA
2929 return 0;
2930}
2931
014db007 2932static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
5d17b4a4 2933{
ac45abc0
PB
2934 int ret;
2935
d63d1b5e 2936 /* fallocate always requires a blocking context */
ac45abc0 2937 if (force_nonblock)
5d17b4a4
JA
2938 return -EAGAIN;
2939
ac45abc0
PB
2940 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
2941 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
2942 req->sync.len);
2943 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
2944 if (ret < 0)
2945 req_set_fail_links(req);
2946 io_cqring_add_event(req, ret);
2947 io_put_req(req);
5d17b4a4
JA
2948 return 0;
2949}
2950
ec65fea5 2951static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
b7bb4f7d 2952{
f8748881 2953 const char __user *fname;
15b71abe 2954 int ret;
b7bb4f7d 2955
3232dd02
PB
2956 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
2957 return -EINVAL;
ec65fea5 2958 if (unlikely(sqe->ioprio || sqe->buf_index))
15b71abe 2959 return -EINVAL;
ec65fea5 2960 if (unlikely(req->flags & REQ_F_FIXED_FILE))
cf3040ca 2961 return -EBADF;
03b1230c 2962
ec65fea5
PB
2963 /* open.how should already be initialised */
2964 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
2965 req->open.how.flags |= O_LARGEFILE;
3529d8c2 2966
25e72d10
PB
2967 req->open.dfd = READ_ONCE(sqe->fd);
2968 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
f8748881 2969 req->open.filename = getname(fname);
15b71abe
JA
2970 if (IS_ERR(req->open.filename)) {
2971 ret = PTR_ERR(req->open.filename);
2972 req->open.filename = NULL;
2973 return ret;
2974 }
4022e7af 2975 req->open.nofile = rlimit(RLIMIT_NOFILE);
8fef80bf 2976 req->flags |= REQ_F_NEED_CLEANUP;
15b71abe 2977 return 0;
03b1230c
JA
2978}
2979
ec65fea5
PB
2980static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2981{
2982 u64 flags, mode;
2983
2984 if (req->flags & REQ_F_NEED_CLEANUP)
2985 return 0;
2986 mode = READ_ONCE(sqe->len);
2987 flags = READ_ONCE(sqe->open_flags);
2988 req->open.how = build_open_how(flags, mode);
2989 return __io_openat_prep(req, sqe);
2990}
2991
cebdb986 2992static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
aa1fa28f 2993{
cebdb986 2994 struct open_how __user *how;
cebdb986 2995 size_t len;
0fa03c62
JA
2996 int ret;
2997
0bdbdd08
PB
2998 if (req->flags & REQ_F_NEED_CLEANUP)
2999 return 0;
cebdb986
JA
3000 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3001 len = READ_ONCE(sqe->len);
cebdb986
JA
3002 if (len < OPEN_HOW_SIZE_VER0)
3003 return -EINVAL;
3529d8c2 3004
cebdb986
JA
3005 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3006 len);
3007 if (ret)
3008 return ret;
3529d8c2 3009
ec65fea5 3010 return __io_openat_prep(req, sqe);
cebdb986
JA
3011}
3012
014db007 3013static int io_openat2(struct io_kiocb *req, bool force_nonblock)
15b71abe
JA
3014{
3015 struct open_flags op;
15b71abe
JA
3016 struct file *file;
3017 int ret;
3018
f86cd20c 3019 if (force_nonblock)
15b71abe 3020 return -EAGAIN;
15b71abe 3021
cebdb986 3022 ret = build_open_flags(&req->open.how, &op);
15b71abe
JA
3023 if (ret)
3024 goto err;
3025
4022e7af 3026 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
15b71abe
JA
3027 if (ret < 0)
3028 goto err;
3029
3030 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3031 if (IS_ERR(file)) {
3032 put_unused_fd(ret);
3033 ret = PTR_ERR(file);
3034 } else {
3035 fsnotify_open(file);
3036 fd_install(ret, file);
3037 }
3038err:
3039 putname(req->open.filename);
8fef80bf 3040 req->flags &= ~REQ_F_NEED_CLEANUP;
15b71abe
JA
3041 if (ret < 0)
3042 req_set_fail_links(req);
3043 io_cqring_add_event(req, ret);
014db007 3044 io_put_req(req);
15b71abe
JA
3045 return 0;
3046}
3047
014db007 3048static int io_openat(struct io_kiocb *req, bool force_nonblock)
cebdb986 3049{
014db007 3050 return io_openat2(req, force_nonblock);
cebdb986
JA
3051}
3052
067524e9
JA
3053static int io_remove_buffers_prep(struct io_kiocb *req,
3054 const struct io_uring_sqe *sqe)
3055{
3056 struct io_provide_buf *p = &req->pbuf;
3057 u64 tmp;
3058
3059 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3060 return -EINVAL;
3061
3062 tmp = READ_ONCE(sqe->fd);
3063 if (!tmp || tmp > USHRT_MAX)
3064 return -EINVAL;
3065
3066 memset(p, 0, sizeof(*p));
3067 p->nbufs = tmp;
3068 p->bgid = READ_ONCE(sqe->buf_group);
3069 return 0;
3070}
3071
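/*
 * Free up to 'nbufs' buffers from a provided-buffer group; the group head
 * itself is freed last and the group removed from the idr. Returns the
 * number of buffers freed.
 */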
3072static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3073 int bgid, unsigned nbufs)
3074{
3075 unsigned i = 0;
3076
3077 /* shouldn't happen */
3078 if (!nbufs)
3079 return 0;
3080
3081 /* the head kbuf is the list itself */
3082 while (!list_empty(&buf->list)) {
3083 struct io_buffer *nxt;
3084
3085 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3086 list_del(&nxt->list);
3087 kfree(nxt);
3088 if (++i == nbufs)
3089 return i;
3090 }
3091 i++;
3092 kfree(buf);
3093 idr_remove(&ctx->io_buffer_idr, bgid);
3094
3095 return i;
3096}
3097
3098static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock)
3099{
3100 struct io_provide_buf *p = &req->pbuf;
3101 struct io_ring_ctx *ctx = req->ctx;
3102 struct io_buffer *head;
3103 int ret = 0;
3104
3105 io_ring_submit_lock(ctx, !force_nonblock);
3106
3107 lockdep_assert_held(&ctx->uring_lock);
3108
3109 ret = -ENOENT;
3110 head = idr_find(&ctx->io_buffer_idr, p->bgid);
3111 if (head)
3112 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3113
3114 io_ring_submit_unlock(ctx, !force_nonblock);
3115 if (ret < 0)
3116 req_set_fail_links(req);
3117 io_cqring_add_event(req, ret);
3118 io_put_req(req);
3119 return 0;
3120}
3121
ddf0322d
JA
3122static int io_provide_buffers_prep(struct io_kiocb *req,
3123 const struct io_uring_sqe *sqe)
3124{
3125 struct io_provide_buf *p = &req->pbuf;
3126 u64 tmp;
3127
3128 if (sqe->ioprio || sqe->rw_flags)
3129 return -EINVAL;
3130
3131 tmp = READ_ONCE(sqe->fd);
3132 if (!tmp || tmp > USHRT_MAX)
3133 return -E2BIG;
3134 p->nbufs = tmp;
3135 p->addr = READ_ONCE(sqe->addr);
3136 p->len = READ_ONCE(sqe->len);
3137
efe68c1c 3138 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
ddf0322d
JA
3139 return -EFAULT;
3140
3141 p->bgid = READ_ONCE(sqe->buf_group);
3142 tmp = READ_ONCE(sqe->off);
3143 if (tmp > USHRT_MAX)
3144 return -E2BIG;
3145 p->bid = tmp;
3146 return 0;
3147}
3148
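/*
 * Allocate descriptors for the user-provided buffer range and append them
 * to the buffer group list; returns how many were added, or -ENOMEM if the
 * very first allocation failed.
 */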
3149static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3150{
3151 struct io_buffer *buf;
3152 u64 addr = pbuf->addr;
3153 int i, bid = pbuf->bid;
3154
3155 for (i = 0; i < pbuf->nbufs; i++) {
3156 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3157 if (!buf)
3158 break;
3159
3160 buf->addr = addr;
3161 buf->len = pbuf->len;
3162 buf->bid = bid;
3163 addr += pbuf->len;
3164 bid++;
3165 if (!*head) {
3166 INIT_LIST_HEAD(&buf->list);
3167 *head = buf;
3168 } else {
3169 list_add_tail(&buf->list, &(*head)->list);
3170 }
3171 }
3172
3173 return i ? i : -ENOMEM;
3174}
3175
ddf0322d
JA
3176static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
3177{
3178 struct io_provide_buf *p = &req->pbuf;
3179 struct io_ring_ctx *ctx = req->ctx;
3180 struct io_buffer *head, *list;
3181 int ret = 0;
3182
3183 io_ring_submit_lock(ctx, !force_nonblock);
3184
3185 lockdep_assert_held(&ctx->uring_lock);
3186
3187 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
3188
3189 ret = io_add_buffers(p, &head);
3190 if (ret < 0)
3191 goto out;
3192
3193 if (!list) {
3194 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
3195 GFP_KERNEL);
3196 if (ret < 0) {
067524e9 3197 __io_remove_buffers(ctx, head, p->bgid, -1U);
ddf0322d
JA
3198 goto out;
3199 }
3200 }
3201out:
3202 io_ring_submit_unlock(ctx, !force_nonblock);
3203 if (ret < 0)
3204 req_set_fail_links(req);
3205 io_cqring_add_event(req, ret);
3206 io_put_req(req);
3207 return 0;
cebdb986
JA
3208}
3209
3e4827b0
JA
3210static int io_epoll_ctl_prep(struct io_kiocb *req,
3211 const struct io_uring_sqe *sqe)
3212{
3213#if defined(CONFIG_EPOLL)
3214 if (sqe->ioprio || sqe->buf_index)
3215 return -EINVAL;
3232dd02
PB
3216 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3217 return -EINVAL;
3e4827b0
JA
3218
3219 req->epoll.epfd = READ_ONCE(sqe->fd);
3220 req->epoll.op = READ_ONCE(sqe->len);
3221 req->epoll.fd = READ_ONCE(sqe->off);
3222
3223 if (ep_op_has_event(req->epoll.op)) {
3224 struct epoll_event __user *ev;
3225
3226 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
3227 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
3228 return -EFAULT;
3229 }
3230
3231 return 0;
3232#else
3233 return -EOPNOTSUPP;
3234#endif
3235}
3236
014db007 3237static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
3e4827b0
JA
3238{
3239#if defined(CONFIG_EPOLL)
3240 struct io_epoll *ie = &req->epoll;
3241 int ret;
3242
3243 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
3244 if (force_nonblock && ret == -EAGAIN)
3245 return -EAGAIN;
3246
3247 if (ret < 0)
3248 req_set_fail_links(req);
3249 io_cqring_add_event(req, ret);
014db007 3250 io_put_req(req);
3e4827b0
JA
3251 return 0;
3252#else
3253 return -EOPNOTSUPP;
3254#endif
3255}
3256
c1ca757b
JA
3257static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3258{
3259#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3260 if (sqe->ioprio || sqe->buf_index || sqe->off)
3261 return -EINVAL;
3232dd02
PB
3262 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3263 return -EINVAL;
c1ca757b
JA
3264
3265 req->madvise.addr = READ_ONCE(sqe->addr);
3266 req->madvise.len = READ_ONCE(sqe->len);
3267 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
3268 return 0;
3269#else
3270 return -EOPNOTSUPP;
3271#endif
3272}
3273
014db007 3274static int io_madvise(struct io_kiocb *req, bool force_nonblock)
c1ca757b
JA
3275{
3276#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3277 struct io_madvise *ma = &req->madvise;
3278 int ret;
3279
3280 if (force_nonblock)
3281 return -EAGAIN;
3282
3283 ret = do_madvise(ma->addr, ma->len, ma->advice);
3284 if (ret < 0)
3285 req_set_fail_links(req);
3286 io_cqring_add_event(req, ret);
014db007 3287 io_put_req(req);
c1ca757b
JA
3288 return 0;
3289#else
3290 return -EOPNOTSUPP;
3291#endif
3292}
3293
4840e418
JA
3294static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3295{
3296 if (sqe->ioprio || sqe->buf_index || sqe->addr)
3297 return -EINVAL;
3232dd02
PB
3298 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3299 return -EINVAL;
4840e418
JA
3300
3301 req->fadvise.offset = READ_ONCE(sqe->off);
3302 req->fadvise.len = READ_ONCE(sqe->len);
3303 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
3304 return 0;
3305}
3306
014db007 3307static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
4840e418
JA
3308{
3309 struct io_fadvise *fa = &req->fadvise;
3310 int ret;
3311
3e69426d
JA
3312 if (force_nonblock) {
3313 switch (fa->advice) {
3314 case POSIX_FADV_NORMAL:
3315 case POSIX_FADV_RANDOM:
3316 case POSIX_FADV_SEQUENTIAL:
3317 break;
3318 default:
3319 return -EAGAIN;
3320 }
3321 }
4840e418
JA
3322
3323 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
3324 if (ret < 0)
3325 req_set_fail_links(req);
3326 io_cqring_add_event(req, ret);
014db007 3327 io_put_req(req);
4840e418
JA
3328 return 0;
3329}
3330
eddc7ef5
JA
3331static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3332{
3232dd02
PB
3333 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3334 return -EINVAL;
eddc7ef5
JA
3335 if (sqe->ioprio || sqe->buf_index)
3336 return -EINVAL;
9c280f90 3337 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3338 return -EBADF;
eddc7ef5 3339
1d9e1288
BM
3340 req->statx.dfd = READ_ONCE(sqe->fd);
3341 req->statx.mask = READ_ONCE(sqe->len);
e62753e4 3342 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
1d9e1288
BM
3343 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3344 req->statx.flags = READ_ONCE(sqe->statx_flags);
eddc7ef5
JA
3345
3346 return 0;
3347}
3348
014db007 3349static int io_statx(struct io_kiocb *req, bool force_nonblock)
eddc7ef5 3350{
1d9e1288 3351 struct io_statx *ctx = &req->statx;
eddc7ef5
JA
3352 int ret;
3353
5b0bbee4
JA
3354 if (force_nonblock) {
3355 /* only need file table for an actual valid fd */
3356 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
3357 req->flags |= REQ_F_NO_FILE_TABLE;
eddc7ef5 3358 return -EAGAIN;
5b0bbee4 3359 }
eddc7ef5 3360
e62753e4
BM
3361 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
3362 ctx->buffer);
eddc7ef5 3363
eddc7ef5
JA
3364 if (ret < 0)
3365 req_set_fail_links(req);
3366 io_cqring_add_event(req, ret);
014db007 3367 io_put_req(req);
eddc7ef5
JA
3368 return 0;
3369}
3370
b5dba59e
JA
3371static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3372{
3373 /*
3374 * If we queue this for async, it must not be cancellable. That would
3375 * leave the 'file' in an indeterminate state.
3376 */
3377 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
3378
3232dd02
PB
3379 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3380 return -EINVAL;
b5dba59e
JA
3381 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
3382 sqe->rw_flags || sqe->buf_index)
3383 return -EINVAL;
9c280f90 3384 if (req->flags & REQ_F_FIXED_FILE)
cf3040ca 3385 return -EBADF;
b5dba59e
JA
3386
3387 req->close.fd = READ_ONCE(sqe->fd);
fd2206e4
JA
3388 if ((req->file && req->file->f_op == &io_uring_fops) ||
3389 req->close.fd == req->ctx->ring_fd)
3390 return -EBADF;
3391
3af73b28 3392 req->close.put_file = NULL;
b5dba59e
JA
3393 return 0;
3394}
3395
014db007 3396static int io_close(struct io_kiocb *req, bool force_nonblock)
b5dba59e 3397{
3af73b28 3398 struct io_close *close = &req->close;
b5dba59e
JA
3399 int ret;
3400
3af73b28
PB
3401 /* might already be done from the nonblock submission */
3402 if (!close->put_file) {
3403 ret = __close_fd_get_file(close->fd, &close->put_file);
3404 if (ret < 0)
3405 return (ret == -ENOENT) ? -EBADF : ret;
3406 }
b5dba59e
JA
3407
3408 /* if the file has a flush method, be safe and punt to async */
3af73b28 3409 if (close->put_file->f_op->flush && force_nonblock) {
0bf0eefd
PB
3410 /* avoid grabbing the file table - we don't need it here */
3411 req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT;
0bf0eefd 3412 return -EAGAIN;
a2100672 3413 }
b5dba59e 3414
3af73b28
PB
3415 /* No ->flush() or already async, safely close from here */
3416 ret = filp_close(close->put_file, req->work.files);
3417 if (ret < 0)
3418 req_set_fail_links(req);
3419 io_cqring_add_event(req, ret);
3420 fput(close->put_file);
3421 close->put_file = NULL;
3422 io_put_req(req);
1a417f4e 3423 return 0;
b5dba59e
JA
3424}
3425
3529d8c2 3426static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5d17b4a4
JA
3427{
3428 struct io_ring_ctx *ctx = req->ctx;
5d17b4a4
JA
3429
3430 if (!req->file)
3431 return -EBADF;
5d17b4a4
JA
3432
3433 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3434 return -EINVAL;
3435 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3436 return -EINVAL;
3437
8ed8d3c3
JA
3438 req->sync.off = READ_ONCE(sqe->off);
3439 req->sync.len = READ_ONCE(sqe->len);
3440 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
8ed8d3c3
JA
3441 return 0;
3442}
3443
ac45abc0 3444static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3 3445{
8ed8d3c3
JA
3446 int ret;
3447
ac45abc0
PB
3448 /* sync_file_range always requires a blocking context */
3449 if (force_nonblock)
3450 return -EAGAIN;
3451
9adbd45d 3452 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
8ed8d3c3
JA
3453 req->sync.flags);
3454 if (ret < 0)
3455 req_set_fail_links(req);
3456 io_cqring_add_event(req, ret);
014db007 3457 io_put_req(req);
5d17b4a4
JA
3458 return 0;
3459}
3460
469956e8 3461#if defined(CONFIG_NET)
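/*
 * Preserve the msghdr/iovec state in the request's async context so a
 * sendmsg/recvmsg that hit -EAGAIN can be retried from async context.
 */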
02d27d89
PB
3462static int io_setup_async_msg(struct io_kiocb *req,
3463 struct io_async_msghdr *kmsg)
3464{
3465 if (req->io)
3466 return -EAGAIN;
3467 if (io_alloc_async_ctx(req)) {
3468 if (kmsg->iov != kmsg->fast_iov)
3469 kfree(kmsg->iov);
3470 return -ENOMEM;
3471 }
3472 req->flags |= REQ_F_NEED_CLEANUP;
3473 memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
3474 return -EAGAIN;
3475}
3476
3529d8c2 3477static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
03b1230c 3478{
e47293fd 3479 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3480 struct io_async_ctx *io = req->io;
99bc4c38 3481 int ret;
03b1230c 3482
d2b6f48b
PB
3483 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3484 return -EINVAL;
3485
e47293fd
JA
3486 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3487 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
fddaface 3488 sr->len = READ_ONCE(sqe->len);
3529d8c2 3489
d8768362
JA
3490#ifdef CONFIG_COMPAT
3491 if (req->ctx->compat)
3492 sr->msg_flags |= MSG_CMSG_COMPAT;
3493#endif
3494
fddaface 3495 if (!io || req->opcode == IORING_OP_SEND)
3529d8c2 3496 return 0;
5f798bea
PB
3497 /* iovec is already imported */
3498 if (req->flags & REQ_F_NEED_CLEANUP)
3499 return 0;
3529d8c2 3500
d9688565 3501 io->msg.iov = io->msg.fast_iov;
99bc4c38 3502 ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
e47293fd 3503 &io->msg.iov);
99bc4c38
PB
3504 if (!ret)
3505 req->flags |= REQ_F_NEED_CLEANUP;
3506 return ret;
03b1230c
JA
3507}
3508
014db007 3509static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
aa1fa28f 3510{
0b416c3e 3511 struct io_async_msghdr *kmsg = NULL;
0fa03c62
JA
3512 struct socket *sock;
3513 int ret;
3514
0fa03c62
JA
3515 sock = sock_from_file(req->file, &ret);
3516 if (sock) {
b7bb4f7d 3517 struct io_async_ctx io;
0fa03c62
JA
3518 unsigned flags;
3519
03b1230c 3520 if (req->io) {
0b416c3e 3521 kmsg = &req->io->msg;
b537916c 3522 kmsg->msg.msg_name = &req->io->msg.addr;
0b416c3e
JA
3523 /* if iov is set, it's allocated already */
3524 if (!kmsg->iov)
3525 kmsg->iov = kmsg->fast_iov;
3526 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3527 } else {
3529d8c2
JA
3528 struct io_sr_msg *sr = &req->sr_msg;
3529
0b416c3e 3530 kmsg = &io.msg;
b537916c 3531 kmsg->msg.msg_name = &io.msg.addr;
3529d8c2
JA
3532
3533 io.msg.iov = io.msg.fast_iov;
3534 ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
3535 sr->msg_flags, &io.msg.iov);
03b1230c 3536 if (ret)
3529d8c2 3537 return ret;
03b1230c 3538 }
0fa03c62 3539
e47293fd
JA
3540 flags = req->sr_msg.msg_flags;
3541 if (flags & MSG_DONTWAIT)
3542 req->flags |= REQ_F_NOWAIT;
3543 else if (force_nonblock)
3544 flags |= MSG_DONTWAIT;
3545
0b416c3e 3546 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
02d27d89
PB
3547 if (force_nonblock && ret == -EAGAIN)
3548 return io_setup_async_msg(req, kmsg);
441cdbd5
JA
3549 if (ret == -ERESTARTSYS)
3550 ret = -EINTR;
0fa03c62
JA
3551 }
3552
1e95081c 3553 if (kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3554 kfree(kmsg->iov);
99bc4c38 3555 req->flags &= ~REQ_F_NEED_CLEANUP;
78e19bbe 3556 io_cqring_add_event(req, ret);
4e88d6e7
JA
3557 if (ret < 0)
3558 req_set_fail_links(req);
014db007 3559 io_put_req(req);
5d17b4a4 3560 return 0;
03b1230c 3561}
aa1fa28f 3562
014db007 3563static int io_send(struct io_kiocb *req, bool force_nonblock)
fddaface 3564{
fddaface
JA
3565 struct socket *sock;
3566 int ret;
3567
fddaface
JA
3568 sock = sock_from_file(req->file, &ret);
3569 if (sock) {
3570 struct io_sr_msg *sr = &req->sr_msg;
3571 struct msghdr msg;
3572 struct iovec iov;
3573 unsigned flags;
3574
3575 ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
3576 &msg.msg_iter);
3577 if (ret)
3578 return ret;
3579
3580 msg.msg_name = NULL;
3581 msg.msg_control = NULL;
3582 msg.msg_controllen = 0;
3583 msg.msg_namelen = 0;
3584
3585 flags = req->sr_msg.msg_flags;
3586 if (flags & MSG_DONTWAIT)
3587 req->flags |= REQ_F_NOWAIT;
3588 else if (force_nonblock)
3589 flags |= MSG_DONTWAIT;
3590
0b7b21e4
JA
3591 msg.msg_flags = flags;
3592 ret = sock_sendmsg(sock, &msg);
fddaface
JA
3593 if (force_nonblock && ret == -EAGAIN)
3594 return -EAGAIN;
3595 if (ret == -ERESTARTSYS)
3596 ret = -EINTR;
3597 }
3598
3599 io_cqring_add_event(req, ret);
3600 if (ret < 0)
3601 req_set_fail_links(req);
014db007 3602 io_put_req(req);
fddaface 3603 return 0;
fddaface
JA
3604}
3605
52de1fe1
JA
3606static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3607{
3608 struct io_sr_msg *sr = &req->sr_msg;
3609 struct iovec __user *uiov;
3610 size_t iov_len;
3611 int ret;
3612
3613 ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr,
3614 &uiov, &iov_len);
3615 if (ret)
3616 return ret;
3617
3618 if (req->flags & REQ_F_BUFFER_SELECT) {
3619 if (iov_len > 1)
3620 return -EINVAL;
3621 if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov)))
3622 return -EFAULT;
3623 sr->len = io->msg.iov[0].iov_len;
3624 iov_iter_init(&io->msg.msg.msg_iter, READ, io->msg.iov, 1,
3625 sr->len);
3626 io->msg.iov = NULL;
3627 } else {
3628 ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
3629 &io->msg.iov, &io->msg.msg.msg_iter);
3630 if (ret > 0)
3631 ret = 0;
3632 }
3633
3634 return ret;
3635}
3636
3637#ifdef CONFIG_COMPAT
3638static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
3639 struct io_async_ctx *io)
3640{
3641 struct compat_msghdr __user *msg_compat;
3642 struct io_sr_msg *sr = &req->sr_msg;
3643 struct compat_iovec __user *uiov;
3644 compat_uptr_t ptr;
3645 compat_size_t len;
3646 int ret;
3647
3648 msg_compat = (struct compat_msghdr __user *) sr->msg;
3649 ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr,
3650 &ptr, &len);
3651 if (ret)
3652 return ret;
3653
3654 uiov = compat_ptr(ptr);
3655 if (req->flags & REQ_F_BUFFER_SELECT) {
3656 compat_ssize_t clen;
3657
3658 if (len > 1)
3659 return -EINVAL;
3660 if (!access_ok(uiov, sizeof(*uiov)))
3661 return -EFAULT;
3662 if (__get_user(clen, &uiov->iov_len))
3663 return -EFAULT;
3664 if (clen < 0)
3665 return -EINVAL;
3666 sr->len = io->msg.iov[0].iov_len;
3667 io->msg.iov = NULL;
3668 } else {
3669 ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
3670 &io->msg.iov,
3671 &io->msg.msg.msg_iter);
3672 if (ret < 0)
3673 return ret;
3674 }
3675
3676 return 0;
3677}
3678#endif
3679
3680static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
3681{
3682 io->msg.iov = io->msg.fast_iov;
3683
3684#ifdef CONFIG_COMPAT
3685 if (req->ctx->compat)
3686 return __io_compat_recvmsg_copy_hdr(req, io);
fddaface 3687#endif
52de1fe1
JA
3688
3689 return __io_recvmsg_copy_hdr(req, io);
3690}
3691
bcda7baa
JA
3692static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
3693 int *cflags, bool needs_lock)
3694{
3695 struct io_sr_msg *sr = &req->sr_msg;
3696 struct io_buffer *kbuf;
3697
3698 if (!(req->flags & REQ_F_BUFFER_SELECT))
3699 return NULL;
3700
3701 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
3702 if (IS_ERR(kbuf))
3703 return kbuf;
3704
3705 sr->kbuf = kbuf;
3706 req->flags |= REQ_F_BUFFER_SELECTED;
3707
3708 *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
3709 *cflags |= IORING_CQE_F_BUFFER;
3710 return kbuf;
fddaface
JA
3711}
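/*
 * Editor's note: the cflags packing above is what userspace unpacks to learn
 * which provided buffer the kernel picked. A hedged consumer-side sketch (not
 * part of this file), assuming "cqe" is the completion of a request that set
 * IOSQE_BUFFER_SELECT:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// 'bid' identifies the buffer, within the group named by
 *		// sqe->buf_group, that the kernel consumed for this request.
 *	}
 */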
3712
3529d8c2
JA
3713static int io_recvmsg_prep(struct io_kiocb *req,
3714 const struct io_uring_sqe *sqe)
aa1fa28f 3715{
e47293fd 3716 struct io_sr_msg *sr = &req->sr_msg;
3529d8c2 3717 struct io_async_ctx *io = req->io;
99bc4c38 3718 int ret;
3529d8c2 3719
d2b6f48b
PB
3720 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3721 return -EINVAL;
3722
3529d8c2
JA
3723 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3724 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
0b7b21e4 3725 sr->len = READ_ONCE(sqe->len);
bcda7baa 3726 sr->bgid = READ_ONCE(sqe->buf_group);
06b76d44 3727
d8768362
JA
3728#ifdef CONFIG_COMPAT
3729 if (req->ctx->compat)
3730 sr->msg_flags |= MSG_CMSG_COMPAT;
3731#endif
3732
fddaface 3733 if (!io || req->opcode == IORING_OP_RECV)
06b76d44 3734 return 0;
5f798bea
PB
3735 /* iovec is already imported */
3736 if (req->flags & REQ_F_NEED_CLEANUP)
3737 return 0;
03b1230c 3738
52de1fe1 3739 ret = io_recvmsg_copy_hdr(req, io);
99bc4c38
PB
3740 if (!ret)
3741 req->flags |= REQ_F_NEED_CLEANUP;
3742 return ret;
aa1fa28f
JA
3743}
3744
014db007 3745static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
aa1fa28f 3746{
0b416c3e 3747 struct io_async_msghdr *kmsg = NULL;
03b1230c 3748 struct socket *sock;
52de1fe1 3749 int ret, cflags = 0;
03b1230c 3750
03b1230c
JA
3751 sock = sock_from_file(req->file, &ret);
3752 if (sock) {
52de1fe1 3753 struct io_buffer *kbuf;
b7bb4f7d 3754 struct io_async_ctx io;
03b1230c
JA
3755 unsigned flags;
3756
03b1230c 3757 if (req->io) {
0b416c3e 3758 kmsg = &req->io->msg;
b537916c 3759 kmsg->msg.msg_name = &req->io->msg.addr;
0b416c3e
JA
3760 /* if iov is set, it's allocated already */
3761 if (!kmsg->iov)
3762 kmsg->iov = kmsg->fast_iov;
3763 kmsg->msg.msg_iter.iov = kmsg->iov;
03b1230c 3764 } else {
0b416c3e 3765 kmsg = &io.msg;
b537916c 3766 kmsg->msg.msg_name = &io.msg.addr;
3529d8c2 3767
52de1fe1 3768 ret = io_recvmsg_copy_hdr(req, &io);
03b1230c 3769 if (ret)
3529d8c2 3770 return ret;
03b1230c
JA
3771 }
3772
52de1fe1
JA
3773 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3774 if (IS_ERR(kbuf)) {
3775 return PTR_ERR(kbuf);
3776 } else if (kbuf) {
3777 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3778 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
3779 1, req->sr_msg.len);
3780 }
3781
e47293fd
JA
3782 flags = req->sr_msg.msg_flags;
3783 if (flags & MSG_DONTWAIT)
3784 req->flags |= REQ_F_NOWAIT;
3785 else if (force_nonblock)
3786 flags |= MSG_DONTWAIT;
3787
3788 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
3789 kmsg->uaddr, flags);
02d27d89
PB
3790 if (force_nonblock && ret == -EAGAIN)
3791 return io_setup_async_msg(req, kmsg);
03b1230c
JA
3792 if (ret == -ERESTARTSYS)
3793 ret = -EINTR;
3794 }
3795
1e95081c 3796 if (kmsg && kmsg->iov != kmsg->fast_iov)
0b416c3e 3797 kfree(kmsg->iov);
99bc4c38 3798 req->flags &= ~REQ_F_NEED_CLEANUP;
52de1fe1 3799 __io_cqring_add_event(req, ret, cflags);
4e88d6e7
JA
3800 if (ret < 0)
3801 req_set_fail_links(req);
014db007 3802 io_put_req(req);
03b1230c 3803 return 0;
0fa03c62 3804}
5d17b4a4 3805
014db007 3806static int io_recv(struct io_kiocb *req, bool force_nonblock)
fddaface 3807{
bcda7baa 3808 struct io_buffer *kbuf = NULL;
fddaface 3809 struct socket *sock;
bcda7baa 3810 int ret, cflags = 0;
fddaface 3811
fddaface
JA
3812 sock = sock_from_file(req->file, &ret);
3813 if (sock) {
3814 struct io_sr_msg *sr = &req->sr_msg;
bcda7baa 3815 void __user *buf = sr->buf;
fddaface
JA
3816 struct msghdr msg;
3817 struct iovec iov;
3818 unsigned flags;
3819
bcda7baa
JA
3820 kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
3821 if (IS_ERR(kbuf))
3822 return PTR_ERR(kbuf);
3823 else if (kbuf)
3824 buf = u64_to_user_ptr(kbuf->addr);
3825
3826 ret = import_single_range(READ, buf, sr->len, &iov,
fddaface 3827 &msg.msg_iter);
bcda7baa
JA
3828 if (ret) {
3829 kfree(kbuf);
fddaface 3830 return ret;
bcda7baa 3831 }
fddaface 3832
bcda7baa 3833 req->flags |= REQ_F_NEED_CLEANUP;
fddaface
JA
3834 msg.msg_name = NULL;
3835 msg.msg_control = NULL;
3836 msg.msg_controllen = 0;
3837 msg.msg_namelen = 0;
3838 msg.msg_iocb = NULL;
3839 msg.msg_flags = 0;
3840
3841 flags = req->sr_msg.msg_flags;
3842 if (flags & MSG_DONTWAIT)
3843 req->flags |= REQ_F_NOWAIT;
3844 else if (force_nonblock)
3845 flags |= MSG_DONTWAIT;
3846
0b7b21e4 3847 ret = sock_recvmsg(sock, &msg, flags);
fddaface
JA
3848 if (force_nonblock && ret == -EAGAIN)
3849 return -EAGAIN;
3850 if (ret == -ERESTARTSYS)
3851 ret = -EINTR;
3852 }
3853
bcda7baa
JA
3854 kfree(kbuf);
3855 req->flags &= ~REQ_F_NEED_CLEANUP;
3856 __io_cqring_add_event(req, ret, cflags);
fddaface
JA
3857 if (ret < 0)
3858 req_set_fail_links(req);
014db007 3859 io_put_req(req);
fddaface 3860 return 0;
fddaface
JA
3861}
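/*
 * Editor's note: a hedged sketch (not part of this file) of driving the
 * buffer-select recv path above from userspace with liburing. "ring",
 * "sockfd", "bufs" (NBUFS contiguous buffers of BUF_LEN bytes) and buffer
 * group 0 are assumptions; error handling is elided.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// hand NBUFS buffers to the kernel: group id 0, buffer ids starting at 0
 *	io_uring_prep_provide_buffers(sqe, bufs, BUF_LEN, NBUFS, 0, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, BUF_LEN, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;	// let the kernel pick a buffer
 *	sqe->buf_group = 0;
 *	io_uring_submit(&ring);
 */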
3862
3529d8c2 3863static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
17f2fe35 3864{
8ed8d3c3
JA
3865 struct io_accept *accept = &req->accept;
3866
17f2fe35
JA
3867 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3868 return -EINVAL;
8042d6ce 3869 if (sqe->ioprio || sqe->len || sqe->buf_index)
17f2fe35
JA
3870 return -EINVAL;
3871
d55e5f5b
JA
3872 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3873 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
8ed8d3c3 3874 accept->flags = READ_ONCE(sqe->accept_flags);
09952e3e 3875 accept->nofile = rlimit(RLIMIT_NOFILE);
8ed8d3c3 3876 return 0;
8ed8d3c3 3877}
17f2fe35 3878
ac45abc0 3879static int io_accept(struct io_kiocb *req, bool force_nonblock)
8ed8d3c3
JA
3880{
3881 struct io_accept *accept = &req->accept;
ac45abc0 3882 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
8ed8d3c3
JA
3883 int ret;
3884
8ed8d3c3 3885 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
09952e3e
JA
3886 accept->addr_len, accept->flags,
3887 accept->nofile);
8ed8d3c3 3888 if (ret == -EAGAIN && force_nonblock)
17f2fe35 3889 return -EAGAIN;
ac45abc0
PB
3890 if (ret < 0) {
3891 if (ret == -ERESTARTSYS)
3892 ret = -EINTR;
4e88d6e7 3893 req_set_fail_links(req);
ac45abc0 3894 }
78e19bbe 3895 io_cqring_add_event(req, ret);
014db007 3896 io_put_req(req);
17f2fe35 3897 return 0;
8ed8d3c3
JA
3898}
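/*
 * Editor's note: a minimal liburing sketch (not part of this file) of the
 * IORING_OP_ACCEPT path above; "ring" and "listen_fd" are assumptions and
 * error handling is elided.
 *
 *	struct sockaddr_storage addr;
 *	socklen_t addrlen = sizeof(addr);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&addr, &addrlen, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);		// cqe->res is the accepted fd or -errno
 *	io_uring_cqe_seen(&ring, cqe);
 */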
3899
3529d8c2 3900static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
f499a021 3901{
3529d8c2
JA
3902 struct io_connect *conn = &req->connect;
3903 struct io_async_ctx *io = req->io;
f499a021 3904
3fbb51c1
JA
3905 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3906 return -EINVAL;
3907 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
3908 return -EINVAL;
3909
3529d8c2
JA
3910 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3911 conn->addr_len = READ_ONCE(sqe->addr2);
3912
3913 if (!io)
3914 return 0;
3915
3916 return move_addr_to_kernel(conn->addr, conn->addr_len,
3fbb51c1 3917 &io->connect.address);
f499a021
JA
3918}
3919
014db007 3920static int io_connect(struct io_kiocb *req, bool force_nonblock)
f8e85cf2 3921{
f499a021 3922 struct io_async_ctx __io, *io;
f8e85cf2 3923 unsigned file_flags;
3fbb51c1 3924 int ret;
f8e85cf2 3925
f499a021
JA
3926 if (req->io) {
3927 io = req->io;
3928 } else {
3529d8c2
JA
3929 ret = move_addr_to_kernel(req->connect.addr,
3930 req->connect.addr_len,
3931 &__io.connect.address);
f499a021
JA
3932 if (ret)
3933 goto out;
3934 io = &__io;
3935 }
3936
3fbb51c1
JA
3937 file_flags = force_nonblock ? O_NONBLOCK : 0;
3938
3939 ret = __sys_connect_file(req->file, &io->connect.address,
3940 req->connect.addr_len, file_flags);
87f80d62 3941 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
b7bb4f7d
JA
3942 if (req->io)
3943 return -EAGAIN;
3944 if (io_alloc_async_ctx(req)) {
f499a021
JA
3945 ret = -ENOMEM;
3946 goto out;
3947 }
b7bb4f7d 3948 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
f8e85cf2 3949 return -EAGAIN;
f499a021 3950 }
f8e85cf2
JA
3951 if (ret == -ERESTARTSYS)
3952 ret = -EINTR;
f499a021 3953out:
4e88d6e7
JA
3954 if (ret < 0)
3955 req_set_fail_links(req);
f8e85cf2 3956 io_cqring_add_event(req, ret);
014db007 3957 io_put_req(req);
f8e85cf2 3958 return 0;
469956e8
Y
3959}
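/*
 * Editor's note: a minimal liburing sketch (not part of this file) of the
 * IORING_OP_CONNECT path above; "ring", "sockfd" and a filled-in
 * "struct sockaddr_in dst" are assumptions.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&dst, sizeof(dst));
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);		// cqe->res is 0 on success, -errno otherwise
 *	io_uring_cqe_seen(&ring, cqe);
 */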
3960#else /* !CONFIG_NET */
3961static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3962{
f8e85cf2 3963 return -EOPNOTSUPP;
f8e85cf2
JA
3964}
3965
469956e8
Y
3966static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
3967{
3968 return -EOPNOTSUPP;
3969}
3970
3971static int io_send(struct io_kiocb *req, bool force_nonblock)
3972{
3973 return -EOPNOTSUPP;
3974}
3975
3976static int io_recvmsg_prep(struct io_kiocb *req,
3977 const struct io_uring_sqe *sqe)
3978{
3979 return -EOPNOTSUPP;
3980}
3981
3982static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
3983{
3984 return -EOPNOTSUPP;
3985}
3986
3987static int io_recv(struct io_kiocb *req, bool force_nonblock)
3988{
3989 return -EOPNOTSUPP;
3990}
3991
3992static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3993{
3994 return -EOPNOTSUPP;
3995}
3996
3997static int io_accept(struct io_kiocb *req, bool force_nonblock)
3998{
3999 return -EOPNOTSUPP;
4000}
4001
4002static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4003{
4004 return -EOPNOTSUPP;
4005}
4006
4007static int io_connect(struct io_kiocb *req, bool force_nonblock)
4008{
f8e85cf2 4009 return -EOPNOTSUPP;
f8e85cf2 4010}
469956e8 4011#endif /* CONFIG_NET */
f8e85cf2 4012
d7718a9d
JA
4013struct io_poll_table {
4014 struct poll_table_struct pt;
4015 struct io_kiocb *req;
4016 int error;
4017};
4018
d7718a9d
JA
4019static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4020 __poll_t mask, task_work_func_t func)
4021{
4022 struct task_struct *tsk;
aa96bf8a 4023 int ret;
d7718a9d
JA
4024
 4025	/* for instances that support it, check for an event match first: */
4026 if (mask && !(mask & poll->events))
4027 return 0;
4028
4029 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4030
4031 list_del_init(&poll->wait.entry);
4032
4033 tsk = req->task;
4034 req->result = mask;
4035 init_task_work(&req->task_work, func);
4036 /*
e3aabf95
JA
4037 * If this fails, then the task is exiting. When a task exits, the
4038 * work gets canceled, so just cancel this request as well instead
 4039	 * of executing it. We can't safely execute it anyway, as we may not
 4040	 * have the state needed for it.
d7718a9d 4041 */
aa96bf8a
JA
4042 ret = task_work_add(tsk, &req->task_work, true);
4043 if (unlikely(ret)) {
e3aabf95 4044 WRITE_ONCE(poll->canceled, true);
aa96bf8a
JA
4045 tsk = io_wq_get_task(req->ctx->io_wq);
4046 task_work_add(tsk, &req->task_work, true);
4047 }
d7718a9d
JA
4048 wake_up_process(tsk);
4049 return 1;
4050}
4051
74ce6ce4
JA
4052static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4053 __acquires(&req->ctx->completion_lock)
4054{
4055 struct io_ring_ctx *ctx = req->ctx;
4056
4057 if (!req->result && !READ_ONCE(poll->canceled)) {
4058 struct poll_table_struct pt = { ._key = poll->events };
4059
4060 req->result = vfs_poll(req->file, &pt) & poll->events;
4061 }
4062
4063 spin_lock_irq(&ctx->completion_lock);
4064 if (!req->result && !READ_ONCE(poll->canceled)) {
4065 add_wait_queue(poll->head, &poll->wait);
4066 return true;
4067 }
4068
4069 return false;
4070}
4071
18bceab1
JA
4072static void io_poll_remove_double(struct io_kiocb *req)
4073{
4074 struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
4075
4076 lockdep_assert_held(&req->ctx->completion_lock);
4077
4078 if (poll && poll->head) {
4079 struct wait_queue_head *head = poll->head;
4080
4081 spin_lock(&head->lock);
4082 list_del_init(&poll->wait.entry);
4083 if (poll->wait.private)
4084 refcount_dec(&req->refs);
4085 poll->head = NULL;
4086 spin_unlock(&head->lock);
4087 }
4088}
4089
4090static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4091{
4092 struct io_ring_ctx *ctx = req->ctx;
4093
4094 io_poll_remove_double(req);
4095 req->poll.done = true;
4096 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4097 io_commit_cqring(ctx);
4098}
4099
4100static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4101{
4102 struct io_ring_ctx *ctx = req->ctx;
4103
4104 if (io_poll_rewait(req, &req->poll)) {
4105 spin_unlock_irq(&ctx->completion_lock);
4106 return;
4107 }
4108
4109 hash_del(&req->hash_node);
4110 io_poll_complete(req, req->result, 0);
4111 req->flags |= REQ_F_COMP_LOCKED;
4112 io_put_req_find_next(req, nxt);
4113 spin_unlock_irq(&ctx->completion_lock);
4114
4115 io_cqring_ev_posted(ctx);
4116}
4117
4118static void io_poll_task_func(struct callback_head *cb)
4119{
4120 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4121 struct io_kiocb *nxt = NULL;
4122
4123 io_poll_task_handler(req, &nxt);
4124 if (nxt) {
4125 struct io_ring_ctx *ctx = nxt->ctx;
4126
4127 mutex_lock(&ctx->uring_lock);
4128 __io_queue_sqe(nxt, NULL);
4129 mutex_unlock(&ctx->uring_lock);
4130 }
4131}
4132
4133static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4134 int sync, void *key)
4135{
4136 struct io_kiocb *req = wait->private;
4137 struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
4138 __poll_t mask = key_to_poll(key);
4139
 4140	/* for instances that support it, check for an event match first: */
4141 if (mask && !(mask & poll->events))
4142 return 0;
4143
4144 if (req->poll.head) {
4145 bool done;
4146
4147 spin_lock(&req->poll.head->lock);
4148 done = list_empty(&req->poll.wait.entry);
4149 if (!done)
4150 list_del_init(&req->poll.wait.entry);
4151 spin_unlock(&req->poll.head->lock);
4152 if (!done)
4153 __io_async_wake(req, poll, mask, io_poll_task_func);
4154 }
4155 refcount_dec(&req->refs);
4156 return 1;
4157}
4158
4159static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4160 wait_queue_func_t wake_func)
4161{
4162 poll->head = NULL;
4163 poll->done = false;
4164 poll->canceled = false;
4165 poll->events = events;
4166 INIT_LIST_HEAD(&poll->wait.entry);
4167 init_waitqueue_func_entry(&poll->wait, wake_func);
4168}
4169
4170static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
4171 struct wait_queue_head *head)
4172{
4173 struct io_kiocb *req = pt->req;
4174
4175 /*
4176 * If poll->head is already set, it's because the file being polled
 4177	 * uses multiple waitqueues for poll handling (e.g. one for read, one
 4178	 * for write). Set up a separate io_poll_iocb if this happens.
4179 */
4180 if (unlikely(poll->head)) {
4181 /* already have a 2nd entry, fail a third attempt */
4182 if (req->io) {
4183 pt->error = -EINVAL;
4184 return;
4185 }
4186 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4187 if (!poll) {
4188 pt->error = -ENOMEM;
4189 return;
4190 }
4191 io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
4192 refcount_inc(&req->refs);
4193 poll->wait.private = req;
4194 req->io = (void *) poll;
4195 }
4196
4197 pt->error = 0;
4198 poll->head = head;
4199 add_wait_queue(head, &poll->wait);
4200}
4201
4202static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4203 struct poll_table_struct *p)
4204{
4205 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4206
4207 __io_queue_proc(&pt->req->apoll->poll, pt, head);
4208}
4209
d7718a9d
JA
4210static void io_async_task_func(struct callback_head *cb)
4211{
4212 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4213 struct async_poll *apoll = req->apoll;
4214 struct io_ring_ctx *ctx = req->ctx;
31067255 4215 bool canceled = false;
d7718a9d
JA
4216
4217 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4218
74ce6ce4 4219 if (io_poll_rewait(req, &apoll->poll)) {
d7718a9d 4220 spin_unlock_irq(&ctx->completion_lock);
74ce6ce4 4221 return;
d7718a9d
JA
4222 }
4223
31067255
JA
4224 /* If req is still hashed, it cannot have been canceled. Don't check. */
4225 if (hash_hashed(&req->hash_node)) {
74ce6ce4 4226 hash_del(&req->hash_node);
31067255
JA
4227 } else {
4228 canceled = READ_ONCE(apoll->poll.canceled);
4229 if (canceled) {
4230 io_cqring_fill_event(req, -ECANCELED);
4231 io_commit_cqring(ctx);
4232 }
2bae047e
JA
4233 }
4234
74ce6ce4
JA
4235 spin_unlock_irq(&ctx->completion_lock);
4236
44575a67
XW
 4237	/* restore ->work in case we need to retry */
4238 memcpy(&req->work, &apoll->work, sizeof(req->work));
31067255 4239 kfree(apoll);
44575a67 4240
31067255
JA
4241 if (!canceled) {
4242 __set_current_state(TASK_RUNNING);
4243 mutex_lock(&ctx->uring_lock);
4244 __io_queue_sqe(req, NULL);
4245 mutex_unlock(&ctx->uring_lock);
4246 } else {
2bae047e
JA
4247 io_cqring_ev_posted(ctx);
4248 req_set_fail_links(req);
44575a67 4249 io_double_put_req(req);
2bae047e 4250 }
d7718a9d
JA
4251}
4252
4253static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4254 void *key)
4255{
4256 struct io_kiocb *req = wait->private;
4257 struct io_poll_iocb *poll = &req->apoll->poll;
4258
4259 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
4260 key_to_poll(key));
4261
4262 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
4263}
4264
4265static void io_poll_req_insert(struct io_kiocb *req)
4266{
4267 struct io_ring_ctx *ctx = req->ctx;
4268 struct hlist_head *list;
4269
4270 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
4271 hlist_add_head(&req->hash_node, list);
4272}
4273
4274static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
4275 struct io_poll_iocb *poll,
4276 struct io_poll_table *ipt, __poll_t mask,
4277 wait_queue_func_t wake_func)
4278 __acquires(&ctx->completion_lock)
4279{
4280 struct io_ring_ctx *ctx = req->ctx;
4281 bool cancel = false;
4282
4283 poll->file = req->file;
18bceab1
JA
4284 io_init_poll_iocb(poll, mask, wake_func);
4285 poll->wait.private = req;
d7718a9d
JA
4286
4287 ipt->pt._key = mask;
4288 ipt->req = req;
4289 ipt->error = -EINVAL;
4290
d7718a9d
JA
4291 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
4292
4293 spin_lock_irq(&ctx->completion_lock);
4294 if (likely(poll->head)) {
4295 spin_lock(&poll->head->lock);
4296 if (unlikely(list_empty(&poll->wait.entry))) {
4297 if (ipt->error)
4298 cancel = true;
4299 ipt->error = 0;
4300 mask = 0;
4301 }
4302 if (mask || ipt->error)
4303 list_del_init(&poll->wait.entry);
4304 else if (cancel)
4305 WRITE_ONCE(poll->canceled, true);
4306 else if (!poll->done) /* actually waiting for an event */
4307 io_poll_req_insert(req);
4308 spin_unlock(&poll->head->lock);
4309 }
4310
4311 return mask;
4312}
4313
4314static bool io_arm_poll_handler(struct io_kiocb *req)
4315{
4316 const struct io_op_def *def = &io_op_defs[req->opcode];
4317 struct io_ring_ctx *ctx = req->ctx;
4318 struct async_poll *apoll;
4319 struct io_poll_table ipt;
4320 __poll_t mask, ret;
18bceab1 4321 bool had_io;
d7718a9d
JA
4322
4323 if (!req->file || !file_can_poll(req->file))
4324 return false;
4325 if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
4326 return false;
4327 if (!def->pollin && !def->pollout)
4328 return false;
4329
4330 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
4331 if (unlikely(!apoll))
4332 return false;
4333
4334 req->flags |= REQ_F_POLLED;
4335 memcpy(&apoll->work, &req->work, sizeof(req->work));
18bceab1 4336 had_io = req->io != NULL;
d7718a9d 4337
3537b6a7 4338 get_task_struct(current);
d7718a9d
JA
4339 req->task = current;
4340 req->apoll = apoll;
4341 INIT_HLIST_NODE(&req->hash_node);
4342
8755d97a 4343 mask = 0;
d7718a9d 4344 if (def->pollin)
8755d97a 4345 mask |= POLLIN | POLLRDNORM;
d7718a9d
JA
4346 if (def->pollout)
4347 mask |= POLLOUT | POLLWRNORM;
4348 mask |= POLLERR | POLLPRI;
4349
4350 ipt.pt._qproc = io_async_queue_proc;
4351
4352 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
4353 io_async_wake);
4354 if (ret) {
4355 ipt.error = 0;
18bceab1
JA
4356 /* only remove double add if we did it here */
4357 if (!had_io)
4358 io_poll_remove_double(req);
d7718a9d
JA
4359 spin_unlock_irq(&ctx->completion_lock);
4360 memcpy(&req->work, &apoll->work, sizeof(req->work));
4361 kfree(apoll);
4362 return false;
4363 }
4364 spin_unlock_irq(&ctx->completion_lock);
4365 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4366 apoll->poll.events);
4367 return true;
4368}
4369
4370static bool __io_poll_remove_one(struct io_kiocb *req,
4371 struct io_poll_iocb *poll)
221c5eb2 4372{
b41e9852 4373 bool do_complete = false;
221c5eb2
JA
4374
4375 spin_lock(&poll->head->lock);
4376 WRITE_ONCE(poll->canceled, true);
392edb45
JA
4377 if (!list_empty(&poll->wait.entry)) {
4378 list_del_init(&poll->wait.entry);
b41e9852 4379 do_complete = true;
221c5eb2
JA
4380 }
4381 spin_unlock(&poll->head->lock);
3bfa5bcb 4382 hash_del(&req->hash_node);
d7718a9d
JA
4383 return do_complete;
4384}
4385
4386static bool io_poll_remove_one(struct io_kiocb *req)
4387{
4388 bool do_complete;
4389
4390 if (req->opcode == IORING_OP_POLL_ADD) {
18bceab1 4391 io_poll_remove_double(req);
d7718a9d
JA
4392 do_complete = __io_poll_remove_one(req, &req->poll);
4393 } else {
3bfa5bcb
JA
4394 struct async_poll *apoll = req->apoll;
4395
d7718a9d 4396 /* non-poll requests have submit ref still */
3bfa5bcb
JA
4397 do_complete = __io_poll_remove_one(req, &apoll->poll);
4398 if (do_complete) {
d7718a9d 4399 io_put_req(req);
3bfa5bcb
JA
4400 /*
4401 * restore ->work because we will call
4402 * io_req_work_drop_env below when dropping the
4403 * final reference.
4404 */
4405 memcpy(&req->work, &apoll->work, sizeof(req->work));
4406 kfree(apoll);
4407 }
b1f573bd
XW
4408 }
4409
b41e9852
JA
4410 if (do_complete) {
4411 io_cqring_fill_event(req, -ECANCELED);
4412 io_commit_cqring(req->ctx);
4413 req->flags |= REQ_F_COMP_LOCKED;
4414 io_put_req(req);
4415 }
4416
4417 return do_complete;
221c5eb2
JA
4418}
4419
4420static void io_poll_remove_all(struct io_ring_ctx *ctx)
4421{
78076bb6 4422 struct hlist_node *tmp;
221c5eb2 4423 struct io_kiocb *req;
8e2e1faf 4424 int posted = 0, i;
221c5eb2
JA
4425
4426 spin_lock_irq(&ctx->completion_lock);
78076bb6
JA
4427 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4428 struct hlist_head *list;
4429
4430 list = &ctx->cancel_hash[i];
4431 hlist_for_each_entry_safe(req, tmp, list, hash_node)
8e2e1faf 4432 posted += io_poll_remove_one(req);
221c5eb2
JA
4433 }
4434 spin_unlock_irq(&ctx->completion_lock);
b41e9852 4435
8e2e1faf
JA
4436 if (posted)
4437 io_cqring_ev_posted(ctx);
221c5eb2
JA
4438}
4439
47f46768
JA
4440static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4441{
78076bb6 4442 struct hlist_head *list;
47f46768
JA
4443 struct io_kiocb *req;
4444
78076bb6
JA
4445 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4446 hlist_for_each_entry(req, list, hash_node) {
b41e9852
JA
4447 if (sqe_addr != req->user_data)
4448 continue;
4449 if (io_poll_remove_one(req))
eac406c6 4450 return 0;
b41e9852 4451 return -EALREADY;
47f46768
JA
4452 }
4453
4454 return -ENOENT;
4455}
4456
3529d8c2
JA
4457static int io_poll_remove_prep(struct io_kiocb *req,
4458 const struct io_uring_sqe *sqe)
0969e783 4459{
0969e783
JA
4460 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4461 return -EINVAL;
4462 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4463 sqe->poll_events)
4464 return -EINVAL;
4465
4466 req->poll.addr = READ_ONCE(sqe->addr);
0969e783
JA
4467 return 0;
4468}
4469
221c5eb2
JA
4470/*
4471 * Find a running poll command that matches one specified in sqe->addr,
4472 * and remove it if found.
4473 */
fc4df999 4474static int io_poll_remove(struct io_kiocb *req)
221c5eb2
JA
4475{
4476 struct io_ring_ctx *ctx = req->ctx;
0969e783 4477 u64 addr;
47f46768 4478 int ret;
221c5eb2 4479
0969e783 4480 addr = req->poll.addr;
221c5eb2 4481 spin_lock_irq(&ctx->completion_lock);
0969e783 4482 ret = io_poll_cancel(ctx, addr);
221c5eb2
JA
4483 spin_unlock_irq(&ctx->completion_lock);
4484
78e19bbe 4485 io_cqring_add_event(req, ret);
4e88d6e7
JA
4486 if (ret < 0)
4487 req_set_fail_links(req);
e65ef56d 4488 io_put_req(req);
221c5eb2
JA
4489 return 0;
4490}
4491
221c5eb2
JA
4492static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4493 void *key)
4494{
c2f2eb7d
JA
4495 struct io_kiocb *req = wait->private;
4496 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4497
d7718a9d 4498 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
221c5eb2
JA
4499}
4500
221c5eb2
JA
4501static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4502 struct poll_table_struct *p)
4503{
4504 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4505
d7718a9d 4506 __io_queue_proc(&pt->req->poll, pt, head);
eac406c6
JA
4507}
4508
3529d8c2 4509static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
221c5eb2
JA
4510{
4511 struct io_poll_iocb *poll = &req->poll;
221c5eb2 4512 u16 events;
221c5eb2
JA
4513
4514 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4515 return -EINVAL;
4516 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4517 return -EINVAL;
09bb8394
JA
4518 if (!poll->file)
4519 return -EBADF;
221c5eb2 4520
221c5eb2
JA
4521 events = READ_ONCE(sqe->poll_events);
4522 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
b41e9852 4523
3537b6a7 4524 get_task_struct(current);
b41e9852 4525 req->task = current;
0969e783
JA
4526 return 0;
4527}
4528
014db007 4529static int io_poll_add(struct io_kiocb *req)
0969e783
JA
4530{
4531 struct io_poll_iocb *poll = &req->poll;
4532 struct io_ring_ctx *ctx = req->ctx;
4533 struct io_poll_table ipt;
0969e783 4534 __poll_t mask;
0969e783 4535
78076bb6 4536 INIT_HLIST_NODE(&req->hash_node);
36703247 4537 INIT_LIST_HEAD(&req->list);
d7718a9d 4538 ipt.pt._qproc = io_poll_queue_proc;
36703247 4539
d7718a9d
JA
4540 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4541 io_poll_wake);
221c5eb2 4542
8c838788 4543 if (mask) { /* no async, we'd stolen it */
221c5eb2 4544 ipt.error = 0;
b0dd8a41 4545 io_poll_complete(req, mask, 0);
221c5eb2 4546 }
221c5eb2
JA
4547 spin_unlock_irq(&ctx->completion_lock);
4548
8c838788
JA
4549 if (mask) {
4550 io_cqring_ev_posted(ctx);
014db007 4551 io_put_req(req);
221c5eb2 4552 }
8c838788 4553 return ipt.error;
221c5eb2
JA
4554}
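/*
 * Editor's note: a hedged sketch (not part of this file) of arming a one-shot
 * poll from userspace, matching the IORING_OP_POLL_ADD handling above.
 * "ring", "fd" and "cookie" are assumptions; the cookie is only needed if the
 * poll will later be removed or cancelled by user_data.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data(sqe, cookie);	// match key for POLL_REMOVE / cancel
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);		// cqe->res carries the signalled poll mask
 *	io_uring_cqe_seen(&ring, cqe);
 */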
4555
5262f567
JA
4556static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4557{
ad8a48ac
JA
4558 struct io_timeout_data *data = container_of(timer,
4559 struct io_timeout_data, timer);
4560 struct io_kiocb *req = data->req;
4561 struct io_ring_ctx *ctx = req->ctx;
5262f567
JA
4562 unsigned long flags;
4563
5262f567
JA
4564 atomic_inc(&ctx->cq_timeouts);
4565
4566 spin_lock_irqsave(&ctx->completion_lock, flags);
ef03681a 4567 /*
11365043
JA
4568 * We could be racing with timeout deletion. If the list is empty,
4569 * then timeout lookup already found it and will be handling it.
ef03681a 4570 */
bfe68a22 4571 if (!list_empty(&req->list))
11365043 4572 list_del_init(&req->list);
5262f567 4573
78e19bbe 4574 io_cqring_fill_event(req, -ETIME);
5262f567
JA
4575 io_commit_cqring(ctx);
4576 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4577
4578 io_cqring_ev_posted(ctx);
4e88d6e7 4579 req_set_fail_links(req);
5262f567
JA
4580 io_put_req(req);
4581 return HRTIMER_NORESTART;
4582}
4583
47f46768
JA
4584static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
4585{
4586 struct io_kiocb *req;
4587 int ret = -ENOENT;
4588
4589 list_for_each_entry(req, &ctx->timeout_list, list) {
4590 if (user_data == req->user_data) {
4591 list_del_init(&req->list);
4592 ret = 0;
4593 break;
4594 }
4595 }
4596
4597 if (ret == -ENOENT)
4598 return ret;
4599
2d28390a 4600 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
47f46768
JA
4601 if (ret == -1)
4602 return -EALREADY;
4603
4e88d6e7 4604 req_set_fail_links(req);
47f46768
JA
4605 io_cqring_fill_event(req, -ECANCELED);
4606 io_put_req(req);
4607 return 0;
4608}
4609
3529d8c2
JA
4610static int io_timeout_remove_prep(struct io_kiocb *req,
4611 const struct io_uring_sqe *sqe)
b29472ee 4612{
b29472ee
JA
4613 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4614 return -EINVAL;
4615 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
4616 return -EINVAL;
4617
4618 req->timeout.addr = READ_ONCE(sqe->addr);
4619 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
4620 if (req->timeout.flags)
4621 return -EINVAL;
4622
b29472ee
JA
4623 return 0;
4624}
4625
11365043
JA
4626/*
4627 * Remove or update an existing timeout command
4628 */
fc4df999 4629static int io_timeout_remove(struct io_kiocb *req)
11365043
JA
4630{
4631 struct io_ring_ctx *ctx = req->ctx;
47f46768 4632 int ret;
11365043 4633
11365043 4634 spin_lock_irq(&ctx->completion_lock);
b29472ee 4635 ret = io_timeout_cancel(ctx, req->timeout.addr);
11365043 4636
47f46768 4637 io_cqring_fill_event(req, ret);
11365043
JA
4638 io_commit_cqring(ctx);
4639 spin_unlock_irq(&ctx->completion_lock);
5262f567 4640 io_cqring_ev_posted(ctx);
4e88d6e7
JA
4641 if (ret < 0)
4642 req_set_fail_links(req);
ec9c02ad 4643 io_put_req(req);
11365043 4644 return 0;
5262f567
JA
4645}
4646
3529d8c2 4647static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2d28390a 4648 bool is_timeout_link)
5262f567 4649{
ad8a48ac 4650 struct io_timeout_data *data;
a41525ab 4651 unsigned flags;
56080b02 4652 u32 off = READ_ONCE(sqe->off);
5262f567 4653
ad8a48ac 4654 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5262f567 4655 return -EINVAL;
ad8a48ac 4656 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
a41525ab 4657 return -EINVAL;
56080b02 4658 if (off && is_timeout_link)
2d28390a 4659 return -EINVAL;
a41525ab
JA
4660 flags = READ_ONCE(sqe->timeout_flags);
4661 if (flags & ~IORING_TIMEOUT_ABS)
5262f567 4662 return -EINVAL;
bdf20073 4663
bfe68a22 4664 req->timeout.off = off;
26a61679 4665
3529d8c2 4666 if (!req->io && io_alloc_async_ctx(req))
26a61679
JA
4667 return -ENOMEM;
4668
4669 data = &req->io->timeout;
ad8a48ac 4670 data->req = req;
ad8a48ac
JA
4671 req->flags |= REQ_F_TIMEOUT;
4672
4673 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5262f567
JA
4674 return -EFAULT;
4675
11365043 4676 if (flags & IORING_TIMEOUT_ABS)
ad8a48ac 4677 data->mode = HRTIMER_MODE_ABS;
11365043 4678 else
ad8a48ac 4679 data->mode = HRTIMER_MODE_REL;
11365043 4680
ad8a48ac
JA
4681 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
4682 return 0;
4683}
4684
fc4df999 4685static int io_timeout(struct io_kiocb *req)
ad8a48ac 4686{
ad8a48ac 4687 struct io_ring_ctx *ctx = req->ctx;
bfe68a22 4688 struct io_timeout_data *data = &req->io->timeout;
ad8a48ac 4689 struct list_head *entry;
bfe68a22 4690 u32 tail, off = req->timeout.off;
ad8a48ac 4691
733f5c95 4692 spin_lock_irq(&ctx->completion_lock);
93bd25bb 4693
5262f567
JA
4694 /*
 4695	 * sqe->off holds how many events need to occur for this
93bd25bb
JA
4696 * timeout event to be satisfied. If it isn't set, then this is
 4697	 * a pure timeout request and the sequence isn't used.
5262f567 4698 */
bfe68a22 4699 if (!off) {
93bd25bb 4700 req->flags |= REQ_F_TIMEOUT_NOSEQ;
93bd25bb
JA
4701 entry = ctx->timeout_list.prev;
4702 goto add;
4703 }
5262f567 4704
bfe68a22
PB
4705 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
4706 req->timeout.target_seq = tail + off;
5262f567
JA
4707
4708 /*
4709 * Insertion sort, ensuring the first entry in the list is always
4710 * the one we need first.
4711 */
5262f567
JA
4712 list_for_each_prev(entry, &ctx->timeout_list) {
4713 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
5262f567 4714
93bd25bb
JA
4715 if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
4716 continue;
bfe68a22
PB
4717 /* nxt.seq is behind @tail, otherwise would've been completed */
4718 if (off >= nxt->timeout.target_seq - tail)
5262f567
JA
4719 break;
4720 }
93bd25bb 4721add:
5262f567 4722 list_add(&req->list, entry);
ad8a48ac
JA
4723 data->timer.function = io_timeout_fn;
4724 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5262f567 4725 spin_unlock_irq(&ctx->completion_lock);
5262f567
JA
4726 return 0;
4727}
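/*
 * Editor's note: a minimal userspace sketch (not part of this file) of the
 * two timeout flavours handled above: a pure relative timeout and a
 * count-based one. "ring" is an assumption and error handling is elided.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);	// count == 0: pure timeout, -ETIME on expiry
 *	io_uring_submit(&ring);
 *
 *	// Passing a non-zero count instead completes the timeout early once that
 *	// many completions have been posted, matching the target_seq logic above.
 */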
5262f567 4728
62755e35
JA
4729static bool io_cancel_cb(struct io_wq_work *work, void *data)
4730{
4731 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
4732
4733 return req->user_data == (unsigned long) data;
4734}
4735
e977d6d3 4736static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
62755e35 4737{
62755e35 4738 enum io_wq_cancel cancel_ret;
62755e35
JA
4739 int ret = 0;
4740
62755e35
JA
4741 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
4742 switch (cancel_ret) {
4743 case IO_WQ_CANCEL_OK:
4744 ret = 0;
4745 break;
4746 case IO_WQ_CANCEL_RUNNING:
4747 ret = -EALREADY;
4748 break;
4749 case IO_WQ_CANCEL_NOTFOUND:
4750 ret = -ENOENT;
4751 break;
4752 }
4753
e977d6d3
JA
4754 return ret;
4755}
4756
47f46768
JA
4757static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
4758 struct io_kiocb *req, __u64 sqe_addr,
014db007 4759 int success_ret)
47f46768
JA
4760{
4761 unsigned long flags;
4762 int ret;
4763
4764 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
4765 if (ret != -ENOENT) {
4766 spin_lock_irqsave(&ctx->completion_lock, flags);
4767 goto done;
4768 }
4769
4770 spin_lock_irqsave(&ctx->completion_lock, flags);
4771 ret = io_timeout_cancel(ctx, sqe_addr);
4772 if (ret != -ENOENT)
4773 goto done;
4774 ret = io_poll_cancel(ctx, sqe_addr);
4775done:
b0dd8a41
JA
4776 if (!ret)
4777 ret = success_ret;
47f46768
JA
4778 io_cqring_fill_event(req, ret);
4779 io_commit_cqring(ctx);
4780 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4781 io_cqring_ev_posted(ctx);
4782
4e88d6e7
JA
4783 if (ret < 0)
4784 req_set_fail_links(req);
014db007 4785 io_put_req(req);
47f46768
JA
4786}
4787
3529d8c2
JA
4788static int io_async_cancel_prep(struct io_kiocb *req,
4789 const struct io_uring_sqe *sqe)
e977d6d3 4790{
fbf23849 4791 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
e977d6d3
JA
4792 return -EINVAL;
4793 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
4794 sqe->cancel_flags)
4795 return -EINVAL;
4796
fbf23849
JA
4797 req->cancel.addr = READ_ONCE(sqe->addr);
4798 return 0;
4799}
4800
014db007 4801static int io_async_cancel(struct io_kiocb *req)
fbf23849
JA
4802{
4803 struct io_ring_ctx *ctx = req->ctx;
fbf23849 4804
014db007 4805 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5262f567
JA
4806 return 0;
4807}
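/*
 * Editor's note: a hedged sketch (not part of this file) of requesting async
 * cancellation from userspace. The liburing helper shown here takes the
 * original request's user_data pointer as the match key, which is what
 * io_async_cancel_one() compares against above; "ring" and "cookie" are
 * assumptions and the helper's exact signature has varied across liburing
 * releases.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel(sqe, cookie, 0);	// cookie == user_data of the target SQE
 *	io_uring_submit(&ring);
 *	// completion: 0 on success, -ENOENT if not found, -EALREADY if already running
 */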
4808
05f3fb3c
JA
4809static int io_files_update_prep(struct io_kiocb *req,
4810 const struct io_uring_sqe *sqe)
4811{
4812 if (sqe->flags || sqe->ioprio || sqe->rw_flags)
4813 return -EINVAL;
4814
4815 req->files_update.offset = READ_ONCE(sqe->off);
4816 req->files_update.nr_args = READ_ONCE(sqe->len);
4817 if (!req->files_update.nr_args)
4818 return -EINVAL;
4819 req->files_update.arg = READ_ONCE(sqe->addr);
4820 return 0;
4821}
4822
4823static int io_files_update(struct io_kiocb *req, bool force_nonblock)
fbf23849
JA
4824{
4825 struct io_ring_ctx *ctx = req->ctx;
05f3fb3c
JA
4826 struct io_uring_files_update up;
4827 int ret;
fbf23849 4828
f86cd20c 4829 if (force_nonblock)
05f3fb3c 4830 return -EAGAIN;
05f3fb3c
JA
4831
4832 up.offset = req->files_update.offset;
4833 up.fds = req->files_update.arg;
4834
4835 mutex_lock(&ctx->uring_lock);
4836 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
4837 mutex_unlock(&ctx->uring_lock);
4838
4839 if (ret < 0)
4840 req_set_fail_links(req);
4841 io_cqring_add_event(req, ret);
4842 io_put_req(req);
5262f567
JA
4843 return 0;
4844}
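/*
 * Editor's note: the files-update operation has a syscall-level twin; a
 * hedged sketch (not part of this file) of replacing one registered-file slot
 * from userspace via liburing. "ring", "new_fd" and slot 3 are assumptions.
 *
 *	int fds[1] = { new_fd };
 *
 *	// replace registered file slot 3; returns the number of slots updated
 *	int ret = io_uring_register_files_update(&ring, 3, fds, 1);
 */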
4845
3529d8c2
JA
4846static int io_req_defer_prep(struct io_kiocb *req,
4847 const struct io_uring_sqe *sqe)
f67676d1 4848{
e781573e 4849 ssize_t ret = 0;
f67676d1 4850
f1d96a8f
PB
4851 if (!sqe)
4852 return 0;
4853
f86cd20c
JA
4854 if (io_op_defs[req->opcode].file_table) {
4855 ret = io_grab_files(req);
4856 if (unlikely(ret))
4857 return ret;
4858 }
4859
cccf0ee8
JA
4860 io_req_work_grab_env(req, &io_op_defs[req->opcode]);
4861
d625c6ee 4862 switch (req->opcode) {
e781573e
JA
4863 case IORING_OP_NOP:
4864 break;
f67676d1
JA
4865 case IORING_OP_READV:
4866 case IORING_OP_READ_FIXED:
3a6820f2 4867 case IORING_OP_READ:
3529d8c2 4868 ret = io_read_prep(req, sqe, true);
f67676d1
JA
4869 break;
4870 case IORING_OP_WRITEV:
4871 case IORING_OP_WRITE_FIXED:
3a6820f2 4872 case IORING_OP_WRITE:
3529d8c2 4873 ret = io_write_prep(req, sqe, true);
f67676d1 4874 break;
0969e783 4875 case IORING_OP_POLL_ADD:
3529d8c2 4876 ret = io_poll_add_prep(req, sqe);
0969e783
JA
4877 break;
4878 case IORING_OP_POLL_REMOVE:
3529d8c2 4879 ret = io_poll_remove_prep(req, sqe);
0969e783 4880 break;
8ed8d3c3 4881 case IORING_OP_FSYNC:
3529d8c2 4882 ret = io_prep_fsync(req, sqe);
8ed8d3c3
JA
4883 break;
4884 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2 4885 ret = io_prep_sfr(req, sqe);
8ed8d3c3 4886 break;
03b1230c 4887 case IORING_OP_SENDMSG:
fddaface 4888 case IORING_OP_SEND:
3529d8c2 4889 ret = io_sendmsg_prep(req, sqe);
03b1230c
JA
4890 break;
4891 case IORING_OP_RECVMSG:
fddaface 4892 case IORING_OP_RECV:
3529d8c2 4893 ret = io_recvmsg_prep(req, sqe);
03b1230c 4894 break;
f499a021 4895 case IORING_OP_CONNECT:
3529d8c2 4896 ret = io_connect_prep(req, sqe);
f499a021 4897 break;
2d28390a 4898 case IORING_OP_TIMEOUT:
3529d8c2 4899 ret = io_timeout_prep(req, sqe, false);
b7bb4f7d 4900 break;
b29472ee 4901 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2 4902 ret = io_timeout_remove_prep(req, sqe);
b29472ee 4903 break;
fbf23849 4904 case IORING_OP_ASYNC_CANCEL:
3529d8c2 4905 ret = io_async_cancel_prep(req, sqe);
fbf23849 4906 break;
2d28390a 4907 case IORING_OP_LINK_TIMEOUT:
3529d8c2 4908 ret = io_timeout_prep(req, sqe, true);
b7bb4f7d 4909 break;
8ed8d3c3 4910 case IORING_OP_ACCEPT:
3529d8c2 4911 ret = io_accept_prep(req, sqe);
8ed8d3c3 4912 break;
d63d1b5e
JA
4913 case IORING_OP_FALLOCATE:
4914 ret = io_fallocate_prep(req, sqe);
4915 break;
15b71abe
JA
4916 case IORING_OP_OPENAT:
4917 ret = io_openat_prep(req, sqe);
4918 break;
b5dba59e
JA
4919 case IORING_OP_CLOSE:
4920 ret = io_close_prep(req, sqe);
4921 break;
05f3fb3c
JA
4922 case IORING_OP_FILES_UPDATE:
4923 ret = io_files_update_prep(req, sqe);
4924 break;
eddc7ef5
JA
4925 case IORING_OP_STATX:
4926 ret = io_statx_prep(req, sqe);
4927 break;
4840e418
JA
4928 case IORING_OP_FADVISE:
4929 ret = io_fadvise_prep(req, sqe);
4930 break;
c1ca757b
JA
4931 case IORING_OP_MADVISE:
4932 ret = io_madvise_prep(req, sqe);
4933 break;
cebdb986
JA
4934 case IORING_OP_OPENAT2:
4935 ret = io_openat2_prep(req, sqe);
4936 break;
3e4827b0
JA
4937 case IORING_OP_EPOLL_CTL:
4938 ret = io_epoll_ctl_prep(req, sqe);
4939 break;
7d67af2c
PB
4940 case IORING_OP_SPLICE:
4941 ret = io_splice_prep(req, sqe);
4942 break;
ddf0322d
JA
4943 case IORING_OP_PROVIDE_BUFFERS:
4944 ret = io_provide_buffers_prep(req, sqe);
4945 break;
067524e9
JA
4946 case IORING_OP_REMOVE_BUFFERS:
4947 ret = io_remove_buffers_prep(req, sqe);
4948 break;
f2a8d5c7
PB
4949 case IORING_OP_TEE:
4950 ret = io_tee_prep(req, sqe);
4951 break;
f67676d1 4952 default:
e781573e
JA
4953 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
4954 req->opcode);
4955 ret = -EINVAL;
b7bb4f7d 4956 break;
f67676d1
JA
4957 }
4958
b7bb4f7d 4959 return ret;
f67676d1
JA
4960}
4961
3529d8c2 4962static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
de0617e4 4963{
a197f664 4964 struct io_ring_ctx *ctx = req->ctx;
f67676d1 4965 int ret;
de0617e4 4966
9d858b21 4967	/* Still need defer if there is a pending req in the defer list. */
4ee36314 4968 if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
de0617e4
JA
4969 return 0;
4970
650b5481
PB
4971 if (!req->io) {
4972 if (io_alloc_async_ctx(req))
4973 return -EAGAIN;
4974 ret = io_req_defer_prep(req, sqe);
4975 if (ret < 0)
4976 return ret;
4977 }
2d28390a 4978
de0617e4 4979 spin_lock_irq(&ctx->completion_lock);
9d858b21 4980 if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
de0617e4 4981 spin_unlock_irq(&ctx->completion_lock);
de0617e4
JA
4982 return 0;
4983 }
4984
915967f6 4985 trace_io_uring_defer(ctx, req, req->user_data);
de0617e4
JA
4986 list_add_tail(&req->list, &ctx->defer_list);
4987 spin_unlock_irq(&ctx->completion_lock);
4988 return -EIOCBQUEUED;
4989}
4990
99bc4c38
PB
4991static void io_cleanup_req(struct io_kiocb *req)
4992{
4993 struct io_async_ctx *io = req->io;
4994
4995 switch (req->opcode) {
4996 case IORING_OP_READV:
4997 case IORING_OP_READ_FIXED:
4998 case IORING_OP_READ:
bcda7baa
JA
4999 if (req->flags & REQ_F_BUFFER_SELECTED)
5000 kfree((void *)(unsigned long)req->rw.addr);
5001 /* fallthrough */
99bc4c38
PB
5002 case IORING_OP_WRITEV:
5003 case IORING_OP_WRITE_FIXED:
5004 case IORING_OP_WRITE:
5005 if (io->rw.iov != io->rw.fast_iov)
5006 kfree(io->rw.iov);
5007 break;
99bc4c38 5008 case IORING_OP_RECVMSG:
52de1fe1
JA
5009 if (req->flags & REQ_F_BUFFER_SELECTED)
5010 kfree(req->sr_msg.kbuf);
5011 /* fallthrough */
5012 case IORING_OP_SENDMSG:
99bc4c38
PB
5013 if (io->msg.iov != io->msg.fast_iov)
5014 kfree(io->msg.iov);
5015 break;
bcda7baa
JA
5016 case IORING_OP_RECV:
5017 if (req->flags & REQ_F_BUFFER_SELECTED)
5018 kfree(req->sr_msg.kbuf);
5019 break;
8fef80bf
PB
5020 case IORING_OP_OPENAT:
5021 case IORING_OP_OPENAT2:
8fef80bf 5022 break;
7d67af2c 5023 case IORING_OP_SPLICE:
f2a8d5c7 5024 case IORING_OP_TEE:
7d67af2c
PB
5025 io_put_file(req, req->splice.file_in,
5026 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5027 break;
99bc4c38
PB
5028 }
5029
5030 req->flags &= ~REQ_F_NEED_CLEANUP;
5031}
5032
3529d8c2 5033static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
014db007 5034 bool force_nonblock)
2b188cc1 5035{
a197f664 5036 struct io_ring_ctx *ctx = req->ctx;
d625c6ee 5037 int ret;
2b188cc1 5038
d625c6ee 5039 switch (req->opcode) {
2b188cc1 5040 case IORING_OP_NOP:
78e19bbe 5041 ret = io_nop(req);
2b188cc1
JA
5042 break;
5043 case IORING_OP_READV:
edafccee 5044 case IORING_OP_READ_FIXED:
3a6820f2 5045 case IORING_OP_READ:
3529d8c2
JA
5046 if (sqe) {
5047 ret = io_read_prep(req, sqe, force_nonblock);
5048 if (ret < 0)
5049 break;
5050 }
014db007 5051 ret = io_read(req, force_nonblock);
edafccee 5052 break;
3529d8c2 5053 case IORING_OP_WRITEV:
edafccee 5054 case IORING_OP_WRITE_FIXED:
3a6820f2 5055 case IORING_OP_WRITE:
3529d8c2
JA
5056 if (sqe) {
5057 ret = io_write_prep(req, sqe, force_nonblock);
5058 if (ret < 0)
5059 break;
5060 }
014db007 5061 ret = io_write(req, force_nonblock);
2b188cc1 5062 break;
c992fe29 5063 case IORING_OP_FSYNC:
3529d8c2
JA
5064 if (sqe) {
5065 ret = io_prep_fsync(req, sqe);
5066 if (ret < 0)
5067 break;
5068 }
014db007 5069 ret = io_fsync(req, force_nonblock);
c992fe29 5070 break;
221c5eb2 5071 case IORING_OP_POLL_ADD:
3529d8c2
JA
5072 if (sqe) {
5073 ret = io_poll_add_prep(req, sqe);
5074 if (ret)
5075 break;
5076 }
014db007 5077 ret = io_poll_add(req);
221c5eb2
JA
5078 break;
5079 case IORING_OP_POLL_REMOVE:
3529d8c2
JA
5080 if (sqe) {
5081 ret = io_poll_remove_prep(req, sqe);
5082 if (ret < 0)
5083 break;
5084 }
fc4df999 5085 ret = io_poll_remove(req);
221c5eb2 5086 break;
5d17b4a4 5087 case IORING_OP_SYNC_FILE_RANGE:
3529d8c2
JA
5088 if (sqe) {
5089 ret = io_prep_sfr(req, sqe);
5090 if (ret < 0)
5091 break;
5092 }
014db007 5093 ret = io_sync_file_range(req, force_nonblock);
5d17b4a4 5094 break;
0fa03c62 5095 case IORING_OP_SENDMSG:
fddaface 5096 case IORING_OP_SEND:
3529d8c2
JA
5097 if (sqe) {
5098 ret = io_sendmsg_prep(req, sqe);
5099 if (ret < 0)
5100 break;
5101 }
fddaface 5102 if (req->opcode == IORING_OP_SENDMSG)
014db007 5103 ret = io_sendmsg(req, force_nonblock);
fddaface 5104 else
014db007 5105 ret = io_send(req, force_nonblock);
0fa03c62 5106 break;
aa1fa28f 5107 case IORING_OP_RECVMSG:
fddaface 5108 case IORING_OP_RECV:
3529d8c2
JA
5109 if (sqe) {
5110 ret = io_recvmsg_prep(req, sqe);
5111 if (ret)
5112 break;
5113 }
fddaface 5114 if (req->opcode == IORING_OP_RECVMSG)
014db007 5115 ret = io_recvmsg(req, force_nonblock);
fddaface 5116 else
014db007 5117 ret = io_recv(req, force_nonblock);
aa1fa28f 5118 break;
5262f567 5119 case IORING_OP_TIMEOUT:
3529d8c2
JA
5120 if (sqe) {
5121 ret = io_timeout_prep(req, sqe, false);
5122 if (ret)
5123 break;
5124 }
fc4df999 5125 ret = io_timeout(req);
5262f567 5126 break;
11365043 5127 case IORING_OP_TIMEOUT_REMOVE:
3529d8c2
JA
5128 if (sqe) {
5129 ret = io_timeout_remove_prep(req, sqe);
5130 if (ret)
5131 break;
5132 }
fc4df999 5133 ret = io_timeout_remove(req);
11365043 5134 break;
17f2fe35 5135 case IORING_OP_ACCEPT:
3529d8c2
JA
5136 if (sqe) {
5137 ret = io_accept_prep(req, sqe);
5138 if (ret)
5139 break;
5140 }
014db007 5141 ret = io_accept(req, force_nonblock);
17f2fe35 5142 break;
f8e85cf2 5143 case IORING_OP_CONNECT:
3529d8c2
JA
5144 if (sqe) {
5145 ret = io_connect_prep(req, sqe);
5146 if (ret)
5147 break;
5148 }
014db007 5149 ret = io_connect(req, force_nonblock);
f8e85cf2 5150 break;
62755e35 5151 case IORING_OP_ASYNC_CANCEL:
3529d8c2
JA
5152 if (sqe) {
5153 ret = io_async_cancel_prep(req, sqe);
5154 if (ret)
5155 break;
5156 }
014db007 5157 ret = io_async_cancel(req);
62755e35 5158 break;
d63d1b5e
JA
5159 case IORING_OP_FALLOCATE:
5160 if (sqe) {
5161 ret = io_fallocate_prep(req, sqe);
5162 if (ret)
5163 break;
5164 }
014db007 5165 ret = io_fallocate(req, force_nonblock);
d63d1b5e 5166 break;
15b71abe
JA
5167 case IORING_OP_OPENAT:
5168 if (sqe) {
5169 ret = io_openat_prep(req, sqe);
5170 if (ret)
5171 break;
5172 }
014db007 5173 ret = io_openat(req, force_nonblock);
15b71abe 5174 break;
b5dba59e
JA
5175 case IORING_OP_CLOSE:
5176 if (sqe) {
5177 ret = io_close_prep(req, sqe);
5178 if (ret)
5179 break;
5180 }
014db007 5181 ret = io_close(req, force_nonblock);
b5dba59e 5182 break;
05f3fb3c
JA
5183 case IORING_OP_FILES_UPDATE:
5184 if (sqe) {
5185 ret = io_files_update_prep(req, sqe);
5186 if (ret)
5187 break;
5188 }
5189 ret = io_files_update(req, force_nonblock);
5190 break;
eddc7ef5
JA
5191 case IORING_OP_STATX:
5192 if (sqe) {
5193 ret = io_statx_prep(req, sqe);
5194 if (ret)
5195 break;
5196 }
014db007 5197 ret = io_statx(req, force_nonblock);
eddc7ef5 5198 break;
4840e418
JA
5199 case IORING_OP_FADVISE:
5200 if (sqe) {
5201 ret = io_fadvise_prep(req, sqe);
5202 if (ret)
5203 break;
5204 }
014db007 5205 ret = io_fadvise(req, force_nonblock);
4840e418 5206 break;
c1ca757b
JA
5207 case IORING_OP_MADVISE:
5208 if (sqe) {
5209 ret = io_madvise_prep(req, sqe);
5210 if (ret)
5211 break;
5212 }
014db007 5213 ret = io_madvise(req, force_nonblock);
c1ca757b 5214 break;
cebdb986
JA
5215 case IORING_OP_OPENAT2:
5216 if (sqe) {
5217 ret = io_openat2_prep(req, sqe);
5218 if (ret)
5219 break;
5220 }
014db007 5221 ret = io_openat2(req, force_nonblock);
cebdb986 5222 break;
3e4827b0
JA
5223 case IORING_OP_EPOLL_CTL:
5224 if (sqe) {
5225 ret = io_epoll_ctl_prep(req, sqe);
5226 if (ret)
5227 break;
5228 }
014db007 5229 ret = io_epoll_ctl(req, force_nonblock);
3e4827b0 5230 break;
7d67af2c
PB
5231 case IORING_OP_SPLICE:
5232 if (sqe) {
5233 ret = io_splice_prep(req, sqe);
5234 if (ret < 0)
5235 break;
5236 }
014db007 5237 ret = io_splice(req, force_nonblock);
7d67af2c 5238 break;
ddf0322d
JA
5239 case IORING_OP_PROVIDE_BUFFERS:
5240 if (sqe) {
5241 ret = io_provide_buffers_prep(req, sqe);
5242 if (ret)
5243 break;
5244 }
5245 ret = io_provide_buffers(req, force_nonblock);
5246 break;
067524e9
JA
5247 case IORING_OP_REMOVE_BUFFERS:
5248 if (sqe) {
5249 ret = io_remove_buffers_prep(req, sqe);
5250 if (ret)
5251 break;
5252 }
5253 ret = io_remove_buffers(req, force_nonblock);
3e4827b0 5254 break;
f2a8d5c7
PB
5255 case IORING_OP_TEE:
5256 if (sqe) {
5257 ret = io_tee_prep(req, sqe);
5258 if (ret < 0)
5259 break;
5260 }
5261 ret = io_tee(req, force_nonblock);
5262 break;
2b188cc1
JA
5263 default:
5264 ret = -EINVAL;
5265 break;
5266 }
5267
def596e9
JA
5268 if (ret)
5269 return ret;
5270
b532576e
JA
5271 /* If the op doesn't have a file, we're not polling for it */
5272 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
11ba820b
JA
5273 const bool in_async = io_wq_current_is_worker();
5274
9e645e11 5275 if (req->result == -EAGAIN)
def596e9
JA
5276 return -EAGAIN;
5277
11ba820b
JA
5278 /* workqueue context doesn't hold uring_lock, grab it now */
5279 if (in_async)
5280 mutex_lock(&ctx->uring_lock);
5281
def596e9 5282 io_iopoll_req_issued(req);
11ba820b
JA
5283
5284 if (in_async)
5285 mutex_unlock(&ctx->uring_lock);
def596e9
JA
5286 }
5287
5288 return 0;
2b188cc1
JA
5289}
5290
d4c81f38
PB
5291static void io_arm_async_linked_timeout(struct io_kiocb *req)
5292{
5293 struct io_kiocb *link;
5294
5295 /* link head's timeout is queued in io_queue_async_work() */
5296 if (!(req->flags & REQ_F_QUEUE_TIMEOUT))
5297 return;
5298
5299 link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
5300 io_queue_linked_timeout(link);
5301}
5302
561fb04a 5303static void io_wq_submit_work(struct io_wq_work **workptr)
2b188cc1 5304{
561fb04a 5305 struct io_wq_work *work = *workptr;
2b188cc1 5306 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
561fb04a 5307 int ret = 0;
2b188cc1 5308
d4c81f38
PB
5309 io_arm_async_linked_timeout(req);
5310
0c9d5ccd
JA
5311 /* if NO_CANCEL is set, we must still run the work */
5312 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
5313 IO_WQ_WORK_CANCEL) {
561fb04a 5314 ret = -ECANCELED;
0c9d5ccd 5315 }
31b51510 5316
561fb04a 5317 if (!ret) {
561fb04a 5318 do {
014db007 5319 ret = io_issue_sqe(req, NULL, false);
561fb04a
JA
5320 /*
5321 * We can get EAGAIN for polled IO even though we're
5322 * forcing a sync submission from here, since we can't
5323 * wait for request slots on the block side.
5324 */
5325 if (ret != -EAGAIN)
5326 break;
5327 cond_resched();
5328 } while (1);
5329 }
31b51510 5330
561fb04a 5331 if (ret) {
4e88d6e7 5332 req_set_fail_links(req);
78e19bbe 5333 io_cqring_add_event(req, ret);
817869d2 5334 io_put_req(req);
edafccee 5335 }
2b188cc1 5336
e9fd9396 5337 io_steal_work(req, workptr);
2b188cc1
JA
5338}
5339
65e19f54
JA
5340static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5341 int index)
5342{
5343 struct fixed_file_table *table;
5344
05f3fb3c 5345 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
84695089 5346 return table->files[index & IORING_FILE_TABLE_MASK];
65e19f54
JA
5347}
5348
8da11c19
PB
5349static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5350 int fd, struct file **out_file, bool fixed)
09bb8394 5351{
a197f664 5352 struct io_ring_ctx *ctx = req->ctx;
8da11c19 5353 struct file *file;
09bb8394 5354
8da11c19 5355 if (fixed) {
05f3fb3c 5356 if (unlikely(!ctx->file_data ||
09bb8394
JA
5357 (unsigned) fd >= ctx->nr_user_files))
5358 return -EBADF;
b7620121 5359 fd = array_index_nospec(fd, ctx->nr_user_files);
8da11c19 5360 file = io_file_from_index(ctx, fd);
fd2206e4
JA
5361 if (file) {
5362 req->fixed_file_refs = ctx->file_data->cur_refs;
5363 percpu_ref_get(req->fixed_file_refs);
5364 }
09bb8394 5365 } else {
c826bd7a 5366 trace_io_uring_file_get(ctx, fd);
8da11c19 5367 file = __io_file_get(state, fd);
09bb8394
JA
5368 }
5369
fd2206e4
JA
5370 if (file || io_op_defs[req->opcode].needs_file_no_error) {
5371 *out_file = file;
5372 return 0;
5373 }
5374 return -EBADF;
09bb8394
JA
5375}
5376
8da11c19 5377static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
63ff8223 5378 int fd)
8da11c19 5379{
8da11c19
PB
5380 bool fixed;
5381
63ff8223 5382 fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
0cdaf760 5383 if (unlikely(!fixed && io_async_submit(req->ctx)))
8da11c19
PB
5384 return -EBADF;
5385
5386 return io_file_get(state, req, fd, &req->file, fixed);
5387}
5388
a197f664 5389static int io_grab_files(struct io_kiocb *req)
fcb323cc
JA
5390{
5391 int ret = -EBADF;
a197f664 5392 struct io_ring_ctx *ctx = req->ctx;
fcb323cc 5393
5b0bbee4 5394 if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
f86cd20c 5395 return 0;
b14cca0c 5396 if (!ctx->ring_file)
b5dba59e
JA
5397 return -EBADF;
5398
fcb323cc
JA
5399 rcu_read_lock();
5400 spin_lock_irq(&ctx->inflight_lock);
5401 /*
5402 * We use the f_ops->flush() handler to ensure that we can flush
5403 * out work accessing these files if the fd is closed. Check if
5404 * the fd has changed since we started down this path, and disallow
5405 * this operation if it has.
5406 */
b14cca0c 5407 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
fcb323cc
JA
5408 list_add(&req->inflight_entry, &ctx->inflight_list);
5409 req->flags |= REQ_F_INFLIGHT;
5410 req->work.files = current->files;
5411 ret = 0;
5412 }
5413 spin_unlock_irq(&ctx->inflight_lock);
5414 rcu_read_unlock();
5415
5416 return ret;
5417}
5418
2665abfd 5419static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2b188cc1 5420{
ad8a48ac
JA
5421 struct io_timeout_data *data = container_of(timer,
5422 struct io_timeout_data, timer);
5423 struct io_kiocb *req = data->req;
2665abfd
JA
5424 struct io_ring_ctx *ctx = req->ctx;
5425 struct io_kiocb *prev = NULL;
5426 unsigned long flags;
2665abfd
JA
5427
5428 spin_lock_irqsave(&ctx->completion_lock, flags);
5429
5430 /*
 5431	 * We don't expect the list to be empty; that will only happen if we
5432 * race with the completion of the linked work.
5433 */
4493233e
PB
5434 if (!list_empty(&req->link_list)) {
5435 prev = list_entry(req->link_list.prev, struct io_kiocb,
5436 link_list);
5d960724 5437 if (refcount_inc_not_zero(&prev->refs)) {
4493233e 5438 list_del_init(&req->link_list);
5d960724
JA
5439 prev->flags &= ~REQ_F_LINK_TIMEOUT;
5440 } else
76a46e06 5441 prev = NULL;
2665abfd
JA
5442 }
5443
5444 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5445
5446 if (prev) {
4e88d6e7 5447 req_set_fail_links(prev);
014db007 5448 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
76a46e06 5449 io_put_req(prev);
47f46768
JA
5450 } else {
5451 io_cqring_add_event(req, -ETIME);
5452 io_put_req(req);
2665abfd 5453 }
2665abfd
JA
5454 return HRTIMER_NORESTART;
5455}
5456
ad8a48ac 5457static void io_queue_linked_timeout(struct io_kiocb *req)
2665abfd 5458{
76a46e06 5459 struct io_ring_ctx *ctx = req->ctx;
2665abfd 5460
76a46e06
JA
5461 /*
5462 * If the list is now empty, then our linked request finished before
 5463	 * we got a chance to set up the timer.
5464 */
5465 spin_lock_irq(&ctx->completion_lock);
4493233e 5466 if (!list_empty(&req->link_list)) {
2d28390a 5467 struct io_timeout_data *data = &req->io->timeout;
94ae5e77 5468
ad8a48ac
JA
5469 data->timer.function = io_link_timeout_fn;
5470 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5471 data->mode);
2665abfd 5472 }
76a46e06 5473 spin_unlock_irq(&ctx->completion_lock);
2665abfd 5474
2665abfd 5475 /* drop submission reference */
76a46e06
JA
5476 io_put_req(req);
5477}
2665abfd 5478
ad8a48ac 5479static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
2665abfd
JA
5480{
5481 struct io_kiocb *nxt;
5482
dea3b49c 5483 if (!(req->flags & REQ_F_LINK_HEAD))
2665abfd 5484 return NULL;
d7718a9d
JA
5485 /* for polled retry, if flag is set, we already went through here */
5486 if (req->flags & REQ_F_POLLED)
5487 return NULL;
2665abfd 5488
4493233e
PB
5489 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5490 link_list);
d625c6ee 5491 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
76a46e06 5492 return NULL;
2665abfd 5493
76a46e06 5494 req->flags |= REQ_F_LINK_TIMEOUT;
76a46e06 5495 return nxt;
2665abfd
JA
5496}
5497
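/*
 * Illustrative userspace-side sketch of what io_prep_linked_timeout() pairs
 * with: an operation flagged IOSQE_IO_LINK, immediately followed by an
 * IORING_OP_LINK_TIMEOUT entry whose addr points at a timespec. Field names
 * are the uapi ones from <linux/io_uring.h>; the helper name, the fixed
 * user_data values and the assumption of two contiguous free SQE slots are
 * purely for illustration.
 */
static void queue_read_with_timeout(struct io_uring_sqe *sqe, int fd,
				    void *buf, unsigned nbytes,
				    struct __kernel_timespec *ts)
{
	/* first SQE: the actual operation, linked to the next entry */
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ;		/* or READV/RECV/... */
	sqe->fd = fd;
	sqe->addr = (unsigned long) buf;
	sqe->len = nbytes;
	sqe->flags = IOSQE_IO_LINK;
	sqe->user_data = 1;

	/* second SQE: cancels the read if it hasn't completed within *ts */
	sqe++;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_LINK_TIMEOUT;
	sqe->addr = (unsigned long) ts;		/* relative timeout */
	sqe->len = 1;				/* exactly one timespec */
	sqe->user_data = 2;
}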
3529d8c2 5498static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2b188cc1 5499{
4a0a7a18 5500 struct io_kiocb *linked_timeout;
4bc4494e 5501 struct io_kiocb *nxt;
193155c8 5502 const struct cred *old_creds = NULL;
e0c5c576 5503 int ret;
2b188cc1 5504
4a0a7a18
JA
5505again:
5506 linked_timeout = io_prep_linked_timeout(req);
5507
193155c8
JA
5508 if (req->work.creds && req->work.creds != current_cred()) {
5509 if (old_creds)
5510 revert_creds(old_creds);
5511 if (old_creds == req->work.creds)
5512 old_creds = NULL; /* restored original creds */
5513 else
5514 old_creds = override_creds(req->work.creds);
5515 }
5516
014db007 5517 ret = io_issue_sqe(req, sqe, true);
491381ce
JA
5518
5519 /*
5520 * We async punt it if the file wasn't marked NOWAIT, or if the file
5521 * doesn't support non-blocking read/write attempts
5522 */
5523 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
5524 (req->flags & REQ_F_MUST_PUNT))) {
d7718a9d
JA
5525 if (io_arm_poll_handler(req)) {
5526 if (linked_timeout)
5527 io_queue_linked_timeout(linked_timeout);
4bc4494e 5528 goto exit;
d7718a9d 5529 }
86a761f8 5530punt:
f86cd20c 5531 if (io_op_defs[req->opcode].file_table) {
bbad27b2
PB
5532 ret = io_grab_files(req);
5533 if (ret)
5534 goto err;
2b188cc1 5535 }
bbad27b2
PB
5536
5537 /*
5538 * Queued up for async execution, worker will release
5539 * submit reference when the iocb is actually submitted.
5540 */
5541 io_queue_async_work(req);
4bc4494e 5542 goto exit;
2b188cc1 5543 }
e65ef56d 5544
fcb323cc 5545err:
4bc4494e 5546 nxt = NULL;
76a46e06 5547 /* drop submission reference */
2a44f467 5548 io_put_req_find_next(req, &nxt);
e65ef56d 5549
f9bd67f6 5550 if (linked_timeout) {
76a46e06 5551 if (!ret)
f9bd67f6 5552 io_queue_linked_timeout(linked_timeout);
76a46e06 5553 else
f9bd67f6 5554 io_put_req(linked_timeout);
76a46e06
JA
5555 }
5556
e65ef56d 5557 /* and drop final reference, if we failed */
9e645e11 5558 if (ret) {
78e19bbe 5559 io_cqring_add_event(req, ret);
4e88d6e7 5560 req_set_fail_links(req);
e65ef56d 5561 io_put_req(req);
9e645e11 5562 }
4a0a7a18
JA
5563 if (nxt) {
5564 req = nxt;
86a761f8
PB
5565
5566 if (req->flags & REQ_F_FORCE_ASYNC)
5567 goto punt;
4a0a7a18
JA
5568 goto again;
5569 }
4bc4494e 5570exit:
193155c8
JA
5571 if (old_creds)
5572 revert_creds(old_creds);
2b188cc1
JA
5573}
5574
3529d8c2 5575static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4fe2c963
JL
5576{
5577 int ret;
5578
3529d8c2 5579 ret = io_req_defer(req, sqe);
4fe2c963
JL
5580 if (ret) {
5581 if (ret != -EIOCBQUEUED) {
1118591a 5582fail_req:
78e19bbe 5583 io_cqring_add_event(req, ret);
4e88d6e7 5584 req_set_fail_links(req);
78e19bbe 5585 io_double_put_req(req);
4fe2c963 5586 }
2550878f 5587 } else if (req->flags & REQ_F_FORCE_ASYNC) {
bd2ab18a
PB
5588 if (!req->io) {
5589 ret = -EAGAIN;
5590 if (io_alloc_async_ctx(req))
5591 goto fail_req;
5592 ret = io_req_defer_prep(req, sqe);
5593 if (unlikely(ret < 0))
5594 goto fail_req;
5595 }
5596
ce35a47a
JA
5597 /*
5598 * Never try inline submit if IOSQE_ASYNC is set, go straight
5599 * to async execution.
5600 */
5601 req->work.flags |= IO_WQ_WORK_CONCURRENT;
5602 io_queue_async_work(req);
5603 } else {
3529d8c2 5604 __io_queue_sqe(req, sqe);
ce35a47a 5605 }
4fe2c963
JL
5606}
5607
1b4a51b6 5608static inline void io_queue_link_head(struct io_kiocb *req)
4fe2c963 5609{
94ae5e77 5610 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
1b4a51b6
PB
5611 io_cqring_add_event(req, -ECANCELED);
5612 io_double_put_req(req);
5613 } else
3529d8c2 5614 io_queue_sqe(req, NULL);
4fe2c963
JL
5615}
5616
1d4240cc 5617static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
7d01bd74 5618 struct io_kiocb **link)
9e645e11 5619{
a197f664 5620 struct io_ring_ctx *ctx = req->ctx;
ef4ff581 5621 int ret;
9e645e11 5622
9e645e11
JA
5623 /*
5624 * If we already have a head request, queue this one for async
5625 * submittal once the head completes. If we don't have a head but
5626 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
5627 * submitted sync once the chain is complete. If none of those
5628 * conditions are true (normal request), then just queue it.
5629 */
5630 if (*link) {
9d76377f 5631 struct io_kiocb *head = *link;
4e88d6e7 5632
8cdf2193
PB
5633 /*
5634 * Taking sequential execution of a link, draining both sides
5635 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
5636 * requests in the link. So, it drains the head and the
5637 * next after the link request. The last one is done via
5638 * drain_next flag to persist the effect across calls.
5639 */
ef4ff581 5640 if (req->flags & REQ_F_IO_DRAIN) {
711be031
PB
5641 head->flags |= REQ_F_IO_DRAIN;
5642 ctx->drain_next = 1;
5643 }
1d4240cc
PB
5644 if (io_alloc_async_ctx(req))
5645 return -EAGAIN;
9e645e11 5646
3529d8c2 5647 ret = io_req_defer_prep(req, sqe);
2d28390a 5648 if (ret) {
4e88d6e7 5649 /* fail even hard links since we don't submit */
9d76377f 5650 head->flags |= REQ_F_FAIL_LINK;
1d4240cc 5651 return ret;
2d28390a 5652 }
9d76377f
PB
5653 trace_io_uring_link(ctx, req, head);
5654 list_add_tail(&req->link_list, &head->link_list);
32fe525b
PB
5655
5656 /* last request of a link, enqueue the link */
ef4ff581 5657 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
32fe525b
PB
5658 io_queue_link_head(head);
5659 *link = NULL;
5660 }
9e645e11 5661 } else {
711be031
PB
5662 if (unlikely(ctx->drain_next)) {
5663 req->flags |= REQ_F_IO_DRAIN;
ef4ff581 5664 ctx->drain_next = 0;
711be031 5665 }
ef4ff581 5666 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
dea3b49c 5667 req->flags |= REQ_F_LINK_HEAD;
711be031 5668 INIT_LIST_HEAD(&req->link_list);
f1d96a8f 5669
1d4240cc
PB
5670 if (io_alloc_async_ctx(req))
5671 return -EAGAIN;
5672
711be031
PB
5673 ret = io_req_defer_prep(req, sqe);
5674 if (ret)
5675 req->flags |= REQ_F_FAIL_LINK;
5676 *link = req;
5677 } else {
5678 io_queue_sqe(req, sqe);
5679 }
9e645e11 5680 }
2e6e1fde 5681
1d4240cc 5682 return 0;
9e645e11
JA
5683}
5684
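/*
 * Application-side sketch of the link semantics described in the comment
 * above io_submit_sqe(): every entry except the last carries IOSQE_IO_LINK,
 * so the chain runs sequentially and the whole thing is enqueued when the
 * unflagged tail entry is seen. Field names are the uapi ones; the helper
 * name and the choice of write -> fsync -> close are illustrative only, and
 * file-offset handling is omitted.
 */
static void queue_write_fsync_close_chain(struct io_uring_sqe *sqe, int fd,
					  const void *buf, unsigned nbytes)
{
	memset(sqe, 0, 3 * sizeof(*sqe));

	sqe[0].opcode = IORING_OP_WRITE;
	sqe[0].fd = fd;
	sqe[0].addr = (unsigned long) buf;
	sqe[0].len = nbytes;
	sqe[0].flags = IOSQE_IO_LINK;	/* head of the chain */

	sqe[1].opcode = IORING_OP_FSYNC;
	sqe[1].fd = fd;
	sqe[1].flags = IOSQE_IO_LINK;	/* still linked... */

	sqe[2].opcode = IORING_OP_CLOSE;
	sqe[2].fd = fd;			/* ...last entry terminates the chain */
}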
9a56a232
JA
5685/*
5686 * Batched submission is done, ensure local IO is flushed out.
5687 */
5688static void io_submit_state_end(struct io_submit_state *state)
5689{
5690 blk_finish_plug(&state->plug);
9f13c35b 5691 io_state_file_put(state);
2579f913 5692 if (state->free_reqs)
6c8a3134 5693 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9a56a232
JA
5694}
5695
5696/*
5697 * Start submission side cache.
5698 */
5699static void io_submit_state_start(struct io_submit_state *state,
22efde59 5700 unsigned int max_ios)
9a56a232
JA
5701{
5702 blk_start_plug(&state->plug);
2579f913 5703 state->free_reqs = 0;
9a56a232
JA
5704 state->file = NULL;
5705 state->ios_left = max_ios;
5706}
5707
2b188cc1
JA
5708static void io_commit_sqring(struct io_ring_ctx *ctx)
5709{
75b28aff 5710 struct io_rings *rings = ctx->rings;
2b188cc1 5711
caf582c6
PB
5712 /*
5713 * Ensure any loads from the SQEs are done at this point,
5714 * since once we write the new head, the application could
5715 * write new data to them.
5716 */
5717 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2b188cc1
JA
5718}
5719
2b188cc1 5720/*
3529d8c2 5721 * Fetch an sqe, if one is available. Note that the returned sqe points to memory
2b188cc1
JA
5722 * that is mapped by userspace. This means that care needs to be taken to
5723 * ensure that reads are stable, as we cannot rely on userspace always
5724 * being a good citizen. If members of the sqe are validated and then later
5725 * used, it's important that those reads are done through READ_ONCE() to
5726 * prevent a re-load down the line.
5727 */
709b302f 5728static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2b188cc1 5729{
75b28aff 5730 u32 *sq_array = ctx->sq_array;
2b188cc1
JA
5731 unsigned head;
5732
5733 /*
5734 * The cached sq head (or cq tail) serves two purposes:
5735 *
5736 * 1) allows us to batch the cost of updating the user visible
5737 * head updates.
5738 * 2) allows the kernel side to track the head on its own, even
5739 * though the application is the one updating it.
5740 */
ee7d46d9 5741 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
709b302f
PB
5742 if (likely(head < ctx->sq_entries))
5743 return &ctx->sq_sqes[head];
2b188cc1
JA
5744
5745 /* drop invalid entries */
498ccd9e 5746 ctx->cached_sq_dropped++;
ee7d46d9 5747 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
709b302f
PB
5748 return NULL;
5749}
5750
5751static inline void io_consume_sqe(struct io_ring_ctx *ctx)
5752{
5753 ctx->cached_sq_head++;
2b188cc1
JA
5754}
5755
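/*
 * Sketch of the application-side half of the SQ protocol that io_get_sqe()
 * and io_commit_sqring() implement: acquire-load the head the kernel
 * publishes, fill a free slot, then release-store the new tail. 'struct
 * app_sq' and its member names are illustrative stand-ins for whatever the
 * application keeps its mmap()ed ring pointers in, and the barrier macros
 * stand in for the application's own acquire/release primitives.
 */
struct app_sq {
	unsigned *khead, *ktail, *kring_mask, *kring_entries;
	unsigned *array;
	struct io_uring_sqe *sqes;
};

static int sq_ring_push(struct app_sq *sq, const struct io_uring_sqe *src)
{
	unsigned tail = *sq->ktail;
	unsigned head = smp_load_acquire(sq->khead);	/* pairs with io_commit_sqring() */

	if (tail - head == *sq->kring_entries)
		return -EBUSY;				/* ring is full */

	sq->sqes[tail & *sq->kring_mask] = *src;	/* fill the slot */
	sq->array[tail & *sq->kring_mask] = tail & *sq->kring_mask;
	smp_store_release(sq->ktail, tail + 1);		/* publish; pairs with the READ_ONCE() above */
	return 0;
}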
ef4ff581
PB
5756#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5757 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5758 IOSQE_BUFFER_SELECT)
5759
5760static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
5761 const struct io_uring_sqe *sqe,
0cdaf760 5762 struct io_submit_state *state)
0553b8bd 5763{
ef4ff581 5764 unsigned int sqe_flags;
63ff8223 5765 int id;
ef4ff581 5766
0553b8bd
PB
5767 /*
5768 * All IO needs to record the previous position; for LINK vs DRAIN,
5769 * it can be used to mark the position of the first IO in the
5770 * link list.
5771 */
31af27c7 5772 req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
0553b8bd
PB
5773 req->opcode = READ_ONCE(sqe->opcode);
5774 req->user_data = READ_ONCE(sqe->user_data);
5775 req->io = NULL;
5776 req->file = NULL;
5777 req->ctx = ctx;
5778 req->flags = 0;
5779 /* one is dropped after submission, the other at completion */
5780 refcount_set(&req->refs, 2);
5781 req->task = NULL;
5782 req->result = 0;
f5fa38c5 5783 INIT_IO_WORK(&req->work);
ef4ff581
PB
5784
5785 if (unlikely(req->opcode >= IORING_OP_LAST))
5786 return -EINVAL;
5787
5788 if (io_op_defs[req->opcode].needs_mm && !current->mm) {
5789 if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
5790 return -EFAULT;
5791 use_mm(ctx->sqo_mm);
5792 }
5793
5794 sqe_flags = READ_ONCE(sqe->flags);
5795 /* enforce forwards compatibility on users */
5796 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
5797 return -EINVAL;
5798
5799 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5800 !io_op_defs[req->opcode].buffer_select)
5801 return -EOPNOTSUPP;
5802
5803 id = READ_ONCE(sqe->personality);
5804 if (id) {
5805 req->work.creds = idr_find(&ctx->personality_idr, id);
5806 if (unlikely(!req->work.creds))
5807 return -EINVAL;
5808 get_cred(req->work.creds);
5809 }
5810
5811 /* same numerical values with corresponding REQ_F_*, safe to copy */
c11368a5 5812 req->flags |= sqe_flags;
ef4ff581 5813
63ff8223
JA
5814 if (!io_op_defs[req->opcode].needs_file)
5815 return 0;
5816
5817 return io_req_set_file(state, req, READ_ONCE(sqe->fd));
0553b8bd
PB
5818}
5819
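/*
 * Sketch of where the sqe->personality id looked up in io_init_req() comes
 * from on the application side: IORING_REGISTER_PERSONALITY snapshots the
 * caller's credentials and the returned id is later stamped into individual
 * SQEs. Raw syscall form shown as an illustration; liburing provides a
 * wrapper, and the helper name here is hypothetical.
 */
static int register_personality_and_use(int ring_fd, struct io_uring_sqe *sqe)
{
	int id = syscall(__NR_io_uring_register, ring_fd,
			 IORING_REGISTER_PERSONALITY, NULL, 0);
	if (id < 0)
		return -errno;

	sqe->personality = id;	/* this request runs with the registered creds */
	return id;
}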
fb5ccc98 5820static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
0cdaf760 5821 struct file *ring_file, int ring_fd)
6c271ce2
JA
5822{
5823 struct io_submit_state state, *statep = NULL;
9e645e11 5824 struct io_kiocb *link = NULL;
9e645e11 5825 int i, submitted = 0;
6c271ce2 5826
c4a2ed72 5827 /* if we have a backlog and couldn't flush it all, return BUSY */
ad3eb2c8
JA
5828 if (test_bit(0, &ctx->sq_check_overflow)) {
5829 if (!list_empty(&ctx->cq_overflow_list) &&
5830 !io_cqring_overflow_flush(ctx, false))
5831 return -EBUSY;
5832 }
6c271ce2 5833
ee7d46d9
PB
5834 /* make sure SQ entry isn't read before tail */
5835 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
9ef4f124 5836
2b85edfc
PB
5837 if (!percpu_ref_tryget_many(&ctx->refs, nr))
5838 return -EAGAIN;
6c271ce2
JA
5839
5840 if (nr > IO_PLUG_THRESHOLD) {
22efde59 5841 io_submit_state_start(&state, nr);
6c271ce2
JA
5842 statep = &state;
5843 }
5844
b14cca0c
PB
5845 ctx->ring_fd = ring_fd;
5846 ctx->ring_file = ring_file;
5847
6c271ce2 5848 for (i = 0; i < nr; i++) {
3529d8c2 5849 const struct io_uring_sqe *sqe;
196be95c 5850 struct io_kiocb *req;
1cb1edb2 5851 int err;
fb5ccc98 5852
b1e50e54
PB
5853 sqe = io_get_sqe(ctx);
5854 if (unlikely(!sqe)) {
5855 io_consume_sqe(ctx);
5856 break;
5857 }
0553b8bd 5858 req = io_alloc_req(ctx, statep);
196be95c
PB
5859 if (unlikely(!req)) {
5860 if (!submitted)
5861 submitted = -EAGAIN;
fb5ccc98 5862 break;
196be95c 5863 }
fb5ccc98 5864
0cdaf760 5865 err = io_init_req(ctx, req, sqe, statep);
709b302f 5866 io_consume_sqe(ctx);
d3656344
JA
5867 /* will complete beyond this point, count as submitted */
5868 submitted++;
5869
ef4ff581 5870 if (unlikely(err)) {
1cb1edb2
PB
5871fail_req:
5872 io_cqring_add_event(req, err);
d3656344 5873 io_double_put_req(req);
196be95c
PB
5874 break;
5875 }
fb5ccc98 5876
354420f7 5877 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
0cdaf760 5878 true, io_async_submit(ctx));
7d01bd74 5879 err = io_submit_sqe(req, sqe, &link);
1d4240cc
PB
5880 if (err)
5881 goto fail_req;
6c271ce2
JA
5882 }
5883
9466f437
PB
5884 if (unlikely(submitted != nr)) {
5885 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
5886
5887 percpu_ref_put_many(&ctx->refs, nr - ref_used);
5888 }
9e645e11 5889 if (link)
1b4a51b6 5890 io_queue_link_head(link);
6c271ce2
JA
5891 if (statep)
5892 io_submit_state_end(&state);
5893
ae9428ca
PB
5894 /* Commit SQ ring head once we've consumed and submitted all SQEs */
5895 io_commit_sqring(ctx);
5896
6c271ce2
JA
5897 return submitted;
5898}
5899
bf9c2f1c
PB
5900static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
5901{
5902 struct mm_struct *mm = current->mm;
5903
5904 if (mm) {
5905 unuse_mm(mm);
5906 mmput(mm);
5907 }
5908}
5909
6c271ce2
JA
5910static int io_sq_thread(void *data)
5911{
6c271ce2 5912 struct io_ring_ctx *ctx = data;
181e448d 5913 const struct cred *old_cred;
6c271ce2
JA
5914 mm_segment_t old_fs;
5915 DEFINE_WAIT(wait);
6c271ce2 5916 unsigned long timeout;
bdcd3eab 5917 int ret = 0;
6c271ce2 5918
0f158b4c 5919 complete(&ctx->sq_thread_comp);
a4c0b3de 5920
6c271ce2
JA
5921 old_fs = get_fs();
5922 set_fs(USER_DS);
181e448d 5923 old_cred = override_creds(ctx->creds);
6c271ce2 5924
bdcd3eab 5925 timeout = jiffies + ctx->sq_thread_idle;
2bbcd6d3 5926 while (!kthread_should_park()) {
fb5ccc98 5927 unsigned int to_submit;
6c271ce2 5928
bdcd3eab 5929 if (!list_empty(&ctx->poll_list)) {
6c271ce2
JA
5930 unsigned nr_events = 0;
5931
bdcd3eab
XW
5932 mutex_lock(&ctx->uring_lock);
5933 if (!list_empty(&ctx->poll_list))
5934 io_iopoll_getevents(ctx, &nr_events, 0);
5935 else
6c271ce2 5936 timeout = jiffies + ctx->sq_thread_idle;
bdcd3eab 5937 mutex_unlock(&ctx->uring_lock);
6c271ce2
JA
5938 }
5939
fb5ccc98 5940 to_submit = io_sqring_entries(ctx);
c1edbf5f
JA
5941
5942 /*
5943 * If submit got -EBUSY, flag us as needing the application
5944 * to enter the kernel to reap and flush events.
5945 */
5946 if (!to_submit || ret == -EBUSY) {
7143b5ac
SG
5947 /*
5948 * Drop cur_mm before scheduling, we can't hold it for
5949 * long periods (or over schedule()). Do this before
5950 * adding ourselves to the waitqueue, as the unuse/drop
5951 * may sleep.
5952 */
bf9c2f1c 5953 io_sq_thread_drop_mm(ctx);
7143b5ac 5954
6c271ce2
JA
5955 /*
5956 * We're polling. If we're within the defined idle
5957 * period, then let us spin without work before going
c1edbf5f
JA
5958 * to sleep. The exception is if we got EBUSY doing
5959 * more IO, we should wait for the application to
5960 * reap events and wake us up.
6c271ce2 5961 */
bdcd3eab 5962 if (!list_empty(&ctx->poll_list) ||
df069d80
JA
5963 (!time_after(jiffies, timeout) && ret != -EBUSY &&
5964 !percpu_ref_is_dying(&ctx->refs))) {
b41e9852
JA
5965 if (current->task_works)
5966 task_work_run();
9831a90c 5967 cond_resched();
6c271ce2
JA
5968 continue;
5969 }
5970
6c271ce2
JA
5971 prepare_to_wait(&ctx->sqo_wait, &wait,
5972 TASK_INTERRUPTIBLE);
5973
bdcd3eab
XW
5974 /*
5975 * While doing polled IO, before going to sleep we need to
5976 * check whether new requests were added to poll_list. This is
5977 * because requests may have been punted to the io worker and
5978 * only added to poll_list afterwards, hence check the
5979 * poll_list again.
5980 */
5981 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
5982 !list_empty_careful(&ctx->poll_list)) {
5983 finish_wait(&ctx->sqo_wait, &wait);
5984 continue;
5985 }
5986
6c271ce2 5987 /* Tell userspace we may need a wakeup call */
75b28aff 5988 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
0d7bae69
SB
5989 /* make sure to read SQ tail after writing flags */
5990 smp_mb();
6c271ce2 5991
fb5ccc98 5992 to_submit = io_sqring_entries(ctx);
c1edbf5f 5993 if (!to_submit || ret == -EBUSY) {
2bbcd6d3 5994 if (kthread_should_park()) {
6c271ce2
JA
5995 finish_wait(&ctx->sqo_wait, &wait);
5996 break;
5997 }
b41e9852
JA
5998 if (current->task_works) {
5999 task_work_run();
10bea96d 6000 finish_wait(&ctx->sqo_wait, &wait);
b41e9852
JA
6001 continue;
6002 }
6c271ce2
JA
6003 if (signal_pending(current))
6004 flush_signals(current);
6005 schedule();
6006 finish_wait(&ctx->sqo_wait, &wait);
6007
75b28aff 6008 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
d4ae271d 6009 ret = 0;
6c271ce2
JA
6010 continue;
6011 }
6012 finish_wait(&ctx->sqo_wait, &wait);
6013
75b28aff 6014 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6c271ce2
JA
6015 }
6016
8a4955ff 6017 mutex_lock(&ctx->uring_lock);
6b668c9b
XW
6018 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6019 ret = io_submit_sqes(ctx, to_submit, NULL, -1);
8a4955ff 6020 mutex_unlock(&ctx->uring_lock);
bdcd3eab 6021 timeout = jiffies + ctx->sq_thread_idle;
6c271ce2
JA
6022 }
6023
b41e9852
JA
6024 if (current->task_works)
6025 task_work_run();
6026
6c271ce2 6027 set_fs(old_fs);
bf9c2f1c 6028 io_sq_thread_drop_mm(ctx);
181e448d 6029 revert_creds(old_cred);
06058632 6030
2bbcd6d3 6031 kthread_parkme();
06058632 6032
6c271ce2
JA
6033 return 0;
6034}
6035
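/*
 * Application-side counterpart of the IORING_SQ_NEED_WAKEUP handling in
 * io_sq_thread() above (and of the SQPOLL note in the header comment of
 * this file): after publishing a new SQ tail, a full barrier, then check
 * the flags word and only enter the kernel if the poll thread went to
 * sleep. 'kflags' stands for the mmap()ed sq_ring flags pointer; the name
 * and helper are illustrative only.
 */
static void sq_poll_submit(int ring_fd, unsigned *kflags)
{
	/*
	 * The new tail has already been stored with release semantics;
	 * now order that store against the flags load.
	 */
	smp_mb();

	if (READ_ONCE(*kflags) & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}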
bda52162
JA
6036struct io_wait_queue {
6037 struct wait_queue_entry wq;
6038 struct io_ring_ctx *ctx;
6039 unsigned to_wait;
6040 unsigned nr_timeouts;
6041};
6042
1d7bb1d5 6043static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
bda52162
JA
6044{
6045 struct io_ring_ctx *ctx = iowq->ctx;
6046
6047 /*
d195a66e 6048 * Wake up if we have enough events, or if a timeout occurred since we
bda52162
JA
6049 * started waiting. For timeouts, we always want to return to userspace,
6050 * regardless of event count.
6051 */
1d7bb1d5 6052 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
bda52162
JA
6053 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6054}
6055
6056static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6057 int wake_flags, void *key)
6058{
6059 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6060 wq);
6061
1d7bb1d5
JA
6062 /* use noflush == true, as we can't safely rely on locking context */
6063 if (!io_should_wake(iowq, true))
bda52162
JA
6064 return -1;
6065
6066 return autoremove_wake_function(curr, mode, wake_flags, key);
6067}
6068
2b188cc1
JA
6069/*
6070 * Wait until events become available, if we don't already have some. The
6071 * application must reap them itself, as they reside on the shared cq ring.
6072 */
6073static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6074 const sigset_t __user *sig, size_t sigsz)
6075{
bda52162
JA
6076 struct io_wait_queue iowq = {
6077 .wq = {
6078 .private = current,
6079 .func = io_wake_function,
6080 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6081 },
6082 .ctx = ctx,
6083 .to_wait = min_events,
6084 };
75b28aff 6085 struct io_rings *rings = ctx->rings;
e9ffa5c2 6086 int ret = 0;
2b188cc1 6087
b41e9852
JA
6088 do {
6089 if (io_cqring_events(ctx, false) >= min_events)
6090 return 0;
6091 if (!current->task_works)
6092 break;
6093 task_work_run();
6094 } while (1);
2b188cc1
JA
6095
6096 if (sig) {
9e75ad5d
AB
6097#ifdef CONFIG_COMPAT
6098 if (in_compat_syscall())
6099 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
b772434b 6100 sigsz);
9e75ad5d
AB
6101 else
6102#endif
b772434b 6103 ret = set_user_sigmask(sig, sigsz);
9e75ad5d 6104
2b188cc1
JA
6105 if (ret)
6106 return ret;
6107 }
6108
bda52162 6109 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
c826bd7a 6110 trace_io_uring_cqring_wait(ctx, min_events);
bda52162
JA
6111 do {
6112 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6113 TASK_INTERRUPTIBLE);
b41e9852
JA
6114 if (current->task_works)
6115 task_work_run();
1d7bb1d5 6116 if (io_should_wake(&iowq, false))
bda52162
JA
6117 break;
6118 schedule();
6119 if (signal_pending(current)) {
e9ffa5c2 6120 ret = -EINTR;
bda52162
JA
6121 break;
6122 }
6123 } while (1);
6124 finish_wait(&ctx->wait, &iowq.wq);
6125
e9ffa5c2 6126 restore_saved_sigmask_unless(ret == -EINTR);
2b188cc1 6127
75b28aff 6128 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2b188cc1
JA
6129}
6130
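/*
 * Sketch of the reaping loop io_cqring_wait() expects the application to
 * run once it returns: acquire-load the CQ tail the kernel publishes,
 * consume entries, then release-store the new head so the kernel may reuse
 * the slots. 'struct app_cq' and its member names are illustrative, not
 * uapi, and the barrier macros stand in for the application's primitives.
 */
struct app_cq {
	unsigned *khead, *ktail, *kring_mask;
	struct io_uring_cqe *cqes;
};

static unsigned cq_ring_reap(struct app_cq *cq)
{
	unsigned head = *cq->khead;
	unsigned tail = smp_load_acquire(cq->ktail);	/* pairs with the kernel's tail store */
	unsigned seen = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];

		(void) cqe;	/* application consumes cqe->user_data / cqe->res here */
		head++;
		seen++;
	}
	smp_store_release(cq->khead, head);		/* lets the kernel overwrite the slots */
	return seen;
}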
6b06314c
JA
6131static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6132{
6133#if defined(CONFIG_UNIX)
6134 if (ctx->ring_sock) {
6135 struct sock *sock = ctx->ring_sock->sk;
6136 struct sk_buff *skb;
6137
6138 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6139 kfree_skb(skb);
6140 }
6141#else
6142 int i;
6143
65e19f54
JA
6144 for (i = 0; i < ctx->nr_user_files; i++) {
6145 struct file *file;
6146
6147 file = io_file_from_index(ctx, i);
6148 if (file)
6149 fput(file);
6150 }
6b06314c
JA
6151#endif
6152}
6153
05f3fb3c
JA
6154static void io_file_ref_kill(struct percpu_ref *ref)
6155{
6156 struct fixed_file_data *data;
6157
6158 data = container_of(ref, struct fixed_file_data, refs);
6159 complete(&data->done);
6160}
6161
6b06314c
JA
6162static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6163{
05f3fb3c 6164 struct fixed_file_data *data = ctx->file_data;
05589553 6165 struct fixed_file_ref_node *ref_node = NULL;
65e19f54
JA
6166 unsigned nr_tables, i;
6167
05f3fb3c 6168 if (!data)
6b06314c
JA
6169 return -ENXIO;
6170
6a4d07cd 6171 spin_lock(&data->lock);
05589553
XW
6172 if (!list_empty(&data->ref_list))
6173 ref_node = list_first_entry(&data->ref_list,
6174 struct fixed_file_ref_node, node);
6a4d07cd 6175 spin_unlock(&data->lock);
05589553
XW
6176 if (ref_node)
6177 percpu_ref_kill(&ref_node->refs);
6178
6179 percpu_ref_kill(&data->refs);
6180
6181 /* wait for all refs nodes to complete */
4a38aed2 6182 flush_delayed_work(&ctx->file_put_work);
2faf852d 6183 wait_for_completion(&data->done);
05f3fb3c 6184
6b06314c 6185 __io_sqe_files_unregister(ctx);
65e19f54
JA
6186 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6187 for (i = 0; i < nr_tables; i++)
05f3fb3c
JA
6188 kfree(data->table[i].files);
6189 kfree(data->table);
05589553
XW
6190 percpu_ref_exit(&data->refs);
6191 kfree(data);
05f3fb3c 6192 ctx->file_data = NULL;
6b06314c
JA
6193 ctx->nr_user_files = 0;
6194 return 0;
6195}
6196
6c271ce2
JA
6197static void io_sq_thread_stop(struct io_ring_ctx *ctx)
6198{
6199 if (ctx->sqo_thread) {
0f158b4c 6200 wait_for_completion(&ctx->sq_thread_comp);
2bbcd6d3
RP
6201 /*
6202 * The park is a bit of a work-around, without it we get
6203 * warning spews on shutdown with SQPOLL set and affinity
6204 * set to a single CPU.
6205 */
06058632 6206 kthread_park(ctx->sqo_thread);
6c271ce2
JA
6207 kthread_stop(ctx->sqo_thread);
6208 ctx->sqo_thread = NULL;
6209 }
6210}
6211
6b06314c
JA
6212static void io_finish_async(struct io_ring_ctx *ctx)
6213{
6c271ce2
JA
6214 io_sq_thread_stop(ctx);
6215
561fb04a
JA
6216 if (ctx->io_wq) {
6217 io_wq_destroy(ctx->io_wq);
6218 ctx->io_wq = NULL;
6b06314c
JA
6219 }
6220}
6221
6222#if defined(CONFIG_UNIX)
6b06314c
JA
6223/*
6224 * Ensure the UNIX gc is aware of our file set, so we are certain that
6225 * the io_uring can be safely unregistered on process exit, even if we have
6226 * loops in the file referencing.
6227 */
6228static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
6229{
6230 struct sock *sk = ctx->ring_sock->sk;
6231 struct scm_fp_list *fpl;
6232 struct sk_buff *skb;
08a45173 6233 int i, nr_files;
6b06314c 6234
6b06314c
JA
6235 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
6236 if (!fpl)
6237 return -ENOMEM;
6238
6239 skb = alloc_skb(0, GFP_KERNEL);
6240 if (!skb) {
6241 kfree(fpl);
6242 return -ENOMEM;
6243 }
6244
6245 skb->sk = sk;
6b06314c 6246
08a45173 6247 nr_files = 0;
6b06314c
JA
6248 fpl->user = get_uid(ctx->user);
6249 for (i = 0; i < nr; i++) {
65e19f54
JA
6250 struct file *file = io_file_from_index(ctx, i + offset);
6251
6252 if (!file)
08a45173 6253 continue;
65e19f54 6254 fpl->fp[nr_files] = get_file(file);
08a45173
JA
6255 unix_inflight(fpl->user, fpl->fp[nr_files]);
6256 nr_files++;
6b06314c
JA
6257 }
6258
08a45173
JA
6259 if (nr_files) {
6260 fpl->max = SCM_MAX_FD;
6261 fpl->count = nr_files;
6262 UNIXCB(skb).fp = fpl;
05f3fb3c 6263 skb->destructor = unix_destruct_scm;
08a45173
JA
6264 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6265 skb_queue_head(&sk->sk_receive_queue, skb);
6b06314c 6266
08a45173
JA
6267 for (i = 0; i < nr_files; i++)
6268 fput(fpl->fp[i]);
6269 } else {
6270 kfree_skb(skb);
6271 kfree(fpl);
6272 }
6b06314c
JA
6273
6274 return 0;
6275}
6276
6277/*
6278 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
6279 * causes regular reference counting to break down. We rely on the UNIX
6280 * garbage collection to take care of this problem for us.
6281 */
6282static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6283{
6284 unsigned left, total;
6285 int ret = 0;
6286
6287 total = 0;
6288 left = ctx->nr_user_files;
6289 while (left) {
6290 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6b06314c
JA
6291
6292 ret = __io_sqe_files_scm(ctx, this_files, total);
6293 if (ret)
6294 break;
6295 left -= this_files;
6296 total += this_files;
6297 }
6298
6299 if (!ret)
6300 return 0;
6301
6302 while (total < ctx->nr_user_files) {
65e19f54
JA
6303 struct file *file = io_file_from_index(ctx, total);
6304
6305 if (file)
6306 fput(file);
6b06314c
JA
6307 total++;
6308 }
6309
6310 return ret;
6311}
6312#else
6313static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6314{
6315 return 0;
6316}
6317#endif
6318
65e19f54
JA
6319static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
6320 unsigned nr_files)
6321{
6322 int i;
6323
6324 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6325 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6326 unsigned this_files;
6327
6328 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
6329 table->files = kcalloc(this_files, sizeof(struct file *),
6330 GFP_KERNEL);
6331 if (!table->files)
6332 break;
6333 nr_files -= this_files;
6334 }
6335
6336 if (i == nr_tables)
6337 return 0;
6338
6339 for (i = 0; i < nr_tables; i++) {
05f3fb3c 6340 struct fixed_file_table *table = &ctx->file_data->table[i];
65e19f54
JA
6341 kfree(table->files);
6342 }
6343 return 1;
6344}
6345
05f3fb3c
JA
6346static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
6347{
6348#if defined(CONFIG_UNIX)
6349 struct sock *sock = ctx->ring_sock->sk;
6350 struct sk_buff_head list, *head = &sock->sk_receive_queue;
6351 struct sk_buff *skb;
6352 int i;
6353
6354 __skb_queue_head_init(&list);
6355
6356 /*
6357 * Find the skb that holds this file in its SCM_RIGHTS. When found,
6358 * remove this entry and rearrange the file array.
6359 */
6360 skb = skb_dequeue(head);
6361 while (skb) {
6362 struct scm_fp_list *fp;
6363
6364 fp = UNIXCB(skb).fp;
6365 for (i = 0; i < fp->count; i++) {
6366 int left;
6367
6368 if (fp->fp[i] != file)
6369 continue;
6370
6371 unix_notinflight(fp->user, fp->fp[i]);
6372 left = fp->count - 1 - i;
6373 if (left) {
6374 memmove(&fp->fp[i], &fp->fp[i + 1],
6375 left * sizeof(struct file *));
6376 }
6377 fp->count--;
6378 if (!fp->count) {
6379 kfree_skb(skb);
6380 skb = NULL;
6381 } else {
6382 __skb_queue_tail(&list, skb);
6383 }
6384 fput(file);
6385 file = NULL;
6386 break;
6387 }
6388
6389 if (!file)
6390 break;
6391
6392 __skb_queue_tail(&list, skb);
6393
6394 skb = skb_dequeue(head);
6395 }
6396
6397 if (skb_peek(&list)) {
6398 spin_lock_irq(&head->lock);
6399 while ((skb = __skb_dequeue(&list)) != NULL)
6400 __skb_queue_tail(head, skb);
6401 spin_unlock_irq(&head->lock);
6402 }
6403#else
6404 fput(file);
6405#endif
6406}
6407
6408struct io_file_put {
05589553 6409 struct list_head list;
05f3fb3c 6410 struct file *file;
05f3fb3c
JA
6411};
6412
4a38aed2 6413static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
65e19f54 6414{
4a38aed2
JA
6415 struct fixed_file_data *file_data = ref_node->file_data;
6416 struct io_ring_ctx *ctx = file_data->ctx;
05f3fb3c 6417 struct io_file_put *pfile, *tmp;
05589553
XW
6418
6419 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
6a4d07cd 6420 list_del(&pfile->list);
05589553
XW
6421 io_ring_file_put(ctx, pfile->file);
6422 kfree(pfile);
65e19f54 6423 }
05589553 6424
6a4d07cd
JA
6425 spin_lock(&file_data->lock);
6426 list_del(&ref_node->node);
6427 spin_unlock(&file_data->lock);
05589553
XW
6428
6429 percpu_ref_exit(&ref_node->refs);
6430 kfree(ref_node);
6431 percpu_ref_put(&file_data->refs);
2faf852d 6432}
65e19f54 6433
4a38aed2
JA
6434static void io_file_put_work(struct work_struct *work)
6435{
6436 struct io_ring_ctx *ctx;
6437 struct llist_node *node;
6438
6439 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
6440 node = llist_del_all(&ctx->file_put_llist);
6441
6442 while (node) {
6443 struct fixed_file_ref_node *ref_node;
6444 struct llist_node *next = node->next;
6445
6446 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
6447 __io_file_put_work(ref_node);
6448 node = next;
6449 }
6450}
6451
05589553 6452static void io_file_data_ref_zero(struct percpu_ref *ref)
2faf852d 6453{
05589553 6454 struct fixed_file_ref_node *ref_node;
4a38aed2
JA
6455 struct io_ring_ctx *ctx;
6456 bool first_add;
6457 int delay = HZ;
65e19f54 6458
05589553 6459 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
4a38aed2 6460 ctx = ref_node->file_data->ctx;
05589553 6461
4a38aed2
JA
6462 if (percpu_ref_is_dying(&ctx->file_data->refs))
6463 delay = 0;
05589553 6464
4a38aed2
JA
6465 first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
6466 if (!delay)
6467 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
6468 else if (first_add)
6469 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
05f3fb3c 6470}
65e19f54 6471
05589553
XW
6472static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
6473 struct io_ring_ctx *ctx)
05f3fb3c 6474{
05589553 6475 struct fixed_file_ref_node *ref_node;
05f3fb3c 6476
05589553
XW
6477 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
6478 if (!ref_node)
6479 return ERR_PTR(-ENOMEM);
05f3fb3c 6480
05589553
XW
6481 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
6482 0, GFP_KERNEL)) {
6483 kfree(ref_node);
6484 return ERR_PTR(-ENOMEM);
6485 }
6486 INIT_LIST_HEAD(&ref_node->node);
6487 INIT_LIST_HEAD(&ref_node->file_list);
05589553
XW
6488 ref_node->file_data = ctx->file_data;
6489 return ref_node;
05589553
XW
6490}
6491
6492static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
6493{
6494 percpu_ref_exit(&ref_node->refs);
6495 kfree(ref_node);
65e19f54
JA
6496}
6497
6b06314c
JA
6498static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6499 unsigned nr_args)
6500{
6501 __s32 __user *fds = (__s32 __user *) arg;
65e19f54 6502 unsigned nr_tables;
05f3fb3c 6503 struct file *file;
6b06314c
JA
6504 int fd, ret = 0;
6505 unsigned i;
05589553 6506 struct fixed_file_ref_node *ref_node;
6b06314c 6507
05f3fb3c 6508 if (ctx->file_data)
6b06314c
JA
6509 return -EBUSY;
6510 if (!nr_args)
6511 return -EINVAL;
6512 if (nr_args > IORING_MAX_FIXED_FILES)
6513 return -EMFILE;
6514
05f3fb3c
JA
6515 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
6516 if (!ctx->file_data)
6517 return -ENOMEM;
6518 ctx->file_data->ctx = ctx;
6519 init_completion(&ctx->file_data->done);
05589553 6520 INIT_LIST_HEAD(&ctx->file_data->ref_list);
f7fe9346 6521 spin_lock_init(&ctx->file_data->lock);
05f3fb3c 6522
65e19f54 6523 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
05f3fb3c
JA
6524 ctx->file_data->table = kcalloc(nr_tables,
6525 sizeof(struct fixed_file_table),
65e19f54 6526 GFP_KERNEL);
05f3fb3c
JA
6527 if (!ctx->file_data->table) {
6528 kfree(ctx->file_data);
6529 ctx->file_data = NULL;
6b06314c 6530 return -ENOMEM;
05f3fb3c
JA
6531 }
6532
05589553 6533 if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill,
05f3fb3c
JA
6534 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
6535 kfree(ctx->file_data->table);
6536 kfree(ctx->file_data);
6537 ctx->file_data = NULL;
6b06314c 6538 return -ENOMEM;
05f3fb3c 6539 }
6b06314c 6540
65e19f54 6541 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
05f3fb3c
JA
6542 percpu_ref_exit(&ctx->file_data->refs);
6543 kfree(ctx->file_data->table);
6544 kfree(ctx->file_data);
6545 ctx->file_data = NULL;
65e19f54
JA
6546 return -ENOMEM;
6547 }
6548
08a45173 6549 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
65e19f54
JA
6550 struct fixed_file_table *table;
6551 unsigned index;
6552
6b06314c
JA
6553 ret = -EFAULT;
6554 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
6555 break;
08a45173
JA
6556 /* allow sparse sets */
6557 if (fd == -1) {
6558 ret = 0;
6559 continue;
6560 }
6b06314c 6561
05f3fb3c 6562 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54 6563 index = i & IORING_FILE_TABLE_MASK;
05f3fb3c 6564 file = fget(fd);
6b06314c
JA
6565
6566 ret = -EBADF;
05f3fb3c 6567 if (!file)
6b06314c 6568 break;
05f3fb3c 6569
6b06314c
JA
6570 /*
6571 * Don't allow io_uring instances to be registered. If UNIX
6572 * isn't enabled, then this causes a reference cycle and this
6573 * instance can never get freed. If UNIX is enabled we'll
6574 * handle it just fine, but there's still no point in allowing
6575 * a ring fd as it doesn't support regular read/write anyway.
6576 */
05f3fb3c
JA
6577 if (file->f_op == &io_uring_fops) {
6578 fput(file);
6b06314c
JA
6579 break;
6580 }
6b06314c 6581 ret = 0;
05f3fb3c 6582 table->files[index] = file;
6b06314c
JA
6583 }
6584
6585 if (ret) {
65e19f54 6586 for (i = 0; i < ctx->nr_user_files; i++) {
65e19f54
JA
6587 file = io_file_from_index(ctx, i);
6588 if (file)
6589 fput(file);
6590 }
6591 for (i = 0; i < nr_tables; i++)
05f3fb3c 6592 kfree(ctx->file_data->table[i].files);
6b06314c 6593
05f3fb3c
JA
6594 kfree(ctx->file_data->table);
6595 kfree(ctx->file_data);
6596 ctx->file_data = NULL;
6b06314c
JA
6597 ctx->nr_user_files = 0;
6598 return ret;
6599 }
6600
6601 ret = io_sqe_files_scm(ctx);
05589553 6602 if (ret) {
6b06314c 6603 io_sqe_files_unregister(ctx);
05589553
XW
6604 return ret;
6605 }
6b06314c 6606
05589553
XW
6607 ref_node = alloc_fixed_file_ref_node(ctx);
6608 if (IS_ERR(ref_node)) {
6609 io_sqe_files_unregister(ctx);
6610 return PTR_ERR(ref_node);
6611 }
6612
6613 ctx->file_data->cur_refs = &ref_node->refs;
6a4d07cd 6614 spin_lock(&ctx->file_data->lock);
05589553 6615 list_add(&ref_node->node, &ctx->file_data->ref_list);
6a4d07cd 6616 spin_unlock(&ctx->file_data->lock);
05589553 6617 percpu_ref_get(&ctx->file_data->refs);
6b06314c
JA
6618 return ret;
6619}
6620
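/*
 * Registration-side sketch matching io_sqe_files_register(): a sparse table
 * where unused slots are -1 (allowed above), registered in one call. Raw
 * syscall form; liburing's io_uring_register_files() is the usual wrapper.
 * The helper name and table size are illustrative only.
 */
static int register_sparse_file_table(int ring_fd, int real_fd)
{
	int fds[8];
	int i;

	for (i = 0; i < 8; i++)
		fds[i] = -1;		/* sparse slots, filled in later via update */
	fds[0] = real_fd;

	return syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES,
		       fds, 8);
}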
c3a31e60
JA
6621static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
6622 int index)
6623{
6624#if defined(CONFIG_UNIX)
6625 struct sock *sock = ctx->ring_sock->sk;
6626 struct sk_buff_head *head = &sock->sk_receive_queue;
6627 struct sk_buff *skb;
6628
6629 /*
6630 * See if we can merge this file into an existing skb SCM_RIGHTS
6631 * file set. If there's no room, fall back to allocating a new skb
6632 * and filling it in.
6633 */
6634 spin_lock_irq(&head->lock);
6635 skb = skb_peek(head);
6636 if (skb) {
6637 struct scm_fp_list *fpl = UNIXCB(skb).fp;
6638
6639 if (fpl->count < SCM_MAX_FD) {
6640 __skb_unlink(skb, head);
6641 spin_unlock_irq(&head->lock);
6642 fpl->fp[fpl->count] = get_file(file);
6643 unix_inflight(fpl->user, fpl->fp[fpl->count]);
6644 fpl->count++;
6645 spin_lock_irq(&head->lock);
6646 __skb_queue_head(head, skb);
6647 } else {
6648 skb = NULL;
6649 }
6650 }
6651 spin_unlock_irq(&head->lock);
6652
6653 if (skb) {
6654 fput(file);
6655 return 0;
6656 }
6657
6658 return __io_sqe_files_scm(ctx, 1, index);
6659#else
6660 return 0;
6661#endif
6662}
6663
a5318d3c 6664static int io_queue_file_removal(struct fixed_file_data *data,
05589553 6665 struct file *file)
05f3fb3c 6666{
a5318d3c 6667 struct io_file_put *pfile;
05589553
XW
6668 struct percpu_ref *refs = data->cur_refs;
6669 struct fixed_file_ref_node *ref_node;
05f3fb3c 6670
05f3fb3c 6671 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
a5318d3c
HD
6672 if (!pfile)
6673 return -ENOMEM;
05f3fb3c 6674
05589553 6675 ref_node = container_of(refs, struct fixed_file_ref_node, refs);
05f3fb3c 6676 pfile->file = file;
05589553
XW
6677 list_add(&pfile->list, &ref_node->file_list);
6678
a5318d3c 6679 return 0;
05f3fb3c
JA
6680}
6681
6682static int __io_sqe_files_update(struct io_ring_ctx *ctx,
6683 struct io_uring_files_update *up,
6684 unsigned nr_args)
6685{
6686 struct fixed_file_data *data = ctx->file_data;
05589553 6687 struct fixed_file_ref_node *ref_node;
05f3fb3c 6688 struct file *file;
c3a31e60
JA
6689 __s32 __user *fds;
6690 int fd, i, err;
6691 __u32 done;
05589553 6692 bool needs_switch = false;
c3a31e60 6693
05f3fb3c 6694 if (check_add_overflow(up->offset, nr_args, &done))
c3a31e60
JA
6695 return -EOVERFLOW;
6696 if (done > ctx->nr_user_files)
6697 return -EINVAL;
6698
05589553
XW
6699 ref_node = alloc_fixed_file_ref_node(ctx);
6700 if (IS_ERR(ref_node))
6701 return PTR_ERR(ref_node);
6702
c3a31e60 6703 done = 0;
05f3fb3c 6704 fds = u64_to_user_ptr(up->fds);
c3a31e60 6705 while (nr_args) {
65e19f54
JA
6706 struct fixed_file_table *table;
6707 unsigned index;
6708
c3a31e60
JA
6709 err = 0;
6710 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
6711 err = -EFAULT;
6712 break;
6713 }
05f3fb3c
JA
6714 i = array_index_nospec(up->offset, ctx->nr_user_files);
6715 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
65e19f54
JA
6716 index = i & IORING_FILE_TABLE_MASK;
6717 if (table->files[index]) {
05f3fb3c 6718 file = io_file_from_index(ctx, index);
a5318d3c
HD
6719 err = io_queue_file_removal(data, file);
6720 if (err)
6721 break;
65e19f54 6722 table->files[index] = NULL;
05589553 6723 needs_switch = true;
c3a31e60
JA
6724 }
6725 if (fd != -1) {
c3a31e60
JA
6726 file = fget(fd);
6727 if (!file) {
6728 err = -EBADF;
6729 break;
6730 }
6731 /*
6732 * Don't allow io_uring instances to be registered. If
6733 * UNIX isn't enabled, then this causes a reference
6734 * cycle and this instance can never get freed. If UNIX
6735 * is enabled we'll handle it just fine, but there's
6736 * still no point in allowing a ring fd as it doesn't
6737 * support regular read/write anyway.
6738 */
6739 if (file->f_op == &io_uring_fops) {
6740 fput(file);
6741 err = -EBADF;
6742 break;
6743 }
65e19f54 6744 table->files[index] = file;
c3a31e60
JA
6745 err = io_sqe_file_register(ctx, file, i);
6746 if (err)
6747 break;
6748 }
6749 nr_args--;
6750 done++;
05f3fb3c
JA
6751 up->offset++;
6752 }
6753
05589553
XW
6754 if (needs_switch) {
6755 percpu_ref_kill(data->cur_refs);
6a4d07cd 6756 spin_lock(&data->lock);
05589553
XW
6757 list_add(&ref_node->node, &data->ref_list);
6758 data->cur_refs = &ref_node->refs;
6a4d07cd 6759 spin_unlock(&data->lock);
05589553
XW
6760 percpu_ref_get(&ctx->file_data->refs);
6761 } else
6762 destroy_fixed_file_ref_node(ref_node);
c3a31e60
JA
6763
6764 return done ? done : err;
6765}
05589553 6766
05f3fb3c
JA
6767static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
6768 unsigned nr_args)
6769{
6770 struct io_uring_files_update up;
6771
6772 if (!ctx->file_data)
6773 return -ENXIO;
6774 if (!nr_args)
6775 return -EINVAL;
6776 if (copy_from_user(&up, arg, sizeof(up)))
6777 return -EFAULT;
6778 if (up.resv)
6779 return -EINVAL;
6780
6781 return __io_sqe_files_update(ctx, &up, nr_args);
6782}
c3a31e60 6783
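/*
 * Update-side sketch for __io_sqe_files_update(): replace one slot of the
 * registered table, with -1 meaning "just remove the existing file". Raw
 * IORING_REGISTER_FILES_UPDATE form, using the uapi struct above; the
 * helper name is illustrative only.
 */
static int update_registered_file(int ring_fd, unsigned slot, int new_fd)
{
	struct io_uring_files_update up = {
		.offset	= slot,
		.fds	= (unsigned long) &new_fd,	/* array of one entry */
	};

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES_UPDATE, &up, 1);
}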
e9fd9396 6784static void io_free_work(struct io_wq_work *work)
7d723065
JA
6785{
6786 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6787
e9fd9396 6788 /* Consider that io_steal_work() relies on this ref */
7d723065
JA
6789 io_put_req(req);
6790}
6791
24369c2e
PB
6792static int io_init_wq_offload(struct io_ring_ctx *ctx,
6793 struct io_uring_params *p)
6794{
6795 struct io_wq_data data;
6796 struct fd f;
6797 struct io_ring_ctx *ctx_attach;
6798 unsigned int concurrency;
6799 int ret = 0;
6800
6801 data.user = ctx->user;
e9fd9396 6802 data.free_work = io_free_work;
f5fa38c5 6803 data.do_work = io_wq_submit_work;
24369c2e
PB
6804
6805 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
6806 /* Do QD, or 4 * CPUS, whatever is smallest */
6807 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
6808
6809 ctx->io_wq = io_wq_create(concurrency, &data);
6810 if (IS_ERR(ctx->io_wq)) {
6811 ret = PTR_ERR(ctx->io_wq);
6812 ctx->io_wq = NULL;
6813 }
6814 return ret;
6815 }
6816
6817 f = fdget(p->wq_fd);
6818 if (!f.file)
6819 return -EBADF;
6820
6821 if (f.file->f_op != &io_uring_fops) {
6822 ret = -EINVAL;
6823 goto out_fput;
6824 }
6825
6826 ctx_attach = f.file->private_data;
6827 /* @io_wq is protected by holding the fd */
6828 if (!io_wq_get(ctx_attach->io_wq, &data)) {
6829 ret = -EINVAL;
6830 goto out_fput;
6831 }
6832
6833 ctx->io_wq = ctx_attach->io_wq;
6834out_fput:
6835 fdput(f);
6836 return ret;
6837}
6838
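/*
 * Setup-side sketch for the IORING_SETUP_ATTACH_WQ path handled in
 * io_init_wq_offload() above: a second ring shares the async backend of an
 * existing one by passing that ring's fd in params->wq_fd. Raw
 * io_uring_setup() form; the helper name is illustrative only.
 */
static int create_attached_ring(int existing_ring_fd, unsigned entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_ATTACH_WQ;
	p.wq_fd = existing_ring_fd;	/* must itself be an io_uring fd */

	return syscall(__NR_io_uring_setup, entries, &p);
}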
6c271ce2
JA
6839static int io_sq_offload_start(struct io_ring_ctx *ctx,
6840 struct io_uring_params *p)
2b188cc1
JA
6841{
6842 int ret;
6843
6844 mmgrab(current->mm);
6845 ctx->sqo_mm = current->mm;
6846
6c271ce2 6847 if (ctx->flags & IORING_SETUP_SQPOLL) {
3ec482d1
JA
6848 ret = -EPERM;
6849 if (!capable(CAP_SYS_ADMIN))
6850 goto err;
6851
917257da
JA
6852 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
6853 if (!ctx->sq_thread_idle)
6854 ctx->sq_thread_idle = HZ;
6855
6c271ce2 6856 if (p->flags & IORING_SETUP_SQ_AFF) {
44a9bd18 6857 int cpu = p->sq_thread_cpu;
6c271ce2 6858
917257da 6859 ret = -EINVAL;
44a9bd18
JA
6860 if (cpu >= nr_cpu_ids)
6861 goto err;
7889f44d 6862 if (!cpu_online(cpu))
917257da
JA
6863 goto err;
6864
6c271ce2
JA
6865 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
6866 ctx, cpu,
6867 "io_uring-sq");
6868 } else {
6869 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
6870 "io_uring-sq");
6871 }
6872 if (IS_ERR(ctx->sqo_thread)) {
6873 ret = PTR_ERR(ctx->sqo_thread);
6874 ctx->sqo_thread = NULL;
6875 goto err;
6876 }
6877 wake_up_process(ctx->sqo_thread);
6878 } else if (p->flags & IORING_SETUP_SQ_AFF) {
6879 /* Can't have SQ_AFF without SQPOLL */
6880 ret = -EINVAL;
6881 goto err;
6882 }
6883
24369c2e
PB
6884 ret = io_init_wq_offload(ctx, p);
6885 if (ret)
2b188cc1 6886 goto err;
2b188cc1
JA
6887
6888 return 0;
6889err:
54a91f3b 6890 io_finish_async(ctx);
2b188cc1
JA
6891 mmdrop(ctx->sqo_mm);
6892 ctx->sqo_mm = NULL;
6893 return ret;
6894}
6895
6896static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
6897{
6898 atomic_long_sub(nr_pages, &user->locked_vm);
6899}
6900
6901static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
6902{
6903 unsigned long page_limit, cur_pages, new_pages;
6904
6905 /* Don't allow more pages than we can safely lock */
6906 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
6907
6908 do {
6909 cur_pages = atomic_long_read(&user->locked_vm);
6910 new_pages = cur_pages + nr_pages;
6911 if (new_pages > page_limit)
6912 return -ENOMEM;
6913 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
6914 new_pages) != cur_pages);
6915
6916 return 0;
6917}
6918
6919static void io_mem_free(void *ptr)
6920{
52e04ef4
MR
6921 struct page *page;
6922
6923 if (!ptr)
6924 return;
2b188cc1 6925
52e04ef4 6926 page = virt_to_head_page(ptr);
2b188cc1
JA
6927 if (put_page_testzero(page))
6928 free_compound_page(page);
6929}
6930
6931static void *io_mem_alloc(size_t size)
6932{
6933 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
6934 __GFP_NORETRY;
6935
6936 return (void *) __get_free_pages(gfp_flags, get_order(size));
6937}
6938
75b28aff
HV
6939static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
6940 size_t *sq_offset)
6941{
6942 struct io_rings *rings;
6943 size_t off, sq_array_size;
6944
6945 off = struct_size(rings, cqes, cq_entries);
6946 if (off == SIZE_MAX)
6947 return SIZE_MAX;
6948
6949#ifdef CONFIG_SMP
6950 off = ALIGN(off, SMP_CACHE_BYTES);
6951 if (off == 0)
6952 return SIZE_MAX;
6953#endif
6954
6955 sq_array_size = array_size(sizeof(u32), sq_entries);
6956 if (sq_array_size == SIZE_MAX)
6957 return SIZE_MAX;
6958
6959 if (check_add_overflow(off, sq_array_size, &off))
6960 return SIZE_MAX;
6961
6962 if (sq_offset)
6963 *sq_offset = off;
6964
6965 return off;
6966}
6967
2b188cc1
JA
6968static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
6969{
75b28aff 6970 size_t pages;
2b188cc1 6971
75b28aff
HV
6972 pages = (size_t)1 << get_order(
6973 rings_size(sq_entries, cq_entries, NULL));
6974 pages += (size_t)1 << get_order(
6975 array_size(sizeof(struct io_uring_sqe), sq_entries));
2b188cc1 6976
75b28aff 6977 return pages;
2b188cc1
JA
6978}
6979
edafccee
JA
6980static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
6981{
6982 int i, j;
6983
6984 if (!ctx->user_bufs)
6985 return -ENXIO;
6986
6987 for (i = 0; i < ctx->nr_user_bufs; i++) {
6988 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
6989
6990 for (j = 0; j < imu->nr_bvecs; j++)
f1f6a7dd 6991 unpin_user_page(imu->bvec[j].bv_page);
edafccee
JA
6992
6993 if (ctx->account_mem)
6994 io_unaccount_mem(ctx->user, imu->nr_bvecs);
d4ef6475 6995 kvfree(imu->bvec);
edafccee
JA
6996 imu->nr_bvecs = 0;
6997 }
6998
6999 kfree(ctx->user_bufs);
7000 ctx->user_bufs = NULL;
7001 ctx->nr_user_bufs = 0;
7002 return 0;
7003}
7004
7005static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
7006 void __user *arg, unsigned index)
7007{
7008 struct iovec __user *src;
7009
7010#ifdef CONFIG_COMPAT
7011 if (ctx->compat) {
7012 struct compat_iovec __user *ciovs;
7013 struct compat_iovec ciov;
7014
7015 ciovs = (struct compat_iovec __user *) arg;
7016 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
7017 return -EFAULT;
7018
d55e5f5b 7019 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
edafccee
JA
7020 dst->iov_len = ciov.iov_len;
7021 return 0;
7022 }
7023#endif
7024 src = (struct iovec __user *) arg;
7025 if (copy_from_user(dst, &src[index], sizeof(*dst)))
7026 return -EFAULT;
7027 return 0;
7028}
7029
7030static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
7031 unsigned nr_args)
7032{
7033 struct vm_area_struct **vmas = NULL;
7034 struct page **pages = NULL;
7035 int i, j, got_pages = 0;
7036 int ret = -EINVAL;
7037
7038 if (ctx->user_bufs)
7039 return -EBUSY;
7040 if (!nr_args || nr_args > UIO_MAXIOV)
7041 return -EINVAL;
7042
7043 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
7044 GFP_KERNEL);
7045 if (!ctx->user_bufs)
7046 return -ENOMEM;
7047
7048 for (i = 0; i < nr_args; i++) {
7049 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7050 unsigned long off, start, end, ubuf;
7051 int pret, nr_pages;
7052 struct iovec iov;
7053 size_t size;
7054
7055 ret = io_copy_iov(ctx, &iov, arg, i);
7056 if (ret)
a278682d 7057 goto err;
edafccee
JA
7058
7059 /*
7060 * Don't impose further limits on the size and buffer
7061 * constraints here, we'll -EINVAL later when IO is
7062 * submitted if they are wrong.
7063 */
7064 ret = -EFAULT;
7065 if (!iov.iov_base || !iov.iov_len)
7066 goto err;
7067
7068 /* arbitrary limit, but we need something */
7069 if (iov.iov_len > SZ_1G)
7070 goto err;
7071
7072 ubuf = (unsigned long) iov.iov_base;
7073 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
7074 start = ubuf >> PAGE_SHIFT;
7075 nr_pages = end - start;
7076
7077 if (ctx->account_mem) {
7078 ret = io_account_mem(ctx->user, nr_pages);
7079 if (ret)
7080 goto err;
7081 }
7082
7083 ret = 0;
7084 if (!pages || nr_pages > got_pages) {
a8c73c1a
DE
7085 kvfree(vmas);
7086 kvfree(pages);
d4ef6475 7087 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
edafccee 7088 GFP_KERNEL);
d4ef6475 7089 vmas = kvmalloc_array(nr_pages,
edafccee
JA
7090 sizeof(struct vm_area_struct *),
7091 GFP_KERNEL);
7092 if (!pages || !vmas) {
7093 ret = -ENOMEM;
7094 if (ctx->account_mem)
7095 io_unaccount_mem(ctx->user, nr_pages);
7096 goto err;
7097 }
7098 got_pages = nr_pages;
7099 }
7100
d4ef6475 7101 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
edafccee
JA
7102 GFP_KERNEL);
7103 ret = -ENOMEM;
7104 if (!imu->bvec) {
7105 if (ctx->account_mem)
7106 io_unaccount_mem(ctx->user, nr_pages);
7107 goto err;
7108 }
7109
7110 ret = 0;
7111 down_read(&current->mm->mmap_sem);
2113b05d 7112 pret = pin_user_pages(ubuf, nr_pages,
932f4a63
IW
7113 FOLL_WRITE | FOLL_LONGTERM,
7114 pages, vmas);
edafccee
JA
7115 if (pret == nr_pages) {
7116 /* don't support file backed memory */
7117 for (j = 0; j < nr_pages; j++) {
7118 struct vm_area_struct *vma = vmas[j];
7119
7120 if (vma->vm_file &&
7121 !is_file_hugepages(vma->vm_file)) {
7122 ret = -EOPNOTSUPP;
7123 break;
7124 }
7125 }
7126 } else {
7127 ret = pret < 0 ? pret : -EFAULT;
7128 }
7129 up_read(&current->mm->mmap_sem);
7130 if (ret) {
7131 /*
7132 * if we did partial map, or found file backed vmas,
7133 * release any pages we did get
7134 */
27c4d3a3 7135 if (pret > 0)
f1f6a7dd 7136 unpin_user_pages(pages, pret);
edafccee
JA
7137 if (ctx->account_mem)
7138 io_unaccount_mem(ctx->user, nr_pages);
d4ef6475 7139 kvfree(imu->bvec);
edafccee
JA
7140 goto err;
7141 }
7142
7143 off = ubuf & ~PAGE_MASK;
7144 size = iov.iov_len;
7145 for (j = 0; j < nr_pages; j++) {
7146 size_t vec_len;
7147
7148 vec_len = min_t(size_t, size, PAGE_SIZE - off);
7149 imu->bvec[j].bv_page = pages[j];
7150 imu->bvec[j].bv_len = vec_len;
7151 imu->bvec[j].bv_offset = off;
7152 off = 0;
7153 size -= vec_len;
7154 }
7155 /* store original address for later verification */
7156 imu->ubuf = ubuf;
7157 imu->len = iov.iov_len;
7158 imu->nr_bvecs = nr_pages;
7159
7160 ctx->nr_user_bufs++;
7161 }
d4ef6475
MR
7162 kvfree(pages);
7163 kvfree(vmas);
edafccee
JA
7164 return 0;
7165err:
d4ef6475
MR
7166 kvfree(pages);
7167 kvfree(vmas);
edafccee
JA
7168 io_sqe_buffer_unregister(ctx);
7169 return ret;
7170}
7171
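/*
 * Registration-side sketch matching io_sqe_buffer_register(): pin one
 * anonymous buffer and later address it by index through the *_FIXED
 * opcodes. Raw syscall form; liburing's io_uring_register_buffers() wraps
 * the first call. The helper name and single-entry table are illustrative.
 */
static int register_one_buffer(int ring_fd, void *buf, size_t len,
			       struct io_uring_sqe *sqe, int fd)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	int ret;

	ret = syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS,
		      &iov, 1);
	if (ret < 0)
		return -errno;

	/* consume the registration: read into the fixed buffer by index */
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ_FIXED;
	sqe->fd = fd;
	sqe->addr = (unsigned long) buf;	/* must fall inside the registered range */
	sqe->len = len;
	sqe->buf_index = 0;			/* index into the registered table */
	return 0;
}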
9b402849
JA
7172static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
7173{
7174 __s32 __user *fds = arg;
7175 int fd;
7176
7177 if (ctx->cq_ev_fd)
7178 return -EBUSY;
7179
7180 if (copy_from_user(&fd, fds, sizeof(*fds)))
7181 return -EFAULT;
7182
7183 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
7184 if (IS_ERR(ctx->cq_ev_fd)) {
7185 int ret = PTR_ERR(ctx->cq_ev_fd);
7186 ctx->cq_ev_fd = NULL;
7187 return ret;
7188 }
7189
7190 return 0;
7191}
7192
7193static int io_eventfd_unregister(struct io_ring_ctx *ctx)
7194{
7195 if (ctx->cq_ev_fd) {
7196 eventfd_ctx_put(ctx->cq_ev_fd);
7197 ctx->cq_ev_fd = NULL;
7198 return 0;
7199 }
7200
7201 return -ENXIO;
7202}
7203
5a2e745d
JA
7204static int __io_destroy_buffers(int id, void *p, void *data)
7205{
7206 struct io_ring_ctx *ctx = data;
7207 struct io_buffer *buf = p;
7208
067524e9 7209 __io_remove_buffers(ctx, buf, id, -1U);
5a2e745d
JA
7210 return 0;
7211}
7212
7213static void io_destroy_buffers(struct io_ring_ctx *ctx)
7214{
7215 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
7216 idr_destroy(&ctx->io_buffer_idr);
7217}
7218
2b188cc1
JA
7219static void io_ring_ctx_free(struct io_ring_ctx *ctx)
7220{
6b06314c 7221 io_finish_async(ctx);
2b188cc1
JA
7222 if (ctx->sqo_mm)
7223 mmdrop(ctx->sqo_mm);
def596e9
JA
7224
7225 io_iopoll_reap_events(ctx);
edafccee 7226 io_sqe_buffer_unregister(ctx);
6b06314c 7227 io_sqe_files_unregister(ctx);
9b402849 7228 io_eventfd_unregister(ctx);
5a2e745d 7229 io_destroy_buffers(ctx);
41726c9a 7230 idr_destroy(&ctx->personality_idr);
def596e9 7231
2b188cc1 7232#if defined(CONFIG_UNIX)
355e8d26
EB
7233 if (ctx->ring_sock) {
7234 ctx->ring_sock->file = NULL; /* so that iput() is called */
2b188cc1 7235 sock_release(ctx->ring_sock);
355e8d26 7236 }
2b188cc1
JA
7237#endif
7238
75b28aff 7239 io_mem_free(ctx->rings);
2b188cc1 7240 io_mem_free(ctx->sq_sqes);
2b188cc1
JA
7241
7242 percpu_ref_exit(&ctx->refs);
7243 if (ctx->account_mem)
7244 io_unaccount_mem(ctx->user,
7245 ring_pages(ctx->sq_entries, ctx->cq_entries));
7246 free_uid(ctx->user);
181e448d 7247 put_cred(ctx->creds);
78076bb6 7248 kfree(ctx->cancel_hash);
0ddf92e8 7249 kmem_cache_free(req_cachep, ctx->fallback_req);
2b188cc1
JA
7250 kfree(ctx);
7251}
7252
7253static __poll_t io_uring_poll(struct file *file, poll_table *wait)
7254{
7255 struct io_ring_ctx *ctx = file->private_data;
7256 __poll_t mask = 0;
7257
7258 poll_wait(file, &ctx->cq_wait, wait);
4f7067c3
SB
7259 /*
7260 * synchronizes with barrier from wq_has_sleeper call in
7261 * io_commit_cqring
7262 */
2b188cc1 7263 smp_rmb();
75b28aff
HV
7264 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
7265 ctx->rings->sq_ring_entries)
2b188cc1 7266 mask |= EPOLLOUT | EPOLLWRNORM;
63e5d81f 7267 if (io_cqring_events(ctx, false))
2b188cc1
JA
7268 mask |= EPOLLIN | EPOLLRDNORM;
7269
7270 return mask;
7271}
7272
7273static int io_uring_fasync(int fd, struct file *file, int on)
7274{
7275 struct io_ring_ctx *ctx = file->private_data;
7276
7277 return fasync_helper(fd, file, on, &ctx->cq_fasync);
7278}
7279
071698e1
JA
7280static int io_remove_personalities(int id, void *p, void *data)
7281{
7282 struct io_ring_ctx *ctx = data;
7283 const struct cred *cred;
7284
7285 cred = idr_remove(&ctx->personality_idr, id);
7286 if (cred)
7287 put_cred(cred);
7288 return 0;
7289}
7290
85faa7b8
JA
7291static void io_ring_exit_work(struct work_struct *work)
7292{
7293 struct io_ring_ctx *ctx;
7294
7295 ctx = container_of(work, struct io_ring_ctx, exit_work);
7296 if (ctx->rings)
7297 io_cqring_overflow_flush(ctx, true);
7298
0f158b4c 7299 wait_for_completion(&ctx->ref_comp);
85faa7b8
JA
7300 io_ring_ctx_free(ctx);
7301}
7302
2b188cc1
JA
7303static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
7304{
7305 mutex_lock(&ctx->uring_lock);
7306 percpu_ref_kill(&ctx->refs);
7307 mutex_unlock(&ctx->uring_lock);
7308
5262f567 7309 io_kill_timeouts(ctx);
221c5eb2 7310 io_poll_remove_all(ctx);
561fb04a
JA
7311
7312 if (ctx->io_wq)
7313 io_wq_cancel_all(ctx->io_wq);
7314
def596e9 7315 io_iopoll_reap_events(ctx);
15dff286
JA
7316 /* if we failed setting up the ctx, we might not have any rings */
7317 if (ctx->rings)
7318 io_cqring_overflow_flush(ctx, true);
071698e1 7319 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
85faa7b8
JA
7320 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
7321 queue_work(system_wq, &ctx->exit_work);
2b188cc1
JA
7322}
7323
7324static int io_uring_release(struct inode *inode, struct file *file)
7325{
7326 struct io_ring_ctx *ctx = file->private_data;
7327
7328 file->private_data = NULL;
7329 io_ring_ctx_wait_and_kill(ctx);
7330 return 0;
7331}
7332
fcb323cc
JA
7333static void io_uring_cancel_files(struct io_ring_ctx *ctx,
7334 struct files_struct *files)
7335{
fcb323cc 7336 while (!list_empty_careful(&ctx->inflight_list)) {
d8f1b971
XW
7337 struct io_kiocb *cancel_req = NULL, *req;
7338 DEFINE_WAIT(wait);
fcb323cc
JA
7339
7340 spin_lock_irq(&ctx->inflight_lock);
7341 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
768134d4
JA
7342 if (req->work.files != files)
7343 continue;
7344 /* req is being completed, ignore */
7345 if (!refcount_inc_not_zero(&req->refs))
7346 continue;
7347 cancel_req = req;
7348 break;
fcb323cc 7349 }
768134d4 7350 if (cancel_req)
fcb323cc 7351 prepare_to_wait(&ctx->inflight_wait, &wait,
768134d4 7352 TASK_UNINTERRUPTIBLE);
fcb323cc
JA
7353 spin_unlock_irq(&ctx->inflight_lock);
7354
768134d4
JA
7355 /* We need to keep going until we don't find a matching req */
7356 if (!cancel_req)
fcb323cc 7357 break;
2f6d9b9d 7358
2ca10259
JA
7359 if (cancel_req->flags & REQ_F_OVERFLOW) {
7360 spin_lock_irq(&ctx->completion_lock);
7361 list_del(&cancel_req->list);
7362 cancel_req->flags &= ~REQ_F_OVERFLOW;
7363 if (list_empty(&ctx->cq_overflow_list)) {
7364 clear_bit(0, &ctx->sq_check_overflow);
7365 clear_bit(0, &ctx->cq_check_overflow);
7366 }
7367 spin_unlock_irq(&ctx->completion_lock);
7368
7369 WRITE_ONCE(ctx->rings->cq_overflow,
7370 atomic_inc_return(&ctx->cached_cq_overflow));
7371
7372 /*
7373 * Put inflight ref and overflow ref. If that's
7374 * all we had, then we're done with this request.
7375 */
7376 if (refcount_sub_and_test(2, &cancel_req->refs)) {
4518a3cc 7377 io_free_req(cancel_req);
d8f1b971 7378 finish_wait(&ctx->inflight_wait, &wait);
2ca10259
JA
7379 continue;
7380 }
7b53d598
PB
7381 } else {
7382 io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
7383 io_put_req(cancel_req);
2ca10259
JA
7384 }
7385
fcb323cc 7386 schedule();
d8f1b971 7387 finish_wait(&ctx->inflight_wait, &wait);
fcb323cc
JA
7388 }
7389}
7390
7391static int io_uring_flush(struct file *file, void *data)
7392{
7393 struct io_ring_ctx *ctx = file->private_data;
7394
7395 io_uring_cancel_files(ctx, data);
6ab23144
JA
7396
7397 /*
7398 * If the task is going away, cancel work it may have pending
7399 */
7400 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
7401 io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
7402
fcb323cc
JA
7403 return 0;
7404}
7405
6c5c240e
RP
7406static void *io_uring_validate_mmap_request(struct file *file,
7407 loff_t pgoff, size_t sz)
2b188cc1 7408{
2b188cc1 7409 struct io_ring_ctx *ctx = file->private_data;
6c5c240e 7410 loff_t offset = pgoff << PAGE_SHIFT;
2b188cc1
JA
7411 struct page *page;
7412 void *ptr;
7413
7414 switch (offset) {
7415 case IORING_OFF_SQ_RING:
75b28aff
HV
7416 case IORING_OFF_CQ_RING:
7417 ptr = ctx->rings;
2b188cc1
JA
7418 break;
7419 case IORING_OFF_SQES:
7420 ptr = ctx->sq_sqes;
7421 break;
2b188cc1 7422 default:
6c5c240e 7423 return ERR_PTR(-EINVAL);
2b188cc1
JA
7424 }
7425
7426 page = virt_to_head_page(ptr);
a50b854e 7427 if (sz > page_size(page))
6c5c240e
RP
7428 return ERR_PTR(-EINVAL);
7429
7430 return ptr;
7431}
7432
7433#ifdef CONFIG_MMU
7434
7435static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7436{
7437 size_t sz = vma->vm_end - vma->vm_start;
7438 unsigned long pfn;
7439 void *ptr;
7440
7441 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
7442 if (IS_ERR(ptr))
7443 return PTR_ERR(ptr);
2b188cc1
JA
7444
7445 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
7446 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
7447}
7448
6c5c240e
RP
7449#else /* !CONFIG_MMU */
7450
7451static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7452{
7453 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
7454}
7455
7456static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
7457{
7458 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
7459}
7460
7461static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
7462 unsigned long addr, unsigned long len,
7463 unsigned long pgoff, unsigned long flags)
7464{
7465 void *ptr;
7466
7467 ptr = io_uring_validate_mmap_request(file, pgoff, len);
7468 if (IS_ERR(ptr))
7469 return PTR_ERR(ptr);
7470
7471 return (unsigned long) ptr;
7472}
7473
7474#endif /* !CONFIG_MMU */
7475
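/*
 * Editor's illustration, not part of the kernel source: a minimal
 * userspace sketch of the mapping that io_uring_validate_mmap_request()
 * vets above. The IORING_OFF_SQ_RING offset and the io_uring_params
 * layout come from the uapi header; error handling is elided and the
 * helper name is invented.
 */
#include <sys/mman.h>
#include <linux/io_uring.h>

static void *map_sq_ring(int ring_fd, const struct io_uring_params *p)
{
	/* SQ ring: head/tail/mask/entries/flags, then the sqe index array */
	size_t sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);

	return mmap(NULL, sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
}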
2b188cc1
JA
7476SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
7477 u32, min_complete, u32, flags, const sigset_t __user *, sig,
7478 size_t, sigsz)
7479{
7480 struct io_ring_ctx *ctx;
7481 long ret = -EBADF;
7482 int submitted = 0;
7483 struct fd f;
7484
b41e9852
JA
7485 if (current->task_works)
7486 task_work_run();
7487
6c271ce2 7488 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
2b188cc1
JA
7489 return -EINVAL;
7490
7491 f = fdget(fd);
7492 if (!f.file)
7493 return -EBADF;
7494
7495 ret = -EOPNOTSUPP;
7496 if (f.file->f_op != &io_uring_fops)
7497 goto out_fput;
7498
7499 ret = -ENXIO;
7500 ctx = f.file->private_data;
7501 if (!percpu_ref_tryget(&ctx->refs))
7502 goto out_fput;
7503
6c271ce2
JA
7504 /*
7505 * For SQ polling, the thread will do all submissions and completions.
7506 * Just return the requested submit count, and wake the thread if
7507 * we were asked to.
7508 */
b2a9eada 7509 ret = 0;
6c271ce2 7510 if (ctx->flags & IORING_SETUP_SQPOLL) {
c1edbf5f
JA
7511 if (!list_empty_careful(&ctx->cq_overflow_list))
7512 io_cqring_overflow_flush(ctx, false);
6c271ce2
JA
7513 if (flags & IORING_ENTER_SQ_WAKEUP)
7514 wake_up(&ctx->sqo_wait);
7515 submitted = to_submit;
b2a9eada 7516 } else if (to_submit) {
2b188cc1 7517 mutex_lock(&ctx->uring_lock);
0cdaf760 7518 submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
2b188cc1 7519 mutex_unlock(&ctx->uring_lock);
7c504e65
PB
7520
7521 if (submitted != to_submit)
7522 goto out;
2b188cc1
JA
7523 }
7524 if (flags & IORING_ENTER_GETEVENTS) {
def596e9
JA
7525 unsigned nr_events = 0;
7526
2b188cc1
JA
7527 min_complete = min(min_complete, ctx->cq_entries);
7528
32b2244a
XW
7529 /*
7530		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled,
7531		 * userspace doesn't need to poll for completion events
7532		 * itself; it can rely on io_sq_thread to do that polling,
7533		 * which reduces cpu usage and uring_lock contention.
7534 */
7535 if (ctx->flags & IORING_SETUP_IOPOLL &&
7536 !(ctx->flags & IORING_SETUP_SQPOLL)) {
def596e9 7537 ret = io_iopoll_check(ctx, &nr_events, min_complete);
def596e9
JA
7538 } else {
7539 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
7540 }
2b188cc1
JA
7541 }
7542
7c504e65 7543out:
6805b32e 7544 percpu_ref_put(&ctx->refs);
2b188cc1
JA
7545out_fput:
7546 fdput(f);
7547 return submitted ? submitted : ret;
7548}
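/*
 * Editor's sketch of the userspace half of the SQPOLL hand-off handled
 * above: after storing the new SQ tail (and issuing the required full
 * memory barrier, assumed done by the caller), the application only has
 * to enter the kernel when the poll thread has gone idle and raised
 * IORING_SQ_NEED_WAKEUP. sq_flags is assumed to point at the mapped
 * sq_off.flags word; the raw syscall stands in for a library wrapper.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static void sqpoll_wake_if_needed(int ring_fd, const unsigned *sq_flags)
{
	if (__atomic_load_n(sq_flags, __ATOMIC_ACQUIRE) & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}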
7549
bebdb65e 7550#ifdef CONFIG_PROC_FS
87ce955b
JA
7551static int io_uring_show_cred(int id, void *p, void *data)
7552{
7553 const struct cred *cred = p;
7554 struct seq_file *m = data;
7555 struct user_namespace *uns = seq_user_ns(m);
7556 struct group_info *gi;
7557 kernel_cap_t cap;
7558 unsigned __capi;
7559 int g;
7560
7561 seq_printf(m, "%5d\n", id);
7562 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
7563 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
7564 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
7565 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
7566 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
7567 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
7568 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
7569 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
7570 seq_puts(m, "\n\tGroups:\t");
7571 gi = cred->group_info;
7572 for (g = 0; g < gi->ngroups; g++) {
7573 seq_put_decimal_ull(m, g ? " " : "",
7574 from_kgid_munged(uns, gi->gid[g]));
7575 }
7576 seq_puts(m, "\n\tCapEff:\t");
7577 cap = cred->cap_effective;
7578 CAP_FOR_EACH_U32(__capi)
7579 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
7580 seq_putc(m, '\n');
7581 return 0;
7582}
7583
7584static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
7585{
7586 int i;
7587
7588 mutex_lock(&ctx->uring_lock);
7589 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
7590 for (i = 0; i < ctx->nr_user_files; i++) {
7591 struct fixed_file_table *table;
7592 struct file *f;
7593
7594 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7595 f = table->files[i & IORING_FILE_TABLE_MASK];
7596 if (f)
7597 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
7598 else
7599 seq_printf(m, "%5u: <none>\n", i);
7600 }
7601 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
7602 for (i = 0; i < ctx->nr_user_bufs; i++) {
7603 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
7604
7605 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
7606 (unsigned int) buf->len);
7607 }
7608 if (!idr_is_empty(&ctx->personality_idr)) {
7609 seq_printf(m, "Personalities:\n");
7610 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
7611 }
d7718a9d
JA
7612 seq_printf(m, "PollList:\n");
7613 spin_lock_irq(&ctx->completion_lock);
7614 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
7615 struct hlist_head *list = &ctx->cancel_hash[i];
7616 struct io_kiocb *req;
7617
7618 hlist_for_each_entry(req, list, hash_node)
7619 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
7620 req->task->task_works != NULL);
7621 }
7622 spin_unlock_irq(&ctx->completion_lock);
87ce955b
JA
7623 mutex_unlock(&ctx->uring_lock);
7624}
7625
7626static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
7627{
7628 struct io_ring_ctx *ctx = f->private_data;
7629
7630 if (percpu_ref_tryget(&ctx->refs)) {
7631 __io_uring_show_fdinfo(ctx, m);
7632 percpu_ref_put(&ctx->refs);
7633 }
7634}
bebdb65e 7635#endif
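/*
 * Editor's illustration of the resulting /proc/<pid>/fdinfo/<fd> text,
 * matching the seq_printf() calls above; all values are invented:
 *
 *	UserFiles:	2
 *	    0: foo.txt
 *	    1: <none>
 *	UserBufs:	1
 *	    0: 0x7f2a40000000/4096
 *	Personalities:
 *	    1
 *		Uid:	1000		1000		1000		1000
 *		Gid:	1000		1000		1000		1000
 *		Groups:	1000
 *		CapEff:	0000000000000000
 *	PollList:
 *	  op=6, task_works=0
 */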
87ce955b 7636
2b188cc1
JA
7637static const struct file_operations io_uring_fops = {
7638 .release = io_uring_release,
fcb323cc 7639 .flush = io_uring_flush,
2b188cc1 7640 .mmap = io_uring_mmap,
6c5c240e
RP
7641#ifndef CONFIG_MMU
7642 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
7643 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
7644#endif
2b188cc1
JA
7645 .poll = io_uring_poll,
7646 .fasync = io_uring_fasync,
bebdb65e 7647#ifdef CONFIG_PROC_FS
87ce955b 7648 .show_fdinfo = io_uring_show_fdinfo,
bebdb65e 7649#endif
2b188cc1
JA
7650};
7651
7652static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
7653 struct io_uring_params *p)
7654{
75b28aff
HV
7655 struct io_rings *rings;
7656 size_t size, sq_array_offset;
2b188cc1 7657
75b28aff
HV
7658 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
7659 if (size == SIZE_MAX)
7660 return -EOVERFLOW;
7661
7662 rings = io_mem_alloc(size);
7663 if (!rings)
2b188cc1
JA
7664 return -ENOMEM;
7665
75b28aff
HV
7666 ctx->rings = rings;
7667 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
7668 rings->sq_ring_mask = p->sq_entries - 1;
7669 rings->cq_ring_mask = p->cq_entries - 1;
7670 rings->sq_ring_entries = p->sq_entries;
7671 rings->cq_ring_entries = p->cq_entries;
7672 ctx->sq_mask = rings->sq_ring_mask;
7673 ctx->cq_mask = rings->cq_ring_mask;
7674 ctx->sq_entries = rings->sq_ring_entries;
7675 ctx->cq_entries = rings->cq_ring_entries;
2b188cc1
JA
7676
7677 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
eb065d30
JA
7678 if (size == SIZE_MAX) {
7679 io_mem_free(ctx->rings);
7680 ctx->rings = NULL;
2b188cc1 7681 return -EOVERFLOW;
eb065d30 7682 }
2b188cc1
JA
7683
7684 ctx->sq_sqes = io_mem_alloc(size);
eb065d30
JA
7685 if (!ctx->sq_sqes) {
7686 io_mem_free(ctx->rings);
7687 ctx->rings = NULL;
2b188cc1 7688 return -ENOMEM;
eb065d30 7689 }
2b188cc1 7690
2b188cc1
JA
7691 return 0;
7692}
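/*
 * Editor's note: both ring sizes are enforced to be powers of two at
 * setup, so the masks stored above let the kernel and the application
 * turn a free-running head/tail counter into an array index with a
 * single AND. Illustrative helper, names invented:
 */
static inline struct io_uring_sqe *sqe_at(struct io_uring_sqe *sqes,
					  unsigned int tail,
					  unsigned int ring_mask)
{
	return &sqes[tail & ring_mask];	/* cheaper than tail % entries */
}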
7693
7694/*
7695 * Allocate an anonymous fd; this is what constitutes the application-
7696 * visible backing of an io_uring instance. The application mmaps this
7697 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
7698 * we have to tie this fd to a socket for file garbage collection purposes.
7699 */
7700static int io_uring_get_fd(struct io_ring_ctx *ctx)
7701{
7702 struct file *file;
7703 int ret;
7704
7705#if defined(CONFIG_UNIX)
7706 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
7707 &ctx->ring_sock);
7708 if (ret)
7709 return ret;
7710#endif
7711
7712 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
7713 if (ret < 0)
7714 goto err;
7715
7716 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
7717 O_RDWR | O_CLOEXEC);
7718 if (IS_ERR(file)) {
7719 put_unused_fd(ret);
7720 ret = PTR_ERR(file);
7721 goto err;
7722 }
7723
7724#if defined(CONFIG_UNIX)
7725 ctx->ring_sock->file = file;
7726#endif
7727 fd_install(ret, file);
7728 return ret;
7729err:
7730#if defined(CONFIG_UNIX)
7731 sock_release(ctx->ring_sock);
7732 ctx->ring_sock = NULL;
7733#endif
7734 return ret;
7735}
7736
7f13657d
XW
7737static int io_uring_create(unsigned entries, struct io_uring_params *p,
7738 struct io_uring_params __user *params)
2b188cc1
JA
7739{
7740 struct user_struct *user = NULL;
7741 struct io_ring_ctx *ctx;
7742 bool account_mem;
7743 int ret;
7744
8110c1a6 7745 if (!entries)
2b188cc1 7746 return -EINVAL;
8110c1a6
JA
7747 if (entries > IORING_MAX_ENTRIES) {
7748 if (!(p->flags & IORING_SETUP_CLAMP))
7749 return -EINVAL;
7750 entries = IORING_MAX_ENTRIES;
7751 }
2b188cc1
JA
7752
7753 /*
7754 * Use twice as many entries for the CQ ring. It's possible for the
7755 * application to drive a higher depth than the size of the SQ ring,
7756 * since the sqes are only used at submission time. This allows for
33a107f0
JA
7757 * some flexibility to overcommit a bit. If the application has
7758 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
7759 * of CQ ring entries manually.
2b188cc1
JA
7760 */
7761 p->sq_entries = roundup_pow_of_two(entries);
33a107f0
JA
7762 if (p->flags & IORING_SETUP_CQSIZE) {
7763 /*
7764 * If IORING_SETUP_CQSIZE is set, we do the same roundup
7765 * to a power-of-two, if it isn't already. We do NOT impose
7766 * any cq vs sq ring sizing.
7767 */
8110c1a6 7768 if (p->cq_entries < p->sq_entries)
33a107f0 7769 return -EINVAL;
8110c1a6
JA
7770 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
7771 if (!(p->flags & IORING_SETUP_CLAMP))
7772 return -EINVAL;
7773 p->cq_entries = IORING_MAX_CQ_ENTRIES;
7774 }
33a107f0
JA
7775 p->cq_entries = roundup_pow_of_two(p->cq_entries);
7776 } else {
7777 p->cq_entries = 2 * p->sq_entries;
7778 }
2b188cc1
JA
7779
7780 user = get_uid(current_user());
7781 account_mem = !capable(CAP_IPC_LOCK);
7782
7783 if (account_mem) {
7784 ret = io_account_mem(user,
7785 ring_pages(p->sq_entries, p->cq_entries));
7786 if (ret) {
7787 free_uid(user);
7788 return ret;
7789 }
7790 }
7791
7792 ctx = io_ring_ctx_alloc(p);
7793 if (!ctx) {
7794 if (account_mem)
7795 io_unaccount_mem(user, ring_pages(p->sq_entries,
7796 p->cq_entries));
7797 free_uid(user);
7798 return -ENOMEM;
7799 }
7800 ctx->compat = in_compat_syscall();
7801 ctx->account_mem = account_mem;
7802 ctx->user = user;
0b8c0ec7 7803 ctx->creds = get_current_cred();
2b188cc1
JA
7804
7805 ret = io_allocate_scq_urings(ctx, p);
7806 if (ret)
7807 goto err;
7808
6c271ce2 7809 ret = io_sq_offload_start(ctx, p);
2b188cc1
JA
7810 if (ret)
7811 goto err;
7812
2b188cc1 7813 memset(&p->sq_off, 0, sizeof(p->sq_off));
75b28aff
HV
7814 p->sq_off.head = offsetof(struct io_rings, sq.head);
7815 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
7816 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
7817 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
7818 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
7819 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
7820 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
2b188cc1
JA
7821
7822 memset(&p->cq_off, 0, sizeof(p->cq_off));
75b28aff
HV
7823 p->cq_off.head = offsetof(struct io_rings, cq.head);
7824 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
7825 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
7826 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
7827 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
7828 p->cq_off.cqes = offsetof(struct io_rings, cqes);
0d9b5b3a 7829 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
ac90f249 7830
7f13657d
XW
7831 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
7832 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
7833 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
7834
7835 if (copy_to_user(params, p, sizeof(*p))) {
7836 ret = -EFAULT;
7837 goto err;
7838 }
044c1ab3
JA
7839 /*
7840 * Install ring fd as the very last thing, so we don't risk someone
7841 * having closed it before we finish setup
7842 */
7843 ret = io_uring_get_fd(ctx);
7844 if (ret < 0)
7845 goto err;
7846
c826bd7a 7847 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
2b188cc1
JA
7848 return ret;
7849err:
7850 io_ring_ctx_wait_and_kill(ctx);
7851 return ret;
7852}
7853
7854/*
7855 * Sets up an io_uring context and returns the fd. The application asks for a
7856 * ring size; we return the actual sq/cq ring sizes (among other things) in the
7857 * params structure passed in.
7858 */
7859static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
7860{
7861 struct io_uring_params p;
2b188cc1
JA
7862 int i;
7863
7864 if (copy_from_user(&p, params, sizeof(p)))
7865 return -EFAULT;
7866 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
7867 if (p.resv[i])
7868 return -EINVAL;
7869 }
7870
6c271ce2 7871 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8110c1a6 7872 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
24369c2e 7873 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
2b188cc1
JA
7874 return -EINVAL;
7875
7f13657d 7876 return io_uring_create(entries, &p, params);
2b188cc1
JA
7877}
7878
7879SYSCALL_DEFINE2(io_uring_setup, u32, entries,
7880 struct io_uring_params __user *, params)
7881{
7882 return io_uring_setup(entries, params);
7883}
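/*
 * Editor's usage sketch for the setup path above: ask for a CQ ring
 * larger than the SQ ring via IORING_SETUP_CQSIZE, and let the kernel
 * clamp oversized values instead of failing with -EINVAL. The raw
 * syscall stands in for a library wrapper; error handling is elided.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int setup_ring(unsigned int entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));	/* resv[] must be zero */
	p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
	p.cq_entries = 4 * entries;	/* >= sq entries, rounded up to a power of two */

	return syscall(__NR_io_uring_setup, entries, &p);
}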
7884
66f4af93
JA
7885static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
7886{
7887 struct io_uring_probe *p;
7888 size_t size;
7889 int i, ret;
7890
7891 size = struct_size(p, ops, nr_args);
7892 if (size == SIZE_MAX)
7893 return -EOVERFLOW;
7894 p = kzalloc(size, GFP_KERNEL);
7895 if (!p)
7896 return -ENOMEM;
7897
7898 ret = -EFAULT;
7899 if (copy_from_user(p, arg, size))
7900 goto out;
7901 ret = -EINVAL;
7902 if (memchr_inv(p, 0, size))
7903 goto out;
7904
7905 p->last_op = IORING_OP_LAST - 1;
7906 if (nr_args > IORING_OP_LAST)
7907 nr_args = IORING_OP_LAST;
7908
7909 for (i = 0; i < nr_args; i++) {
7910 p->ops[i].op = i;
7911 if (!io_op_defs[i].not_supported)
7912 p->ops[i].flags = IO_URING_OP_SUPPORTED;
7913 }
7914 p->ops_len = i;
7915
7916 ret = 0;
7917 if (copy_to_user(arg, p, size))
7918 ret = -EFAULT;
7919out:
7920 kfree(p);
7921 return ret;
7922}
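/*
 * Editor's sketch of driving io_probe() from userspace: allocate a
 * struct io_uring_probe with room for IORING_OP_LAST ops and ask which
 * opcodes this kernel supports. Raw syscall for illustration; includes
 * of <stdlib.h> etc. assumed.
 */
static int op_supported(int ring_fd, int op)
{
	size_t len = sizeof(struct io_uring_probe) +
		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *probe;
	int ok = 0;

	probe = calloc(1, len);
	if (!probe)
		return 0;
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		    probe, IORING_OP_LAST) == 0)
		ok = op <= probe->last_op &&
		     (probe->ops[op].flags & IO_URING_OP_SUPPORTED);
	free(probe);
	return ok;
}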
7923
071698e1
JA
7924static int io_register_personality(struct io_ring_ctx *ctx)
7925{
7926 const struct cred *creds = get_current_cred();
7927 int id;
7928
7929 id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
7930 USHRT_MAX, GFP_KERNEL);
7931 if (id < 0)
7932 put_cred(creds);
7933 return id;
7934}
7935
7936static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
7937{
7938 const struct cred *old_creds;
7939
7940 old_creds = idr_remove(&ctx->personality_idr, id);
7941 if (old_creds) {
7942 put_cred(old_creds);
7943 return 0;
7944 }
7945
7946 return -EINVAL;
7947}
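/*
 * Editor's sketch of the personality flow implemented above: register
 * the task's current credentials once, then stamp the returned id into
 * any sqe that should be issued with those creds rather than the
 * credentials in effect at submit time. Helper name and the prepared
 * sqe are illustrative.
 */
static int submit_with_creds(int ring_fd, struct io_uring_sqe *sqe)
{
	int id = syscall(__NR_io_uring_register, ring_fd,
			 IORING_REGISTER_PERSONALITY, NULL, 0);

	if (id < 0)
		return -1;
	sqe->personality = id;	/* issue this sqe with the registered creds */
	return id;
}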
7948
7949static bool io_register_op_must_quiesce(int op)
7950{
7951 switch (op) {
7952 case IORING_UNREGISTER_FILES:
7953 case IORING_REGISTER_FILES_UPDATE:
7954 case IORING_REGISTER_PROBE:
7955 case IORING_REGISTER_PERSONALITY:
7956 case IORING_UNREGISTER_PERSONALITY:
7957 return false;
7958 default:
7959 return true;
7960 }
7961}
7962
edafccee
JA
7963static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
7964 void __user *arg, unsigned nr_args)
b19062a5
JA
7965 __releases(ctx->uring_lock)
7966 __acquires(ctx->uring_lock)
edafccee
JA
7967{
7968 int ret;
7969
35fa71a0
JA
7970 /*
7971	 * We're inside the ring mutex; if the ref is already dying, then
7972 * someone else killed the ctx or is already going through
7973 * io_uring_register().
7974 */
7975 if (percpu_ref_is_dying(&ctx->refs))
7976 return -ENXIO;
7977
071698e1 7978 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 7979 percpu_ref_kill(&ctx->refs);
b19062a5 7980
05f3fb3c
JA
7981 /*
7982 * Drop uring mutex before waiting for references to exit. If
7983 * another thread is currently inside io_uring_enter() it might
7984 * need to grab the uring_lock to make progress. If we hold it
7985 * here across the drain wait, then we can deadlock. It's safe
7986 * to drop the mutex here, since no new references will come in
7987 * after we've killed the percpu ref.
7988 */
7989 mutex_unlock(&ctx->uring_lock);
0f158b4c 7990 ret = wait_for_completion_interruptible(&ctx->ref_comp);
05f3fb3c 7991 mutex_lock(&ctx->uring_lock);
c150368b
JA
7992 if (ret) {
7993 percpu_ref_resurrect(&ctx->refs);
7994 ret = -EINTR;
7995 goto out;
7996 }
05f3fb3c 7997 }
edafccee
JA
7998
7999 switch (opcode) {
8000 case IORING_REGISTER_BUFFERS:
8001 ret = io_sqe_buffer_register(ctx, arg, nr_args);
8002 break;
8003 case IORING_UNREGISTER_BUFFERS:
8004 ret = -EINVAL;
8005 if (arg || nr_args)
8006 break;
8007 ret = io_sqe_buffer_unregister(ctx);
8008 break;
6b06314c
JA
8009 case IORING_REGISTER_FILES:
8010 ret = io_sqe_files_register(ctx, arg, nr_args);
8011 break;
8012 case IORING_UNREGISTER_FILES:
8013 ret = -EINVAL;
8014 if (arg || nr_args)
8015 break;
8016 ret = io_sqe_files_unregister(ctx);
8017 break;
c3a31e60
JA
8018 case IORING_REGISTER_FILES_UPDATE:
8019 ret = io_sqe_files_update(ctx, arg, nr_args);
8020 break;
9b402849 8021 case IORING_REGISTER_EVENTFD:
f2842ab5 8022 case IORING_REGISTER_EVENTFD_ASYNC:
9b402849
JA
8023 ret = -EINVAL;
8024 if (nr_args != 1)
8025 break;
8026 ret = io_eventfd_register(ctx, arg);
f2842ab5
JA
8027 if (ret)
8028 break;
8029 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
8030 ctx->eventfd_async = 1;
8031 else
8032 ctx->eventfd_async = 0;
9b402849
JA
8033 break;
8034 case IORING_UNREGISTER_EVENTFD:
8035 ret = -EINVAL;
8036 if (arg || nr_args)
8037 break;
8038 ret = io_eventfd_unregister(ctx);
8039 break;
66f4af93
JA
8040 case IORING_REGISTER_PROBE:
8041 ret = -EINVAL;
8042 if (!arg || nr_args > 256)
8043 break;
8044 ret = io_probe(ctx, arg, nr_args);
8045 break;
071698e1
JA
8046 case IORING_REGISTER_PERSONALITY:
8047 ret = -EINVAL;
8048 if (arg || nr_args)
8049 break;
8050 ret = io_register_personality(ctx);
8051 break;
8052 case IORING_UNREGISTER_PERSONALITY:
8053 ret = -EINVAL;
8054 if (arg)
8055 break;
8056 ret = io_unregister_personality(ctx, nr_args);
8057 break;
edafccee
JA
8058 default:
8059 ret = -EINVAL;
8060 break;
8061 }
8062
071698e1 8063 if (io_register_op_must_quiesce(opcode)) {
05f3fb3c 8064 /* bring the ctx back to life */
05f3fb3c 8065 percpu_ref_reinit(&ctx->refs);
c150368b 8066out:
0f158b4c 8067 reinit_completion(&ctx->ref_comp);
05f3fb3c 8068 }
edafccee
JA
8069 return ret;
8070}
8071
8072SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
8073 void __user *, arg, unsigned int, nr_args)
8074{
8075 struct io_ring_ctx *ctx;
8076 long ret = -EBADF;
8077 struct fd f;
8078
8079 f = fdget(fd);
8080 if (!f.file)
8081 return -EBADF;
8082
8083 ret = -EOPNOTSUPP;
8084 if (f.file->f_op != &io_uring_fops)
8085 goto out_fput;
8086
8087 ctx = f.file->private_data;
8088
8089 mutex_lock(&ctx->uring_lock);
8090 ret = __io_uring_register(ctx, opcode, arg, nr_args);
8091 mutex_unlock(&ctx->uring_lock);
c826bd7a
DD
8092 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
8093 ctx->cq_ev_fd != NULL, ret);
edafccee
JA
8094out_fput:
8095 fdput(f);
8096 return ret;
8097}
8098
2b188cc1
JA
8099static int __init io_uring_init(void)
8100{
d7f62e82
SM
8101#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
8102 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
8103 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
8104} while (0)
8105
8106#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
8107 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
8108 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
8109 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
8110 BUILD_BUG_SQE_ELEM(1, __u8, flags);
8111 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
8112 BUILD_BUG_SQE_ELEM(4, __s32, fd);
8113 BUILD_BUG_SQE_ELEM(8, __u64, off);
8114 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
8115 BUILD_BUG_SQE_ELEM(16, __u64, addr);
7d67af2c 8116 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
d7f62e82
SM
8117 BUILD_BUG_SQE_ELEM(24, __u32, len);
8118 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
8119 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
8120 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
8121 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
8122 BUILD_BUG_SQE_ELEM(28, __u16, poll_events);
8123 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
8124 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
8125 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
8126 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
8127 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
8128 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
8129 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
8130 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
7d67af2c 8131 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
d7f62e82
SM
8132 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
8133 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
8134 BUILD_BUG_SQE_ELEM(42, __u16, personality);
7d67af2c 8135 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
d7f62e82 8136
d3656344 8137 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
84557871 8138 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
2b188cc1
JA
8139 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
8140 return 0;
8141};
8142__initcall(io_uring_init);