[mirror_ubuntu-jammy-kernel.git] fs/io_uring.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes and to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
41 */
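
/*
 * Illustrative sketch (not part of this file): the application-side CQ
 * consumption loop that the ordering rules above describe. "khead",
 * "ktail", "kring_mask" and "cqes" are hypothetical names for pointers
 * into the mmap()ed CQ ring; kernel-style barrier names are used for
 * brevity, while a real application would use the liburing or C11-atomic
 * equivalents.
 *
 *	unsigned head = *khead;
 *
 *	// acquire-load of the tail pairs with the kernel's release store
 *	while (head != smp_load_acquire(ktail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *kring_mask];
 *
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	// release-store of the head orders the entry loads before the
 *	// store, pairing with the control dependency in io_get_cqring()
 *	smp_store_release(khead, head);
 */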
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <linux/compat.h>
47 #include <net/compat.h>
48 #include <linux/refcount.h>
49 #include <linux/uio.h>
50 #include <linux/bits.h>
51
52 #include <linux/sched/signal.h>
53 #include <linux/fs.h>
54 #include <linux/file.h>
55 #include <linux/fdtable.h>
56 #include <linux/mm.h>
57 #include <linux/mman.h>
58 #include <linux/percpu.h>
59 #include <linux/slab.h>
60 #include <linux/kthread.h>
61 #include <linux/blkdev.h>
62 #include <linux/bvec.h>
63 #include <linux/net.h>
64 #include <net/sock.h>
65 #include <net/af_unix.h>
66 #include <net/scm.h>
67 #include <linux/anon_inodes.h>
68 #include <linux/sched/mm.h>
69 #include <linux/uaccess.h>
70 #include <linux/nospec.h>
71 #include <linux/sizes.h>
72 #include <linux/hugetlb.h>
73 #include <linux/highmem.h>
74 #include <linux/namei.h>
75 #include <linux/fsnotify.h>
76 #include <linux/fadvise.h>
77 #include <linux/eventpoll.h>
78 #include <linux/fs_struct.h>
79 #include <linux/splice.h>
80 #include <linux/task_work.h>
81 #include <linux/pagemap.h>
82
83 #define CREATE_TRACE_POINTS
84 #include <trace/events/io_uring.h>
85
86 #include <uapi/linux/io_uring.h>
87
88 #include "internal.h"
89 #include "io-wq.h"
90
91 #define IORING_MAX_ENTRIES 32768
92 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
93
94 /*
95 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
96 */
97 #define IORING_FILE_TABLE_SHIFT 9
98 #define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
99 #define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
100 #define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
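
/*
 * A registered (fixed) file index is resolved with a two-level table:
 * the upper bits pick a struct fixed_file_table and the low
 * IORING_FILE_TABLE_SHIFT bits pick the slot inside it. For example,
 * index 1000 maps to table 1000 >> 9 == 1, slot 1000 & 511 == 488.
 * Illustrative sketch of the lookup (this mirrors the helper used
 * further down in this file):
 *
 *	struct fixed_file_table *table;
 *
 *	table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
 *	file  = table->files[index & IORING_FILE_TABLE_MASK];
 */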
101
102 struct io_uring {
103 u32 head ____cacheline_aligned_in_smp;
104 u32 tail ____cacheline_aligned_in_smp;
105 };
106
107 /*
108 * This data is shared with the application through the mmap at offsets
109 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
110 *
111 * The offsets to the member fields are published through struct
112 * io_sqring_offsets when calling io_uring_setup.
113 */
114 struct io_rings {
115 /*
116 * Head and tail offsets into the ring; the offsets need to be
117 * masked to get valid indices.
118 *
119 * The kernel controls head of the sq ring and the tail of the cq ring,
120 * and the application controls tail of the sq ring and the head of the
121 * cq ring.
122 */
123 struct io_uring sq, cq;
124 /*
125 * Bitmasks to apply to head and tail offsets (constant, equals
126 * ring_entries - 1)
127 */
128 u32 sq_ring_mask, cq_ring_mask;
129 /* Ring sizes (constant, power of 2) */
130 u32 sq_ring_entries, cq_ring_entries;
131 /*
132 * Number of invalid entries dropped by the kernel due to
133 * invalid index stored in array
134 *
135 * Written by the kernel, shouldn't be modified by the
136 * application (i.e. get number of "new events" by comparing to
137 * cached value).
138 *
139 	 * After a new SQ head value has been read by the application, this
140 * counter includes all submissions that were dropped reaching
141 * the new SQ head (and possibly more).
142 */
143 u32 sq_dropped;
144 /*
145 * Runtime SQ flags
146 *
147 * Written by the kernel, shouldn't be modified by the
148 * application.
149 *
150 * The application needs a full memory barrier before checking
151 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
152 */
153 u32 sq_flags;
154 /*
155 * Runtime CQ flags
156 *
157 * Written by the application, shouldn't be modified by the
158 * kernel.
159 */
160 u32 cq_flags;
161 /*
162 * Number of completion events lost because the queue was full;
163 * this should be avoided by the application by making sure
164 * there are not more requests pending than there is space in
165 * the completion queue.
166 *
167 * Written by the kernel, shouldn't be modified by the
168 * application (i.e. get number of "new events" by comparing to
169 * cached value).
170 *
171 * As completion events come in out of order this counter is not
172 * ordered with any other data.
173 */
174 u32 cq_overflow;
175 /*
176 * Ring buffer of completion events.
177 *
178 * The kernel writes completion events fresh every time they are
179 * produced, so the application is allowed to modify pending
180 * entries.
181 */
182 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
183 };
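
/*
 * Illustrative sketch (not part of this file): the application-side SQ
 * submission step described in the header comment. The tail and mask come
 * from the sq fields above; the SQE array and index array live in the same
 * mmap()ed region (see the io_ring_ctx description further down). "sqes",
 * "sq_array", "ktail" and "kring_mask" are hypothetical names, and
 * kernel-style barrier names are used for brevity.
 *
 *	unsigned tail = *ktail;
 *	unsigned index = tail & *kring_mask;
 *
 *	fill_sqe(&sqes[index]);		// the SQE store...
 *	sq_array[index] = index;	// ...and the index slot store...
 *	// ...must be visible before the new tail; the release store pairs
 *	// with the kernel's smp_load_acquire() of sq.tail
 *	smp_store_release(ktail, tail + 1);
 */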
184
185 struct io_mapped_ubuf {
186 u64 ubuf;
187 size_t len;
188 struct bio_vec *bvec;
189 unsigned int nr_bvecs;
190 };
191
192 struct fixed_file_table {
193 struct file **files;
194 };
195
196 struct fixed_file_ref_node {
197 struct percpu_ref refs;
198 struct list_head node;
199 struct list_head file_list;
200 struct fixed_file_data *file_data;
201 struct llist_node llist;
202 };
203
204 struct fixed_file_data {
205 struct fixed_file_table *table;
206 struct io_ring_ctx *ctx;
207
208 struct percpu_ref *cur_refs;
209 struct percpu_ref refs;
210 struct completion done;
211 struct list_head ref_list;
212 spinlock_t lock;
213 };
214
215 struct io_buffer {
216 struct list_head list;
217 __u64 addr;
218 __s32 len;
219 __u16 bid;
220 };
221
222 struct io_ring_ctx {
223 struct {
224 struct percpu_ref refs;
225 } ____cacheline_aligned_in_smp;
226
227 struct {
228 unsigned int flags;
229 unsigned int compat: 1;
230 unsigned int limit_mem: 1;
231 unsigned int cq_overflow_flushed: 1;
232 unsigned int drain_next: 1;
233 unsigned int eventfd_async: 1;
234
235 /*
236 * Ring buffer of indices into array of io_uring_sqe, which is
237 * mmapped by the application using the IORING_OFF_SQES offset.
238 *
239 * This indirection could e.g. be used to assign fixed
240 * io_uring_sqe entries to operations and only submit them to
241 * the queue when needed.
242 *
243 * The kernel modifies neither the indices array nor the entries
244 * array.
245 */
246 u32 *sq_array;
247 unsigned cached_sq_head;
248 unsigned sq_entries;
249 unsigned sq_mask;
250 unsigned sq_thread_idle;
251 unsigned cached_sq_dropped;
252 atomic_t cached_cq_overflow;
253 unsigned long sq_check_overflow;
254
255 struct list_head defer_list;
256 struct list_head timeout_list;
257 struct list_head cq_overflow_list;
258
259 wait_queue_head_t inflight_wait;
260 struct io_uring_sqe *sq_sqes;
261 } ____cacheline_aligned_in_smp;
262
263 struct io_rings *rings;
264
265 /* IO offload */
266 struct io_wq *io_wq;
267 struct task_struct *sqo_thread; /* if using sq thread polling */
268 struct mm_struct *sqo_mm;
269 wait_queue_head_t sqo_wait;
270
271 /*
272 * If used, fixed file set. Writers must ensure that ->refs is dead,
273 * readers must ensure that ->refs is alive as long as the file* is
274 * used. Only updated through io_uring_register(2).
275 */
276 struct fixed_file_data *file_data;
277 unsigned nr_user_files;
278 int ring_fd;
279 struct file *ring_file;
280
281 /* if used, fixed mapped user buffers */
282 unsigned nr_user_bufs;
283 struct io_mapped_ubuf *user_bufs;
284
285 struct user_struct *user;
286
287 const struct cred *creds;
288
289 struct completion ref_comp;
290 struct completion sq_thread_comp;
291
292 /* if all else fails... */
293 struct io_kiocb *fallback_req;
294
295 #if defined(CONFIG_UNIX)
296 struct socket *ring_sock;
297 #endif
298
299 struct idr io_buffer_idr;
300
301 struct idr personality_idr;
302
303 struct {
304 unsigned cached_cq_tail;
305 unsigned cq_entries;
306 unsigned cq_mask;
307 atomic_t cq_timeouts;
308 unsigned long cq_check_overflow;
309 struct wait_queue_head cq_wait;
310 struct fasync_struct *cq_fasync;
311 struct eventfd_ctx *cq_ev_fd;
312 } ____cacheline_aligned_in_smp;
313
314 struct {
315 struct mutex uring_lock;
316 wait_queue_head_t wait;
317 } ____cacheline_aligned_in_smp;
318
319 struct {
320 spinlock_t completion_lock;
321
322 /*
323 * ->iopoll_list is protected by the ctx->uring_lock for
324 * io_uring instances that don't use IORING_SETUP_SQPOLL.
325 * For SQPOLL, only the single threaded io_sq_thread() will
326 * manipulate the list, hence no extra locking is needed there.
327 */
328 struct list_head iopoll_list;
329 struct hlist_head *cancel_hash;
330 unsigned cancel_hash_bits;
331 bool poll_multi_file;
332
333 spinlock_t inflight_lock;
334 struct list_head inflight_list;
335 } ____cacheline_aligned_in_smp;
336
337 struct delayed_work file_put_work;
338 struct llist_head file_put_llist;
339
340 struct work_struct exit_work;
341 };
342
343 /*
344 * First field must be the file pointer in all the
345 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
346 */
347 struct io_poll_iocb {
348 struct file *file;
349 union {
350 struct wait_queue_head *head;
351 u64 addr;
352 };
353 __poll_t events;
354 bool done;
355 bool canceled;
356 struct wait_queue_entry wait;
357 };
358
359 struct io_close {
360 struct file *file;
361 struct file *put_file;
362 int fd;
363 };
364
365 struct io_timeout_data {
366 struct io_kiocb *req;
367 struct hrtimer timer;
368 struct timespec64 ts;
369 enum hrtimer_mode mode;
370 };
371
372 struct io_accept {
373 struct file *file;
374 struct sockaddr __user *addr;
375 int __user *addr_len;
376 int flags;
377 unsigned long nofile;
378 };
379
380 struct io_sync {
381 struct file *file;
382 loff_t len;
383 loff_t off;
384 int flags;
385 int mode;
386 };
387
388 struct io_cancel {
389 struct file *file;
390 u64 addr;
391 };
392
393 struct io_timeout {
394 struct file *file;
395 u64 addr;
396 int flags;
397 u32 off;
398 u32 target_seq;
399 struct list_head list;
400 };
401
402 struct io_rw {
403 /* NOTE: kiocb has the file as the first member, so don't do it here */
404 struct kiocb kiocb;
405 u64 addr;
406 u64 len;
407 };
408
409 struct io_connect {
410 struct file *file;
411 struct sockaddr __user *addr;
412 int addr_len;
413 };
414
415 struct io_sr_msg {
416 struct file *file;
417 union {
418 struct user_msghdr __user *umsg;
419 void __user *buf;
420 };
421 int msg_flags;
422 int bgid;
423 size_t len;
424 struct io_buffer *kbuf;
425 };
426
427 struct io_open {
428 struct file *file;
429 int dfd;
430 struct filename *filename;
431 struct open_how how;
432 unsigned long nofile;
433 };
434
435 struct io_files_update {
436 struct file *file;
437 u64 arg;
438 u32 nr_args;
439 u32 offset;
440 };
441
442 struct io_fadvise {
443 struct file *file;
444 u64 offset;
445 u32 len;
446 u32 advice;
447 };
448
449 struct io_madvise {
450 struct file *file;
451 u64 addr;
452 u32 len;
453 u32 advice;
454 };
455
456 struct io_epoll {
457 struct file *file;
458 int epfd;
459 int op;
460 int fd;
461 struct epoll_event event;
462 };
463
464 struct io_splice {
465 struct file *file_out;
466 struct file *file_in;
467 loff_t off_out;
468 loff_t off_in;
469 u64 len;
470 unsigned int flags;
471 };
472
473 struct io_provide_buf {
474 struct file *file;
475 __u64 addr;
476 __s32 len;
477 __u32 bgid;
478 __u16 nbufs;
479 __u16 bid;
480 };
481
482 struct io_statx {
483 struct file *file;
484 int dfd;
485 unsigned int mask;
486 unsigned int flags;
487 const char __user *filename;
488 struct statx __user *buffer;
489 };
490
491 struct io_completion {
492 struct file *file;
493 struct list_head list;
494 int cflags;
495 };
496
497 struct io_async_connect {
498 struct sockaddr_storage address;
499 };
500
501 struct io_async_msghdr {
502 struct iovec fast_iov[UIO_FASTIOV];
503 struct iovec *iov;
504 struct sockaddr __user *uaddr;
505 struct msghdr msg;
506 struct sockaddr_storage addr;
507 };
508
509 struct io_async_rw {
510 struct iovec fast_iov[UIO_FASTIOV];
511 struct iovec *iov;
512 ssize_t nr_segs;
513 ssize_t size;
514 struct wait_page_queue wpq;
515 };
516
517 struct io_async_ctx {
518 union {
519 struct io_async_rw rw;
520 struct io_async_msghdr msg;
521 struct io_async_connect connect;
522 struct io_timeout_data timeout;
523 };
524 };
525
526 enum {
527 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
528 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
529 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
530 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
531 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
532 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
533
534 REQ_F_LINK_HEAD_BIT,
535 REQ_F_FAIL_LINK_BIT,
536 REQ_F_INFLIGHT_BIT,
537 REQ_F_CUR_POS_BIT,
538 REQ_F_NOWAIT_BIT,
539 REQ_F_LINK_TIMEOUT_BIT,
540 REQ_F_ISREG_BIT,
541 REQ_F_COMP_LOCKED_BIT,
542 REQ_F_NEED_CLEANUP_BIT,
543 REQ_F_OVERFLOW_BIT,
544 REQ_F_POLLED_BIT,
545 REQ_F_BUFFER_SELECTED_BIT,
546 REQ_F_NO_FILE_TABLE_BIT,
547 REQ_F_WORK_INITIALIZED_BIT,
548 REQ_F_TASK_PINNED_BIT,
549
550 /* not a real bit, just to check we're not overflowing the space */
551 __REQ_F_LAST_BIT,
552 };
553
554 enum {
555 /* ctx owns file */
556 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
557 /* drain existing IO first */
558 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
559 /* linked sqes */
560 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
561 /* doesn't sever on completion < 0 */
562 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
563 /* IOSQE_ASYNC */
564 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
565 /* IOSQE_BUFFER_SELECT */
566 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
567
568 /* head of a link */
569 REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
570 /* fail rest of links */
571 REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
572 /* on inflight list */
573 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
574 /* read/write uses file position */
575 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
576 /* must not punt to workers */
577 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
578 /* has linked timeout */
579 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
580 /* regular file */
581 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
582 /* completion under lock */
583 REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
584 /* needs cleanup */
585 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
586 /* in overflow list */
587 REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
588 /* already went through poll handler */
589 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
590 /* buffer already selected */
591 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
592 /* doesn't need file table for this request */
593 REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
594 /* io_wq_work is initialized */
595 REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
596 /* req->task is refcounted */
597 REQ_F_TASK_PINNED = BIT(REQ_F_TASK_PINNED_BIT),
598 };
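
/*
 * Because the first six REQ_F_* bits above share their bit positions with
 * the corresponding IOSQE_* flags, per-sqe flags can be folded straight
 * into req->flags without translation. Minimal sketch of the idea
 * (roughly what the request setup code later in this file does):
 *
 *	unsigned int sqe_flags = READ_ONCE(sqe->flags);
 *
 *	if (sqe_flags & ~SQE_VALID_FLAGS)	// reject unsupported bits
 *		return -EINVAL;
 *	req->flags |= sqe_flags;		// IOSQE_* bits land on REQ_F_* bits
 */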
599
600 struct async_poll {
601 struct io_poll_iocb poll;
602 struct io_poll_iocb *double_poll;
603 };
604
605 /*
606 * NOTE! Each of the iocb union members has the file pointer
607 * as the first entry in their struct definition. So you can
608 * access the file pointer through any of the sub-structs,
609 * or directly as just 'ki_filp' in this struct.
610 */
611 struct io_kiocb {
612 union {
613 struct file *file;
614 struct io_rw rw;
615 struct io_poll_iocb poll;
616 struct io_accept accept;
617 struct io_sync sync;
618 struct io_cancel cancel;
619 struct io_timeout timeout;
620 struct io_connect connect;
621 struct io_sr_msg sr_msg;
622 struct io_open open;
623 struct io_close close;
624 struct io_files_update files_update;
625 struct io_fadvise fadvise;
626 struct io_madvise madvise;
627 struct io_epoll epoll;
628 struct io_splice splice;
629 struct io_provide_buf pbuf;
630 struct io_statx statx;
631 /* use only after cleaning per-op data, see io_clean_op() */
632 struct io_completion compl;
633 };
634
635 struct io_async_ctx *io;
636 u8 opcode;
637 /* polled IO has completed */
638 u8 iopoll_completed;
639
640 u16 buf_index;
641 u32 result;
642
643 struct io_ring_ctx *ctx;
644 unsigned int flags;
645 refcount_t refs;
646 struct task_struct *task;
647 u64 user_data;
648
649 struct list_head link_list;
650
651 /*
652 * 1. used with ctx->iopoll_list with reads/writes
653 * 2. to track reqs with ->files (see io_op_def::file_table)
654 */
655 struct list_head inflight_entry;
656
657 struct percpu_ref *fixed_file_refs;
658 struct callback_head task_work;
659 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
660 struct hlist_node hash_node;
661 struct async_poll *apoll;
662 struct io_wq_work work;
663 };
664
665 struct io_defer_entry {
666 struct list_head list;
667 struct io_kiocb *req;
668 u32 seq;
669 };
670
671 #define IO_IOPOLL_BATCH 8
672
673 struct io_comp_state {
674 unsigned int nr;
675 struct list_head list;
676 struct io_ring_ctx *ctx;
677 };
678
679 struct io_submit_state {
680 struct blk_plug plug;
681
682 /*
683 * io_kiocb alloc cache
684 */
685 void *reqs[IO_IOPOLL_BATCH];
686 unsigned int free_reqs;
687
688 /*
689 * Batch completion logic
690 */
691 struct io_comp_state comp;
692
693 /*
694 * File reference cache
695 */
696 struct file *file;
697 unsigned int fd;
698 unsigned int has_refs;
699 unsigned int ios_left;
700 };
701
702 struct io_op_def {
703 /* needs req->io allocated for deferral/async */
704 unsigned async_ctx : 1;
705 /* needs current->mm setup, does mm access */
706 unsigned needs_mm : 1;
707 /* needs req->file assigned */
708 unsigned needs_file : 1;
709 /* don't fail if file grab fails */
710 unsigned needs_file_no_error : 1;
711 /* hash wq insertion if file is a regular file */
712 unsigned hash_reg_file : 1;
713 /* unbound wq insertion if file is a non-regular file */
714 unsigned unbound_nonreg_file : 1;
715 /* opcode is not supported by this kernel */
716 unsigned not_supported : 1;
717 /* needs file table */
718 unsigned file_table : 1;
719 /* needs ->fs */
720 unsigned needs_fs : 1;
721 /* set if opcode supports polled "wait" */
722 unsigned pollin : 1;
723 unsigned pollout : 1;
724 /* op supports buffer selection */
725 unsigned buffer_select : 1;
726 unsigned needs_fsize : 1;
727 };
728
729 static const struct io_op_def io_op_defs[] = {
730 [IORING_OP_NOP] = {},
731 [IORING_OP_READV] = {
732 .async_ctx = 1,
733 .needs_mm = 1,
734 .needs_file = 1,
735 .unbound_nonreg_file = 1,
736 .pollin = 1,
737 .buffer_select = 1,
738 },
739 [IORING_OP_WRITEV] = {
740 .async_ctx = 1,
741 .needs_mm = 1,
742 .needs_file = 1,
743 .hash_reg_file = 1,
744 .unbound_nonreg_file = 1,
745 .pollout = 1,
746 .needs_fsize = 1,
747 },
748 [IORING_OP_FSYNC] = {
749 .needs_file = 1,
750 },
751 [IORING_OP_READ_FIXED] = {
752 .needs_file = 1,
753 .unbound_nonreg_file = 1,
754 .pollin = 1,
755 },
756 [IORING_OP_WRITE_FIXED] = {
757 .needs_file = 1,
758 .hash_reg_file = 1,
759 .unbound_nonreg_file = 1,
760 .pollout = 1,
761 .needs_fsize = 1,
762 },
763 [IORING_OP_POLL_ADD] = {
764 .needs_file = 1,
765 .unbound_nonreg_file = 1,
766 },
767 [IORING_OP_POLL_REMOVE] = {},
768 [IORING_OP_SYNC_FILE_RANGE] = {
769 .needs_file = 1,
770 },
771 [IORING_OP_SENDMSG] = {
772 .async_ctx = 1,
773 .needs_mm = 1,
774 .needs_file = 1,
775 .unbound_nonreg_file = 1,
776 .needs_fs = 1,
777 .pollout = 1,
778 },
779 [IORING_OP_RECVMSG] = {
780 .async_ctx = 1,
781 .needs_mm = 1,
782 .needs_file = 1,
783 .unbound_nonreg_file = 1,
784 .needs_fs = 1,
785 .pollin = 1,
786 .buffer_select = 1,
787 },
788 [IORING_OP_TIMEOUT] = {
789 .async_ctx = 1,
790 .needs_mm = 1,
791 },
792 [IORING_OP_TIMEOUT_REMOVE] = {},
793 [IORING_OP_ACCEPT] = {
794 .needs_mm = 1,
795 .needs_file = 1,
796 .unbound_nonreg_file = 1,
797 .file_table = 1,
798 .pollin = 1,
799 },
800 [IORING_OP_ASYNC_CANCEL] = {},
801 [IORING_OP_LINK_TIMEOUT] = {
802 .async_ctx = 1,
803 .needs_mm = 1,
804 },
805 [IORING_OP_CONNECT] = {
806 .async_ctx = 1,
807 .needs_mm = 1,
808 .needs_file = 1,
809 .unbound_nonreg_file = 1,
810 .pollout = 1,
811 },
812 [IORING_OP_FALLOCATE] = {
813 .needs_file = 1,
814 .needs_fsize = 1,
815 },
816 [IORING_OP_OPENAT] = {
817 .file_table = 1,
818 .needs_fs = 1,
819 },
820 [IORING_OP_CLOSE] = {
821 .needs_file = 1,
822 .needs_file_no_error = 1,
823 .file_table = 1,
824 },
825 [IORING_OP_FILES_UPDATE] = {
826 .needs_mm = 1,
827 .file_table = 1,
828 },
829 [IORING_OP_STATX] = {
830 .needs_mm = 1,
831 .needs_fs = 1,
832 .file_table = 1,
833 },
834 [IORING_OP_READ] = {
835 .needs_mm = 1,
836 .needs_file = 1,
837 .unbound_nonreg_file = 1,
838 .pollin = 1,
839 .buffer_select = 1,
840 },
841 [IORING_OP_WRITE] = {
842 .needs_mm = 1,
843 .needs_file = 1,
844 .unbound_nonreg_file = 1,
845 .pollout = 1,
846 .needs_fsize = 1,
847 },
848 [IORING_OP_FADVISE] = {
849 .needs_file = 1,
850 },
851 [IORING_OP_MADVISE] = {
852 .needs_mm = 1,
853 },
854 [IORING_OP_SEND] = {
855 .needs_mm = 1,
856 .needs_file = 1,
857 .unbound_nonreg_file = 1,
858 .pollout = 1,
859 },
860 [IORING_OP_RECV] = {
861 .needs_mm = 1,
862 .needs_file = 1,
863 .unbound_nonreg_file = 1,
864 .pollin = 1,
865 .buffer_select = 1,
866 },
867 [IORING_OP_OPENAT2] = {
868 .file_table = 1,
869 .needs_fs = 1,
870 },
871 [IORING_OP_EPOLL_CTL] = {
872 .unbound_nonreg_file = 1,
873 .file_table = 1,
874 },
875 [IORING_OP_SPLICE] = {
876 .needs_file = 1,
877 .hash_reg_file = 1,
878 .unbound_nonreg_file = 1,
879 },
880 [IORING_OP_PROVIDE_BUFFERS] = {},
881 [IORING_OP_REMOVE_BUFFERS] = {},
882 [IORING_OP_TEE] = {
883 .needs_file = 1,
884 .hash_reg_file = 1,
885 .unbound_nonreg_file = 1,
886 },
887 };
888
889 enum io_mem_account {
890 ACCT_LOCKED,
891 ACCT_PINNED,
892 };
893
894 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
895 struct io_comp_state *cs);
896 static void io_cqring_fill_event(struct io_kiocb *req, long res);
897 static void io_put_req(struct io_kiocb *req);
898 static void io_double_put_req(struct io_kiocb *req);
899 static void __io_double_put_req(struct io_kiocb *req);
900 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
901 static void io_queue_linked_timeout(struct io_kiocb *req);
902 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
903 struct io_uring_files_update *ip,
904 unsigned nr_args);
905 static int io_prep_work_files(struct io_kiocb *req);
906 static void __io_clean_op(struct io_kiocb *req);
907 static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
908 int fd, struct file **out_file, bool fixed);
909 static void __io_queue_sqe(struct io_kiocb *req,
910 const struct io_uring_sqe *sqe,
911 struct io_comp_state *cs);
912 static void io_file_put_work(struct work_struct *work);
913
914 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
915 struct iovec **iovec, struct iov_iter *iter,
916 bool needs_lock);
917 static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
918 struct iovec *iovec, struct iovec *fast_iov,
919 struct iov_iter *iter);
920
921 static struct kmem_cache *req_cachep;
922
923 static const struct file_operations io_uring_fops;
924
925 struct sock *io_uring_get_socket(struct file *file)
926 {
927 #if defined(CONFIG_UNIX)
928 if (file->f_op == &io_uring_fops) {
929 struct io_ring_ctx *ctx = file->private_data;
930
931 return ctx->ring_sock->sk;
932 }
933 #endif
934 return NULL;
935 }
936 EXPORT_SYMBOL(io_uring_get_socket);
937
938 static void io_get_req_task(struct io_kiocb *req)
939 {
940 if (req->flags & REQ_F_TASK_PINNED)
941 return;
942 get_task_struct(req->task);
943 req->flags |= REQ_F_TASK_PINNED;
944 }
945
946 static inline void io_clean_op(struct io_kiocb *req)
947 {
948 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
949 __io_clean_op(req);
950 }
951
952 /* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
953 static void __io_put_req_task(struct io_kiocb *req)
954 {
955 if (req->flags & REQ_F_TASK_PINNED)
956 put_task_struct(req->task);
957 }
958
959 static void io_sq_thread_drop_mm(void)
960 {
961 struct mm_struct *mm = current->mm;
962
963 if (mm) {
964 kthread_unuse_mm(mm);
965 mmput(mm);
966 }
967 }
968
969 static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
970 {
971 if (!current->mm) {
972 if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
973 !mmget_not_zero(ctx->sqo_mm)))
974 return -EFAULT;
975 kthread_use_mm(ctx->sqo_mm);
976 }
977
978 return 0;
979 }
980
981 static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
982 struct io_kiocb *req)
983 {
984 if (!io_op_defs[req->opcode].needs_mm)
985 return 0;
986 return __io_sq_thread_acquire_mm(ctx);
987 }
988
989 static inline void req_set_fail_links(struct io_kiocb *req)
990 {
991 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
992 req->flags |= REQ_F_FAIL_LINK;
993 }
994
995 /*
996  * Note: io_req_init_async() must be called before the first time you
997 * touch any members of io_wq_work.
998 */
999 static inline void io_req_init_async(struct io_kiocb *req)
1000 {
1001 if (req->flags & REQ_F_WORK_INITIALIZED)
1002 return;
1003
1004 memset(&req->work, 0, sizeof(req->work));
1005 req->flags |= REQ_F_WORK_INITIALIZED;
1006 }
1007
1008 static inline bool io_async_submit(struct io_ring_ctx *ctx)
1009 {
1010 return ctx->flags & IORING_SETUP_SQPOLL;
1011 }
1012
1013 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1014 {
1015 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1016
1017 complete(&ctx->ref_comp);
1018 }
1019
1020 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1021 {
1022 return !req->timeout.off;
1023 }
1024
1025 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1026 {
1027 struct io_ring_ctx *ctx;
1028 int hash_bits;
1029
1030 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1031 if (!ctx)
1032 return NULL;
1033
1034 ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
1035 if (!ctx->fallback_req)
1036 goto err;
1037
1038 /*
1039 	 * Use 5 bits less than the max cq entries; that should give us around
1040 * 32 entries per hash list if totally full and uniformly spread.
1041 */
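	/*
	 * Worked example (illustrative): p->cq_entries == 4096 gives
	 * ilog2(4096) == 12, so hash_bits == 7 and the table has
	 * 1 << 7 == 128 buckets -- 4096 / 128 == 32 entries per bucket
	 * when completely full and uniformly spread.
	 */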
1042 hash_bits = ilog2(p->cq_entries);
1043 hash_bits -= 5;
1044 if (hash_bits <= 0)
1045 hash_bits = 1;
1046 ctx->cancel_hash_bits = hash_bits;
1047 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1048 GFP_KERNEL);
1049 if (!ctx->cancel_hash)
1050 goto err;
1051 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1052
1053 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
1054 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1055 goto err;
1056
1057 ctx->flags = p->flags;
1058 init_waitqueue_head(&ctx->sqo_wait);
1059 init_waitqueue_head(&ctx->cq_wait);
1060 INIT_LIST_HEAD(&ctx->cq_overflow_list);
1061 init_completion(&ctx->ref_comp);
1062 init_completion(&ctx->sq_thread_comp);
1063 idr_init(&ctx->io_buffer_idr);
1064 idr_init(&ctx->personality_idr);
1065 mutex_init(&ctx->uring_lock);
1066 init_waitqueue_head(&ctx->wait);
1067 spin_lock_init(&ctx->completion_lock);
1068 INIT_LIST_HEAD(&ctx->iopoll_list);
1069 INIT_LIST_HEAD(&ctx->defer_list);
1070 INIT_LIST_HEAD(&ctx->timeout_list);
1071 init_waitqueue_head(&ctx->inflight_wait);
1072 spin_lock_init(&ctx->inflight_lock);
1073 INIT_LIST_HEAD(&ctx->inflight_list);
1074 INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
1075 init_llist_head(&ctx->file_put_llist);
1076 return ctx;
1077 err:
1078 if (ctx->fallback_req)
1079 kmem_cache_free(req_cachep, ctx->fallback_req);
1080 kfree(ctx->cancel_hash);
1081 kfree(ctx);
1082 return NULL;
1083 }
1084
1085 static bool req_need_defer(struct io_kiocb *req, u32 seq)
1086 {
1087 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1088 struct io_ring_ctx *ctx = req->ctx;
1089
1090 return seq != ctx->cached_cq_tail
1091 + atomic_read(&ctx->cached_cq_overflow);
1092 }
1093
1094 return false;
1095 }
1096
1097 static void __io_commit_cqring(struct io_ring_ctx *ctx)
1098 {
1099 struct io_rings *rings = ctx->rings;
1100
1101 /* order cqe stores with ring update */
1102 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
1103
1104 if (wq_has_sleeper(&ctx->cq_wait)) {
1105 wake_up_interruptible(&ctx->cq_wait);
1106 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1107 }
1108 }
1109
1110 static void io_req_clean_work(struct io_kiocb *req)
1111 {
1112 if (!(req->flags & REQ_F_WORK_INITIALIZED))
1113 return;
1114
1115 if (req->work.mm) {
1116 mmdrop(req->work.mm);
1117 req->work.mm = NULL;
1118 }
1119 if (req->work.creds) {
1120 put_cred(req->work.creds);
1121 req->work.creds = NULL;
1122 }
1123 if (req->work.fs) {
1124 struct fs_struct *fs = req->work.fs;
1125
1126 spin_lock(&req->work.fs->lock);
1127 if (--fs->users)
1128 fs = NULL;
1129 spin_unlock(&req->work.fs->lock);
1130 if (fs)
1131 free_fs_struct(fs);
1132 req->work.fs = NULL;
1133 }
1134 req->flags &= ~REQ_F_WORK_INITIALIZED;
1135 }
1136
1137 static void io_prep_async_work(struct io_kiocb *req)
1138 {
1139 const struct io_op_def *def = &io_op_defs[req->opcode];
1140
1141 io_req_init_async(req);
1142
1143 if (req->flags & REQ_F_ISREG) {
1144 if (def->hash_reg_file)
1145 io_wq_hash_work(&req->work, file_inode(req->file));
1146 } else {
1147 if (def->unbound_nonreg_file)
1148 req->work.flags |= IO_WQ_WORK_UNBOUND;
1149 }
1150 if (!req->work.mm && def->needs_mm) {
1151 mmgrab(current->mm);
1152 req->work.mm = current->mm;
1153 }
1154 if (!req->work.creds)
1155 req->work.creds = get_current_cred();
1156 if (!req->work.fs && def->needs_fs) {
1157 spin_lock(&current->fs->lock);
1158 if (!current->fs->in_exec) {
1159 req->work.fs = current->fs;
1160 req->work.fs->users++;
1161 } else {
1162 req->work.flags |= IO_WQ_WORK_CANCEL;
1163 }
1164 spin_unlock(&current->fs->lock);
1165 }
1166 if (def->needs_fsize)
1167 req->work.fsize = rlimit(RLIMIT_FSIZE);
1168 else
1169 req->work.fsize = RLIM_INFINITY;
1170 }
1171
1172 static void io_prep_async_link(struct io_kiocb *req)
1173 {
1174 struct io_kiocb *cur;
1175
1176 io_prep_async_work(req);
1177 if (req->flags & REQ_F_LINK_HEAD)
1178 list_for_each_entry(cur, &req->link_list, link_list)
1179 io_prep_async_work(cur);
1180 }
1181
1182 static void __io_queue_async_work(struct io_kiocb *req)
1183 {
1184 struct io_ring_ctx *ctx = req->ctx;
1185 struct io_kiocb *link = io_prep_linked_timeout(req);
1186
1187 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1188 &req->work, req->flags);
1189 io_wq_enqueue(ctx->io_wq, &req->work);
1190
1191 if (link)
1192 io_queue_linked_timeout(link);
1193 }
1194
1195 static void io_queue_async_work(struct io_kiocb *req)
1196 {
1197 /* init ->work of the whole link before punting */
1198 io_prep_async_link(req);
1199 __io_queue_async_work(req);
1200 }
1201
1202 static void io_kill_timeout(struct io_kiocb *req)
1203 {
1204 int ret;
1205
1206 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
1207 if (ret != -1) {
1208 atomic_set(&req->ctx->cq_timeouts,
1209 atomic_read(&req->ctx->cq_timeouts) + 1);
1210 list_del_init(&req->timeout.list);
1211 req->flags |= REQ_F_COMP_LOCKED;
1212 io_cqring_fill_event(req, 0);
1213 io_put_req(req);
1214 }
1215 }
1216
1217 static void io_kill_timeouts(struct io_ring_ctx *ctx)
1218 {
1219 struct io_kiocb *req, *tmp;
1220
1221 spin_lock_irq(&ctx->completion_lock);
1222 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list)
1223 io_kill_timeout(req);
1224 spin_unlock_irq(&ctx->completion_lock);
1225 }
1226
1227 static void __io_queue_deferred(struct io_ring_ctx *ctx)
1228 {
1229 do {
1230 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1231 struct io_defer_entry, list);
1232
1233 if (req_need_defer(de->req, de->seq))
1234 break;
1235 list_del_init(&de->list);
1236 /* punt-init is done before queueing for defer */
1237 __io_queue_async_work(de->req);
1238 kfree(de);
1239 } while (!list_empty(&ctx->defer_list));
1240 }
1241
1242 static void io_flush_timeouts(struct io_ring_ctx *ctx)
1243 {
1244 while (!list_empty(&ctx->timeout_list)) {
1245 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
1246 struct io_kiocb, timeout.list);
1247
1248 if (io_is_timeout_noseq(req))
1249 break;
1250 if (req->timeout.target_seq != ctx->cached_cq_tail
1251 - atomic_read(&ctx->cq_timeouts))
1252 break;
1253
1254 list_del_init(&req->timeout.list);
1255 io_kill_timeout(req);
1256 }
1257 }
1258
1259 static void io_commit_cqring(struct io_ring_ctx *ctx)
1260 {
1261 io_flush_timeouts(ctx);
1262 __io_commit_cqring(ctx);
1263
1264 if (unlikely(!list_empty(&ctx->defer_list)))
1265 __io_queue_deferred(ctx);
1266 }
1267
1268 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1269 {
1270 struct io_rings *rings = ctx->rings;
1271 unsigned tail;
1272
1273 tail = ctx->cached_cq_tail;
1274 /*
1275 * writes to the cq entry need to come after reading head; the
1276 * control dependency is enough as we're using WRITE_ONCE to
1277 * fill the cq entry
1278 */
1279 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
1280 return NULL;
1281
1282 ctx->cached_cq_tail++;
1283 return &rings->cqes[tail & ctx->cq_mask];
1284 }
1285
1286 static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1287 {
1288 if (!ctx->cq_ev_fd)
1289 return false;
1290 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1291 return false;
1292 if (!ctx->eventfd_async)
1293 return true;
1294 return io_wq_current_is_worker();
1295 }
1296
1297 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1298 {
1299 if (waitqueue_active(&ctx->wait))
1300 wake_up(&ctx->wait);
1301 if (waitqueue_active(&ctx->sqo_wait))
1302 wake_up(&ctx->sqo_wait);
1303 if (io_should_trigger_evfd(ctx))
1304 eventfd_signal(ctx->cq_ev_fd, 1);
1305 }
1306
1307 static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
1308 {
1309 if (list_empty(&ctx->cq_overflow_list)) {
1310 clear_bit(0, &ctx->sq_check_overflow);
1311 clear_bit(0, &ctx->cq_check_overflow);
1312 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1313 }
1314 }
1315
1316 /* Returns true if there are no backlogged entries after the flush */
1317 static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
1318 {
1319 struct io_rings *rings = ctx->rings;
1320 struct io_uring_cqe *cqe;
1321 struct io_kiocb *req;
1322 unsigned long flags;
1323 LIST_HEAD(list);
1324
1325 if (!force) {
1326 if (list_empty_careful(&ctx->cq_overflow_list))
1327 return true;
1328 if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
1329 rings->cq_ring_entries))
1330 return false;
1331 }
1332
1333 spin_lock_irqsave(&ctx->completion_lock, flags);
1334
1335 /* if force is set, the ring is going away. always drop after that */
1336 if (force)
1337 ctx->cq_overflow_flushed = 1;
1338
1339 cqe = NULL;
1340 while (!list_empty(&ctx->cq_overflow_list)) {
1341 cqe = io_get_cqring(ctx);
1342 if (!cqe && !force)
1343 break;
1344
1345 req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
1346 compl.list);
1347 list_move(&req->compl.list, &list);
1348 req->flags &= ~REQ_F_OVERFLOW;
1349 if (cqe) {
1350 WRITE_ONCE(cqe->user_data, req->user_data);
1351 WRITE_ONCE(cqe->res, req->result);
1352 WRITE_ONCE(cqe->flags, req->compl.cflags);
1353 } else {
1354 WRITE_ONCE(ctx->rings->cq_overflow,
1355 atomic_inc_return(&ctx->cached_cq_overflow));
1356 }
1357 }
1358
1359 io_commit_cqring(ctx);
1360 io_cqring_mark_overflow(ctx);
1361
1362 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1363 io_cqring_ev_posted(ctx);
1364
1365 while (!list_empty(&list)) {
1366 req = list_first_entry(&list, struct io_kiocb, compl.list);
1367 list_del(&req->compl.list);
1368 io_put_req(req);
1369 }
1370
1371 return cqe != NULL;
1372 }
1373
1374 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
1375 {
1376 struct io_ring_ctx *ctx = req->ctx;
1377 struct io_uring_cqe *cqe;
1378
1379 trace_io_uring_complete(ctx, req->user_data, res);
1380
1381 /*
1382 * If we can't get a cq entry, userspace overflowed the
1383 * submission (by quite a lot). Increment the overflow count in
1384 * the ring.
1385 */
1386 cqe = io_get_cqring(ctx);
1387 if (likely(cqe)) {
1388 WRITE_ONCE(cqe->user_data, req->user_data);
1389 WRITE_ONCE(cqe->res, res);
1390 WRITE_ONCE(cqe->flags, cflags);
1391 } else if (ctx->cq_overflow_flushed) {
1392 WRITE_ONCE(ctx->rings->cq_overflow,
1393 atomic_inc_return(&ctx->cached_cq_overflow));
1394 } else {
1395 if (list_empty(&ctx->cq_overflow_list)) {
1396 set_bit(0, &ctx->sq_check_overflow);
1397 set_bit(0, &ctx->cq_check_overflow);
1398 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
1399 }
1400 io_clean_op(req);
1401 req->flags |= REQ_F_OVERFLOW;
1402 req->result = res;
1403 req->compl.cflags = cflags;
1404 refcount_inc(&req->refs);
1405 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
1406 }
1407 }
1408
1409 static void io_cqring_fill_event(struct io_kiocb *req, long res)
1410 {
1411 __io_cqring_fill_event(req, res, 0);
1412 }
1413
1414 static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
1415 {
1416 struct io_ring_ctx *ctx = req->ctx;
1417 unsigned long flags;
1418
1419 spin_lock_irqsave(&ctx->completion_lock, flags);
1420 __io_cqring_fill_event(req, res, cflags);
1421 io_commit_cqring(ctx);
1422 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1423
1424 io_cqring_ev_posted(ctx);
1425 }
1426
1427 static void io_submit_flush_completions(struct io_comp_state *cs)
1428 {
1429 struct io_ring_ctx *ctx = cs->ctx;
1430
1431 spin_lock_irq(&ctx->completion_lock);
1432 while (!list_empty(&cs->list)) {
1433 struct io_kiocb *req;
1434
1435 req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
1436 list_del(&req->compl.list);
1437 __io_cqring_fill_event(req, req->result, req->compl.cflags);
1438 if (!(req->flags & REQ_F_LINK_HEAD)) {
1439 req->flags |= REQ_F_COMP_LOCKED;
1440 io_put_req(req);
1441 } else {
1442 spin_unlock_irq(&ctx->completion_lock);
1443 io_put_req(req);
1444 spin_lock_irq(&ctx->completion_lock);
1445 }
1446 }
1447 io_commit_cqring(ctx);
1448 spin_unlock_irq(&ctx->completion_lock);
1449
1450 io_cqring_ev_posted(ctx);
1451 cs->nr = 0;
1452 }
1453
1454 static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
1455 struct io_comp_state *cs)
1456 {
1457 if (!cs) {
1458 io_cqring_add_event(req, res, cflags);
1459 io_put_req(req);
1460 } else {
1461 io_clean_op(req);
1462 req->result = res;
1463 req->compl.cflags = cflags;
1464 list_add_tail(&req->compl.list, &cs->list);
1465 if (++cs->nr >= 32)
1466 io_submit_flush_completions(cs);
1467 }
1468 }
1469
1470 static void io_req_complete(struct io_kiocb *req, long res)
1471 {
1472 __io_req_complete(req, res, 0, NULL);
1473 }
1474
1475 static inline bool io_is_fallback_req(struct io_kiocb *req)
1476 {
1477 return req == (struct io_kiocb *)
1478 ((unsigned long) req->ctx->fallback_req & ~1UL);
1479 }
1480
1481 static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
1482 {
1483 struct io_kiocb *req;
1484
1485 req = ctx->fallback_req;
1486 if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
1487 return req;
1488
1489 return NULL;
1490 }
1491
1492 static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
1493 struct io_submit_state *state)
1494 {
1495 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1496 struct io_kiocb *req;
1497
1498 if (!state->free_reqs) {
1499 size_t sz;
1500 int ret;
1501
1502 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
1503 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
1504
1505 /*
1506 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1507 * retry single alloc to be on the safe side.
1508 */
1509 if (unlikely(ret <= 0)) {
1510 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1511 if (!state->reqs[0])
1512 goto fallback;
1513 ret = 1;
1514 }
1515 state->free_reqs = ret - 1;
1516 req = state->reqs[ret - 1];
1517 } else {
1518 state->free_reqs--;
1519 req = state->reqs[state->free_reqs];
1520 }
1521
1522 return req;
1523 fallback:
1524 return io_get_fallback_req(ctx);
1525 }
1526
1527 static inline void io_put_file(struct io_kiocb *req, struct file *file,
1528 bool fixed)
1529 {
1530 if (fixed)
1531 percpu_ref_put(req->fixed_file_refs);
1532 else
1533 fput(file);
1534 }
1535
1536 static void io_dismantle_req(struct io_kiocb *req)
1537 {
1538 io_clean_op(req);
1539
1540 if (req->io)
1541 kfree(req->io);
1542 if (req->file)
1543 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
1544 io_req_clean_work(req);
1545
1546 if (req->flags & REQ_F_INFLIGHT) {
1547 struct io_ring_ctx *ctx = req->ctx;
1548 unsigned long flags;
1549
1550 spin_lock_irqsave(&ctx->inflight_lock, flags);
1551 list_del(&req->inflight_entry);
1552 if (waitqueue_active(&ctx->inflight_wait))
1553 wake_up(&ctx->inflight_wait);
1554 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1555 }
1556 }
1557
1558 static void __io_free_req(struct io_kiocb *req)
1559 {
1560 struct io_ring_ctx *ctx;
1561
1562 io_dismantle_req(req);
1563 __io_put_req_task(req);
1564 ctx = req->ctx;
1565 if (likely(!io_is_fallback_req(req)))
1566 kmem_cache_free(req_cachep, req);
1567 else
1568 clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
1569 percpu_ref_put(&ctx->refs);
1570 }
1571
1572 static bool io_link_cancel_timeout(struct io_kiocb *req)
1573 {
1574 struct io_ring_ctx *ctx = req->ctx;
1575 int ret;
1576
1577 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
1578 if (ret != -1) {
1579 io_cqring_fill_event(req, -ECANCELED);
1580 io_commit_cqring(ctx);
1581 req->flags &= ~REQ_F_LINK_HEAD;
1582 io_put_req(req);
1583 return true;
1584 }
1585
1586 return false;
1587 }
1588
1589 static bool __io_kill_linked_timeout(struct io_kiocb *req)
1590 {
1591 struct io_kiocb *link;
1592 bool wake_ev;
1593
1594 if (list_empty(&req->link_list))
1595 return false;
1596 link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
1597 if (link->opcode != IORING_OP_LINK_TIMEOUT)
1598 return false;
1599
1600 list_del_init(&link->link_list);
1601 wake_ev = io_link_cancel_timeout(link);
1602 req->flags &= ~REQ_F_LINK_TIMEOUT;
1603 return wake_ev;
1604 }
1605
1606 static void io_kill_linked_timeout(struct io_kiocb *req)
1607 {
1608 struct io_ring_ctx *ctx = req->ctx;
1609 bool wake_ev;
1610
1611 if (!(req->flags & REQ_F_COMP_LOCKED)) {
1612 unsigned long flags;
1613
1614 spin_lock_irqsave(&ctx->completion_lock, flags);
1615 wake_ev = __io_kill_linked_timeout(req);
1616 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1617 } else {
1618 wake_ev = __io_kill_linked_timeout(req);
1619 }
1620
1621 if (wake_ev)
1622 io_cqring_ev_posted(ctx);
1623 }
1624
1625 static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
1626 {
1627 struct io_kiocb *nxt;
1628
1629 /*
1630 	 * The list should never be empty when we are called here. But it could
1631 	 * potentially happen if the chain is messed up, so check to be on the
1632 	 * safe side.
1633 */
1634 if (unlikely(list_empty(&req->link_list)))
1635 return NULL;
1636
1637 nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
1638 list_del_init(&req->link_list);
1639 if (!list_empty(&nxt->link_list))
1640 nxt->flags |= REQ_F_LINK_HEAD;
1641 return nxt;
1642 }
1643
1644 /*
1645 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
1646 */
1647 static void __io_fail_links(struct io_kiocb *req)
1648 {
1649 struct io_ring_ctx *ctx = req->ctx;
1650
1651 while (!list_empty(&req->link_list)) {
1652 struct io_kiocb *link = list_first_entry(&req->link_list,
1653 struct io_kiocb, link_list);
1654
1655 list_del_init(&link->link_list);
1656 trace_io_uring_fail_link(req, link);
1657
1658 io_cqring_fill_event(link, -ECANCELED);
1659 __io_double_put_req(link);
1660 req->flags &= ~REQ_F_LINK_TIMEOUT;
1661 }
1662
1663 io_commit_cqring(ctx);
1664 io_cqring_ev_posted(ctx);
1665 }
1666
1667 static void io_fail_links(struct io_kiocb *req)
1668 {
1669 struct io_ring_ctx *ctx = req->ctx;
1670
1671 if (!(req->flags & REQ_F_COMP_LOCKED)) {
1672 unsigned long flags;
1673
1674 spin_lock_irqsave(&ctx->completion_lock, flags);
1675 __io_fail_links(req);
1676 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1677 } else {
1678 __io_fail_links(req);
1679 }
1680
1681 io_cqring_ev_posted(ctx);
1682 }
1683
1684 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
1685 {
1686 req->flags &= ~REQ_F_LINK_HEAD;
1687 if (req->flags & REQ_F_LINK_TIMEOUT)
1688 io_kill_linked_timeout(req);
1689
1690 /*
1691 * If LINK is set, we have dependent requests in this chain. If we
1692 * didn't fail this request, queue the first one up, moving any other
1693 * dependencies to the next request. In case of failure, fail the rest
1694 * of the chain.
1695 */
1696 if (likely(!(req->flags & REQ_F_FAIL_LINK)))
1697 return io_req_link_next(req);
1698 io_fail_links(req);
1699 return NULL;
1700 }
1701
1702 static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
1703 {
1704 if (likely(!(req->flags & REQ_F_LINK_HEAD)))
1705 return NULL;
1706 return __io_req_find_next(req);
1707 }
1708
1709 static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
1710 {
1711 struct task_struct *tsk = req->task;
1712 struct io_ring_ctx *ctx = req->ctx;
1713 int ret, notify = TWA_RESUME;
1714
1715 /*
1716 * SQPOLL kernel thread doesn't need notification, just a wakeup.
1717 * If we're not using an eventfd, then TWA_RESUME is always fine,
1718 * as we won't have dependencies between request completions for
1719 * other kernel wait conditions.
1720 */
1721 if (ctx->flags & IORING_SETUP_SQPOLL)
1722 notify = 0;
1723 else if (ctx->cq_ev_fd)
1724 notify = TWA_SIGNAL;
1725
1726 ret = task_work_add(tsk, cb, notify);
1727 if (!ret)
1728 wake_up_process(tsk);
1729 return ret;
1730 }
1731
1732 static void __io_req_task_cancel(struct io_kiocb *req, int error)
1733 {
1734 struct io_ring_ctx *ctx = req->ctx;
1735
1736 spin_lock_irq(&ctx->completion_lock);
1737 io_cqring_fill_event(req, error);
1738 io_commit_cqring(ctx);
1739 spin_unlock_irq(&ctx->completion_lock);
1740
1741 io_cqring_ev_posted(ctx);
1742 req_set_fail_links(req);
1743 io_double_put_req(req);
1744 }
1745
1746 static void io_req_task_cancel(struct callback_head *cb)
1747 {
1748 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
1749
1750 __io_req_task_cancel(req, -ECANCELED);
1751 }
1752
1753 static void __io_req_task_submit(struct io_kiocb *req)
1754 {
1755 struct io_ring_ctx *ctx = req->ctx;
1756
1757 if (!__io_sq_thread_acquire_mm(ctx)) {
1758 mutex_lock(&ctx->uring_lock);
1759 __io_queue_sqe(req, NULL, NULL);
1760 mutex_unlock(&ctx->uring_lock);
1761 } else {
1762 __io_req_task_cancel(req, -EFAULT);
1763 }
1764 }
1765
1766 static void io_req_task_submit(struct callback_head *cb)
1767 {
1768 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
1769
1770 __io_req_task_submit(req);
1771 }
1772
1773 static void io_req_task_queue(struct io_kiocb *req)
1774 {
1775 int ret;
1776
1777 init_task_work(&req->task_work, io_req_task_submit);
1778
1779 ret = io_req_task_work_add(req, &req->task_work);
1780 if (unlikely(ret)) {
1781 struct task_struct *tsk;
1782
1783 init_task_work(&req->task_work, io_req_task_cancel);
1784 tsk = io_wq_get_task(req->ctx->io_wq);
1785 task_work_add(tsk, &req->task_work, 0);
1786 wake_up_process(tsk);
1787 }
1788 }
1789
1790 static void io_queue_next(struct io_kiocb *req)
1791 {
1792 struct io_kiocb *nxt = io_req_find_next(req);
1793
1794 if (nxt)
1795 io_req_task_queue(nxt);
1796 }
1797
1798 static void io_free_req(struct io_kiocb *req)
1799 {
1800 io_queue_next(req);
1801 __io_free_req(req);
1802 }
1803
1804 struct req_batch {
1805 void *reqs[IO_IOPOLL_BATCH];
1806 int to_free;
1807
1808 struct task_struct *task;
1809 int task_refs;
1810 };
1811
1812 static inline void io_init_req_batch(struct req_batch *rb)
1813 {
1814 rb->to_free = 0;
1815 rb->task_refs = 0;
1816 rb->task = NULL;
1817 }
1818
1819 static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
1820 struct req_batch *rb)
1821 {
1822 kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
1823 percpu_ref_put_many(&ctx->refs, rb->to_free);
1824 rb->to_free = 0;
1825 }
1826
1827 static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
1828 struct req_batch *rb)
1829 {
1830 if (rb->to_free)
1831 __io_req_free_batch_flush(ctx, rb);
1832 if (rb->task) {
1833 put_task_struct_many(rb->task, rb->task_refs);
1834 rb->task = NULL;
1835 }
1836 }
1837
1838 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
1839 {
1840 if (unlikely(io_is_fallback_req(req))) {
1841 io_free_req(req);
1842 return;
1843 }
1844 if (req->flags & REQ_F_LINK_HEAD)
1845 io_queue_next(req);
1846
1847 if (req->flags & REQ_F_TASK_PINNED) {
1848 if (req->task != rb->task) {
1849 if (rb->task)
1850 put_task_struct_many(rb->task, rb->task_refs);
1851 rb->task = req->task;
1852 rb->task_refs = 0;
1853 }
1854 rb->task_refs++;
1855 req->flags &= ~REQ_F_TASK_PINNED;
1856 }
1857
1858 io_dismantle_req(req);
1859 rb->reqs[rb->to_free++] = req;
1860 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
1861 __io_req_free_batch_flush(req->ctx, rb);
1862 }
1863
1864 /*
1865 * Drop reference to request, return next in chain (if there is one) if this
1866 * was the last reference to this request.
1867 */
1868 static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
1869 {
1870 struct io_kiocb *nxt = NULL;
1871
1872 if (refcount_dec_and_test(&req->refs)) {
1873 nxt = io_req_find_next(req);
1874 __io_free_req(req);
1875 }
1876 return nxt;
1877 }
1878
1879 static void io_put_req(struct io_kiocb *req)
1880 {
1881 if (refcount_dec_and_test(&req->refs))
1882 io_free_req(req);
1883 }
1884
1885 static struct io_wq_work *io_steal_work(struct io_kiocb *req)
1886 {
1887 struct io_kiocb *nxt;
1888
1889 /*
1890 	 * A ref is owned by io-wq, in whose context we're running. So if that's
1891 	 * the last one, it's safe to steal the next work. False negatives are OK;
1892 	 * the work will just be re-punted async in io_put_work().
1893 */
1894 if (refcount_read(&req->refs) != 1)
1895 return NULL;
1896
1897 nxt = io_req_find_next(req);
1898 return nxt ? &nxt->work : NULL;
1899 }
1900
1901 /*
1902 * Must only be used if we don't need to care about links, usually from
1903 * within the completion handling itself.
1904 */
1905 static void __io_double_put_req(struct io_kiocb *req)
1906 {
1907 /* drop both submit and complete references */
1908 if (refcount_sub_and_test(2, &req->refs))
1909 __io_free_req(req);
1910 }
1911
1912 static void io_double_put_req(struct io_kiocb *req)
1913 {
1914 /* drop both submit and complete references */
1915 if (refcount_sub_and_test(2, &req->refs))
1916 io_free_req(req);
1917 }
1918
1919 static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
1920 {
1921 struct io_rings *rings = ctx->rings;
1922
1923 if (test_bit(0, &ctx->cq_check_overflow)) {
1924 /*
1925 		 * noflush == true is from the waitqueue handler; just ensure we wake
1926 		 * up the task, and the next invocation will flush the
1927 		 * entries. We cannot safely do it from here.
1928 */
1929 if (noflush && !list_empty(&ctx->cq_overflow_list))
1930 return -1U;
1931
1932 io_cqring_overflow_flush(ctx, false);
1933 }
1934
1935 /* See comment at the top of this file */
1936 smp_rmb();
1937 return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
1938 }
1939
1940 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
1941 {
1942 struct io_rings *rings = ctx->rings;
1943
1944 /* make sure SQ entry isn't read before tail */
1945 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
1946 }
1947
1948 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
1949 {
1950 unsigned int cflags;
1951
1952 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
1953 cflags |= IORING_CQE_F_BUFFER;
1954 req->flags &= ~REQ_F_BUFFER_SELECTED;
1955 kfree(kbuf);
1956 return cflags;
1957 }
1958
1959 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
1960 {
1961 struct io_buffer *kbuf;
1962
1963 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
1964 return io_put_kbuf(req, kbuf);
1965 }
1966
1967 static inline bool io_run_task_work(void)
1968 {
1969 if (current->task_works) {
1970 __set_current_state(TASK_RUNNING);
1971 task_work_run();
1972 return true;
1973 }
1974
1975 return false;
1976 }
1977
1978 static void io_iopoll_queue(struct list_head *again)
1979 {
1980 struct io_kiocb *req;
1981
1982 do {
1983 req = list_first_entry(again, struct io_kiocb, inflight_entry);
1984 list_del(&req->inflight_entry);
1985 __io_complete_rw(req, -EAGAIN, 0, NULL);
1986 } while (!list_empty(again));
1987 }
1988
1989 /*
1990 * Find and free completed poll iocbs
1991 */
1992 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
1993 struct list_head *done)
1994 {
1995 struct req_batch rb;
1996 struct io_kiocb *req;
1997 LIST_HEAD(again);
1998
1999 /* order with ->result store in io_complete_rw_iopoll() */
2000 smp_rmb();
2001
2002 io_init_req_batch(&rb);
2003 while (!list_empty(done)) {
2004 int cflags = 0;
2005
2006 req = list_first_entry(done, struct io_kiocb, inflight_entry);
2007 if (READ_ONCE(req->result) == -EAGAIN) {
2008 req->iopoll_completed = 0;
2009 list_move_tail(&req->inflight_entry, &again);
2010 continue;
2011 }
2012 list_del(&req->inflight_entry);
2013
2014 if (req->flags & REQ_F_BUFFER_SELECTED)
2015 cflags = io_put_rw_kbuf(req);
2016
2017 __io_cqring_fill_event(req, req->result, cflags);
2018 (*nr_events)++;
2019
2020 if (refcount_dec_and_test(&req->refs))
2021 io_req_free_batch(&rb, req);
2022 }
2023
2024 io_commit_cqring(ctx);
2025 if (ctx->flags & IORING_SETUP_SQPOLL)
2026 io_cqring_ev_posted(ctx);
2027 io_req_free_batch_finish(ctx, &rb);
2028
2029 if (!list_empty(&again))
2030 io_iopoll_queue(&again);
2031 }
2032
2033 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2034 long min)
2035 {
2036 struct io_kiocb *req, *tmp;
2037 LIST_HEAD(done);
2038 bool spin;
2039 int ret;
2040
2041 /*
2042 * Only spin for completions if we don't have multiple devices hanging
2043 * off our complete list, and we're under the requested amount.
2044 */
2045 spin = !ctx->poll_multi_file && *nr_events < min;
2046
2047 ret = 0;
2048 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
2049 struct kiocb *kiocb = &req->rw.kiocb;
2050
2051 /*
2052 * Move completed and retryable entries to our local lists.
2053 * If we find a request that requires polling, break out
2054 * and complete those lists first, if we have entries there.
2055 */
2056 if (READ_ONCE(req->iopoll_completed)) {
2057 list_move_tail(&req->inflight_entry, &done);
2058 continue;
2059 }
2060 if (!list_empty(&done))
2061 break;
2062
2063 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2064 if (ret < 0)
2065 break;
2066
2067 /* iopoll may have completed current req */
2068 if (READ_ONCE(req->iopoll_completed))
2069 list_move_tail(&req->inflight_entry, &done);
2070
2071 if (ret && spin)
2072 spin = false;
2073 ret = 0;
2074 }
2075
2076 if (!list_empty(&done))
2077 io_iopoll_complete(ctx, nr_events, &done);
2078
2079 return ret;
2080 }
2081
2082 /*
2083 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
2084 * non-spinning poll check - we'll still enter the driver poll loop, but only
2085 * as a non-spinning completion check.
2086 */
2087 static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2088 long min)
2089 {
2090 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
2091 int ret;
2092
2093 ret = io_do_iopoll(ctx, nr_events, min);
2094 if (ret < 0)
2095 return ret;
2096 if (*nr_events >= min)
2097 return 0;
2098 }
2099
2100 return 1;
2101 }
2102
2103 /*
2104 * We can't just wait for polled events to come to us, we have to actively
2105 * find and complete them.
2106 */
2107 static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
2108 {
2109 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2110 return;
2111
2112 mutex_lock(&ctx->uring_lock);
2113 while (!list_empty(&ctx->iopoll_list)) {
2114 unsigned int nr_events = 0;
2115
2116 io_do_iopoll(ctx, &nr_events, 0);
2117
2118 /* let it sleep and repeat later if we can't complete a request */
2119 if (nr_events == 0)
2120 break;
2121 /*
2122 * Ensure we allow local-to-the-cpu processing to take place;
2123 * in this case we need to ensure that we reap all events.
2124 * Also let task_work, etc. progress by releasing the mutex.
2125 */
2126 if (need_resched()) {
2127 mutex_unlock(&ctx->uring_lock);
2128 cond_resched();
2129 mutex_lock(&ctx->uring_lock);
2130 }
2131 }
2132 mutex_unlock(&ctx->uring_lock);
2133 }
2134
2135 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
2136 {
2137 unsigned int nr_events = 0;
2138 int iters = 0, ret = 0;
2139
2140 /*
2141 * We disallow the app entering submit/complete with polling, but we
2142 * still need to lock the ring to prevent racing with polled issue
2143 * that got punted to a workqueue.
2144 */
2145 mutex_lock(&ctx->uring_lock);
2146 do {
2147 /*
2148 * Don't enter poll loop if we already have events pending.
2149 * If we do, we can potentially be spinning for commands that
2150 * already triggered a CQE (e.g. in error).
2151 */
2152 if (io_cqring_events(ctx, false))
2153 break;
2154
2155 /*
2156 * If a submit got punted to a workqueue, we can have the
2157 * application entering polling for a command before it gets
2158 * issued. That app will hold the uring_lock for the duration
2159 * of the poll right here, so we need to take a breather every
2160 * now and then to ensure that the issue has a chance to add
2161 * the poll to the issued list. Otherwise we can spin here
2162 * forever, while the workqueue is stuck trying to acquire the
2163 * very same mutex.
2164 */
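/* !(++iters & 7) is true every 8th pass, so we cede the lock roughly once per eight loops */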
2165 if (!(++iters & 7)) {
2166 mutex_unlock(&ctx->uring_lock);
2167 io_run_task_work();
2168 mutex_lock(&ctx->uring_lock);
2169 }
2170
2171 ret = io_iopoll_getevents(ctx, &nr_events, min);
2172 if (ret <= 0)
2173 break;
2174 ret = 0;
2175 } while (min && !nr_events && !need_resched());
2176
2177 mutex_unlock(&ctx->uring_lock);
2178 return ret;
2179 }
2180
2181 static void kiocb_end_write(struct io_kiocb *req)
2182 {
2183 /*
2184 * Tell lockdep we inherited freeze protection from submission
2185 * thread.
2186 */
2187 if (req->flags & REQ_F_ISREG) {
2188 struct inode *inode = file_inode(req->file);
2189
2190 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2191 }
2192 file_end_write(req->file);
2193 }
2194
2195 static void io_complete_rw_common(struct kiocb *kiocb, long res,
2196 struct io_comp_state *cs)
2197 {
2198 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2199 int cflags = 0;
2200
2201 if (kiocb->ki_flags & IOCB_WRITE)
2202 kiocb_end_write(req);
2203
2204 if (res != req->result)
2205 req_set_fail_links(req);
2206 if (req->flags & REQ_F_BUFFER_SELECTED)
2207 cflags = io_put_rw_kbuf(req);
2208 __io_req_complete(req, res, cflags, cs);
2209 }
2210
2211 #ifdef CONFIG_BLOCK
2212 static bool io_resubmit_prep(struct io_kiocb *req, int error)
2213 {
2214 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
2215 ssize_t ret = -ECANCELED;
2216 struct iov_iter iter;
2217 int rw;
2218
2219 if (error) {
2220 ret = error;
2221 goto end_req;
2222 }
2223
2224 switch (req->opcode) {
2225 case IORING_OP_READV:
2226 case IORING_OP_READ_FIXED:
2227 case IORING_OP_READ:
2228 rw = READ;
2229 break;
2230 case IORING_OP_WRITEV:
2231 case IORING_OP_WRITE_FIXED:
2232 case IORING_OP_WRITE:
2233 rw = WRITE;
2234 break;
2235 default:
2236 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2237 req->opcode);
2238 goto end_req;
2239 }
2240
2241 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2242 if (ret < 0)
2243 goto end_req;
2244 ret = io_setup_async_rw(req, ret, iovec, inline_vecs, &iter);
2245 if (!ret)
2246 return true;
2247 kfree(iovec);
2248 end_req:
2249 req_set_fail_links(req);
2250 io_req_complete(req, ret);
2251 return false;
2252 }
2253
2254 static void io_rw_resubmit(struct callback_head *cb)
2255 {
2256 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2257 struct io_ring_ctx *ctx = req->ctx;
2258 int err;
2259
2260 err = io_sq_thread_acquire_mm(ctx, req);
2261
2262 if (io_resubmit_prep(req, err)) {
2263 refcount_inc(&req->refs);
2264 io_queue_async_work(req);
2265 }
2266 }
2267 #endif
2268
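/*
 * If a read/write completed with -EAGAIN or -EOPNOTSUPP and we're not
 * already running from an io-wq worker, try to re-issue it from task
 * context via task_work instead of failing the request.
 */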
2269 static bool io_rw_reissue(struct io_kiocb *req, long res)
2270 {
2271 #ifdef CONFIG_BLOCK
2272 int ret;
2273
2274 if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
2275 return false;
2276
2277 init_task_work(&req->task_work, io_rw_resubmit);
2278 ret = io_req_task_work_add(req, &req->task_work);
2279 if (!ret)
2280 return true;
2281 #endif
2282 return false;
2283 }
2284
2285 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2286 struct io_comp_state *cs)
2287 {
2288 if (!io_rw_reissue(req, res))
2289 io_complete_rw_common(&req->rw.kiocb, res, cs);
2290 }
2291
2292 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2293 {
2294 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2295
2296 __io_complete_rw(req, res, res2, NULL);
2297 }
2298
2299 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2300 {
2301 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2302
2303 if (kiocb->ki_flags & IOCB_WRITE)
2304 kiocb_end_write(req);
2305
2306 if (res != -EAGAIN && res != req->result)
2307 req_set_fail_links(req);
2308
2309 WRITE_ONCE(req->result, res);
2310 /* order with io_poll_complete() checking ->result */
2311 smp_wmb();
2312 WRITE_ONCE(req->iopoll_completed, 1);
2313 }
2314
2315 /*
2316 * After the iocb has been issued, it's safe to be found on the poll list.
2317 * Adding the kiocb to the list AFTER submission ensures that we don't
2318 * find it from an io_iopoll_getevents() thread before the issuer is done
2319 * accessing the kiocb cookie.
2320 */
2321 static void io_iopoll_req_issued(struct io_kiocb *req)
2322 {
2323 struct io_ring_ctx *ctx = req->ctx;
2324
2325 /*
2326 * Track whether we have multiple files in our lists. This will impact
2327 * how we do polling eventually, not spinning if we're on potentially
2328 * different devices.
2329 */
2330 if (list_empty(&ctx->iopoll_list)) {
2331 ctx->poll_multi_file = false;
2332 } else if (!ctx->poll_multi_file) {
2333 struct io_kiocb *list_req;
2334
2335 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
2336 inflight_entry);
2337 if (list_req->file != req->file)
2338 ctx->poll_multi_file = true;
2339 }
2340
2341 /*
2342 * For fast devices, IO may have already completed. If it has, add
2343 * it to the front so we find it first.
2344 */
2345 if (READ_ONCE(req->iopoll_completed))
2346 list_add(&req->inflight_entry, &ctx->iopoll_list);
2347 else
2348 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
2349
2350 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2351 wq_has_sleeper(&ctx->sqo_wait))
2352 wake_up(&ctx->sqo_wait);
2353 }
2354
2355 static void __io_state_file_put(struct io_submit_state *state)
2356 {
2357 if (state->has_refs)
2358 fput_many(state->file, state->has_refs);
2359 state->file = NULL;
2360 }
2361
2362 static inline void io_state_file_put(struct io_submit_state *state)
2363 {
2364 if (state->file)
2365 __io_state_file_put(state);
2366 }
2367
2368 /*
2369 * Get as many references to a file as we have IOs left in this submission,
2370 * assuming most submissions are for one file, or at least that each file
2371 * has more than one submission.
2372 */
2373 static struct file *__io_file_get(struct io_submit_state *state, int fd)
2374 {
2375 if (!state)
2376 return fget(fd);
2377
2378 if (state->file) {
2379 if (state->fd == fd) {
2380 state->has_refs--;
2381 state->ios_left--;
2382 return state->file;
2383 }
2384 __io_state_file_put(state);
2385 }
2386 state->file = fget_many(fd, state->ios_left);
2387 if (!state->file)
2388 return NULL;
2389
2390 state->fd = fd;
2391 state->ios_left--;
2392 state->has_refs = state->ios_left;
2393 return state->file;
2394 }
2395
2396 static bool io_bdev_nowait(struct block_device *bdev)
2397 {
2398 #ifdef CONFIG_BLOCK
2399 return !bdev || queue_is_mq(bdev_get_queue(bdev));
2400 #else
2401 return true;
2402 #endif
2403 }
2404
2405 /*
2406 * If we tracked the file through the SCM inflight mechanism, we could support
2407 * any file. For now, just ensure that anything potentially problematic is done
2408 * inline.
2409 */
2410 static bool io_file_supports_async(struct file *file, int rw)
2411 {
2412 umode_t mode = file_inode(file)->i_mode;
2413
2414 if (S_ISBLK(mode)) {
2415 if (io_bdev_nowait(file->f_inode->i_bdev))
2416 return true;
2417 return false;
2418 }
2419 if (S_ISCHR(mode) || S_ISSOCK(mode))
2420 return true;
2421 if (S_ISREG(mode)) {
2422 if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2423 file->f_op != &io_uring_fops)
2424 return true;
2425 return false;
2426 }
2427
2428 /* any ->read/write should understand O_NONBLOCK */
2429 if (file->f_flags & O_NONBLOCK)
2430 return true;
2431
2432 if (!(file->f_mode & FMODE_NOWAIT))
2433 return false;
2434
2435 if (rw == READ)
2436 return file->f_op->read_iter != NULL;
2437
2438 return file->f_op->write_iter != NULL;
2439 }
2440
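/*
 * Common setup for read/write style requests: file position, rw flags,
 * ioprio, and the completion handler (polled vs. IRQ driven).
 */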
2441 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2442 bool force_nonblock)
2443 {
2444 struct io_ring_ctx *ctx = req->ctx;
2445 struct kiocb *kiocb = &req->rw.kiocb;
2446 unsigned ioprio;
2447 int ret;
2448
2449 if (S_ISREG(file_inode(req->file)->i_mode))
2450 req->flags |= REQ_F_ISREG;
2451
2452 kiocb->ki_pos = READ_ONCE(sqe->off);
2453 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2454 req->flags |= REQ_F_CUR_POS;
2455 kiocb->ki_pos = req->file->f_pos;
2456 }
2457 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
2458 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2459 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2460 if (unlikely(ret))
2461 return ret;
2462
2463 ioprio = READ_ONCE(sqe->ioprio);
2464 if (ioprio) {
2465 ret = ioprio_check_cap(ioprio);
2466 if (ret)
2467 return ret;
2468
2469 kiocb->ki_ioprio = ioprio;
2470 } else
2471 kiocb->ki_ioprio = get_current_ioprio();
2472
2473 /* don't allow async punt if RWF_NOWAIT was requested */
2474 if (kiocb->ki_flags & IOCB_NOWAIT)
2475 req->flags |= REQ_F_NOWAIT;
2476
2477 if (kiocb->ki_flags & IOCB_DIRECT)
2478 io_get_req_task(req);
2479
2480 if (force_nonblock)
2481 kiocb->ki_flags |= IOCB_NOWAIT;
2482
2483 if (ctx->flags & IORING_SETUP_IOPOLL) {
2484 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2485 !kiocb->ki_filp->f_op->iopoll)
2486 return -EOPNOTSUPP;
2487
2488 kiocb->ki_flags |= IOCB_HIPRI;
2489 kiocb->ki_complete = io_complete_rw_iopoll;
2490 req->iopoll_completed = 0;
2491 io_get_req_task(req);
2492 } else {
2493 if (kiocb->ki_flags & IOCB_HIPRI)
2494 return -EINVAL;
2495 kiocb->ki_complete = io_complete_rw;
2496 }
2497
2498 req->rw.addr = READ_ONCE(sqe->addr);
2499 req->rw.len = READ_ONCE(sqe->len);
2500 req->buf_index = READ_ONCE(sqe->buf_index);
2501 return 0;
2502 }
2503
2504 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2505 {
2506 switch (ret) {
2507 case -EIOCBQUEUED:
2508 break;
2509 case -ERESTARTSYS:
2510 case -ERESTARTNOINTR:
2511 case -ERESTARTNOHAND:
2512 case -ERESTART_RESTARTBLOCK:
2513 /*
2514 * We can't just restart the syscall, since previously
2515 * submitted sqes may already be in progress. Just fail this
2516 * IO with EINTR.
2517 */
2518 ret = -EINTR;
2519 /* fall through */
2520 default:
2521 kiocb->ki_complete(kiocb, ret, 0);
2522 }
2523 }
2524
2525 static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2526 struct io_comp_state *cs)
2527 {
2528 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2529
2530 if (req->flags & REQ_F_CUR_POS)
2531 req->file->f_pos = kiocb->ki_pos;
2532 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
2533 __io_complete_rw(req, ret, 0, cs);
2534 else
2535 io_rw_done(kiocb, ret);
2536 }
2537
2538 static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
2539 struct iov_iter *iter)
2540 {
2541 struct io_ring_ctx *ctx = req->ctx;
2542 size_t len = req->rw.len;
2543 struct io_mapped_ubuf *imu;
2544 u16 index, buf_index;
2545 size_t offset;
2546 u64 buf_addr;
2547
2548 /* attempt to use fixed buffers without having provided iovecs */
2549 if (unlikely(!ctx->user_bufs))
2550 return -EFAULT;
2551
2552 buf_index = req->buf_index;
2553 if (unlikely(buf_index >= ctx->nr_user_bufs))
2554 return -EFAULT;
2555
2556 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2557 imu = &ctx->user_bufs[index];
2558 buf_addr = req->rw.addr;
2559
2560 /* overflow */
2561 if (buf_addr + len < buf_addr)
2562 return -EFAULT;
2563 /* not inside the mapped region */
2564 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2565 return -EFAULT;
2566
2567 /*
2568 * May not be a start of buffer, set size appropriately
2569 * and advance us to the beginning.
2570 */
2571 offset = buf_addr - imu->ubuf;
2572 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
2573
2574 if (offset) {
2575 /*
2576 * Don't use iov_iter_advance() here, as it's really slow when
2577 * using the latter parts of a big fixed buffer - it iterates
2578 * over each segment manually. We can cheat a bit here, because
2579 * we know that:
2580 *
2581 * 1) it's a BVEC iter, we set it up
2582 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2583 * first and last bvec
2584 *
2585 * So just find our index, and adjust the iterator afterwards.
2586 * If the offset is within the first bvec (or the whole first
2587 * bvec), just use iov_iter_advance(). This makes it easier
2588 * since we can just skip the first segment, which may not
2589 * be PAGE_SIZE aligned.
2590 */
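/*
 * Illustrative example, assuming 4 KiB pages and a page-sized first
 * bvec: with offset == 10000 we skip the first bvec (offset becomes
 * 5904), seg_skip = 1 + (5904 >> 12) = 2, and iov_offset ends up as
 * 5904 & 4095 = 1808 into the third bvec.
 */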
2591 const struct bio_vec *bvec = imu->bvec;
2592
2593 if (offset <= bvec->bv_len) {
2594 iov_iter_advance(iter, offset);
2595 } else {
2596 unsigned long seg_skip;
2597
2598 /* skip first vec */
2599 offset -= bvec->bv_len;
2600 seg_skip = 1 + (offset >> PAGE_SHIFT);
2601
2602 iter->bvec = bvec + seg_skip;
2603 iter->nr_segs -= seg_skip;
2604 iter->count -= bvec->bv_len + offset;
2605 iter->iov_offset = offset & ~PAGE_MASK;
2606 }
2607 }
2608
2609 return len;
2610 }
2611
2612 static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2613 {
2614 if (needs_lock)
2615 mutex_unlock(&ctx->uring_lock);
2616 }
2617
2618 static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2619 {
2620 /*
2621 * "Normal" inline submissions always hold the uring_lock, since we
2622 * grab it from the system call. Same is true for the SQPOLL offload.
2623 * The only exception is when we've detached the request and issue it
2624 * from an async worker thread, grab the lock for that case.
2625 */
2626 if (needs_lock)
2627 mutex_lock(&ctx->uring_lock);
2628 }
2629
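/*
 * Pick a buffer from the provided-buffer group 'bgid'. The buffer
 * stored in the idr is the head of the group and doubles as the list
 * anchor; once it is the only one left, hand it out and drop the group
 * from the idr.
 */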
2630 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2631 int bgid, struct io_buffer *kbuf,
2632 bool needs_lock)
2633 {
2634 struct io_buffer *head;
2635
2636 if (req->flags & REQ_F_BUFFER_SELECTED)
2637 return kbuf;
2638
2639 io_ring_submit_lock(req->ctx, needs_lock);
2640
2641 lockdep_assert_held(&req->ctx->uring_lock);
2642
2643 head = idr_find(&req->ctx->io_buffer_idr, bgid);
2644 if (head) {
2645 if (!list_empty(&head->list)) {
2646 kbuf = list_last_entry(&head->list, struct io_buffer,
2647 list);
2648 list_del(&kbuf->list);
2649 } else {
2650 kbuf = head;
2651 idr_remove(&req->ctx->io_buffer_idr, bgid);
2652 }
2653 if (*len > kbuf->len)
2654 *len = kbuf->len;
2655 } else {
2656 kbuf = ERR_PTR(-ENOBUFS);
2657 }
2658
2659 io_ring_submit_unlock(req->ctx, needs_lock);
2660
2661 return kbuf;
2662 }
2663
2664 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2665 bool needs_lock)
2666 {
2667 struct io_buffer *kbuf;
2668 u16 bgid;
2669
2670 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2671 bgid = req->buf_index;
2672 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2673 if (IS_ERR(kbuf))
2674 return kbuf;
2675 req->rw.addr = (u64) (unsigned long) kbuf;
2676 req->flags |= REQ_F_BUFFER_SELECTED;
2677 return u64_to_user_ptr(kbuf->addr);
2678 }
2679
2680 #ifdef CONFIG_COMPAT
2681 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2682 bool needs_lock)
2683 {
2684 struct compat_iovec __user *uiov;
2685 compat_ssize_t clen;
2686 void __user *buf;
2687 ssize_t len;
2688
2689 uiov = u64_to_user_ptr(req->rw.addr);
2690 if (!access_ok(uiov, sizeof(*uiov)))
2691 return -EFAULT;
2692 if (__get_user(clen, &uiov->iov_len))
2693 return -EFAULT;
2694 if (clen < 0)
2695 return -EINVAL;
2696
2697 len = clen;
2698 buf = io_rw_buffer_select(req, &len, needs_lock);
2699 if (IS_ERR(buf))
2700 return PTR_ERR(buf);
2701 iov[0].iov_base = buf;
2702 iov[0].iov_len = (compat_size_t) len;
2703 return 0;
2704 }
2705 #endif
2706
2707 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2708 bool needs_lock)
2709 {
2710 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2711 void __user *buf;
2712 ssize_t len;
2713
2714 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2715 return -EFAULT;
2716
2717 len = iov[0].iov_len;
2718 if (len < 0)
2719 return -EINVAL;
2720 buf = io_rw_buffer_select(req, &len, needs_lock);
2721 if (IS_ERR(buf))
2722 return PTR_ERR(buf);
2723 iov[0].iov_base = buf;
2724 iov[0].iov_len = len;
2725 return 0;
2726 }
2727
2728 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2729 bool needs_lock)
2730 {
2731 if (req->flags & REQ_F_BUFFER_SELECTED) {
2732 struct io_buffer *kbuf;
2733
2734 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2735 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2736 iov[0].iov_len = kbuf->len;
2737 return 0;
2738 }
2739 if (!req->rw.len)
2740 return 0;
2741 else if (req->rw.len > 1)
2742 return -EINVAL;
2743
2744 #ifdef CONFIG_COMPAT
2745 if (req->ctx->compat)
2746 return io_compat_import(req, iov, needs_lock);
2747 #endif
2748
2749 return __io_iov_buffer_select(req, iov, needs_lock);
2750 }
2751
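/*
 * Translate the SQE's buffer description into an iov_iter: registered
 * (fixed) buffers, provided buffers, the single-range READ/WRITE case,
 * or a user iovec array for READV/WRITEV.
 */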
2752 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
2753 struct iovec **iovec, struct iov_iter *iter,
2754 bool needs_lock)
2755 {
2756 void __user *buf = u64_to_user_ptr(req->rw.addr);
2757 size_t sqe_len = req->rw.len;
2758 ssize_t ret;
2759 u8 opcode;
2760
2761 opcode = req->opcode;
2762 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
2763 *iovec = NULL;
2764 return io_import_fixed(req, rw, iter);
2765 }
2766
2767 /* buffer index only valid with fixed read/write, or buffer select */
2768 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
2769 return -EINVAL;
2770
2771 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
2772 if (req->flags & REQ_F_BUFFER_SELECT) {
2773 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
2774 if (IS_ERR(buf)) {
2775 *iovec = NULL;
2776 return PTR_ERR(buf);
2777 }
2778 req->rw.len = sqe_len;
2779 }
2780
2781 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2782 *iovec = NULL;
2783 return ret < 0 ? ret : sqe_len;
2784 }
2785
2786 if (req->io) {
2787 struct io_async_rw *iorw = &req->io->rw;
2788
2789 iov_iter_init(iter, rw, iorw->iov, iorw->nr_segs, iorw->size);
2790 *iovec = NULL;
2791 return iorw->size;
2792 }
2793
2794 if (req->flags & REQ_F_BUFFER_SELECT) {
2795 ret = io_iov_buffer_select(req, *iovec, needs_lock);
2796 if (!ret) {
2797 ret = (*iovec)->iov_len;
2798 iov_iter_init(iter, rw, *iovec, 1, ret);
2799 }
2800 *iovec = NULL;
2801 return ret;
2802 }
2803
2804 #ifdef CONFIG_COMPAT
2805 if (req->ctx->compat)
2806 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
2807 iovec, iter);
2808 #endif
2809
2810 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
2811 }
2812
2813 /*
2814 * For files that don't have ->read_iter() and ->write_iter(), handle them
2815 * by looping over ->read() or ->write() manually.
2816 */
2817 static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2818 struct iov_iter *iter)
2819 {
2820 ssize_t ret = 0;
2821
2822 /*
2823 * Don't support polled IO through this interface, and we can't
2824 * support non-blocking either. For the latter, this just causes
2825 * the kiocb to be handled from an async context.
2826 */
2827 if (kiocb->ki_flags & IOCB_HIPRI)
2828 return -EOPNOTSUPP;
2829 if (kiocb->ki_flags & IOCB_NOWAIT)
2830 return -EAGAIN;
2831
2832 while (iov_iter_count(iter)) {
2833 struct iovec iovec;
2834 ssize_t nr;
2835
2836 if (!iov_iter_is_bvec(iter)) {
2837 iovec = iov_iter_iovec(iter);
2838 } else {
2839 /* fixed buffers import bvec */
2840 iovec.iov_base = kmap(iter->bvec->bv_page)
2841 + iter->iov_offset;
2842 iovec.iov_len = min(iter->count,
2843 iter->bvec->bv_len - iter->iov_offset);
2844 }
2845
2846 if (rw == READ) {
2847 nr = file->f_op->read(file, iovec.iov_base,
2848 iovec.iov_len, &kiocb->ki_pos);
2849 } else {
2850 nr = file->f_op->write(file, iovec.iov_base,
2851 iovec.iov_len, &kiocb->ki_pos);
2852 }
2853
2854 if (iov_iter_is_bvec(iter))
2855 kunmap(iter->bvec->bv_page);
2856
2857 if (nr < 0) {
2858 if (!ret)
2859 ret = nr;
2860 break;
2861 }
2862 ret += nr;
2863 if (nr != iovec.iov_len)
2864 break;
2865 iov_iter_advance(iter, nr);
2866 }
2867
2868 return ret;
2869 }
2870
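/*
 * Stash the iovec/iter state describing this IO in req->io->rw so the
 * request can be re-issued later from async context without touching
 * user memory again.
 */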
2871 static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
2872 struct iovec *iovec, struct iovec *fast_iov,
2873 struct iov_iter *iter)
2874 {
2875 struct io_async_rw *rw = &req->io->rw;
2876
2877 rw->nr_segs = iter->nr_segs;
2878 rw->size = io_size;
2879 if (!iovec) {
2880 rw->iov = rw->fast_iov;
2881 if (rw->iov != fast_iov)
2882 memcpy(rw->iov, fast_iov,
2883 sizeof(struct iovec) * iter->nr_segs);
2884 } else {
2885 rw->iov = iovec;
2886 req->flags |= REQ_F_NEED_CLEANUP;
2887 }
2888 }
2889
2890 static inline int __io_alloc_async_ctx(struct io_kiocb *req)
2891 {
2892 req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
2893 return req->io == NULL;
2894 }
2895
2896 static int io_alloc_async_ctx(struct io_kiocb *req)
2897 {
2898 if (!io_op_defs[req->opcode].async_ctx)
2899 return 0;
2900
2901 return __io_alloc_async_ctx(req);
2902 }
2903
2904 static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2905 struct iovec *iovec, struct iovec *fast_iov,
2906 struct iov_iter *iter)
2907 {
2908 if (!io_op_defs[req->opcode].async_ctx)
2909 return 0;
2910 if (!req->io) {
2911 if (__io_alloc_async_ctx(req))
2912 return -ENOMEM;
2913
2914 io_req_map_rw(req, io_size, iovec, fast_iov, iter);
2915 }
2916 return 0;
2917 }
2918
2919 static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
2920 bool force_nonblock)
2921 {
2922 struct io_async_ctx *io = req->io;
2923 struct iov_iter iter;
2924 ssize_t ret;
2925
2926 io->rw.iov = io->rw.fast_iov;
2927 req->io = NULL;
2928 ret = io_import_iovec(rw, req, &io->rw.iov, &iter, !force_nonblock);
2929 req->io = io;
2930 if (unlikely(ret < 0))
2931 return ret;
2932
2933 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2934 return 0;
2935 }
2936
2937 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2938 bool force_nonblock)
2939 {
2940 ssize_t ret;
2941
2942 ret = io_prep_rw(req, sqe, force_nonblock);
2943 if (ret)
2944 return ret;
2945
2946 if (unlikely(!(req->file->f_mode & FMODE_READ)))
2947 return -EBADF;
2948
2949 /* either don't need iovec imported or already have it */
2950 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
2951 return 0;
2952 return io_rw_prep_async(req, READ, force_nonblock);
2953 }
2954
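/*
 * Wake callback for the async buffered read wait queue: once the page
 * the read was waiting on is unlocked, re-submit the request via
 * task_work instead of blocking a thread on the page lock.
 */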
2955 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
2956 int sync, void *arg)
2957 {
2958 struct wait_page_queue *wpq;
2959 struct io_kiocb *req = wait->private;
2960 struct wait_page_key *key = arg;
2961 int ret;
2962
2963 wpq = container_of(wait, struct wait_page_queue, wait);
2964
2965 if (!wake_page_match(wpq, key))
2966 return 0;
2967
2968 /* Stop waking things up if the page is locked again */
2969 if (test_bit(key->bit_nr, &key->page->flags))
2970 return -1;
2971
2972 list_del_init(&wait->entry);
2973
2974 init_task_work(&req->task_work, io_req_task_submit);
2975 /* submit ref gets dropped, acquire a new one */
2976 refcount_inc(&req->refs);
2977 ret = io_req_task_work_add(req, &req->task_work);
2978 if (unlikely(ret)) {
2979 struct task_struct *tsk;
2980
2981 /* queue just for cancelation */
2982 init_task_work(&req->task_work, io_req_task_cancel);
2983 tsk = io_wq_get_task(req->ctx->io_wq);
2984 task_work_add(tsk, &req->task_work, 0);
2985 wake_up_process(tsk);
2986 }
2987 return 1;
2988 }
2989
2990 static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
2991 struct wait_page_queue *wait,
2992 wait_queue_func_t func,
2993 void *data)
2994 {
2995 /* Can't support async wakeup with polled IO */
2996 if (kiocb->ki_flags & IOCB_HIPRI)
2997 return -EINVAL;
2998 if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) {
2999 wait->wait.func = func;
3000 wait->wait.private = data;
3001 wait->wait.flags = 0;
3002 INIT_LIST_HEAD(&wait->wait.entry);
3003 kiocb->ki_flags |= IOCB_WAITQ;
3004 kiocb->ki_waitq = wait;
3005 return 0;
3006 }
3007
3008 return -EOPNOTSUPP;
3009 }
3010
3011
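/*
 * Decide whether a buffered read that would block can be retried with
 * IOCB_WAITQ, i.e. armed with a page wait-queue callback, instead of
 * being punted to the io-wq worker pool.
 */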
3012 static bool io_rw_should_retry(struct io_kiocb *req)
3013 {
3014 struct kiocb *kiocb = &req->rw.kiocb;
3015 int ret;
3016
3017 /* never retry for NOWAIT, we just complete with -EAGAIN */
3018 if (req->flags & REQ_F_NOWAIT)
3019 return false;
3020
3021 /* already tried, or we're doing O_DIRECT */
3022 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_WAITQ))
3023 return false;
3024 /*
3025 * just use poll if we can, and don't attempt if the fs doesn't
3026 * support callback-based unlocks
3027 */
3028 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3029 return false;
3030
3031 /*
3032 * If request type doesn't require req->io to defer in general,
3033 * we need to allocate it here
3034 */
3035 if (!req->io && __io_alloc_async_ctx(req))
3036 return false;
3037
3038 ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq,
3039 io_async_buf_func, req);
3040 if (!ret) {
3041 io_get_req_task(req);
3042 return true;
3043 }
3044
3045 return false;
3046 }
3047
3048 static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3049 {
3050 if (req->file->f_op->read_iter)
3051 return call_read_iter(req->file, &req->rw.kiocb, iter);
3052 return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
3053 }
3054
3055 static int io_read(struct io_kiocb *req, bool force_nonblock,
3056 struct io_comp_state *cs)
3057 {
3058 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3059 struct kiocb *kiocb = &req->rw.kiocb;
3060 struct iov_iter iter;
3061 size_t iov_count;
3062 ssize_t io_size, ret, ret2;
3063 unsigned long nr_segs;
3064
3065 ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
3066 if (ret < 0)
3067 return ret;
3068 io_size = ret;
3069 req->result = io_size;
3070
3071 /* Ensure we clear previously set non-block flag */
3072 if (!force_nonblock)
3073 kiocb->ki_flags &= ~IOCB_NOWAIT;
3074
3075 /* If the file doesn't support async, just async punt */
3076 if (force_nonblock && !io_file_supports_async(req->file, READ))
3077 goto copy_iov;
3078
3079 iov_count = iov_iter_count(&iter);
3080 nr_segs = iter.nr_segs;
3081 ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
3082 if (unlikely(ret))
3083 goto out_free;
3084
3085 ret2 = io_iter_do_read(req, &iter);
3086
3087 /* Catch -EAGAIN return for forced non-blocking submission */
3088 if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
3089 kiocb_done(kiocb, ret2, cs);
3090 } else {
3091 iter.count = iov_count;
3092 iter.nr_segs = nr_segs;
3093 copy_iov:
3094 ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
3095 &iter);
3096 if (ret)
3097 goto out_free;
3098 /* it's copied and will be cleaned with ->io */
3099 iovec = NULL;
3100 /* if we can retry, do so with the callbacks armed */
3101 if (io_rw_should_retry(req)) {
3102 ret2 = io_iter_do_read(req, &iter);
3103 if (ret2 == -EIOCBQUEUED) {
3104 goto out_free;
3105 } else if (ret2 != -EAGAIN) {
3106 kiocb_done(kiocb, ret2, cs);
3107 goto out_free;
3108 }
3109 }
3110 kiocb->ki_flags &= ~IOCB_WAITQ;
3111 return -EAGAIN;
3112 }
3113 out_free:
3114 if (iovec)
3115 kfree(iovec);
3116 return ret;
3117 }
3118
3119 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
3120 bool force_nonblock)
3121 {
3122 ssize_t ret;
3123
3124 ret = io_prep_rw(req, sqe, force_nonblock);
3125 if (ret)
3126 return ret;
3127
3128 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3129 return -EBADF;
3130
3131 /* either don't need iovec imported or already have it */
3132 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
3133 return 0;
3134 return io_rw_prep_async(req, WRITE, force_nonblock);
3135 }
3136
3137 static int io_write(struct io_kiocb *req, bool force_nonblock,
3138 struct io_comp_state *cs)
3139 {
3140 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3141 struct kiocb *kiocb = &req->rw.kiocb;
3142 struct iov_iter iter;
3143 size_t iov_count;
3144 ssize_t ret, ret2, io_size;
3145 unsigned long nr_segs;
3146
3147 ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
3148 if (ret < 0)
3149 return ret;
3150 io_size = ret;
3151 req->result = io_size;
3152
3153 /* Ensure we clear previously set non-block flag */
3154 if (!force_nonblock)
3155 req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
3156
3157 /* If the file doesn't support async, just async punt */
3158 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
3159 goto copy_iov;
3160
3161 /* file path doesn't support NOWAIT for non-direct I/O */
3162 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3163 (req->flags & REQ_F_ISREG))
3164 goto copy_iov;
3165
3166 iov_count = iov_iter_count(&iter);
3167 nr_segs = iter.nr_segs;
3168 ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
3169 if (unlikely(ret))
3170 goto out_free;
3171
3172 /*
3173 * Open-code file_start_write here to grab freeze protection,
3174 * which will be released by another thread in
3175 * io_complete_rw(). Fool lockdep by telling it the lock got
3176 * released so that it doesn't complain about the held lock when
3177 * we return to userspace.
3178 */
3179 if (req->flags & REQ_F_ISREG) {
3180 __sb_start_write(file_inode(req->file)->i_sb,
3181 SB_FREEZE_WRITE, true);
3182 __sb_writers_release(file_inode(req->file)->i_sb,
3183 SB_FREEZE_WRITE);
3184 }
3185 kiocb->ki_flags |= IOCB_WRITE;
3186
3187 if (req->file->f_op->write_iter)
3188 ret2 = call_write_iter(req->file, kiocb, &iter);
3189 else
3190 ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
3191
3192 /*
3193 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3194 * retry them without IOCB_NOWAIT.
3195 */
3196 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3197 ret2 = -EAGAIN;
3198 if (!force_nonblock || ret2 != -EAGAIN) {
3199 kiocb_done(kiocb, ret2, cs);
3200 } else {
3201 iter.count = iov_count;
3202 iter.nr_segs = nr_segs;
3203 copy_iov:
3204 ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
3205 &iter);
3206 if (ret)
3207 goto out_free;
3208 /* it's copied and will be cleaned with ->io */
3209 iovec = NULL;
3210 return -EAGAIN;
3211 }
3212 out_free:
3213 if (iovec)
3214 kfree(iovec);
3215 return ret;
3216 }
3217
3218 static int __io_splice_prep(struct io_kiocb *req,
3219 const struct io_uring_sqe *sqe)
3220 {
3221 struct io_splice *sp = &req->splice;
3222 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
3223 int ret;
3224
3225 if (req->flags & REQ_F_NEED_CLEANUP)
3226 return 0;
3227 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3228 return -EINVAL;
3229
3230 sp->file_in = NULL;
3231 sp->len = READ_ONCE(sqe->len);
3232 sp->flags = READ_ONCE(sqe->splice_flags);
3233
3234 if (unlikely(sp->flags & ~valid_flags))
3235 return -EINVAL;
3236
3237 ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
3238 (sp->flags & SPLICE_F_FD_IN_FIXED));
3239 if (ret)
3240 return ret;
3241 req->flags |= REQ_F_NEED_CLEANUP;
3242
3243 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3244 /*
3245 * Splice operation will be punted async, and we need to
3246 * modify io_wq_work.flags here, so initialize io_wq_work first.
3247 */
3248 io_req_init_async(req);
3249 req->work.flags |= IO_WQ_WORK_UNBOUND;
3250 }
3251
3252 return 0;
3253 }
3254
3255 static int io_tee_prep(struct io_kiocb *req,
3256 const struct io_uring_sqe *sqe)
3257 {
3258 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3259 return -EINVAL;
3260 return __io_splice_prep(req, sqe);
3261 }
3262
3263 static int io_tee(struct io_kiocb *req, bool force_nonblock)
3264 {
3265 struct io_splice *sp = &req->splice;
3266 struct file *in = sp->file_in;
3267 struct file *out = sp->file_out;
3268 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3269 long ret = 0;
3270
3271 if (force_nonblock)
3272 return -EAGAIN;
3273 if (sp->len)
3274 ret = do_tee(in, out, sp->len, flags);
3275
3276 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3277 req->flags &= ~REQ_F_NEED_CLEANUP;
3278
3279 if (ret != sp->len)
3280 req_set_fail_links(req);
3281 io_req_complete(req, ret);
3282 return 0;
3283 }
3284
3285 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3286 {
3287 struct io_splice *sp = &req->splice;
3288
3289 sp->off_in = READ_ONCE(sqe->splice_off_in);
3290 sp->off_out = READ_ONCE(sqe->off);
3291 return __io_splice_prep(req, sqe);
3292 }
3293
3294 static int io_splice(struct io_kiocb *req, bool force_nonblock)
3295 {
3296 struct io_splice *sp = &req->splice;
3297 struct file *in = sp->file_in;
3298 struct file *out = sp->file_out;
3299 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3300 loff_t *poff_in, *poff_out;
3301 long ret = 0;
3302
3303 if (force_nonblock)
3304 return -EAGAIN;
3305
3306 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3307 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
3308
3309 if (sp->len)
3310 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
3311
3312 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3313 req->flags &= ~REQ_F_NEED_CLEANUP;
3314
3315 if (ret != sp->len)
3316 req_set_fail_links(req);
3317 io_req_complete(req, ret);
3318 return 0;
3319 }
3320
3321 /*
3322 * IORING_OP_NOP just posts a completion event, nothing else.
3323 */
3324 static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
3325 {
3326 struct io_ring_ctx *ctx = req->ctx;
3327
3328 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3329 return -EINVAL;
3330
3331 __io_req_complete(req, 0, 0, cs);
3332 return 0;
3333 }
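/*
 * Illustrative userspace usage of IORING_OP_NOP via liburing (error
 * handling omitted; the helpers below are liburing API, not part of
 * this file):
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_nop(sqe);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */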
3334
3335 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3336 {
3337 struct io_ring_ctx *ctx = req->ctx;
3338
3339 if (!req->file)
3340 return -EBADF;
3341
3342 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3343 return -EINVAL;
3344 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3345 return -EINVAL;
3346
3347 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3348 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3349 return -EINVAL;
3350
3351 req->sync.off = READ_ONCE(sqe->off);
3352 req->sync.len = READ_ONCE(sqe->len);
3353 return 0;
3354 }
3355
3356 static int io_fsync(struct io_kiocb *req, bool force_nonblock)
3357 {
3358 loff_t end = req->sync.off + req->sync.len;
3359 int ret;
3360
3361 /* fsync always requires a blocking context */
3362 if (force_nonblock)
3363 return -EAGAIN;
3364
3365 ret = vfs_fsync_range(req->file, req->sync.off,
3366 end > 0 ? end : LLONG_MAX,
3367 req->sync.flags & IORING_FSYNC_DATASYNC);
3368 if (ret < 0)
3369 req_set_fail_links(req);
3370 io_req_complete(req, ret);
3371 return 0;
3372 }
3373
3374 static int io_fallocate_prep(struct io_kiocb *req,
3375 const struct io_uring_sqe *sqe)
3376 {
3377 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3378 return -EINVAL;
3379 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3380 return -EINVAL;
3381
3382 req->sync.off = READ_ONCE(sqe->off);
3383 req->sync.len = READ_ONCE(sqe->addr);
3384 req->sync.mode = READ_ONCE(sqe->len);
3385 return 0;
3386 }
3387
3388 static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
3389 {
3390 int ret;
3391
3392 /* fallocate always requires a blocking context */
3393 if (force_nonblock)
3394 return -EAGAIN;
3395 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3396 req->sync.len);
3397 if (ret < 0)
3398 req_set_fail_links(req);
3399 io_req_complete(req, ret);
3400 return 0;
3401 }
3402
3403 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3404 {
3405 const char __user *fname;
3406 int ret;
3407
3408 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3409 return -EINVAL;
3410 if (unlikely(sqe->ioprio || sqe->buf_index))
3411 return -EINVAL;
3412 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3413 return -EBADF;
3414
3415 /* open.how should be already initialised */
3416 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
3417 req->open.how.flags |= O_LARGEFILE;
3418
3419 req->open.dfd = READ_ONCE(sqe->fd);
3420 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3421 req->open.filename = getname(fname);
3422 if (IS_ERR(req->open.filename)) {
3423 ret = PTR_ERR(req->open.filename);
3424 req->open.filename = NULL;
3425 return ret;
3426 }
3427 req->open.nofile = rlimit(RLIMIT_NOFILE);
3428 req->flags |= REQ_F_NEED_CLEANUP;
3429 return 0;
3430 }
3431
3432 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3433 {
3434 u64 flags, mode;
3435
3436 if (req->flags & REQ_F_NEED_CLEANUP)
3437 return 0;
3438 mode = READ_ONCE(sqe->len);
3439 flags = READ_ONCE(sqe->open_flags);
3440 req->open.how = build_open_how(flags, mode);
3441 return __io_openat_prep(req, sqe);
3442 }
3443
3444 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3445 {
3446 struct open_how __user *how;
3447 size_t len;
3448 int ret;
3449
3450 if (req->flags & REQ_F_NEED_CLEANUP)
3451 return 0;
3452 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3453 len = READ_ONCE(sqe->len);
3454 if (len < OPEN_HOW_SIZE_VER0)
3455 return -EINVAL;
3456
3457 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3458 len);
3459 if (ret)
3460 return ret;
3461
3462 return __io_openat_prep(req, sqe);
3463 }
3464
3465 static int io_openat2(struct io_kiocb *req, bool force_nonblock)
3466 {
3467 struct open_flags op;
3468 struct file *file;
3469 int ret;
3470
3471 if (force_nonblock)
3472 return -EAGAIN;
3473
3474 ret = build_open_flags(&req->open.how, &op);
3475 if (ret)
3476 goto err;
3477
3478 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
3479 if (ret < 0)
3480 goto err;
3481
3482 file = do_filp_open(req->open.dfd, req->open.filename, &op);
3483 if (IS_ERR(file)) {
3484 put_unused_fd(ret);
3485 ret = PTR_ERR(file);
3486 } else {
3487 fsnotify_open(file);
3488 fd_install(ret, file);
3489 }
3490 err:
3491 putname(req->open.filename);
3492 req->flags &= ~REQ_F_NEED_CLEANUP;
3493 if (ret < 0)
3494 req_set_fail_links(req);
3495 io_req_complete(req, ret);
3496 return 0;
3497 }
3498
3499 static int io_openat(struct io_kiocb *req, bool force_nonblock)
3500 {
3501 return io_openat2(req, force_nonblock);
3502 }
3503
3504 static int io_remove_buffers_prep(struct io_kiocb *req,
3505 const struct io_uring_sqe *sqe)
3506 {
3507 struct io_provide_buf *p = &req->pbuf;
3508 u64 tmp;
3509
3510 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3511 return -EINVAL;
3512
3513 tmp = READ_ONCE(sqe->fd);
3514 if (!tmp || tmp > USHRT_MAX)
3515 return -EINVAL;
3516
3517 memset(p, 0, sizeof(*p));
3518 p->nbufs = tmp;
3519 p->bgid = READ_ONCE(sqe->buf_group);
3520 return 0;
3521 }
3522
3523 static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3524 int bgid, unsigned nbufs)
3525 {
3526 unsigned i = 0;
3527
3528 /* shouldn't happen */
3529 if (!nbufs)
3530 return 0;
3531
3532 /* the head kbuf is the list itself */
3533 while (!list_empty(&buf->list)) {
3534 struct io_buffer *nxt;
3535
3536 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3537 list_del(&nxt->list);
3538 kfree(nxt);
3539 if (++i == nbufs)
3540 return i;
3541 }
3542 i++;
3543 kfree(buf);
3544 idr_remove(&ctx->io_buffer_idr, bgid);
3545
3546 return i;
3547 }
3548
3549 static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
3550 struct io_comp_state *cs)
3551 {
3552 struct io_provide_buf *p = &req->pbuf;
3553 struct io_ring_ctx *ctx = req->ctx;
3554 struct io_buffer *head;
3555 int ret = 0;
3556
3557 io_ring_submit_lock(ctx, !force_nonblock);
3558
3559 lockdep_assert_held(&ctx->uring_lock);
3560
3561 ret = -ENOENT;
3562 head = idr_find(&ctx->io_buffer_idr, p->bgid);
3563 if (head)
3564 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3565
3566 io_ring_submit_unlock(ctx, !force_nonblock);
3567 if (ret < 0)
3568 req_set_fail_links(req);
3569 __io_req_complete(req, ret, 0, cs);
3570 return 0;
3571 }
3572
3573 static int io_provide_buffers_prep(struct io_kiocb *req,
3574 const struct io_uring_sqe *sqe)
3575 {
3576 struct io_provide_buf *p = &req->pbuf;
3577 u64 tmp;
3578
3579 if (sqe->ioprio || sqe->rw_flags)
3580 return -EINVAL;
3581
3582 tmp = READ_ONCE(sqe->fd);
3583 if (!tmp || tmp > USHRT_MAX)
3584 return -E2BIG;
3585 p->nbufs = tmp;
3586 p->addr = READ_ONCE(sqe->addr);
3587 p->len = READ_ONCE(sqe->len);
3588
3589 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
3590 return -EFAULT;
3591
3592 p->bgid = READ_ONCE(sqe->buf_group);
3593 tmp = READ_ONCE(sqe->off);
3594 if (tmp > USHRT_MAX)
3595 return -E2BIG;
3596 p->bid = tmp;
3597 return 0;
3598 }
3599
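/*
 * Carve pbuf->nbufs buffers of pbuf->len bytes each out of the range
 * starting at pbuf->addr and link them onto *head. Returns how many
 * buffers were added, or -ENOMEM if none could be allocated.
 */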
3600 static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3601 {
3602 struct io_buffer *buf;
3603 u64 addr = pbuf->addr;
3604 int i, bid = pbuf->bid;
3605
3606 for (i = 0; i < pbuf->nbufs; i++) {
3607 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3608 if (!buf)
3609 break;
3610
3611 buf->addr = addr;
3612 buf->len = pbuf->len;
3613 buf->bid = bid;
3614 addr += pbuf->len;
3615 bid++;
3616 if (!*head) {
3617 INIT_LIST_HEAD(&buf->list);
3618 *head = buf;
3619 } else {
3620 list_add_tail(&buf->list, &(*head)->list);
3621 }
3622 }
3623
3624 return i ? i : -ENOMEM;
3625 }
3626
3627 static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
3628 struct io_comp_state *cs)
3629 {
3630 struct io_provide_buf *p = &req->pbuf;
3631 struct io_ring_ctx *ctx = req->ctx;
3632 struct io_buffer *head, *list;
3633 int ret = 0;
3634
3635 io_ring_submit_lock(ctx, !force_nonblock);
3636
3637 lockdep_assert_held(&ctx->uring_lock);
3638
3639 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
3640
3641 ret = io_add_buffers(p, &head);
3642 if (ret < 0)
3643 goto out;
3644
3645 if (!list) {
3646 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
3647 GFP_KERNEL);
3648 if (ret < 0) {
3649 __io_remove_buffers(ctx, head, p->bgid, -1U);
3650 goto out;
3651 }
3652 }
3653 out:
3654 io_ring_submit_unlock(ctx, !force_nonblock);
3655 if (ret < 0)
3656 req_set_fail_links(req);
3657 __io_req_complete(req, ret, 0, cs);
3658 return 0;
3659 }
3660
3661 static int io_epoll_ctl_prep(struct io_kiocb *req,
3662 const struct io_uring_sqe *sqe)
3663 {
3664 #if defined(CONFIG_EPOLL)
3665 if (sqe->ioprio || sqe->buf_index)
3666 return -EINVAL;
3667 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3668 return -EINVAL;
3669
3670 req->epoll.epfd = READ_ONCE(sqe->fd);
3671 req->epoll.op = READ_ONCE(sqe->len);
3672 req->epoll.fd = READ_ONCE(sqe->off);
3673
3674 if (ep_op_has_event(req->epoll.op)) {
3675 struct epoll_event __user *ev;
3676
3677 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
3678 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
3679 return -EFAULT;
3680 }
3681
3682 return 0;
3683 #else
3684 return -EOPNOTSUPP;
3685 #endif
3686 }
3687
3688 static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
3689 struct io_comp_state *cs)
3690 {
3691 #if defined(CONFIG_EPOLL)
3692 struct io_epoll *ie = &req->epoll;
3693 int ret;
3694
3695 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
3696 if (force_nonblock && ret == -EAGAIN)
3697 return -EAGAIN;
3698
3699 if (ret < 0)
3700 req_set_fail_links(req);
3701 __io_req_complete(req, ret, 0, cs);
3702 return 0;
3703 #else
3704 return -EOPNOTSUPP;
3705 #endif
3706 }
3707
3708 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3709 {
3710 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3711 if (sqe->ioprio || sqe->buf_index || sqe->off)
3712 return -EINVAL;
3713 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3714 return -EINVAL;
3715
3716 req->madvise.addr = READ_ONCE(sqe->addr);
3717 req->madvise.len = READ_ONCE(sqe->len);
3718 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
3719 return 0;
3720 #else
3721 return -EOPNOTSUPP;
3722 #endif
3723 }
3724
3725 static int io_madvise(struct io_kiocb *req, bool force_nonblock)
3726 {
3727 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
3728 struct io_madvise *ma = &req->madvise;
3729 int ret;
3730
3731 if (force_nonblock)
3732 return -EAGAIN;
3733
3734 ret = do_madvise(ma->addr, ma->len, ma->advice);
3735 if (ret < 0)
3736 req_set_fail_links(req);
3737 io_req_complete(req, ret);
3738 return 0;
3739 #else
3740 return -EOPNOTSUPP;
3741 #endif
3742 }
3743
3744 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3745 {
3746 if (sqe->ioprio || sqe->buf_index || sqe->addr)
3747 return -EINVAL;
3748 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3749 return -EINVAL;
3750
3751 req->fadvise.offset = READ_ONCE(sqe->off);
3752 req->fadvise.len = READ_ONCE(sqe->len);
3753 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
3754 return 0;
3755 }
3756
3757 static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
3758 {
3759 struct io_fadvise *fa = &req->fadvise;
3760 int ret;
3761
3762 if (force_nonblock) {
3763 switch (fa->advice) {
3764 case POSIX_FADV_NORMAL:
3765 case POSIX_FADV_RANDOM:
3766 case POSIX_FADV_SEQUENTIAL:
3767 break;
3768 default:
3769 return -EAGAIN;
3770 }
3771 }
3772
3773 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
3774 if (ret < 0)
3775 req_set_fail_links(req);
3776 io_req_complete(req, ret);
3777 return 0;
3778 }
3779
3780 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3781 {
3782 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3783 return -EINVAL;
3784 if (sqe->ioprio || sqe->buf_index)
3785 return -EINVAL;
3786 if (req->flags & REQ_F_FIXED_FILE)
3787 return -EBADF;
3788
3789 req->statx.dfd = READ_ONCE(sqe->fd);
3790 req->statx.mask = READ_ONCE(sqe->len);
3791 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
3792 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3793 req->statx.flags = READ_ONCE(sqe->statx_flags);
3794
3795 return 0;
3796 }
3797
3798 static int io_statx(struct io_kiocb *req, bool force_nonblock)
3799 {
3800 struct io_statx *ctx = &req->statx;
3801 int ret;
3802
3803 if (force_nonblock) {
3804 /* only need file table for an actual valid fd */
3805 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
3806 req->flags |= REQ_F_NO_FILE_TABLE;
3807 return -EAGAIN;
3808 }
3809
3810 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
3811 ctx->buffer);
3812
3813 if (ret < 0)
3814 req_set_fail_links(req);
3815 io_req_complete(req, ret);
3816 return 0;
3817 }
3818
3819 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3820 {
3821 /*
3822 * If we queue this for async, it must not be cancellable. That would
3823 * leave the 'file' in an indeterminate state, and we need to modify
3824 * io_wq_work.flags here, so initialize io_wq_work first.
3825 */
3826 io_req_init_async(req);
3827 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
3828
3829 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3830 return -EINVAL;
3831 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
3832 sqe->rw_flags || sqe->buf_index)
3833 return -EINVAL;
3834 if (req->flags & REQ_F_FIXED_FILE)
3835 return -EBADF;
3836
3837 req->close.fd = READ_ONCE(sqe->fd);
3838 if ((req->file && req->file->f_op == &io_uring_fops) ||
3839 req->close.fd == req->ctx->ring_fd)
3840 return -EBADF;
3841
3842 req->close.put_file = NULL;
3843 return 0;
3844 }
3845
3846 static int io_close(struct io_kiocb *req, bool force_nonblock,
3847 struct io_comp_state *cs)
3848 {
3849 struct io_close *close = &req->close;
3850 int ret;
3851
3852 /* might be already done during nonblock submission */
3853 if (!close->put_file) {
3854 ret = __close_fd_get_file(close->fd, &close->put_file);
3855 if (ret < 0)
3856 return (ret == -ENOENT) ? -EBADF : ret;
3857 }
3858
3859 /* if the file has a flush method, be safe and punt to async */
3860 if (close->put_file->f_op->flush && force_nonblock) {
3861 /* was never set, but play safe */
3862 req->flags &= ~REQ_F_NOWAIT;
3863 /* avoid grabbing files - we don't need the files */
3864 req->flags |= REQ_F_NO_FILE_TABLE;
3865 return -EAGAIN;
3866 }
3867
3868 /* No ->flush() or already async, safely close from here */
3869 ret = filp_close(close->put_file, req->work.files);
3870 if (ret < 0)
3871 req_set_fail_links(req);
3872 fput(close->put_file);
3873 close->put_file = NULL;
3874 __io_req_complete(req, ret, 0, cs);
3875 return 0;
3876 }
3877
3878 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3879 {
3880 struct io_ring_ctx *ctx = req->ctx;
3881
3882 if (!req->file)
3883 return -EBADF;
3884
3885 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3886 return -EINVAL;
3887 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
3888 return -EINVAL;
3889
3890 req->sync.off = READ_ONCE(sqe->off);
3891 req->sync.len = READ_ONCE(sqe->len);
3892 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
3893 return 0;
3894 }
3895
3896 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
3897 {
3898 int ret;
3899
3900 /* sync_file_range always requires a blocking context */
3901 if (force_nonblock)
3902 return -EAGAIN;
3903
3904 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
3905 req->sync.flags);
3906 if (ret < 0)
3907 req_set_fail_links(req);
3908 io_req_complete(req, ret);
3909 return 0;
3910 }
3911
3912 #if defined(CONFIG_NET)
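/*
 * Copy the on-stack msghdr into req->io so the send/recv can be retried
 * from async context, then return -EAGAIN so the caller punts the
 * request.
 */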
3913 static int io_setup_async_msg(struct io_kiocb *req,
3914 struct io_async_msghdr *kmsg)
3915 {
3916 if (req->io)
3917 return -EAGAIN;
3918 if (io_alloc_async_ctx(req)) {
3919 if (kmsg->iov != kmsg->fast_iov)
3920 kfree(kmsg->iov);
3921 return -ENOMEM;
3922 }
3923 req->flags |= REQ_F_NEED_CLEANUP;
3924 memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
3925 return -EAGAIN;
3926 }
3927
3928 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
3929 struct io_async_msghdr *iomsg)
3930 {
3931 iomsg->iov = iomsg->fast_iov;
3932 iomsg->msg.msg_name = &iomsg->addr;
3933 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
3934 req->sr_msg.msg_flags, &iomsg->iov);
3935 }
3936
3937 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3938 {
3939 struct io_sr_msg *sr = &req->sr_msg;
3940 struct io_async_ctx *io = req->io;
3941 int ret;
3942
3943 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3944 return -EINVAL;
3945
3946 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3947 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
3948 sr->len = READ_ONCE(sqe->len);
3949
3950 #ifdef CONFIG_COMPAT
3951 if (req->ctx->compat)
3952 sr->msg_flags |= MSG_CMSG_COMPAT;
3953 #endif
3954
3955 if (!io || req->opcode == IORING_OP_SEND)
3956 return 0;
3957 /* iovec is already imported */
3958 if (req->flags & REQ_F_NEED_CLEANUP)
3959 return 0;
3960
3961 ret = io_sendmsg_copy_hdr(req, &io->msg);
3962 if (!ret)
3963 req->flags |= REQ_F_NEED_CLEANUP;
3964 return ret;
3965 }
3966
3967 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
3968 struct io_comp_state *cs)
3969 {
3970 struct io_async_msghdr iomsg, *kmsg;
3971 struct socket *sock;
3972 unsigned flags;
3973 int ret;
3974
3975 sock = sock_from_file(req->file, &ret);
3976 if (unlikely(!sock))
3977 return ret;
3978
3979 if (req->io) {
3980 kmsg = &req->io->msg;
3981 kmsg->msg.msg_name = &req->io->msg.addr;
3982 /* if iov is set, it's allocated already */
3983 if (!kmsg->iov)
3984 kmsg->iov = kmsg->fast_iov;
3985 kmsg->msg.msg_iter.iov = kmsg->iov;
3986 } else {
3987 ret = io_sendmsg_copy_hdr(req, &iomsg);
3988 if (ret)
3989 return ret;
3990 kmsg = &iomsg;
3991 }
3992
3993 flags = req->sr_msg.msg_flags;
3994 if (flags & MSG_DONTWAIT)
3995 req->flags |= REQ_F_NOWAIT;
3996 else if (force_nonblock)
3997 flags |= MSG_DONTWAIT;
3998
3999 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4000 if (force_nonblock && ret == -EAGAIN)
4001 return io_setup_async_msg(req, kmsg);
4002 if (ret == -ERESTARTSYS)
4003 ret = -EINTR;
4004
4005 if (kmsg->iov != kmsg->fast_iov)
4006 kfree(kmsg->iov);
4007 req->flags &= ~REQ_F_NEED_CLEANUP;
4008 if (ret < 0)
4009 req_set_fail_links(req);
4010 __io_req_complete(req, ret, 0, cs);
4011 return 0;
4012 }
4013
4014 static int io_send(struct io_kiocb *req, bool force_nonblock,
4015 struct io_comp_state *cs)
4016 {
4017 struct io_sr_msg *sr = &req->sr_msg;
4018 struct msghdr msg;
4019 struct iovec iov;
4020 struct socket *sock;
4021 unsigned flags;
4022 int ret;
4023
4024 sock = sock_from_file(req->file, &ret);
4025 if (unlikely(!sock))
4026 return ret;
4027
4028 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4029 if (unlikely(ret))
4030 return ret;
4031
4032 msg.msg_name = NULL;
4033 msg.msg_control = NULL;
4034 msg.msg_controllen = 0;
4035 msg.msg_namelen = 0;
4036
4037 flags = req->sr_msg.msg_flags;
4038 if (flags & MSG_DONTWAIT)
4039 req->flags |= REQ_F_NOWAIT;
4040 else if (force_nonblock)
4041 flags |= MSG_DONTWAIT;
4042
4043 msg.msg_flags = flags;
4044 ret = sock_sendmsg(sock, &msg);
4045 if (force_nonblock && ret == -EAGAIN)
4046 return -EAGAIN;
4047 if (ret == -ERESTARTSYS)
4048 ret = -EINTR;
4049
4050 if (ret < 0)
4051 req_set_fail_links(req);
4052 __io_req_complete(req, ret, 0, cs);
4053 return 0;
4054 }
4055
4056 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4057 struct io_async_msghdr *iomsg)
4058 {
4059 struct io_sr_msg *sr = &req->sr_msg;
4060 struct iovec __user *uiov;
4061 size_t iov_len;
4062 int ret;
4063
4064 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4065 &iomsg->uaddr, &uiov, &iov_len);
4066 if (ret)
4067 return ret;
4068
4069 if (req->flags & REQ_F_BUFFER_SELECT) {
4070 if (iov_len > 1)
4071 return -EINVAL;
4072 if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
4073 return -EFAULT;
4074 sr->len = iomsg->iov[0].iov_len;
4075 iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
4076 sr->len);
4077 iomsg->iov = NULL;
4078 } else {
4079 ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4080 &iomsg->iov, &iomsg->msg.msg_iter);
4081 if (ret > 0)
4082 ret = 0;
4083 }
4084
4085 return ret;
4086 }
4087
4088 #ifdef CONFIG_COMPAT
4089 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
4090 struct io_async_msghdr *iomsg)
4091 {
4092 struct compat_msghdr __user *msg_compat;
4093 struct io_sr_msg *sr = &req->sr_msg;
4094 struct compat_iovec __user *uiov;
4095 compat_uptr_t ptr;
4096 compat_size_t len;
4097 int ret;
4098
4099 msg_compat = (struct compat_msghdr __user *) sr->umsg;
4100 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
4101 &ptr, &len);
4102 if (ret)
4103 return ret;
4104
4105 uiov = compat_ptr(ptr);
4106 if (req->flags & REQ_F_BUFFER_SELECT) {
4107 compat_ssize_t clen;
4108
4109 if (len > 1)
4110 return -EINVAL;
4111 if (!access_ok(uiov, sizeof(*uiov)))
4112 return -EFAULT;
4113 if (__get_user(clen, &uiov->iov_len))
4114 return -EFAULT;
4115 if (clen < 0)
4116 return -EINVAL;
4117 sr->len = clen;
4118 iomsg->iov = NULL;
4119 } else {
4120 ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
4121 &iomsg->iov,
4122 &iomsg->msg.msg_iter);
4123 if (ret < 0)
4124 return ret;
4125 }
4126
4127 return 0;
4128 }
4129 #endif
4130
4131 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4132 struct io_async_msghdr *iomsg)
4133 {
4134 iomsg->msg.msg_name = &iomsg->addr;
4135 iomsg->iov = iomsg->fast_iov;
4136
4137 #ifdef CONFIG_COMPAT
4138 if (req->ctx->compat)
4139 return __io_compat_recvmsg_copy_hdr(req, iomsg);
4140 #endif
4141
4142 return __io_recvmsg_copy_hdr(req, iomsg);
4143 }
4144
4145 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4146 bool needs_lock)
4147 {
4148 struct io_sr_msg *sr = &req->sr_msg;
4149 struct io_buffer *kbuf;
4150
4151 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4152 if (IS_ERR(kbuf))
4153 return kbuf;
4154
4155 sr->kbuf = kbuf;
4156 req->flags |= REQ_F_BUFFER_SELECTED;
4157 return kbuf;
4158 }
4159
4160 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4161 {
4162 return io_put_kbuf(req, req->sr_msg.kbuf);
4163 }
4164
4165 static int io_recvmsg_prep(struct io_kiocb *req,
4166 const struct io_uring_sqe *sqe)
4167 {
4168 struct io_sr_msg *sr = &req->sr_msg;
4169 struct io_async_ctx *io = req->io;
4170 int ret;
4171
4172 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4173 return -EINVAL;
4174
4175 sr->msg_flags = READ_ONCE(sqe->msg_flags);
4176 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4177 sr->len = READ_ONCE(sqe->len);
4178 sr->bgid = READ_ONCE(sqe->buf_group);
4179
4180 #ifdef CONFIG_COMPAT
4181 if (req->ctx->compat)
4182 sr->msg_flags |= MSG_CMSG_COMPAT;
4183 #endif
4184
4185 if (!io || req->opcode == IORING_OP_RECV)
4186 return 0;
4187 /* iovec is already imported */
4188 if (req->flags & REQ_F_NEED_CLEANUP)
4189 return 0;
4190
4191 ret = io_recvmsg_copy_hdr(req, &io->msg);
4192 if (!ret)
4193 req->flags |= REQ_F_NEED_CLEANUP;
4194 return ret;
4195 }
4196
4197 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4198 struct io_comp_state *cs)
4199 {
4200 struct io_async_msghdr iomsg, *kmsg;
4201 struct socket *sock;
4202 struct io_buffer *kbuf;
4203 unsigned flags;
4204 int ret, cflags = 0;
4205
4206 sock = sock_from_file(req->file, &ret);
4207 if (unlikely(!sock))
4208 return ret;
4209
4210 if (req->io) {
4211 kmsg = &req->io->msg;
4212 kmsg->msg.msg_name = &req->io->msg.addr;
4213 /* if iov is set, it's allocated already */
4214 if (!kmsg->iov)
4215 kmsg->iov = kmsg->fast_iov;
4216 kmsg->msg.msg_iter.iov = kmsg->iov;
4217 } else {
4218 ret = io_recvmsg_copy_hdr(req, &iomsg);
4219 if (ret)
4220 return ret;
4221 kmsg = &iomsg;
4222 }
4223
4224 if (req->flags & REQ_F_BUFFER_SELECT) {
4225 kbuf = io_recv_buffer_select(req, !force_nonblock);
4226 if (IS_ERR(kbuf))
4227 return PTR_ERR(kbuf);
4228 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4229 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
4230 1, req->sr_msg.len);
4231 }
4232
4233 flags = req->sr_msg.msg_flags;
4234 if (flags & MSG_DONTWAIT)
4235 req->flags |= REQ_F_NOWAIT;
4236 else if (force_nonblock)
4237 flags |= MSG_DONTWAIT;
4238
4239 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4240 kmsg->uaddr, flags);
4241 if (force_nonblock && ret == -EAGAIN)
4242 return io_setup_async_msg(req, kmsg);
4243 if (ret == -ERESTARTSYS)
4244 ret = -EINTR;
4245
4246 if (req->flags & REQ_F_BUFFER_SELECTED)
4247 cflags = io_put_recv_kbuf(req);
4248 if (kmsg->iov != kmsg->fast_iov)
4249 kfree(kmsg->iov);
4250 req->flags &= ~REQ_F_NEED_CLEANUP;
4251 if (ret < 0)
4252 req_set_fail_links(req);
4253 __io_req_complete(req, ret, cflags, cs);
4254 return 0;
4255 }
4256
4257 static int io_recv(struct io_kiocb *req, bool force_nonblock,
4258 struct io_comp_state *cs)
4259 {
4260 struct io_buffer *kbuf;
4261 struct io_sr_msg *sr = &req->sr_msg;
4262 struct msghdr msg;
4263 void __user *buf = sr->buf;
4264 struct socket *sock;
4265 struct iovec iov;
4266 unsigned flags;
4267 int ret, cflags = 0;
4268
4269 sock = sock_from_file(req->file, &ret);
4270 if (unlikely(!sock))
4271 return ret;
4272
4273 if (req->flags & REQ_F_BUFFER_SELECT) {
4274 kbuf = io_recv_buffer_select(req, !force_nonblock);
4275 if (IS_ERR(kbuf))
4276 return PTR_ERR(kbuf);
4277 buf = u64_to_user_ptr(kbuf->addr);
4278 }
4279
4280 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
4281 if (unlikely(ret))
4282 goto out_free;
4283
4284 msg.msg_name = NULL;
4285 msg.msg_control = NULL;
4286 msg.msg_controllen = 0;
4287 msg.msg_namelen = 0;
4288 msg.msg_iocb = NULL;
4289 msg.msg_flags = 0;
4290
4291 flags = req->sr_msg.msg_flags;
4292 if (flags & MSG_DONTWAIT)
4293 req->flags |= REQ_F_NOWAIT;
4294 else if (force_nonblock)
4295 flags |= MSG_DONTWAIT;
4296
4297 ret = sock_recvmsg(sock, &msg, flags);
4298 if (force_nonblock && ret == -EAGAIN)
4299 return -EAGAIN;
4300 if (ret == -ERESTARTSYS)
4301 ret = -EINTR;
4302 out_free:
4303 if (req->flags & REQ_F_BUFFER_SELECTED)
4304 cflags = io_put_recv_kbuf(req);
4305 if (ret < 0)
4306 req_set_fail_links(req);
4307 __io_req_complete(req, ret, cflags, cs);
4308 return 0;
4309 }
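/*
 * Editorial sketch (not part of the kernel source): combining provided
 * buffers (IOSQE_BUFFER_SELECT) with IORING_OP_RECV, which exercises
 * io_recv_buffer_select() and io_put_recv_kbuf() above. Assumes
 * <liburing.h>; group id, sizes and names are illustrative, and error
 * handling is elided for brevity.
 *
 *	#define EX_BGID		1
 *	#define EX_NR_BUFS	8
 *	#define EX_BUF_SZ	4096
 *
 *	static int example_recv_select(struct io_uring *ring, int sockfd,
 *				       char *pool)	// EX_NR_BUFS * EX_BUF_SZ bytes
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int res, bid;
 *
 *		// hand a group of candidate buffers to the kernel
 *		io_uring_prep_provide_buffers(sqe, pool, EX_BUF_SZ, EX_NR_BUFS,
 *					      EX_BGID, 0);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		io_uring_cqe_seen(ring, cqe);
 *
 *		// recv without a buffer of its own; the kernel picks one from
 *		// the group, as io_recv_buffer_select() does above
 *		sqe = io_uring_get_sqe(ring);
 *		io_uring_prep_recv(sqe, sockfd, NULL, EX_BUF_SZ, 0);
 *		sqe->flags |= IOSQE_BUFFER_SELECT;
 *		sqe->buf_group = EX_BGID;
 *		io_uring_submit(ring);
 *
 *		io_uring_wait_cqe(ring, &cqe);
 *		res = cqe->res;
 *		// the chosen buffer id comes back in cqe->flags (the cflags
 *		// filled in via io_put_recv_kbuf() above)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		io_uring_cqe_seen(ring, cqe);
 *		return res < 0 ? res : bid;
 *	}
 */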
4310
4311 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4312 {
4313 struct io_accept *accept = &req->accept;
4314
4315 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4316 return -EINVAL;
4317 if (sqe->ioprio || sqe->len || sqe->buf_index)
4318 return -EINVAL;
4319
4320 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4321 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4322 accept->flags = READ_ONCE(sqe->accept_flags);
4323 accept->nofile = rlimit(RLIMIT_NOFILE);
4324 return 0;
4325 }
4326
4327 static int io_accept(struct io_kiocb *req, bool force_nonblock,
4328 struct io_comp_state *cs)
4329 {
4330 struct io_accept *accept = &req->accept;
4331 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
4332 int ret;
4333
4334 if (req->file->f_flags & O_NONBLOCK)
4335 req->flags |= REQ_F_NOWAIT;
4336
4337 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
4338 accept->addr_len, accept->flags,
4339 accept->nofile);
4340 if (ret == -EAGAIN && force_nonblock)
4341 return -EAGAIN;
4342 if (ret < 0) {
4343 if (ret == -ERESTARTSYS)
4344 ret = -EINTR;
4345 req_set_fail_links(req);
4346 }
4347 __io_req_complete(req, ret, 0, cs);
4348 return 0;
4349 }
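/*
 * Editorial sketch (not part of the kernel source): an IORING_OP_ACCEPT
 * submission as io_accept_prep()/io_accept() above consume it (the address
 * in sqe->addr, the addrlen pointer in sqe->addr2, flags in
 * sqe->accept_flags). Assumes <liburing.h> and <sys/socket.h>, plus a
 * listening socket; names are illustrative only.
 *
 *	static int example_accept(struct io_uring *ring, int listen_fd)
 *	{
 *		struct sockaddr_storage ss;
 *		socklen_t sslen = sizeof(ss);
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int fd;
 *
 *		io_uring_prep_accept(sqe, listen_fd,
 *				     (struct sockaddr *) &ss, &sslen,
 *				     SOCK_CLOEXEC);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		fd = cqe->res;		// new connection fd, or -errno
 *		io_uring_cqe_seen(ring, cqe);
 *		return fd;
 *	}
 */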
4350
4351 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4352 {
4353 struct io_connect *conn = &req->connect;
4354 struct io_async_ctx *io = req->io;
4355
4356 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4357 return -EINVAL;
4358 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4359 return -EINVAL;
4360
4361 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4362 conn->addr_len = READ_ONCE(sqe->addr2);
4363
4364 if (!io)
4365 return 0;
4366
4367 return move_addr_to_kernel(conn->addr, conn->addr_len,
4368 &io->connect.address);
4369 }
4370
4371 static int io_connect(struct io_kiocb *req, bool force_nonblock,
4372 struct io_comp_state *cs)
4373 {
4374 struct io_async_ctx __io, *io;
4375 unsigned file_flags;
4376 int ret;
4377
4378 if (req->io) {
4379 io = req->io;
4380 } else {
4381 ret = move_addr_to_kernel(req->connect.addr,
4382 req->connect.addr_len,
4383 &__io.connect.address);
4384 if (ret)
4385 goto out;
4386 io = &__io;
4387 }
4388
4389 file_flags = force_nonblock ? O_NONBLOCK : 0;
4390
4391 ret = __sys_connect_file(req->file, &io->connect.address,
4392 req->connect.addr_len, file_flags);
4393 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
4394 if (req->io)
4395 return -EAGAIN;
4396 if (io_alloc_async_ctx(req)) {
4397 ret = -ENOMEM;
4398 goto out;
4399 }
4400 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
4401 return -EAGAIN;
4402 }
4403 if (ret == -ERESTARTSYS)
4404 ret = -EINTR;
4405 out:
4406 if (ret < 0)
4407 req_set_fail_links(req);
4408 __io_req_complete(req, ret, 0, cs);
4409 return 0;
4410 }
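/*
 * Editorial sketch (not part of the kernel source): IORING_OP_CONNECT as
 * consumed by io_connect_prep()/io_connect() above; the sockaddr is copied
 * into the request's async context so the nonblocking -EAGAIN/-EINPROGRESS
 * retry can reuse it. Assumes <liburing.h> and <netinet/in.h>; names are
 * illustrative only.
 *
 *	static int example_connect(struct io_uring *ring, int sockfd,
 *				   const struct sockaddr_in *dst)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		io_uring_prep_connect(sqe, sockfd,
 *				      (const struct sockaddr *) dst,
 *				      sizeof(*dst));
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;		// 0 on success, -errno on failure
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */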
4411 #else /* !CONFIG_NET */
4412 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4413 {
4414 return -EOPNOTSUPP;
4415 }
4416
4417 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4418 struct io_comp_state *cs)
4419 {
4420 return -EOPNOTSUPP;
4421 }
4422
4423 static int io_send(struct io_kiocb *req, bool force_nonblock,
4424 struct io_comp_state *cs)
4425 {
4426 return -EOPNOTSUPP;
4427 }
4428
4429 static int io_recvmsg_prep(struct io_kiocb *req,
4430 const struct io_uring_sqe *sqe)
4431 {
4432 return -EOPNOTSUPP;
4433 }
4434
4435 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4436 struct io_comp_state *cs)
4437 {
4438 return -EOPNOTSUPP;
4439 }
4440
4441 static int io_recv(struct io_kiocb *req, bool force_nonblock,
4442 struct io_comp_state *cs)
4443 {
4444 return -EOPNOTSUPP;
4445 }
4446
4447 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4448 {
4449 return -EOPNOTSUPP;
4450 }
4451
4452 static int io_accept(struct io_kiocb *req, bool force_nonblock,
4453 struct io_comp_state *cs)
4454 {
4455 return -EOPNOTSUPP;
4456 }
4457
4458 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4459 {
4460 return -EOPNOTSUPP;
4461 }
4462
4463 static int io_connect(struct io_kiocb *req, bool force_nonblock,
4464 struct io_comp_state *cs)
4465 {
4466 return -EOPNOTSUPP;
4467 }
4468 #endif /* CONFIG_NET */
4469
4470 struct io_poll_table {
4471 struct poll_table_struct pt;
4472 struct io_kiocb *req;
4473 int error;
4474 };
4475
4476 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4477 __poll_t mask, task_work_func_t func)
4478 {
4479 int ret;
4480
4481 /* for instances that support it, check for an event match first */
4482 if (mask && !(mask & poll->events))
4483 return 0;
4484
4485 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4486
4487 list_del_init(&poll->wait.entry);
4488
4489 req->result = mask;
4490 init_task_work(&req->task_work, func);
4491 /*
4492 * If this fails, then the task is exiting. When a task exits, the
4493 * work gets canceled, so just cancel this request as well instead
4494 * of executing it. We can't safely execute it anyway, as we may not
4495 * have the state needed for it.
4496 */
4497 ret = io_req_task_work_add(req, &req->task_work);
4498 if (unlikely(ret)) {
4499 struct task_struct *tsk;
4500
4501 WRITE_ONCE(poll->canceled, true);
4502 tsk = io_wq_get_task(req->ctx->io_wq);
4503 task_work_add(tsk, &req->task_work, 0);
4504 wake_up_process(tsk);
4505 }
4506 return 1;
4507 }
4508
4509 static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4510 __acquires(&req->ctx->completion_lock)
4511 {
4512 struct io_ring_ctx *ctx = req->ctx;
4513
4514 if (!req->result && !READ_ONCE(poll->canceled)) {
4515 struct poll_table_struct pt = { ._key = poll->events };
4516
4517 req->result = vfs_poll(req->file, &pt) & poll->events;
4518 }
4519
4520 spin_lock_irq(&ctx->completion_lock);
4521 if (!req->result && !READ_ONCE(poll->canceled)) {
4522 add_wait_queue(poll->head, &poll->wait);
4523 return true;
4524 }
4525
4526 return false;
4527 }
4528
4529 static void io_poll_remove_double(struct io_kiocb *req, void *data)
4530 {
4531 struct io_poll_iocb *poll = data;
4532
4533 lockdep_assert_held(&req->ctx->completion_lock);
4534
4535 if (poll && poll->head) {
4536 struct wait_queue_head *head = poll->head;
4537
4538 spin_lock(&head->lock);
4539 list_del_init(&poll->wait.entry);
4540 if (poll->wait.private)
4541 refcount_dec(&req->refs);
4542 poll->head = NULL;
4543 spin_unlock(&head->lock);
4544 }
4545 }
4546
4547 static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4548 {
4549 struct io_ring_ctx *ctx = req->ctx;
4550
4551 io_poll_remove_double(req, req->io);
4552 req->poll.done = true;
4553 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4554 io_commit_cqring(ctx);
4555 }
4556
4557 static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4558 {
4559 struct io_ring_ctx *ctx = req->ctx;
4560
4561 if (io_poll_rewait(req, &req->poll)) {
4562 spin_unlock_irq(&ctx->completion_lock);
4563 return;
4564 }
4565
4566 hash_del(&req->hash_node);
4567 io_poll_complete(req, req->result, 0);
4568 req->flags |= REQ_F_COMP_LOCKED;
4569 *nxt = io_put_req_find_next(req);
4570 spin_unlock_irq(&ctx->completion_lock);
4571
4572 io_cqring_ev_posted(ctx);
4573 }
4574
4575 static void io_poll_task_func(struct callback_head *cb)
4576 {
4577 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4578 struct io_kiocb *nxt = NULL;
4579
4580 io_poll_task_handler(req, &nxt);
4581 if (nxt)
4582 __io_req_task_submit(nxt);
4583 }
4584
4585 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4586 int sync, void *key)
4587 {
4588 struct io_kiocb *req = wait->private;
4589 struct io_poll_iocb *poll = req->apoll->double_poll;
4590 __poll_t mask = key_to_poll(key);
4591
4592 /* for instances that support it, check for an event match first */
4593 if (mask && !(mask & poll->events))
4594 return 0;
4595
4596 if (poll && poll->head) {
4597 bool done;
4598
4599 spin_lock(&poll->head->lock);
4600 done = list_empty(&poll->wait.entry);
4601 if (!done)
4602 list_del_init(&poll->wait.entry);
4603 spin_unlock(&poll->head->lock);
4604 if (!done)
4605 __io_async_wake(req, poll, mask, io_poll_task_func);
4606 }
4607 refcount_dec(&req->refs);
4608 return 1;
4609 }
4610
4611 static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4612 wait_queue_func_t wake_func)
4613 {
4614 poll->head = NULL;
4615 poll->done = false;
4616 poll->canceled = false;
4617 poll->events = events;
4618 INIT_LIST_HEAD(&poll->wait.entry);
4619 init_waitqueue_func_entry(&poll->wait, wake_func);
4620 }
4621
4622 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
4623 struct wait_queue_head *head,
4624 struct io_poll_iocb **poll_ptr)
4625 {
4626 struct io_kiocb *req = pt->req;
4627
4628 /*
4629 * If poll->head is already set, it's because the file being polled
4630 * uses multiple waitqueues for poll handling (e.g. one for read, one
4631 * for write). Set up a separate io_poll_iocb if this happens.
4632 */
4633 if (unlikely(poll->head)) {
4634 /* already have a 2nd entry, fail a third attempt */
4635 if (*poll_ptr) {
4636 pt->error = -EINVAL;
4637 return;
4638 }
4639 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4640 if (!poll) {
4641 pt->error = -ENOMEM;
4642 return;
4643 }
4644 io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
4645 refcount_inc(&req->refs);
4646 poll->wait.private = req;
4647 *poll_ptr = poll;
4648 }
4649
4650 pt->error = 0;
4651 poll->head = head;
4652
4653 if (poll->events & EPOLLEXCLUSIVE)
4654 add_wait_queue_exclusive(head, &poll->wait);
4655 else
4656 add_wait_queue(head, &poll->wait);
4657 }
4658
4659 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4660 struct poll_table_struct *p)
4661 {
4662 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4663 struct async_poll *apoll = pt->req->apoll;
4664
4665 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
4666 }
4667
4668 static void io_async_task_func(struct callback_head *cb)
4669 {
4670 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4671 struct async_poll *apoll = req->apoll;
4672 struct io_ring_ctx *ctx = req->ctx;
4673
4674 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4675
4676 if (io_poll_rewait(req, &apoll->poll)) {
4677 spin_unlock_irq(&ctx->completion_lock);
4678 return;
4679 }
4680
4681 /* If req is still hashed, it cannot have been canceled. Don't check. */
4682 if (hash_hashed(&req->hash_node))
4683 hash_del(&req->hash_node);
4684
4685 io_poll_remove_double(req, apoll->double_poll);
4686 spin_unlock_irq(&ctx->completion_lock);
4687
4688 if (!READ_ONCE(apoll->poll.canceled))
4689 __io_req_task_submit(req);
4690 else
4691 __io_req_task_cancel(req, -ECANCELED);
4692
4693 kfree(apoll->double_poll);
4694 kfree(apoll);
4695 }
4696
4697 static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4698 void *key)
4699 {
4700 struct io_kiocb *req = wait->private;
4701 struct io_poll_iocb *poll = &req->apoll->poll;
4702
4703 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
4704 key_to_poll(key));
4705
4706 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
4707 }
4708
4709 static void io_poll_req_insert(struct io_kiocb *req)
4710 {
4711 struct io_ring_ctx *ctx = req->ctx;
4712 struct hlist_head *list;
4713
4714 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
4715 hlist_add_head(&req->hash_node, list);
4716 }
4717
4718 static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
4719 struct io_poll_iocb *poll,
4720 struct io_poll_table *ipt, __poll_t mask,
4721 wait_queue_func_t wake_func)
4722 __acquires(&ctx->completion_lock)
4723 {
4724 struct io_ring_ctx *ctx = req->ctx;
4725 bool cancel = false;
4726
4727 io_init_poll_iocb(poll, mask, wake_func);
4728 poll->file = req->file;
4729 poll->wait.private = req;
4730
4731 ipt->pt._key = mask;
4732 ipt->req = req;
4733 ipt->error = -EINVAL;
4734
4735 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
4736
4737 spin_lock_irq(&ctx->completion_lock);
4738 if (likely(poll->head)) {
4739 spin_lock(&poll->head->lock);
4740 if (unlikely(list_empty(&poll->wait.entry))) {
4741 if (ipt->error)
4742 cancel = true;
4743 ipt->error = 0;
4744 mask = 0;
4745 }
4746 if (mask || ipt->error)
4747 list_del_init(&poll->wait.entry);
4748 else if (cancel)
4749 WRITE_ONCE(poll->canceled, true);
4750 else if (!poll->done) /* actually waiting for an event */
4751 io_poll_req_insert(req);
4752 spin_unlock(&poll->head->lock);
4753 }
4754
4755 return mask;
4756 }
4757
4758 static bool io_arm_poll_handler(struct io_kiocb *req)
4759 {
4760 const struct io_op_def *def = &io_op_defs[req->opcode];
4761 struct io_ring_ctx *ctx = req->ctx;
4762 struct async_poll *apoll;
4763 struct io_poll_table ipt;
4764 __poll_t mask, ret;
4765
4766 if (!req->file || !file_can_poll(req->file))
4767 return false;
4768 if (req->flags & REQ_F_POLLED)
4769 return false;
4770 if (!def->pollin && !def->pollout)
4771 return false;
4772
4773 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
4774 if (unlikely(!apoll))
4775 return false;
4776 apoll->double_poll = NULL;
4777
4778 req->flags |= REQ_F_POLLED;
4779 io_get_req_task(req);
4780 req->apoll = apoll;
4781 INIT_HLIST_NODE(&req->hash_node);
4782
4783 mask = 0;
4784 if (def->pollin)
4785 mask |= POLLIN | POLLRDNORM;
4786 if (def->pollout)
4787 mask |= POLLOUT | POLLWRNORM;
4788 mask |= POLLERR | POLLPRI;
4789
4790 ipt.pt._qproc = io_async_queue_proc;
4791
4792 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
4793 io_async_wake);
4794 if (ret) {
4795 io_poll_remove_double(req, apoll->double_poll);
4796 spin_unlock_irq(&ctx->completion_lock);
4797 kfree(apoll->double_poll);
4798 kfree(apoll);
4799 return false;
4800 }
4801 spin_unlock_irq(&ctx->completion_lock);
4802 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4803 apoll->poll.events);
4804 return true;
4805 }
4806
4807 static bool __io_poll_remove_one(struct io_kiocb *req,
4808 struct io_poll_iocb *poll)
4809 {
4810 bool do_complete = false;
4811
4812 spin_lock(&poll->head->lock);
4813 WRITE_ONCE(poll->canceled, true);
4814 if (!list_empty(&poll->wait.entry)) {
4815 list_del_init(&poll->wait.entry);
4816 do_complete = true;
4817 }
4818 spin_unlock(&poll->head->lock);
4819 hash_del(&req->hash_node);
4820 return do_complete;
4821 }
4822
4823 static bool io_poll_remove_one(struct io_kiocb *req)
4824 {
4825 bool do_complete;
4826
4827 if (req->opcode == IORING_OP_POLL_ADD) {
4828 io_poll_remove_double(req, req->io);
4829 do_complete = __io_poll_remove_one(req, &req->poll);
4830 } else {
4831 struct async_poll *apoll = req->apoll;
4832
4833 io_poll_remove_double(req, apoll->double_poll);
4834
4835 /* non-poll requests have submit ref still */
4836 do_complete = __io_poll_remove_one(req, &apoll->poll);
4837 if (do_complete) {
4838 io_put_req(req);
4839 kfree(apoll->double_poll);
4840 kfree(apoll);
4841 }
4842 }
4843
4844 if (do_complete) {
4845 io_cqring_fill_event(req, -ECANCELED);
4846 io_commit_cqring(req->ctx);
4847 req->flags |= REQ_F_COMP_LOCKED;
4848 io_put_req(req);
4849 }
4850
4851 return do_complete;
4852 }
4853
4854 static void io_poll_remove_all(struct io_ring_ctx *ctx)
4855 {
4856 struct hlist_node *tmp;
4857 struct io_kiocb *req;
4858 int posted = 0, i;
4859
4860 spin_lock_irq(&ctx->completion_lock);
4861 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4862 struct hlist_head *list;
4863
4864 list = &ctx->cancel_hash[i];
4865 hlist_for_each_entry_safe(req, tmp, list, hash_node)
4866 posted += io_poll_remove_one(req);
4867 }
4868 spin_unlock_irq(&ctx->completion_lock);
4869
4870 if (posted)
4871 io_cqring_ev_posted(ctx);
4872 }
4873
4874 static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4875 {
4876 struct hlist_head *list;
4877 struct io_kiocb *req;
4878
4879 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4880 hlist_for_each_entry(req, list, hash_node) {
4881 if (sqe_addr != req->user_data)
4882 continue;
4883 if (io_poll_remove_one(req))
4884 return 0;
4885 return -EALREADY;
4886 }
4887
4888 return -ENOENT;
4889 }
4890
4891 static int io_poll_remove_prep(struct io_kiocb *req,
4892 const struct io_uring_sqe *sqe)
4893 {
4894 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4895 return -EINVAL;
4896 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4897 sqe->poll_events)
4898 return -EINVAL;
4899
4900 req->poll.addr = READ_ONCE(sqe->addr);
4901 return 0;
4902 }
4903
4904 /*
4905 * Find a running poll command that matches one specified in sqe->addr,
4906 * and remove it if found.
4907 */
4908 static int io_poll_remove(struct io_kiocb *req)
4909 {
4910 struct io_ring_ctx *ctx = req->ctx;
4911 u64 addr;
4912 int ret;
4913
4914 addr = req->poll.addr;
4915 spin_lock_irq(&ctx->completion_lock);
4916 ret = io_poll_cancel(ctx, addr);
4917 spin_unlock_irq(&ctx->completion_lock);
4918
4919 if (ret < 0)
4920 req_set_fail_links(req);
4921 io_req_complete(req, ret);
4922 return 0;
4923 }
4924
4925 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4926 void *key)
4927 {
4928 struct io_kiocb *req = wait->private;
4929 struct io_poll_iocb *poll = &req->poll;
4930
4931 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
4932 }
4933
4934 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4935 struct poll_table_struct *p)
4936 {
4937 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4938
4939 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
4940 }
4941
4942 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4943 {
4944 struct io_poll_iocb *poll = &req->poll;
4945 u32 events;
4946
4947 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4948 return -EINVAL;
4949 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4950 return -EINVAL;
4951 if (!poll->file)
4952 return -EBADF;
4953
4954 events = READ_ONCE(sqe->poll32_events);
4955 #ifdef __BIG_ENDIAN
4956 events = swahw32(events);
4957 #endif
4958 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
4959 (events & EPOLLEXCLUSIVE);
4960
4961 io_get_req_task(req);
4962 return 0;
4963 }
4964
4965 static int io_poll_add(struct io_kiocb *req)
4966 {
4967 struct io_poll_iocb *poll = &req->poll;
4968 struct io_ring_ctx *ctx = req->ctx;
4969 struct io_poll_table ipt;
4970 __poll_t mask;
4971
4972 INIT_HLIST_NODE(&req->hash_node);
4973 ipt.pt._qproc = io_poll_queue_proc;
4974
4975 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4976 io_poll_wake);
4977
4978 if (mask) { /* no async, we'd stolen it */
4979 ipt.error = 0;
4980 io_poll_complete(req, mask, 0);
4981 }
4982 spin_unlock_irq(&ctx->completion_lock);
4983
4984 if (mask) {
4985 io_cqring_ev_posted(ctx);
4986 io_put_req(req);
4987 }
4988 return ipt.error;
4989 }
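/*
 * Editorial sketch (not part of the kernel source): a single-shot
 * IORING_OP_POLL_ADD as armed by io_poll_add() above; the completion result
 * is the poll mask filled in by io_poll_complete(). Assumes <liburing.h>
 * and <poll.h>; names are illustrative only.
 *
 *	static int example_poll_in(struct io_uring *ring, int fd)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int revents;
 *
 *		io_uring_prep_poll_add(sqe, fd, POLLIN);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);	// completes once fd is readable
 *		revents = cqe->res;		// e.g. POLLIN, or -errno
 *		io_uring_cqe_seen(ring, cqe);
 *		return revents;
 *	}
 */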
4990
4991 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4992 {
4993 struct io_timeout_data *data = container_of(timer,
4994 struct io_timeout_data, timer);
4995 struct io_kiocb *req = data->req;
4996 struct io_ring_ctx *ctx = req->ctx;
4997 unsigned long flags;
4998
4999 spin_lock_irqsave(&ctx->completion_lock, flags);
5000 atomic_set(&req->ctx->cq_timeouts,
5001 atomic_read(&req->ctx->cq_timeouts) + 1);
5002
5003 /*
5004 * We could be racing with timeout deletion. If the list is empty,
5005 * then timeout lookup already found it and will be handling it.
5006 */
5007 if (!list_empty(&req->timeout.list))
5008 list_del_init(&req->timeout.list);
5009
5010 io_cqring_fill_event(req, -ETIME);
5011 io_commit_cqring(ctx);
5012 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5013
5014 io_cqring_ev_posted(ctx);
5015 req_set_fail_links(req);
5016 io_put_req(req);
5017 return HRTIMER_NORESTART;
5018 }
5019
5020 static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5021 {
5022 struct io_kiocb *req;
5023 int ret = -ENOENT;
5024
5025 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5026 if (user_data == req->user_data) {
5027 list_del_init(&req->timeout.list);
5028 ret = 0;
5029 break;
5030 }
5031 }
5032
5033 if (ret == -ENOENT)
5034 return ret;
5035
5036 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
5037 if (ret == -1)
5038 return -EALREADY;
5039
5040 req_set_fail_links(req);
5041 io_cqring_fill_event(req, -ECANCELED);
5042 io_put_req(req);
5043 return 0;
5044 }
5045
5046 static int io_timeout_remove_prep(struct io_kiocb *req,
5047 const struct io_uring_sqe *sqe)
5048 {
5049 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5050 return -EINVAL;
5051 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5052 return -EINVAL;
5053 if (sqe->ioprio || sqe->buf_index || sqe->len)
5054 return -EINVAL;
5055
5056 req->timeout.addr = READ_ONCE(sqe->addr);
5057 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
5058 if (req->timeout.flags)
5059 return -EINVAL;
5060
5061 return 0;
5062 }
5063
5064 /*
5065 * Remove an existing timeout command
5066 */
5067 static int io_timeout_remove(struct io_kiocb *req)
5068 {
5069 struct io_ring_ctx *ctx = req->ctx;
5070 int ret;
5071
5072 spin_lock_irq(&ctx->completion_lock);
5073 ret = io_timeout_cancel(ctx, req->timeout.addr);
5074
5075 io_cqring_fill_event(req, ret);
5076 io_commit_cqring(ctx);
5077 spin_unlock_irq(&ctx->completion_lock);
5078 io_cqring_ev_posted(ctx);
5079 if (ret < 0)
5080 req_set_fail_links(req);
5081 io_put_req(req);
5082 return 0;
5083 }
5084
5085 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5086 bool is_timeout_link)
5087 {
5088 struct io_timeout_data *data;
5089 unsigned flags;
5090 u32 off = READ_ONCE(sqe->off);
5091
5092 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5093 return -EINVAL;
5094 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
5095 return -EINVAL;
5096 if (off && is_timeout_link)
5097 return -EINVAL;
5098 flags = READ_ONCE(sqe->timeout_flags);
5099 if (flags & ~IORING_TIMEOUT_ABS)
5100 return -EINVAL;
5101
5102 req->timeout.off = off;
5103
5104 if (!req->io && io_alloc_async_ctx(req))
5105 return -ENOMEM;
5106
5107 data = &req->io->timeout;
5108 data->req = req;
5109
5110 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5111 return -EFAULT;
5112
5113 if (flags & IORING_TIMEOUT_ABS)
5114 data->mode = HRTIMER_MODE_ABS;
5115 else
5116 data->mode = HRTIMER_MODE_REL;
5117
5118 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5119 return 0;
5120 }
5121
5122 static int io_timeout(struct io_kiocb *req)
5123 {
5124 struct io_ring_ctx *ctx = req->ctx;
5125 struct io_timeout_data *data = &req->io->timeout;
5126 struct list_head *entry;
5127 u32 tail, off = req->timeout.off;
5128
5129 spin_lock_irq(&ctx->completion_lock);
5130
5131 /*
5132 * sqe->off holds how many events need to occur for this
5133 * timeout event to be satisfied. If it isn't set, then this is
5134 * a pure timeout request and the sequence isn't used.
5135 */
5136 if (io_is_timeout_noseq(req)) {
5137 entry = ctx->timeout_list.prev;
5138 goto add;
5139 }
5140
5141 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5142 req->timeout.target_seq = tail + off;
5143
5144 /*
5145 * Insertion sort, ensuring the first entry in the list is always
5146 * the one we need first.
5147 */
5148 list_for_each_prev(entry, &ctx->timeout_list) {
5149 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5150 timeout.list);
5151
5152 if (io_is_timeout_noseq(nxt))
5153 continue;
5154 /* nxt.seq is behind @tail, otherwise would've been completed */
5155 if (off >= nxt->timeout.target_seq - tail)
5156 break;
5157 }
5158 add:
5159 list_add(&req->timeout.list, entry);
5160 data->timer.function = io_timeout_fn;
5161 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5162 spin_unlock_irq(&ctx->completion_lock);
5163 return 0;
5164 }
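/*
 * Editorial sketch (not part of the kernel source): IORING_OP_TIMEOUT as
 * queued by io_timeout() above. The `count` argument lands in sqe->off: the
 * number of completions that satisfies the timeout, with 0 meaning a pure
 * timer. Assumes <liburing.h>; names are illustrative only.
 *
 *	static int example_timeout(struct io_uring *ring)
 *	{
 *		struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		// fire after 1s, or earlier once 4 other requests complete
 *		io_uring_prep_timeout(sqe, &ts, 4, 0);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;		// -ETIME on expiry, 0 if the count was hit
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */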
5165
5166 static bool io_cancel_cb(struct io_wq_work *work, void *data)
5167 {
5168 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5169
5170 return req->user_data == (unsigned long) data;
5171 }
5172
5173 static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
5174 {
5175 enum io_wq_cancel cancel_ret;
5176 int ret = 0;
5177
5178 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
5179 switch (cancel_ret) {
5180 case IO_WQ_CANCEL_OK:
5181 ret = 0;
5182 break;
5183 case IO_WQ_CANCEL_RUNNING:
5184 ret = -EALREADY;
5185 break;
5186 case IO_WQ_CANCEL_NOTFOUND:
5187 ret = -ENOENT;
5188 break;
5189 }
5190
5191 return ret;
5192 }
5193
5194 static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5195 struct io_kiocb *req, __u64 sqe_addr,
5196 int success_ret)
5197 {
5198 unsigned long flags;
5199 int ret;
5200
5201 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5202 if (ret != -ENOENT) {
5203 spin_lock_irqsave(&ctx->completion_lock, flags);
5204 goto done;
5205 }
5206
5207 spin_lock_irqsave(&ctx->completion_lock, flags);
5208 ret = io_timeout_cancel(ctx, sqe_addr);
5209 if (ret != -ENOENT)
5210 goto done;
5211 ret = io_poll_cancel(ctx, sqe_addr);
5212 done:
5213 if (!ret)
5214 ret = success_ret;
5215 io_cqring_fill_event(req, ret);
5216 io_commit_cqring(ctx);
5217 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5218 io_cqring_ev_posted(ctx);
5219
5220 if (ret < 0)
5221 req_set_fail_links(req);
5222 io_put_req(req);
5223 }
5224
5225 static int io_async_cancel_prep(struct io_kiocb *req,
5226 const struct io_uring_sqe *sqe)
5227 {
5228 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5229 return -EINVAL;
5230 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5231 return -EINVAL;
5232 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
5233 return -EINVAL;
5234
5235 req->cancel.addr = READ_ONCE(sqe->addr);
5236 return 0;
5237 }
5238
5239 static int io_async_cancel(struct io_kiocb *req)
5240 {
5241 struct io_ring_ctx *ctx = req->ctx;
5242
5243 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5244 return 0;
5245 }
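/*
 * Editorial sketch (not part of the kernel source): cancelling an in-flight
 * request by its user_data, which feeds sqe->addr and reaches
 * io_async_find_and_cancel() above (io-wq first, then timeouts, then poll).
 * Assumes <liburing.h>; the prep helper's signature differs slightly across
 * liburing versions, so treat this as illustrative only.
 *
 *	static void example_cancel(struct io_uring *ring, void *user_data)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *
 *		io_uring_prep_cancel(sqe, user_data, 0);
 *		io_uring_submit(ring);
 *		// the cancel CQE reports 0, -ENOENT or -EALREADY, mirroring
 *		// io_async_cancel_one() above
 *	}
 */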
5246
5247 static int io_files_update_prep(struct io_kiocb *req,
5248 const struct io_uring_sqe *sqe)
5249 {
5250 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5251 return -EINVAL;
5252 if (sqe->ioprio || sqe->rw_flags)
5253 return -EINVAL;
5254
5255 req->files_update.offset = READ_ONCE(sqe->off);
5256 req->files_update.nr_args = READ_ONCE(sqe->len);
5257 if (!req->files_update.nr_args)
5258 return -EINVAL;
5259 req->files_update.arg = READ_ONCE(sqe->addr);
5260 return 0;
5261 }
5262
5263 static int io_files_update(struct io_kiocb *req, bool force_nonblock,
5264 struct io_comp_state *cs)
5265 {
5266 struct io_ring_ctx *ctx = req->ctx;
5267 struct io_uring_files_update up;
5268 int ret;
5269
5270 if (force_nonblock)
5271 return -EAGAIN;
5272
5273 up.offset = req->files_update.offset;
5274 up.fds = req->files_update.arg;
5275
5276 mutex_lock(&ctx->uring_lock);
5277 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
5278 mutex_unlock(&ctx->uring_lock);
5279
5280 if (ret < 0)
5281 req_set_fail_links(req);
5282 __io_req_complete(req, ret, 0, cs);
5283 return 0;
5284 }
5285
5286 static int io_req_defer_prep(struct io_kiocb *req,
5287 const struct io_uring_sqe *sqe)
5288 {
5289 ssize_t ret = 0;
5290
5291 if (!sqe)
5292 return 0;
5293
5294 if (io_alloc_async_ctx(req))
5295 return -EAGAIN;
5296 ret = io_prep_work_files(req);
5297 if (unlikely(ret))
5298 return ret;
5299
5300 switch (req->opcode) {
5301 case IORING_OP_NOP:
5302 break;
5303 case IORING_OP_READV:
5304 case IORING_OP_READ_FIXED:
5305 case IORING_OP_READ:
5306 ret = io_read_prep(req, sqe, true);
5307 break;
5308 case IORING_OP_WRITEV:
5309 case IORING_OP_WRITE_FIXED:
5310 case IORING_OP_WRITE:
5311 ret = io_write_prep(req, sqe, true);
5312 break;
5313 case IORING_OP_POLL_ADD:
5314 ret = io_poll_add_prep(req, sqe);
5315 break;
5316 case IORING_OP_POLL_REMOVE:
5317 ret = io_poll_remove_prep(req, sqe);
5318 break;
5319 case IORING_OP_FSYNC:
5320 ret = io_prep_fsync(req, sqe);
5321 break;
5322 case IORING_OP_SYNC_FILE_RANGE:
5323 ret = io_prep_sfr(req, sqe);
5324 break;
5325 case IORING_OP_SENDMSG:
5326 case IORING_OP_SEND:
5327 ret = io_sendmsg_prep(req, sqe);
5328 break;
5329 case IORING_OP_RECVMSG:
5330 case IORING_OP_RECV:
5331 ret = io_recvmsg_prep(req, sqe);
5332 break;
5333 case IORING_OP_CONNECT:
5334 ret = io_connect_prep(req, sqe);
5335 break;
5336 case IORING_OP_TIMEOUT:
5337 ret = io_timeout_prep(req, sqe, false);
5338 break;
5339 case IORING_OP_TIMEOUT_REMOVE:
5340 ret = io_timeout_remove_prep(req, sqe);
5341 break;
5342 case IORING_OP_ASYNC_CANCEL:
5343 ret = io_async_cancel_prep(req, sqe);
5344 break;
5345 case IORING_OP_LINK_TIMEOUT:
5346 ret = io_timeout_prep(req, sqe, true);
5347 break;
5348 case IORING_OP_ACCEPT:
5349 ret = io_accept_prep(req, sqe);
5350 break;
5351 case IORING_OP_FALLOCATE:
5352 ret = io_fallocate_prep(req, sqe);
5353 break;
5354 case IORING_OP_OPENAT:
5355 ret = io_openat_prep(req, sqe);
5356 break;
5357 case IORING_OP_CLOSE:
5358 ret = io_close_prep(req, sqe);
5359 break;
5360 case IORING_OP_FILES_UPDATE:
5361 ret = io_files_update_prep(req, sqe);
5362 break;
5363 case IORING_OP_STATX:
5364 ret = io_statx_prep(req, sqe);
5365 break;
5366 case IORING_OP_FADVISE:
5367 ret = io_fadvise_prep(req, sqe);
5368 break;
5369 case IORING_OP_MADVISE:
5370 ret = io_madvise_prep(req, sqe);
5371 break;
5372 case IORING_OP_OPENAT2:
5373 ret = io_openat2_prep(req, sqe);
5374 break;
5375 case IORING_OP_EPOLL_CTL:
5376 ret = io_epoll_ctl_prep(req, sqe);
5377 break;
5378 case IORING_OP_SPLICE:
5379 ret = io_splice_prep(req, sqe);
5380 break;
5381 case IORING_OP_PROVIDE_BUFFERS:
5382 ret = io_provide_buffers_prep(req, sqe);
5383 break;
5384 case IORING_OP_REMOVE_BUFFERS:
5385 ret = io_remove_buffers_prep(req, sqe);
5386 break;
5387 case IORING_OP_TEE:
5388 ret = io_tee_prep(req, sqe);
5389 break;
5390 default:
5391 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5392 req->opcode);
5393 ret = -EINVAL;
5394 break;
5395 }
5396
5397 return ret;
5398 }
5399
5400 static u32 io_get_sequence(struct io_kiocb *req)
5401 {
5402 struct io_kiocb *pos;
5403 struct io_ring_ctx *ctx = req->ctx;
5404 u32 total_submitted, nr_reqs = 1;
5405
5406 if (req->flags & REQ_F_LINK_HEAD)
5407 list_for_each_entry(pos, &req->link_list, link_list)
5408 nr_reqs++;
5409
5410 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5411 return total_submitted - nr_reqs;
5412 }
5413
5414 static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5415 {
5416 struct io_ring_ctx *ctx = req->ctx;
5417 struct io_defer_entry *de;
5418 int ret;
5419 u32 seq;
5420
5421 /* Still need to defer if there is a pending req in the defer list. */
5422 if (likely(list_empty_careful(&ctx->defer_list) &&
5423 !(req->flags & REQ_F_IO_DRAIN)))
5424 return 0;
5425
5426 seq = io_get_sequence(req);
5427 /* Still a chance to pass the sequence check */
5428 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
5429 return 0;
5430
5431 if (!req->io) {
5432 ret = io_req_defer_prep(req, sqe);
5433 if (ret)
5434 return ret;
5435 }
5436 io_prep_async_link(req);
5437 de = kmalloc(sizeof(*de), GFP_KERNEL);
5438 if (!de)
5439 return -ENOMEM;
5440
5441 spin_lock_irq(&ctx->completion_lock);
5442 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
5443 spin_unlock_irq(&ctx->completion_lock);
5444 kfree(de);
5445 io_queue_async_work(req);
5446 return -EIOCBQUEUED;
5447 }
5448
5449 trace_io_uring_defer(ctx, req, req->user_data);
5450 de->req = req;
5451 de->seq = seq;
5452 list_add_tail(&de->list, &ctx->defer_list);
5453 spin_unlock_irq(&ctx->completion_lock);
5454 return -EIOCBQUEUED;
5455 }
5456
5457 static void __io_clean_op(struct io_kiocb *req)
5458 {
5459 struct io_async_ctx *io = req->io;
5460
5461 if (req->flags & REQ_F_BUFFER_SELECTED) {
5462 switch (req->opcode) {
5463 case IORING_OP_READV:
5464 case IORING_OP_READ_FIXED:
5465 case IORING_OP_READ:
5466 kfree((void *)(unsigned long)req->rw.addr);
5467 break;
5468 case IORING_OP_RECVMSG:
5469 case IORING_OP_RECV:
5470 kfree(req->sr_msg.kbuf);
5471 break;
5472 }
5473 req->flags &= ~REQ_F_BUFFER_SELECTED;
5474 }
5475
5476 if (req->flags & REQ_F_NEED_CLEANUP) {
5477 switch (req->opcode) {
5478 case IORING_OP_READV:
5479 case IORING_OP_READ_FIXED:
5480 case IORING_OP_READ:
5481 case IORING_OP_WRITEV:
5482 case IORING_OP_WRITE_FIXED:
5483 case IORING_OP_WRITE:
5484 if (io->rw.iov != io->rw.fast_iov)
5485 kfree(io->rw.iov);
5486 break;
5487 case IORING_OP_RECVMSG:
5488 case IORING_OP_SENDMSG:
5489 if (io->msg.iov != io->msg.fast_iov)
5490 kfree(io->msg.iov);
5491 break;
5492 case IORING_OP_SPLICE:
5493 case IORING_OP_TEE:
5494 io_put_file(req, req->splice.file_in,
5495 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5496 break;
5497 }
5498 req->flags &= ~REQ_F_NEED_CLEANUP;
5499 }
5500 }
5501
5502 static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5503 bool force_nonblock, struct io_comp_state *cs)
5504 {
5505 struct io_ring_ctx *ctx = req->ctx;
5506 int ret;
5507
5508 switch (req->opcode) {
5509 case IORING_OP_NOP:
5510 ret = io_nop(req, cs);
5511 break;
5512 case IORING_OP_READV:
5513 case IORING_OP_READ_FIXED:
5514 case IORING_OP_READ:
5515 if (sqe) {
5516 ret = io_read_prep(req, sqe, force_nonblock);
5517 if (ret < 0)
5518 break;
5519 }
5520 ret = io_read(req, force_nonblock, cs);
5521 break;
5522 case IORING_OP_WRITEV:
5523 case IORING_OP_WRITE_FIXED:
5524 case IORING_OP_WRITE:
5525 if (sqe) {
5526 ret = io_write_prep(req, sqe, force_nonblock);
5527 if (ret < 0)
5528 break;
5529 }
5530 ret = io_write(req, force_nonblock, cs);
5531 break;
5532 case IORING_OP_FSYNC:
5533 if (sqe) {
5534 ret = io_prep_fsync(req, sqe);
5535 if (ret < 0)
5536 break;
5537 }
5538 ret = io_fsync(req, force_nonblock);
5539 break;
5540 case IORING_OP_POLL_ADD:
5541 if (sqe) {
5542 ret = io_poll_add_prep(req, sqe);
5543 if (ret)
5544 break;
5545 }
5546 ret = io_poll_add(req);
5547 break;
5548 case IORING_OP_POLL_REMOVE:
5549 if (sqe) {
5550 ret = io_poll_remove_prep(req, sqe);
5551 if (ret < 0)
5552 break;
5553 }
5554 ret = io_poll_remove(req);
5555 break;
5556 case IORING_OP_SYNC_FILE_RANGE:
5557 if (sqe) {
5558 ret = io_prep_sfr(req, sqe);
5559 if (ret < 0)
5560 break;
5561 }
5562 ret = io_sync_file_range(req, force_nonblock);
5563 break;
5564 case IORING_OP_SENDMSG:
5565 case IORING_OP_SEND:
5566 if (sqe) {
5567 ret = io_sendmsg_prep(req, sqe);
5568 if (ret < 0)
5569 break;
5570 }
5571 if (req->opcode == IORING_OP_SENDMSG)
5572 ret = io_sendmsg(req, force_nonblock, cs);
5573 else
5574 ret = io_send(req, force_nonblock, cs);
5575 break;
5576 case IORING_OP_RECVMSG:
5577 case IORING_OP_RECV:
5578 if (sqe) {
5579 ret = io_recvmsg_prep(req, sqe);
5580 if (ret)
5581 break;
5582 }
5583 if (req->opcode == IORING_OP_RECVMSG)
5584 ret = io_recvmsg(req, force_nonblock, cs);
5585 else
5586 ret = io_recv(req, force_nonblock, cs);
5587 break;
5588 case IORING_OP_TIMEOUT:
5589 if (sqe) {
5590 ret = io_timeout_prep(req, sqe, false);
5591 if (ret)
5592 break;
5593 }
5594 ret = io_timeout(req);
5595 break;
5596 case IORING_OP_TIMEOUT_REMOVE:
5597 if (sqe) {
5598 ret = io_timeout_remove_prep(req, sqe);
5599 if (ret)
5600 break;
5601 }
5602 ret = io_timeout_remove(req);
5603 break;
5604 case IORING_OP_ACCEPT:
5605 if (sqe) {
5606 ret = io_accept_prep(req, sqe);
5607 if (ret)
5608 break;
5609 }
5610 ret = io_accept(req, force_nonblock, cs);
5611 break;
5612 case IORING_OP_CONNECT:
5613 if (sqe) {
5614 ret = io_connect_prep(req, sqe);
5615 if (ret)
5616 break;
5617 }
5618 ret = io_connect(req, force_nonblock, cs);
5619 break;
5620 case IORING_OP_ASYNC_CANCEL:
5621 if (sqe) {
5622 ret = io_async_cancel_prep(req, sqe);
5623 if (ret)
5624 break;
5625 }
5626 ret = io_async_cancel(req);
5627 break;
5628 case IORING_OP_FALLOCATE:
5629 if (sqe) {
5630 ret = io_fallocate_prep(req, sqe);
5631 if (ret)
5632 break;
5633 }
5634 ret = io_fallocate(req, force_nonblock);
5635 break;
5636 case IORING_OP_OPENAT:
5637 if (sqe) {
5638 ret = io_openat_prep(req, sqe);
5639 if (ret)
5640 break;
5641 }
5642 ret = io_openat(req, force_nonblock);
5643 break;
5644 case IORING_OP_CLOSE:
5645 if (sqe) {
5646 ret = io_close_prep(req, sqe);
5647 if (ret)
5648 break;
5649 }
5650 ret = io_close(req, force_nonblock, cs);
5651 break;
5652 case IORING_OP_FILES_UPDATE:
5653 if (sqe) {
5654 ret = io_files_update_prep(req, sqe);
5655 if (ret)
5656 break;
5657 }
5658 ret = io_files_update(req, force_nonblock, cs);
5659 break;
5660 case IORING_OP_STATX:
5661 if (sqe) {
5662 ret = io_statx_prep(req, sqe);
5663 if (ret)
5664 break;
5665 }
5666 ret = io_statx(req, force_nonblock);
5667 break;
5668 case IORING_OP_FADVISE:
5669 if (sqe) {
5670 ret = io_fadvise_prep(req, sqe);
5671 if (ret)
5672 break;
5673 }
5674 ret = io_fadvise(req, force_nonblock);
5675 break;
5676 case IORING_OP_MADVISE:
5677 if (sqe) {
5678 ret = io_madvise_prep(req, sqe);
5679 if (ret)
5680 break;
5681 }
5682 ret = io_madvise(req, force_nonblock);
5683 break;
5684 case IORING_OP_OPENAT2:
5685 if (sqe) {
5686 ret = io_openat2_prep(req, sqe);
5687 if (ret)
5688 break;
5689 }
5690 ret = io_openat2(req, force_nonblock);
5691 break;
5692 case IORING_OP_EPOLL_CTL:
5693 if (sqe) {
5694 ret = io_epoll_ctl_prep(req, sqe);
5695 if (ret)
5696 break;
5697 }
5698 ret = io_epoll_ctl(req, force_nonblock, cs);
5699 break;
5700 case IORING_OP_SPLICE:
5701 if (sqe) {
5702 ret = io_splice_prep(req, sqe);
5703 if (ret < 0)
5704 break;
5705 }
5706 ret = io_splice(req, force_nonblock);
5707 break;
5708 case IORING_OP_PROVIDE_BUFFERS:
5709 if (sqe) {
5710 ret = io_provide_buffers_prep(req, sqe);
5711 if (ret)
5712 break;
5713 }
5714 ret = io_provide_buffers(req, force_nonblock, cs);
5715 break;
5716 case IORING_OP_REMOVE_BUFFERS:
5717 if (sqe) {
5718 ret = io_remove_buffers_prep(req, sqe);
5719 if (ret)
5720 break;
5721 }
5722 ret = io_remove_buffers(req, force_nonblock, cs);
5723 break;
5724 case IORING_OP_TEE:
5725 if (sqe) {
5726 ret = io_tee_prep(req, sqe);
5727 if (ret < 0)
5728 break;
5729 }
5730 ret = io_tee(req, force_nonblock);
5731 break;
5732 default:
5733 ret = -EINVAL;
5734 break;
5735 }
5736
5737 if (ret)
5738 return ret;
5739
5740 /* If the op doesn't have a file, we're not polling for it */
5741 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
5742 const bool in_async = io_wq_current_is_worker();
5743
5744 /* workqueue context doesn't hold uring_lock, grab it now */
5745 if (in_async)
5746 mutex_lock(&ctx->uring_lock);
5747
5748 io_iopoll_req_issued(req);
5749
5750 if (in_async)
5751 mutex_unlock(&ctx->uring_lock);
5752 }
5753
5754 return 0;
5755 }
5756
5757 static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
5758 {
5759 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5760 struct io_kiocb *timeout;
5761 int ret = 0;
5762
5763 timeout = io_prep_linked_timeout(req);
5764 if (timeout)
5765 io_queue_linked_timeout(timeout);
5766
5767 /* if NO_CANCEL is set, we must still run the work */
5768 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
5769 IO_WQ_WORK_CANCEL) {
5770 ret = -ECANCELED;
5771 }
5772
5773 if (!ret) {
5774 do {
5775 ret = io_issue_sqe(req, NULL, false, NULL);
5776 /*
5777 * We can get EAGAIN for polled IO even though we're
5778 * forcing a sync submission from here, since we can't
5779 * wait for request slots on the block side.
5780 */
5781 if (ret != -EAGAIN)
5782 break;
5783 cond_resched();
5784 } while (1);
5785 }
5786
5787 if (ret) {
5788 req_set_fail_links(req);
5789 io_req_complete(req, ret);
5790 }
5791
5792 return io_steal_work(req);
5793 }
5794
5795 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5796 int index)
5797 {
5798 struct fixed_file_table *table;
5799
5800 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
5801 return table->files[index & IORING_FILE_TABLE_MASK];
5802 }
5803
5804 static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5805 int fd, struct file **out_file, bool fixed)
5806 {
5807 struct io_ring_ctx *ctx = req->ctx;
5808 struct file *file;
5809
5810 if (fixed) {
5811 if (unlikely(!ctx->file_data ||
5812 (unsigned) fd >= ctx->nr_user_files))
5813 return -EBADF;
5814 fd = array_index_nospec(fd, ctx->nr_user_files);
5815 file = io_file_from_index(ctx, fd);
5816 if (file) {
5817 req->fixed_file_refs = ctx->file_data->cur_refs;
5818 percpu_ref_get(req->fixed_file_refs);
5819 }
5820 } else {
5821 trace_io_uring_file_get(ctx, fd);
5822 file = __io_file_get(state, fd);
5823 }
5824
5825 if (file || io_op_defs[req->opcode].needs_file_no_error) {
5826 *out_file = file;
5827 return 0;
5828 }
5829 return -EBADF;
5830 }
5831
5832 static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
5833 int fd)
5834 {
5835 bool fixed;
5836
5837 fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
5838 if (unlikely(!fixed && io_async_submit(req->ctx)))
5839 return -EBADF;
5840
5841 return io_file_get(state, req, fd, &req->file, fixed);
5842 }
5843
5844 static int io_grab_files(struct io_kiocb *req)
5845 {
5846 int ret = -EBADF;
5847 struct io_ring_ctx *ctx = req->ctx;
5848
5849 io_req_init_async(req);
5850
5851 if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
5852 return 0;
5853 if (!ctx->ring_file)
5854 return -EBADF;
5855
5856 rcu_read_lock();
5857 spin_lock_irq(&ctx->inflight_lock);
5858 /*
5859 * We use the f_ops->flush() handler to ensure that we can flush
5860 * out work accessing these files if the fd is closed. Check if
5861 * the fd has changed since we started down this path, and disallow
5862 * this operation if it has.
5863 */
5864 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
5865 list_add(&req->inflight_entry, &ctx->inflight_list);
5866 req->flags |= REQ_F_INFLIGHT;
5867 req->work.files = current->files;
5868 ret = 0;
5869 }
5870 spin_unlock_irq(&ctx->inflight_lock);
5871 rcu_read_unlock();
5872
5873 return ret;
5874 }
5875
5876 static inline int io_prep_work_files(struct io_kiocb *req)
5877 {
5878 if (!io_op_defs[req->opcode].file_table)
5879 return 0;
5880 return io_grab_files(req);
5881 }
5882
5883 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
5884 {
5885 struct io_timeout_data *data = container_of(timer,
5886 struct io_timeout_data, timer);
5887 struct io_kiocb *req = data->req;
5888 struct io_ring_ctx *ctx = req->ctx;
5889 struct io_kiocb *prev = NULL;
5890 unsigned long flags;
5891
5892 spin_lock_irqsave(&ctx->completion_lock, flags);
5893
5894 /*
5895 * We don't expect the list to be empty; that will only happen if we
5896 * race with the completion of the linked work.
5897 */
5898 if (!list_empty(&req->link_list)) {
5899 prev = list_entry(req->link_list.prev, struct io_kiocb,
5900 link_list);
5901 if (refcount_inc_not_zero(&prev->refs)) {
5902 list_del_init(&req->link_list);
5903 prev->flags &= ~REQ_F_LINK_TIMEOUT;
5904 } else
5905 prev = NULL;
5906 }
5907
5908 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5909
5910 if (prev) {
5911 req_set_fail_links(prev);
5912 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
5913 io_put_req(prev);
5914 } else {
5915 io_req_complete(req, -ETIME);
5916 }
5917 return HRTIMER_NORESTART;
5918 }
5919
5920 static void io_queue_linked_timeout(struct io_kiocb *req)
5921 {
5922 struct io_ring_ctx *ctx = req->ctx;
5923
5924 /*
5925 * If the list is now empty, then our linked request finished before
5926 * we got a chance to setup the timer
5927 * we got a chance to set up the timer
5928 spin_lock_irq(&ctx->completion_lock);
5929 if (!list_empty(&req->link_list)) {
5930 struct io_timeout_data *data = &req->io->timeout;
5931
5932 data->timer.function = io_link_timeout_fn;
5933 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5934 data->mode);
5935 }
5936 spin_unlock_irq(&ctx->completion_lock);
5937
5938 /* drop submission reference */
5939 io_put_req(req);
5940 }
5941
5942 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
5943 {
5944 struct io_kiocb *nxt;
5945
5946 if (!(req->flags & REQ_F_LINK_HEAD))
5947 return NULL;
5948 if (req->flags & REQ_F_LINK_TIMEOUT)
5949 return NULL;
5950
5951 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5952 link_list);
5953 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
5954 return NULL;
5955
5956 req->flags |= REQ_F_LINK_TIMEOUT;
5957 return nxt;
5958 }
5959
5960 static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5961 struct io_comp_state *cs)
5962 {
5963 struct io_kiocb *linked_timeout;
5964 struct io_kiocb *nxt;
5965 const struct cred *old_creds = NULL;
5966 int ret;
5967
5968 again:
5969 linked_timeout = io_prep_linked_timeout(req);
5970
5971 if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
5972 req->work.creds != current_cred()) {
5973 if (old_creds)
5974 revert_creds(old_creds);
5975 if (old_creds == req->work.creds)
5976 old_creds = NULL; /* restored original creds */
5977 else
5978 old_creds = override_creds(req->work.creds);
5979 }
5980
5981 ret = io_issue_sqe(req, sqe, true, cs);
5982
5983 /*
5984 * We async punt it if the file wasn't marked NOWAIT, or if the file
5985 * doesn't support non-blocking read/write attempts
5986 */
5987 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
5988 if (!io_arm_poll_handler(req)) {
5989 punt:
5990 ret = io_prep_work_files(req);
5991 if (unlikely(ret))
5992 goto err;
5993 /*
5994 * Queued up for async execution, worker will release
5995 * submit reference when the iocb is actually submitted.
5996 */
5997 io_queue_async_work(req);
5998 }
5999
6000 if (linked_timeout)
6001 io_queue_linked_timeout(linked_timeout);
6002 goto exit;
6003 }
6004
6005 if (unlikely(ret)) {
6006 err:
6007 /* un-prep timeout, so it'll be killed like any other linked request */
6008 req->flags &= ~REQ_F_LINK_TIMEOUT;
6009 req_set_fail_links(req);
6010 io_put_req(req);
6011 io_req_complete(req, ret);
6012 goto exit;
6013 }
6014
6015 /* drop submission reference */
6016 nxt = io_put_req_find_next(req);
6017 if (linked_timeout)
6018 io_queue_linked_timeout(linked_timeout);
6019
6020 if (nxt) {
6021 req = nxt;
6022
6023 if (req->flags & REQ_F_FORCE_ASYNC)
6024 goto punt;
6025 goto again;
6026 }
6027 exit:
6028 if (old_creds)
6029 revert_creds(old_creds);
6030 }
6031
6032 static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6033 struct io_comp_state *cs)
6034 {
6035 int ret;
6036
6037 ret = io_req_defer(req, sqe);
6038 if (ret) {
6039 if (ret != -EIOCBQUEUED) {
6040 fail_req:
6041 req_set_fail_links(req);
6042 io_put_req(req);
6043 io_req_complete(req, ret);
6044 }
6045 } else if (req->flags & REQ_F_FORCE_ASYNC) {
6046 if (!req->io) {
6047 ret = io_req_defer_prep(req, sqe);
6048 if (unlikely(ret))
6049 goto fail_req;
6050 }
6051
6052 /*
6053 * Never try inline submit if IOSQE_ASYNC is set, go straight
6054 * to async execution.
6055 */
6056 io_req_init_async(req);
6057 req->work.flags |= IO_WQ_WORK_CONCURRENT;
6058 io_queue_async_work(req);
6059 } else {
6060 __io_queue_sqe(req, sqe, cs);
6061 }
6062 }
6063
6064 static inline void io_queue_link_head(struct io_kiocb *req,
6065 struct io_comp_state *cs)
6066 {
6067 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
6068 io_put_req(req);
6069 io_req_complete(req, -ECANCELED);
6070 } else
6071 io_queue_sqe(req, NULL, cs);
6072 }
6073
6074 static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6075 struct io_kiocb **link, struct io_comp_state *cs)
6076 {
6077 struct io_ring_ctx *ctx = req->ctx;
6078 int ret;
6079
6080 /*
6081 * If we already have a head request, queue this one for async
6082 * submittal once the head completes. If we don't have a head but
6083 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6084 * submitted sync once the chain is complete. If none of those
6085 * conditions are true (normal request), then just queue it.
6086 */
6087 if (*link) {
6088 struct io_kiocb *head = *link;
6089
6090 /*
6091 * Since a link executes sequentially, draining both sides
6092 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
6093 * requests in the link. So, it drains the head and the
6094 * next after the link request. The last one is done via the
6095 * drain_next flag to persist the effect across calls.
6096 */
6097 if (req->flags & REQ_F_IO_DRAIN) {
6098 head->flags |= REQ_F_IO_DRAIN;
6099 ctx->drain_next = 1;
6100 }
6101 ret = io_req_defer_prep(req, sqe);
6102 if (unlikely(ret)) {
6103 /* fail even hard links since we don't submit */
6104 head->flags |= REQ_F_FAIL_LINK;
6105 return ret;
6106 }
6107 trace_io_uring_link(ctx, req, head);
6108 io_get_req_task(req);
6109 list_add_tail(&req->link_list, &head->link_list);
6110
6111 /* last request of a link, enqueue the link */
6112 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
6113 io_queue_link_head(head, cs);
6114 *link = NULL;
6115 }
6116 } else {
6117 if (unlikely(ctx->drain_next)) {
6118 req->flags |= REQ_F_IO_DRAIN;
6119 ctx->drain_next = 0;
6120 }
6121 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
6122 req->flags |= REQ_F_LINK_HEAD;
6123 INIT_LIST_HEAD(&req->link_list);
6124
6125 ret = io_req_defer_prep(req, sqe);
6126 if (unlikely(ret))
6127 req->flags |= REQ_F_FAIL_LINK;
6128 *link = req;
6129 } else {
6130 io_queue_sqe(req, sqe, cs);
6131 }
6132 }
6133
6134 return 0;
6135 }
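/*
 * Editorial sketch (not part of the kernel source): how the link handling in
 * io_submit_sqe() above is driven from userspace. IOSQE_IO_LINK chains a
 * read to a linked timeout, which io_prep_linked_timeout()/
 * io_queue_linked_timeout() arm when the head request is issued. Assumes
 * <liburing.h>; names are illustrative only.
 *
 *	static void example_read_with_timeout(struct io_uring *ring, int fd,
 *					      void *buf, unsigned len)
 *	{
 *		struct __kernel_timespec ts = { .tv_nsec = 500000000 };
 *		struct io_uring_sqe *sqe;
 *
 *		sqe = io_uring_get_sqe(ring);
 *		io_uring_prep_read(sqe, fd, buf, len, 0);
 *		sqe->flags |= IOSQE_IO_LINK;	// next SQE depends on this one
 *
 *		sqe = io_uring_get_sqe(ring);
 *		io_uring_prep_link_timeout(sqe, &ts, 0);
 *
 *		io_uring_submit(ring);
 *		// if the read hasn't finished within 500ms it is cancelled
 *		// (typically -ECANCELED) and the timeout CQE carries -ETIME;
 *		// if the read wins, the timeout completes with -ECANCELED
 *	}
 */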
6136
6137 /*
6138 * Batched submission is done, ensure local IO is flushed out.
6139 */
6140 static void io_submit_state_end(struct io_submit_state *state)
6141 {
6142 if (!list_empty(&state->comp.list))
6143 io_submit_flush_completions(&state->comp);
6144 blk_finish_plug(&state->plug);
6145 io_state_file_put(state);
6146 if (state->free_reqs)
6147 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
6148 }
6149
6150 /*
6151 * Start submission side cache.
6152 */
6153 static void io_submit_state_start(struct io_submit_state *state,
6154 struct io_ring_ctx *ctx, unsigned int max_ios)
6155 {
6156 blk_start_plug(&state->plug);
6157 #ifdef CONFIG_BLOCK
6158 state->plug.nowait = true;
6159 #endif
6160 state->comp.nr = 0;
6161 INIT_LIST_HEAD(&state->comp.list);
6162 state->comp.ctx = ctx;
6163 state->free_reqs = 0;
6164 state->file = NULL;
6165 state->ios_left = max_ios;
6166 }
6167
6168 static void io_commit_sqring(struct io_ring_ctx *ctx)
6169 {
6170 struct io_rings *rings = ctx->rings;
6171
6172 /*
6173 * Ensure any loads from the SQEs are done at this point,
6174 * since once we write the new head, the application could
6175 * write new data to them.
6176 */
6177 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
6178 }
6179
6180 /*
6181 * Fetch an sqe, if one is available. Note that the returned sqe points to
6182 * memory mapped by userspace. This means that care needs to be taken to
6183 * ensure that reads are stable, as we cannot rely on userspace always
6184 * being a good citizen. If members of the sqe are validated and then later
6185 * used, it's important that those reads are done through READ_ONCE() to
6186 * prevent a re-load down the line.
6187 */
6188 static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
6189 {
6190 u32 *sq_array = ctx->sq_array;
6191 unsigned head;
6192
6193 /*
6194 * The cached sq head (or cq tail) serves two purposes:
6195 *
6196 * 1) allows us to batch the cost of updating the user visible
6197 * head.
6198 * 2) allows the kernel side to track the head on its own, even
6199 * though the application is the one updating it.
6200 */
6201 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
6202 if (likely(head < ctx->sq_entries))
6203 return &ctx->sq_sqes[head];
6204
6205 /* drop invalid entries */
6206 ctx->cached_sq_dropped++;
6207 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
6208 return NULL;
6209 }
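/*
 * Editorial sketch (not part of the kernel source): the application-side
 * counterpart of io_get_sqe() above, expressed through liburing.
 * io_uring_get_sqe() hands out the SQE slot that the kernel later locates
 * via the sq_array indirection, and io_uring_submit() fills the array slot
 * and advances the SQ tail that io_get_sqe() consumes here. Assumes
 * <liburing.h>; names are illustrative only.
 *
 *	static int example_nop(struct io_uring *ring)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *
 *		if (!sqe)
 *			return -EBUSY;		// SQ ring is currently full
 *		io_uring_prep_nop(sqe);
 *		return io_uring_submit(ring);	// publishes the new SQ tail
 *	}
 */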
6210
6211 static inline void io_consume_sqe(struct io_ring_ctx *ctx)
6212 {
6213 ctx->cached_sq_head++;
6214 }
6215
6216 #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
6217 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
6218 IOSQE_BUFFER_SELECT)
6219
6220 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6221 const struct io_uring_sqe *sqe,
6222 struct io_submit_state *state)
6223 {
6224 unsigned int sqe_flags;
6225 int id;
6226
6227 req->opcode = READ_ONCE(sqe->opcode);
6228 req->user_data = READ_ONCE(sqe->user_data);
6229 req->io = NULL;
6230 req->file = NULL;
6231 req->ctx = ctx;
6232 req->flags = 0;
6233 /* one is dropped after submission, the other at completion */
6234 refcount_set(&req->refs, 2);
6235 req->task = current;
6236 req->result = 0;
6237
6238 if (unlikely(req->opcode >= IORING_OP_LAST))
6239 return -EINVAL;
6240
6241 if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
6242 return -EFAULT;
6243
6244 sqe_flags = READ_ONCE(sqe->flags);
6245 /* enforce forwards compatibility on users */
6246 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6247 return -EINVAL;
6248
6249 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6250 !io_op_defs[req->opcode].buffer_select)
6251 return -EOPNOTSUPP;
6252
6253 id = READ_ONCE(sqe->personality);
6254 if (id) {
6255 io_req_init_async(req);
6256 req->work.creds = idr_find(&ctx->personality_idr, id);
6257 if (unlikely(!req->work.creds))
6258 return -EINVAL;
6259 get_cred(req->work.creds);
6260 }
6261
6262 /* same numerical values with corresponding REQ_F_*, safe to copy */
6263 req->flags |= sqe_flags;
6264
6265 if (!io_op_defs[req->opcode].needs_file)
6266 return 0;
6267
6268 return io_req_set_file(state, req, READ_ONCE(sqe->fd));
6269 }
6270
6271 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
6272 struct file *ring_file, int ring_fd)
6273 {
6274 struct io_submit_state state;
6275 struct io_kiocb *link = NULL;
6276 int i, submitted = 0;
6277
6278 /* if we have a backlog and couldn't flush it all, return BUSY */
6279 if (test_bit(0, &ctx->sq_check_overflow)) {
6280 if (!list_empty(&ctx->cq_overflow_list) &&
6281 !io_cqring_overflow_flush(ctx, false))
6282 return -EBUSY;
6283 }
6284
6285 /* make sure SQ entry isn't read before tail */
6286 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
6287
6288 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6289 return -EAGAIN;
6290
6291 io_submit_state_start(&state, ctx, nr);
6292
6293 ctx->ring_fd = ring_fd;
6294 ctx->ring_file = ring_file;
6295
6296 for (i = 0; i < nr; i++) {
6297 const struct io_uring_sqe *sqe;
6298 struct io_kiocb *req;
6299 int err;
6300
6301 sqe = io_get_sqe(ctx);
6302 if (unlikely(!sqe)) {
6303 io_consume_sqe(ctx);
6304 break;
6305 }
6306 req = io_alloc_req(ctx, &state);
6307 if (unlikely(!req)) {
6308 if (!submitted)
6309 submitted = -EAGAIN;
6310 break;
6311 }
6312
6313 err = io_init_req(ctx, req, sqe, &state);
6314 io_consume_sqe(ctx);
6315 /* will complete beyond this point, count as submitted */
6316 submitted++;
6317
6318 if (unlikely(err)) {
6319 fail_req:
6320 io_put_req(req);
6321 io_req_complete(req, err);
6322 break;
6323 }
6324
6325 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6326 true, io_async_submit(ctx));
6327 err = io_submit_sqe(req, sqe, &link, &state.comp);
6328 if (err)
6329 goto fail_req;
6330 }
6331
6332 if (unlikely(submitted != nr)) {
6333 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
6334
6335 percpu_ref_put_many(&ctx->refs, nr - ref_used);
6336 }
6337 if (link)
6338 io_queue_link_head(link, &state.comp);
6339 io_submit_state_end(&state);
6340
6341 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6342 io_commit_sqring(ctx);
6343
6344 return submitted;
6345 }
6346
6347 static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6348 {
6349 /* Tell userspace we may need a wakeup call */
6350 spin_lock_irq(&ctx->completion_lock);
6351 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6352 spin_unlock_irq(&ctx->completion_lock);
6353 }
6354
6355 static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6356 {
6357 spin_lock_irq(&ctx->completion_lock);
6358 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6359 spin_unlock_irq(&ctx->completion_lock);
6360 }
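/*
 * Userspace pairing for the flag set above (an illustrative sketch, not
 * part of this file): after publishing new sqes, the application checks
 * sq_flags and, if IORING_SQ_NEED_WAKEUP is set, enters the kernel with
 * IORING_ENTER_SQ_WAKEUP so the sleeping SQPOLL thread is woken (see
 * io_uring_enter() below).
 *
 *	if (*sq_flags_ptr & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */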
6361
6362 static int io_sq_thread(void *data)
6363 {
6364 struct io_ring_ctx *ctx = data;
6365 const struct cred *old_cred;
6366 DEFINE_WAIT(wait);
6367 unsigned long timeout;
6368 int ret = 0;
6369
6370 complete(&ctx->sq_thread_comp);
6371
6372 old_cred = override_creds(ctx->creds);
6373
6374 timeout = jiffies + ctx->sq_thread_idle;
6375 while (!kthread_should_park()) {
6376 unsigned int to_submit;
6377
6378 if (!list_empty(&ctx->iopoll_list)) {
6379 unsigned nr_events = 0;
6380
6381 mutex_lock(&ctx->uring_lock);
6382 if (!list_empty(&ctx->iopoll_list) && !need_resched())
6383 io_do_iopoll(ctx, &nr_events, 0);
6384 else
6385 timeout = jiffies + ctx->sq_thread_idle;
6386 mutex_unlock(&ctx->uring_lock);
6387 }
6388
6389 to_submit = io_sqring_entries(ctx);
6390
6391 /*
6392 * If submit got -EBUSY, flag us as needing the application
6393 * to enter the kernel to reap and flush events.
6394 */
6395 if (!to_submit || ret == -EBUSY || need_resched()) {
6396 /*
6397 * Drop cur_mm before scheduling, we can't hold it for
6398 * long periods (or over schedule()). Do this before
6399 * adding ourselves to the waitqueue, as the unuse/drop
6400 * may sleep.
6401 */
6402 io_sq_thread_drop_mm();
6403
6404 /*
6405 * We're polling. If we're within the defined idle
6406 * period, then let us spin without work before going
6407 * to sleep. The exception is if we got -EBUSY doing
6408 * more IO; in that case we should wait for the
6409 * application to reap events and wake us up.
6410 */
6411 if (!list_empty(&ctx->iopoll_list) || need_resched() ||
6412 (!time_after(jiffies, timeout) && ret != -EBUSY &&
6413 !percpu_ref_is_dying(&ctx->refs))) {
6414 io_run_task_work();
6415 cond_resched();
6416 continue;
6417 }
6418
6419 prepare_to_wait(&ctx->sqo_wait, &wait,
6420 TASK_INTERRUPTIBLE);
6421
6422 /*
6423 * While doing polled IO, before going to sleep we need
6424 * to check if there are new reqs added to iopoll_list:
6425 * reqs may have been punted to an io worker and will be
6426 * added to iopoll_list later, so check the list again
6427 * before committing to sleep.
6428 */
6429 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6430 !list_empty_careful(&ctx->iopoll_list)) {
6431 finish_wait(&ctx->sqo_wait, &wait);
6432 continue;
6433 }
6434
6435 io_ring_set_wakeup_flag(ctx);
6436
6437 to_submit = io_sqring_entries(ctx);
6438 if (!to_submit || ret == -EBUSY) {
6439 if (kthread_should_park()) {
6440 finish_wait(&ctx->sqo_wait, &wait);
6441 break;
6442 }
6443 if (io_run_task_work()) {
6444 finish_wait(&ctx->sqo_wait, &wait);
6445 io_ring_clear_wakeup_flag(ctx);
6446 continue;
6447 }
6448 if (signal_pending(current))
6449 flush_signals(current);
6450 schedule();
6451 finish_wait(&ctx->sqo_wait, &wait);
6452
6453 io_ring_clear_wakeup_flag(ctx);
6454 ret = 0;
6455 continue;
6456 }
6457 finish_wait(&ctx->sqo_wait, &wait);
6458
6459 io_ring_clear_wakeup_flag(ctx);
6460 }
6461
6462 mutex_lock(&ctx->uring_lock);
6463 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6464 ret = io_submit_sqes(ctx, to_submit, NULL, -1);
6465 mutex_unlock(&ctx->uring_lock);
6466 timeout = jiffies + ctx->sq_thread_idle;
6467 }
6468
6469 io_run_task_work();
6470
6471 io_sq_thread_drop_mm();
6472 revert_creds(old_cred);
6473
6474 kthread_parkme();
6475
6476 return 0;
6477 }
6478
6479 struct io_wait_queue {
6480 struct wait_queue_entry wq;
6481 struct io_ring_ctx *ctx;
6482 unsigned to_wait;
6483 unsigned nr_timeouts;
6484 };
6485
6486 static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
6487 {
6488 struct io_ring_ctx *ctx = iowq->ctx;
6489
6490 /*
6491 * Wake up if we have enough events, or if a timeout occurred since we
6492 * started waiting. For timeouts, we always want to return to userspace,
6493 * regardless of event count.
6494 */
6495 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
6496 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6497 }
6498
6499 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6500 int wake_flags, void *key)
6501 {
6502 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6503 wq);
6504
6505 /* use noflush == true, as we can't safely rely on locking context */
6506 if (!io_should_wake(iowq, true))
6507 return -1;
6508
6509 return autoremove_wake_function(curr, mode, wake_flags, key);
6510 }
6511
6512 /*
6513 * Wait until events become available, if we don't already have some. The
6514 * application must reap them itself, as they reside on the shared cq ring.
6515 */
6516 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6517 const sigset_t __user *sig, size_t sigsz)
6518 {
6519 struct io_wait_queue iowq = {
6520 .wq = {
6521 .private = current,
6522 .func = io_wake_function,
6523 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6524 },
6525 .ctx = ctx,
6526 .to_wait = min_events,
6527 };
6528 struct io_rings *rings = ctx->rings;
6529 int ret = 0;
6530
6531 do {
6532 if (io_cqring_events(ctx, false) >= min_events)
6533 return 0;
6534 if (!io_run_task_work())
6535 break;
6536 } while (1);
6537
6538 if (sig) {
6539 #ifdef CONFIG_COMPAT
6540 if (in_compat_syscall())
6541 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
6542 sigsz);
6543 else
6544 #endif
6545 ret = set_user_sigmask(sig, sigsz);
6546
6547 if (ret)
6548 return ret;
6549 }
6550
6551 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
6552 trace_io_uring_cqring_wait(ctx, min_events);
6553 do {
6554 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6555 TASK_INTERRUPTIBLE);
6556 /* make sure we run task_work before checking for signals */
6557 if (io_run_task_work())
6558 continue;
6559 if (signal_pending(current)) {
6560 if (current->jobctl & JOBCTL_TASK_WORK) {
6561 spin_lock_irq(&current->sighand->siglock);
6562 current->jobctl &= ~JOBCTL_TASK_WORK;
6563 recalc_sigpending();
6564 spin_unlock_irq(&current->sighand->siglock);
6565 continue;
6566 }
6567 ret = -EINTR;
6568 break;
6569 }
6570 if (io_should_wake(&iowq, false))
6571 break;
6572 schedule();
6573 } while (1);
6574 finish_wait(&ctx->wait, &iowq.wq);
6575
6576 restore_saved_sigmask_unless(ret == -EINTR);
6577
6578 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
6579 }
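/*
 * Reaping on the application side (an illustrative sketch, not part of
 * this file, using the __atomic builtins and the ring fields exported via
 * io_cqring_offsets):
 *
 *	unsigned head = *cq_head_ptr;
 *	unsigned tail = __atomic_load_n(cq_tail_ptr, __ATOMIC_ACQUIRE);
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_mask];
 *		consume(cqe->user_data, cqe->res);	(consume is hypothetical)
 *		head++;
 *	}
 *	__atomic_store_n(cq_head_ptr, head, __ATOMIC_RELEASE);
 */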
6580
6581 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6582 {
6583 #if defined(CONFIG_UNIX)
6584 if (ctx->ring_sock) {
6585 struct sock *sock = ctx->ring_sock->sk;
6586 struct sk_buff *skb;
6587
6588 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6589 kfree_skb(skb);
6590 }
6591 #else
6592 int i;
6593
6594 for (i = 0; i < ctx->nr_user_files; i++) {
6595 struct file *file;
6596
6597 file = io_file_from_index(ctx, i);
6598 if (file)
6599 fput(file);
6600 }
6601 #endif
6602 }
6603
6604 static void io_file_ref_kill(struct percpu_ref *ref)
6605 {
6606 struct fixed_file_data *data;
6607
6608 data = container_of(ref, struct fixed_file_data, refs);
6609 complete(&data->done);
6610 }
6611
6612 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6613 {
6614 struct fixed_file_data *data = ctx->file_data;
6615 struct fixed_file_ref_node *ref_node = NULL;
6616 unsigned nr_tables, i;
6617
6618 if (!data)
6619 return -ENXIO;
6620
6621 spin_lock(&data->lock);
6622 if (!list_empty(&data->ref_list))
6623 ref_node = list_first_entry(&data->ref_list,
6624 struct fixed_file_ref_node, node);
6625 spin_unlock(&data->lock);
6626 if (ref_node)
6627 percpu_ref_kill(&ref_node->refs);
6628
6629 percpu_ref_kill(&data->refs);
6630
6631 /* wait for all refs nodes to complete */
6632 flush_delayed_work(&ctx->file_put_work);
6633 wait_for_completion(&data->done);
6634
6635 __io_sqe_files_unregister(ctx);
6636 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6637 for (i = 0; i < nr_tables; i++)
6638 kfree(data->table[i].files);
6639 kfree(data->table);
6640 percpu_ref_exit(&data->refs);
6641 kfree(data);
6642 ctx->file_data = NULL;
6643 ctx->nr_user_files = 0;
6644 return 0;
6645 }
6646
6647 static void io_sq_thread_stop(struct io_ring_ctx *ctx)
6648 {
6649 if (ctx->sqo_thread) {
6650 wait_for_completion(&ctx->sq_thread_comp);
6651 /*
6652 * The park is a bit of a work-around; without it we get
6653 * warning spews on shutdown with SQPOLL set and affinity
6654 * set to a single CPU.
6655 */
6656 kthread_park(ctx->sqo_thread);
6657 kthread_stop(ctx->sqo_thread);
6658 ctx->sqo_thread = NULL;
6659 }
6660 }
6661
6662 static void io_finish_async(struct io_ring_ctx *ctx)
6663 {
6664 io_sq_thread_stop(ctx);
6665
6666 if (ctx->io_wq) {
6667 io_wq_destroy(ctx->io_wq);
6668 ctx->io_wq = NULL;
6669 }
6670 }
6671
6672 #if defined(CONFIG_UNIX)
6673 /*
6674 * Ensure the UNIX gc is aware of our file set, so we are certain that
6675 * the io_uring can be safely unregistered on process exit, even if we have
6676 * loops in the file referencing.
6677 */
6678 static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
6679 {
6680 struct sock *sk = ctx->ring_sock->sk;
6681 struct scm_fp_list *fpl;
6682 struct sk_buff *skb;
6683 int i, nr_files;
6684
6685 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
6686 if (!fpl)
6687 return -ENOMEM;
6688
6689 skb = alloc_skb(0, GFP_KERNEL);
6690 if (!skb) {
6691 kfree(fpl);
6692 return -ENOMEM;
6693 }
6694
6695 skb->sk = sk;
6696
6697 nr_files = 0;
6698 fpl->user = get_uid(ctx->user);
6699 for (i = 0; i < nr; i++) {
6700 struct file *file = io_file_from_index(ctx, i + offset);
6701
6702 if (!file)
6703 continue;
6704 fpl->fp[nr_files] = get_file(file);
6705 unix_inflight(fpl->user, fpl->fp[nr_files]);
6706 nr_files++;
6707 }
6708
6709 if (nr_files) {
6710 fpl->max = SCM_MAX_FD;
6711 fpl->count = nr_files;
6712 UNIXCB(skb).fp = fpl;
6713 skb->destructor = unix_destruct_scm;
6714 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6715 skb_queue_head(&sk->sk_receive_queue, skb);
6716
6717 for (i = 0; i < nr_files; i++)
6718 fput(fpl->fp[i]);
6719 } else {
6720 kfree_skb(skb);
6721 kfree(fpl);
6722 }
6723
6724 return 0;
6725 }
6726
6727 /*
6728 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
6729 * causes regular reference counting to break down. We rely on the UNIX
6730 * garbage collection to take care of this problem for us.
6731 */
6732 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6733 {
6734 unsigned left, total;
6735 int ret = 0;
6736
6737 total = 0;
6738 left = ctx->nr_user_files;
6739 while (left) {
6740 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
6741
6742 ret = __io_sqe_files_scm(ctx, this_files, total);
6743 if (ret)
6744 break;
6745 left -= this_files;
6746 total += this_files;
6747 }
6748
6749 if (!ret)
6750 return 0;
6751
6752 while (total < ctx->nr_user_files) {
6753 struct file *file = io_file_from_index(ctx, total);
6754
6755 if (file)
6756 fput(file);
6757 total++;
6758 }
6759
6760 return ret;
6761 }
6762 #else
6763 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6764 {
6765 return 0;
6766 }
6767 #endif
6768
6769 static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
6770 unsigned nr_files)
6771 {
6772 int i;
6773
6774 for (i = 0; i < nr_tables; i++) {
6775 struct fixed_file_table *table = &ctx->file_data->table[i];
6776 unsigned this_files;
6777
6778 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
6779 table->files = kcalloc(this_files, sizeof(struct file *),
6780 GFP_KERNEL);
6781 if (!table->files)
6782 break;
6783 nr_files -= this_files;
6784 }
6785
6786 if (i == nr_tables)
6787 return 0;
6788
6789 for (i = 0; i < nr_tables; i++) {
6790 struct fixed_file_table *table = &ctx->file_data->table[i];
6791 kfree(table->files);
6792 }
6793 return 1;
6794 }
6795
6796 static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
6797 {
6798 #if defined(CONFIG_UNIX)
6799 struct sock *sock = ctx->ring_sock->sk;
6800 struct sk_buff_head list, *head = &sock->sk_receive_queue;
6801 struct sk_buff *skb;
6802 int i;
6803
6804 __skb_queue_head_init(&list);
6805
6806 /*
6807 * Find the skb that holds this file in its SCM_RIGHTS. When found,
6808 * remove this entry and rearrange the file array.
6809 */
6810 skb = skb_dequeue(head);
6811 while (skb) {
6812 struct scm_fp_list *fp;
6813
6814 fp = UNIXCB(skb).fp;
6815 for (i = 0; i < fp->count; i++) {
6816 int left;
6817
6818 if (fp->fp[i] != file)
6819 continue;
6820
6821 unix_notinflight(fp->user, fp->fp[i]);
6822 left = fp->count - 1 - i;
6823 if (left) {
6824 memmove(&fp->fp[i], &fp->fp[i + 1],
6825 left * sizeof(struct file *));
6826 }
6827 fp->count--;
6828 if (!fp->count) {
6829 kfree_skb(skb);
6830 skb = NULL;
6831 } else {
6832 __skb_queue_tail(&list, skb);
6833 }
6834 fput(file);
6835 file = NULL;
6836 break;
6837 }
6838
6839 if (!file)
6840 break;
6841
6842 __skb_queue_tail(&list, skb);
6843
6844 skb = skb_dequeue(head);
6845 }
6846
6847 if (skb_peek(&list)) {
6848 spin_lock_irq(&head->lock);
6849 while ((skb = __skb_dequeue(&list)) != NULL)
6850 __skb_queue_tail(head, skb);
6851 spin_unlock_irq(&head->lock);
6852 }
6853 #else
6854 fput(file);
6855 #endif
6856 }
6857
6858 struct io_file_put {
6859 struct list_head list;
6860 struct file *file;
6861 };
6862
6863 static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
6864 {
6865 struct fixed_file_data *file_data = ref_node->file_data;
6866 struct io_ring_ctx *ctx = file_data->ctx;
6867 struct io_file_put *pfile, *tmp;
6868
6869 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
6870 list_del(&pfile->list);
6871 io_ring_file_put(ctx, pfile->file);
6872 kfree(pfile);
6873 }
6874
6875 spin_lock(&file_data->lock);
6876 list_del(&ref_node->node);
6877 spin_unlock(&file_data->lock);
6878
6879 percpu_ref_exit(&ref_node->refs);
6880 kfree(ref_node);
6881 percpu_ref_put(&file_data->refs);
6882 }
6883
6884 static void io_file_put_work(struct work_struct *work)
6885 {
6886 struct io_ring_ctx *ctx;
6887 struct llist_node *node;
6888
6889 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
6890 node = llist_del_all(&ctx->file_put_llist);
6891
6892 while (node) {
6893 struct fixed_file_ref_node *ref_node;
6894 struct llist_node *next = node->next;
6895
6896 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
6897 __io_file_put_work(ref_node);
6898 node = next;
6899 }
6900 }
6901
6902 static void io_file_data_ref_zero(struct percpu_ref *ref)
6903 {
6904 struct fixed_file_ref_node *ref_node;
6905 struct io_ring_ctx *ctx;
6906 bool first_add;
6907 int delay = HZ;
6908
6909 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
6910 ctx = ref_node->file_data->ctx;
6911
6912 if (percpu_ref_is_dying(&ctx->file_data->refs))
6913 delay = 0;
6914
6915 first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
6916 if (!delay)
6917 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
6918 else if (first_add)
6919 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
6920 }
6921
6922 static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
6923 struct io_ring_ctx *ctx)
6924 {
6925 struct fixed_file_ref_node *ref_node;
6926
6927 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
6928 if (!ref_node)
6929 return ERR_PTR(-ENOMEM);
6930
6931 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
6932 0, GFP_KERNEL)) {
6933 kfree(ref_node);
6934 return ERR_PTR(-ENOMEM);
6935 }
6936 INIT_LIST_HEAD(&ref_node->node);
6937 INIT_LIST_HEAD(&ref_node->file_list);
6938 ref_node->file_data = ctx->file_data;
6939 return ref_node;
6940 }
6941
6942 static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
6943 {
6944 percpu_ref_exit(&ref_node->refs);
6945 kfree(ref_node);
6946 }
6947
6948 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
6949 unsigned nr_args)
6950 {
6951 __s32 __user *fds = (__s32 __user *) arg;
6952 unsigned nr_tables;
6953 struct file *file;
6954 int fd, ret = 0;
6955 unsigned i;
6956 struct fixed_file_ref_node *ref_node;
6957
6958 if (ctx->file_data)
6959 return -EBUSY;
6960 if (!nr_args)
6961 return -EINVAL;
6962 if (nr_args > IORING_MAX_FIXED_FILES)
6963 return -EMFILE;
6964
6965 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
6966 if (!ctx->file_data)
6967 return -ENOMEM;
6968 ctx->file_data->ctx = ctx;
6969 init_completion(&ctx->file_data->done);
6970 INIT_LIST_HEAD(&ctx->file_data->ref_list);
6971 spin_lock_init(&ctx->file_data->lock);
6972
6973 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
6974 ctx->file_data->table = kcalloc(nr_tables,
6975 sizeof(struct fixed_file_table),
6976 GFP_KERNEL);
6977 if (!ctx->file_data->table) {
6978 kfree(ctx->file_data);
6979 ctx->file_data = NULL;
6980 return -ENOMEM;
6981 }
6982
6983 if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill,
6984 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
6985 kfree(ctx->file_data->table);
6986 kfree(ctx->file_data);
6987 ctx->file_data = NULL;
6988 return -ENOMEM;
6989 }
6990
6991 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
6992 percpu_ref_exit(&ctx->file_data->refs);
6993 kfree(ctx->file_data->table);
6994 kfree(ctx->file_data);
6995 ctx->file_data = NULL;
6996 return -ENOMEM;
6997 }
6998
6999 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
7000 struct fixed_file_table *table;
7001 unsigned index;
7002
7003 ret = -EFAULT;
7004 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
7005 break;
7006 /* allow sparse sets */
7007 if (fd == -1) {
7008 ret = 0;
7009 continue;
7010 }
7011
7012 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7013 index = i & IORING_FILE_TABLE_MASK;
7014 file = fget(fd);
7015
7016 ret = -EBADF;
7017 if (!file)
7018 break;
7019
7020 /*
7021 * Don't allow io_uring instances to be registered. If UNIX
7022 * isn't enabled, then this causes a reference cycle and this
7023 * instance can never get freed. If UNIX is enabled we'll
7024 * handle it just fine, but there's still no point in allowing
7025 * a ring fd as it doesn't support regular read/write anyway.
7026 */
7027 if (file->f_op == &io_uring_fops) {
7028 fput(file);
7029 break;
7030 }
7031 ret = 0;
7032 table->files[index] = file;
7033 }
7034
7035 if (ret) {
7036 for (i = 0; i < ctx->nr_user_files; i++) {
7037 file = io_file_from_index(ctx, i);
7038 if (file)
7039 fput(file);
7040 }
7041 for (i = 0; i < nr_tables; i++)
7042 kfree(ctx->file_data->table[i].files);
7043
7044 percpu_ref_exit(&ctx->file_data->refs);
7045 kfree(ctx->file_data->table);
7046 kfree(ctx->file_data);
7047 ctx->file_data = NULL;
7048 ctx->nr_user_files = 0;
7049 return ret;
7050 }
7051
7052 ret = io_sqe_files_scm(ctx);
7053 if (ret) {
7054 io_sqe_files_unregister(ctx);
7055 return ret;
7056 }
7057
7058 ref_node = alloc_fixed_file_ref_node(ctx);
7059 if (IS_ERR(ref_node)) {
7060 io_sqe_files_unregister(ctx);
7061 return PTR_ERR(ref_node);
7062 }
7063
7064 ctx->file_data->cur_refs = &ref_node->refs;
7065 spin_lock(&ctx->file_data->lock);
7066 list_add(&ref_node->node, &ctx->file_data->ref_list);
7067 spin_unlock(&ctx->file_data->lock);
7068 percpu_ref_get(&ctx->file_data->refs);
7069 return ret;
7070 }
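/*
 * Userspace sketch for the registration path above (illustrative, not
 * part of this file), assuming the IORING_REGISTER_FILES opcode of the
 * io_uring_register(2) syscall; an fd of -1 creates a sparse slot, as
 * allowed above:
 *
 *	int fds[3] = { sock_fd, file_fd, -1 };
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, 3);
 *
 * Requests then select a slot by index and set IOSQE_FIXED_FILE.
 */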
7071
7072 static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7073 int index)
7074 {
7075 #if defined(CONFIG_UNIX)
7076 struct sock *sock = ctx->ring_sock->sk;
7077 struct sk_buff_head *head = &sock->sk_receive_queue;
7078 struct sk_buff *skb;
7079
7080 /*
7081 * See if we can merge this file into an existing skb SCM_RIGHTS
7082 * file set. If there's no room, fall back to allocating a new skb
7083 * and filling it in.
7084 */
7085 spin_lock_irq(&head->lock);
7086 skb = skb_peek(head);
7087 if (skb) {
7088 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7089
7090 if (fpl->count < SCM_MAX_FD) {
7091 __skb_unlink(skb, head);
7092 spin_unlock_irq(&head->lock);
7093 fpl->fp[fpl->count] = get_file(file);
7094 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7095 fpl->count++;
7096 spin_lock_irq(&head->lock);
7097 __skb_queue_head(head, skb);
7098 } else {
7099 skb = NULL;
7100 }
7101 }
7102 spin_unlock_irq(&head->lock);
7103
7104 if (skb) {
7105 fput(file);
7106 return 0;
7107 }
7108
7109 return __io_sqe_files_scm(ctx, 1, index);
7110 #else
7111 return 0;
7112 #endif
7113 }
7114
7115 static int io_queue_file_removal(struct fixed_file_data *data,
7116 struct file *file)
7117 {
7118 struct io_file_put *pfile;
7119 struct percpu_ref *refs = data->cur_refs;
7120 struct fixed_file_ref_node *ref_node;
7121
7122 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
7123 if (!pfile)
7124 return -ENOMEM;
7125
7126 ref_node = container_of(refs, struct fixed_file_ref_node, refs);
7127 pfile->file = file;
7128 list_add(&pfile->list, &ref_node->file_list);
7129
7130 return 0;
7131 }
7132
7133 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7134 struct io_uring_files_update *up,
7135 unsigned nr_args)
7136 {
7137 struct fixed_file_data *data = ctx->file_data;
7138 struct fixed_file_ref_node *ref_node;
7139 struct file *file;
7140 __s32 __user *fds;
7141 int fd, i, err;
7142 __u32 done;
7143 bool needs_switch = false;
7144
7145 if (check_add_overflow(up->offset, nr_args, &done))
7146 return -EOVERFLOW;
7147 if (done > ctx->nr_user_files)
7148 return -EINVAL;
7149
7150 ref_node = alloc_fixed_file_ref_node(ctx);
7151 if (IS_ERR(ref_node))
7152 return PTR_ERR(ref_node);
7153
7154 done = 0;
7155 fds = u64_to_user_ptr(up->fds);
7156 while (nr_args) {
7157 struct fixed_file_table *table;
7158 unsigned index;
7159
7160 err = 0;
7161 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7162 err = -EFAULT;
7163 break;
7164 }
7165 i = array_index_nospec(up->offset, ctx->nr_user_files);
7166 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7167 index = i & IORING_FILE_TABLE_MASK;
7168 if (table->files[index]) {
7169 file = io_file_from_index(ctx, index);
7170 err = io_queue_file_removal(data, file);
7171 if (err)
7172 break;
7173 table->files[index] = NULL;
7174 needs_switch = true;
7175 }
7176 if (fd != -1) {
7177 file = fget(fd);
7178 if (!file) {
7179 err = -EBADF;
7180 break;
7181 }
7182 /*
7183 * Don't allow io_uring instances to be registered. If
7184 * UNIX isn't enabled, then this causes a reference
7185 * cycle and this instance can never get freed. If UNIX
7186 * is enabled we'll handle it just fine, but there's
7187 * still no point in allowing a ring fd as it doesn't
7188 * support regular read/write anyway.
7189 */
7190 if (file->f_op == &io_uring_fops) {
7191 fput(file);
7192 err = -EBADF;
7193 break;
7194 }
7195 table->files[index] = file;
7196 err = io_sqe_file_register(ctx, file, i);
7197 if (err) {
7198 fput(file);
7199 break;
7200 }
7201 }
7202 nr_args--;
7203 done++;
7204 up->offset++;
7205 }
7206
7207 if (needs_switch) {
7208 percpu_ref_kill(data->cur_refs);
7209 spin_lock(&data->lock);
7210 list_add(&ref_node->node, &data->ref_list);
7211 data->cur_refs = &ref_node->refs;
7212 spin_unlock(&data->lock);
7213 percpu_ref_get(&ctx->file_data->refs);
7214 } else
7215 destroy_fixed_file_ref_node(ref_node);
7216
7217 return done ? done : err;
7218 }
7219
7220 static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7221 unsigned nr_args)
7222 {
7223 struct io_uring_files_update up;
7224
7225 if (!ctx->file_data)
7226 return -ENXIO;
7227 if (!nr_args)
7228 return -EINVAL;
7229 if (copy_from_user(&up, arg, sizeof(up)))
7230 return -EFAULT;
7231 if (up.resv)
7232 return -EINVAL;
7233
7234 return __io_sqe_files_update(ctx, &up, nr_args);
7235 }
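/*
 * Userspace sketch for the update path above (illustrative, not part of
 * this file), assuming the IORING_REGISTER_FILES_UPDATE opcode: replace
 * slot 1 with new_fd and clear slot 2 (-1 removes the existing file, as
 * handled above).
 *
 *	int fds[2] = { new_fd, -1 };
 *	struct io_uring_files_update up = {
 *		.offset	= 1,
 *		.fds	= (unsigned long) fds,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES_UPDATE, &up, 2);
 */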
7236
7237 static void io_free_work(struct io_wq_work *work)
7238 {
7239 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7240
7241 /* Consider that io_steal_work() relies on this ref */
7242 io_put_req(req);
7243 }
7244
7245 static int io_init_wq_offload(struct io_ring_ctx *ctx,
7246 struct io_uring_params *p)
7247 {
7248 struct io_wq_data data;
7249 struct fd f;
7250 struct io_ring_ctx *ctx_attach;
7251 unsigned int concurrency;
7252 int ret = 0;
7253
7254 data.user = ctx->user;
7255 data.free_work = io_free_work;
7256 data.do_work = io_wq_submit_work;
7257
7258 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
7259 /* Do QD, or 4 * CPUS, whichever is smaller */
7260 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
7261
7262 ctx->io_wq = io_wq_create(concurrency, &data);
7263 if (IS_ERR(ctx->io_wq)) {
7264 ret = PTR_ERR(ctx->io_wq);
7265 ctx->io_wq = NULL;
7266 }
7267 return ret;
7268 }
7269
7270 f = fdget(p->wq_fd);
7271 if (!f.file)
7272 return -EBADF;
7273
7274 if (f.file->f_op != &io_uring_fops) {
7275 ret = -EINVAL;
7276 goto out_fput;
7277 }
7278
7279 ctx_attach = f.file->private_data;
7280 /* @io_wq is protected by holding the fd */
7281 if (!io_wq_get(ctx_attach->io_wq, &data)) {
7282 ret = -EINVAL;
7283 goto out_fput;
7284 }
7285
7286 ctx->io_wq = ctx_attach->io_wq;
7287 out_fput:
7288 fdput(f);
7289 return ret;
7290 }
7291
7292 static int io_sq_offload_start(struct io_ring_ctx *ctx,
7293 struct io_uring_params *p)
7294 {
7295 int ret;
7296
7297 mmgrab(current->mm);
7298 ctx->sqo_mm = current->mm;
7299
7300 if (ctx->flags & IORING_SETUP_SQPOLL) {
7301 ret = -EPERM;
7302 if (!capable(CAP_SYS_ADMIN))
7303 goto err;
7304
7305 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7306 if (!ctx->sq_thread_idle)
7307 ctx->sq_thread_idle = HZ;
7308
7309 if (p->flags & IORING_SETUP_SQ_AFF) {
7310 int cpu = p->sq_thread_cpu;
7311
7312 ret = -EINVAL;
7313 if (cpu >= nr_cpu_ids)
7314 goto err;
7315 if (!cpu_online(cpu))
7316 goto err;
7317
7318 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
7319 ctx, cpu,
7320 "io_uring-sq");
7321 } else {
7322 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
7323 "io_uring-sq");
7324 }
7325 if (IS_ERR(ctx->sqo_thread)) {
7326 ret = PTR_ERR(ctx->sqo_thread);
7327 ctx->sqo_thread = NULL;
7328 goto err;
7329 }
7330 wake_up_process(ctx->sqo_thread);
7331 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7332 /* Can't have SQ_AFF without SQPOLL */
7333 ret = -EINVAL;
7334 goto err;
7335 }
7336
7337 ret = io_init_wq_offload(ctx, p);
7338 if (ret)
7339 goto err;
7340
7341 return 0;
7342 err:
7343 io_finish_async(ctx);
7344 if (ctx->sqo_mm) {
7345 mmdrop(ctx->sqo_mm);
7346 ctx->sqo_mm = NULL;
7347 }
7348 return ret;
7349 }
7350
7351 static inline void __io_unaccount_mem(struct user_struct *user,
7352 unsigned long nr_pages)
7353 {
7354 atomic_long_sub(nr_pages, &user->locked_vm);
7355 }
7356
7357 static inline int __io_account_mem(struct user_struct *user,
7358 unsigned long nr_pages)
7359 {
7360 unsigned long page_limit, cur_pages, new_pages;
7361
7362 /* Don't allow more pages than we can safely lock */
7363 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7364
7365 do {
7366 cur_pages = atomic_long_read(&user->locked_vm);
7367 new_pages = cur_pages + nr_pages;
7368 if (new_pages > page_limit)
7369 return -ENOMEM;
7370 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7371 new_pages) != cur_pages);
7372
7373 return 0;
7374 }
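/*
 * Worked example for the check above: with RLIMIT_MEMLOCK at 64 MiB and
 * 4 KiB pages, page_limit = (64 << 20) >> 12 = 16384 pages, so any
 * request that would push locked_vm past 16384 pages fails with -ENOMEM.
 * The cmpxchg loop only retries if another task changed locked_vm
 * concurrently.
 */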
7375
7376 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
7377 enum io_mem_account acct)
7378 {
7379 if (ctx->limit_mem)
7380 __io_unaccount_mem(ctx->user, nr_pages);
7381
7382 if (ctx->sqo_mm) {
7383 if (acct == ACCT_LOCKED)
7384 ctx->sqo_mm->locked_vm -= nr_pages;
7385 else if (acct == ACCT_PINNED)
7386 atomic64_sub(nr_pages, &ctx->sqo_mm->pinned_vm);
7387 }
7388 }
7389
7390 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
7391 enum io_mem_account acct)
7392 {
7393 int ret;
7394
7395 if (ctx->limit_mem) {
7396 ret = __io_account_mem(ctx->user, nr_pages);
7397 if (ret)
7398 return ret;
7399 }
7400
7401 if (ctx->sqo_mm) {
7402 if (acct == ACCT_LOCKED)
7403 ctx->sqo_mm->locked_vm += nr_pages;
7404 else if (acct == ACCT_PINNED)
7405 atomic64_add(nr_pages, &ctx->sqo_mm->pinned_vm);
7406 }
7407
7408 return 0;
7409 }
7410
7411 static void io_mem_free(void *ptr)
7412 {
7413 struct page *page;
7414
7415 if (!ptr)
7416 return;
7417
7418 page = virt_to_head_page(ptr);
7419 if (put_page_testzero(page))
7420 free_compound_page(page);
7421 }
7422
7423 static void *io_mem_alloc(size_t size)
7424 {
7425 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
7426 __GFP_NORETRY;
7427
7428 return (void *) __get_free_pages(gfp_flags, get_order(size));
7429 }
7430
7431 static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
7432 size_t *sq_offset)
7433 {
7434 struct io_rings *rings;
7435 size_t off, sq_array_size;
7436
7437 off = struct_size(rings, cqes, cq_entries);
7438 if (off == SIZE_MAX)
7439 return SIZE_MAX;
7440
7441 #ifdef CONFIG_SMP
7442 off = ALIGN(off, SMP_CACHE_BYTES);
7443 if (off == 0)
7444 return SIZE_MAX;
7445 #endif
7446
7447 if (sq_offset)
7448 *sq_offset = off;
7449
7450 sq_array_size = array_size(sizeof(u32), sq_entries);
7451 if (sq_array_size == SIZE_MAX)
7452 return SIZE_MAX;
7453
7454 if (check_add_overflow(off, sq_array_size, &off))
7455 return SIZE_MAX;
7456
7457 return off;
7458 }
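/*
 * The layout this size describes, for reference: the cqes[] flexible
 * array is part of struct io_rings, and the u32 sq index array follows
 * it, cacheline-aligned on SMP.
 *
 *	[ struct io_rings + cqes[cq_entries] ][ pad ][ sq_array[sq_entries] ]
 *	offset 0                                     ^ *sq_offset
 */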
7459
7460 static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
7461 {
7462 size_t pages;
7463
7464 pages = (size_t)1 << get_order(
7465 rings_size(sq_entries, cq_entries, NULL));
7466 pages += (size_t)1 << get_order(
7467 array_size(sizeof(struct io_uring_sqe), sq_entries));
7468
7469 return pages;
7470 }
7471
7472 static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
7473 {
7474 int i, j;
7475
7476 if (!ctx->user_bufs)
7477 return -ENXIO;
7478
7479 for (i = 0; i < ctx->nr_user_bufs; i++) {
7480 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7481
7482 for (j = 0; j < imu->nr_bvecs; j++)
7483 unpin_user_page(imu->bvec[j].bv_page);
7484
7485 io_unaccount_mem(ctx, imu->nr_bvecs, ACCT_PINNED);
7486 kvfree(imu->bvec);
7487 imu->nr_bvecs = 0;
7488 }
7489
7490 kfree(ctx->user_bufs);
7491 ctx->user_bufs = NULL;
7492 ctx->nr_user_bufs = 0;
7493 return 0;
7494 }
7495
7496 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
7497 void __user *arg, unsigned index)
7498 {
7499 struct iovec __user *src;
7500
7501 #ifdef CONFIG_COMPAT
7502 if (ctx->compat) {
7503 struct compat_iovec __user *ciovs;
7504 struct compat_iovec ciov;
7505
7506 ciovs = (struct compat_iovec __user *) arg;
7507 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
7508 return -EFAULT;
7509
7510 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
7511 dst->iov_len = ciov.iov_len;
7512 return 0;
7513 }
7514 #endif
7515 src = (struct iovec __user *) arg;
7516 if (copy_from_user(dst, &src[index], sizeof(*dst)))
7517 return -EFAULT;
7518 return 0;
7519 }
7520
7521 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
7522 unsigned nr_args)
7523 {
7524 struct vm_area_struct **vmas = NULL;
7525 struct page **pages = NULL;
7526 int i, j, got_pages = 0;
7527 int ret = -EINVAL;
7528
7529 if (ctx->user_bufs)
7530 return -EBUSY;
7531 if (!nr_args || nr_args > UIO_MAXIOV)
7532 return -EINVAL;
7533
7534 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
7535 GFP_KERNEL);
7536 if (!ctx->user_bufs)
7537 return -ENOMEM;
7538
7539 for (i = 0; i < nr_args; i++) {
7540 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
7541 unsigned long off, start, end, ubuf;
7542 int pret, nr_pages;
7543 struct iovec iov;
7544 size_t size;
7545
7546 ret = io_copy_iov(ctx, &iov, arg, i);
7547 if (ret)
7548 goto err;
7549
7550 /*
7551 * Don't impose further limits on the size and buffer
7552 * constraints here; we'll return -EINVAL later when IO
7553 * is submitted if they are wrong.
7554 */
7555 ret = -EFAULT;
7556 if (!iov.iov_base || !iov.iov_len)
7557 goto err;
7558
7559 /* arbitrary limit, but we need something */
7560 if (iov.iov_len > SZ_1G)
7561 goto err;
7562
7563 ubuf = (unsigned long) iov.iov_base;
7564 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
7565 start = ubuf >> PAGE_SHIFT;
7566 nr_pages = end - start;
7567
7568 ret = io_account_mem(ctx, nr_pages, ACCT_PINNED);
7569 if (ret)
7570 goto err;
7571
7572 ret = 0;
7573 if (!pages || nr_pages > got_pages) {
7574 kvfree(vmas);
7575 kvfree(pages);
7576 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
7577 GFP_KERNEL);
7578 vmas = kvmalloc_array(nr_pages,
7579 sizeof(struct vm_area_struct *),
7580 GFP_KERNEL);
7581 if (!pages || !vmas) {
7582 ret = -ENOMEM;
7583 io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
7584 goto err;
7585 }
7586 got_pages = nr_pages;
7587 }
7588
7589 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
7590 GFP_KERNEL);
7591 ret = -ENOMEM;
7592 if (!imu->bvec) {
7593 io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
7594 goto err;
7595 }
7596
7597 ret = 0;
7598 mmap_read_lock(current->mm);
7599 pret = pin_user_pages(ubuf, nr_pages,
7600 FOLL_WRITE | FOLL_LONGTERM,
7601 pages, vmas);
7602 if (pret == nr_pages) {
7603 /* don't support file backed memory */
7604 for (j = 0; j < nr_pages; j++) {
7605 struct vm_area_struct *vma = vmas[j];
7606
7607 if (vma->vm_file &&
7608 !is_file_hugepages(vma->vm_file)) {
7609 ret = -EOPNOTSUPP;
7610 break;
7611 }
7612 }
7613 } else {
7614 ret = pret < 0 ? pret : -EFAULT;
7615 }
7616 mmap_read_unlock(current->mm);
7617 if (ret) {
7618 /*
7619 * If we did a partial map, or found file-backed vmas,
7620 * release any pages we did get.
7621 */
7622 if (pret > 0)
7623 unpin_user_pages(pages, pret);
7624 io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
7625 kvfree(imu->bvec);
7626 goto err;
7627 }
7628
7629 off = ubuf & ~PAGE_MASK;
7630 size = iov.iov_len;
7631 for (j = 0; j < nr_pages; j++) {
7632 size_t vec_len;
7633
7634 vec_len = min_t(size_t, size, PAGE_SIZE - off);
7635 imu->bvec[j].bv_page = pages[j];
7636 imu->bvec[j].bv_len = vec_len;
7637 imu->bvec[j].bv_offset = off;
7638 off = 0;
7639 size -= vec_len;
7640 }
7641 /* store original address for later verification */
7642 imu->ubuf = ubuf;
7643 imu->len = iov.iov_len;
7644 imu->nr_bvecs = nr_pages;
7645
7646 ctx->nr_user_bufs++;
7647 }
7648 kvfree(pages);
7649 kvfree(vmas);
7650 return 0;
7651 err:
7652 kvfree(pages);
7653 kvfree(vmas);
7654 io_sqe_buffer_unregister(ctx);
7655 return ret;
7656 }
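/*
 * Userspace sketch for the path above (illustrative, not part of this
 * file), assuming the IORING_REGISTER_BUFFERS opcode: each iovec must be
 * non-empty and at most 1 GiB, and must not be plain file-backed memory
 * (hugetlb pages are allowed), matching the checks above.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *	io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * Fixed-buffer requests then reference the registered buffer by index.
 */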
7657
7658 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
7659 {
7660 __s32 __user *fds = arg;
7661 int fd;
7662
7663 if (ctx->cq_ev_fd)
7664 return -EBUSY;
7665
7666 if (copy_from_user(&fd, fds, sizeof(*fds)))
7667 return -EFAULT;
7668
7669 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
7670 if (IS_ERR(ctx->cq_ev_fd)) {
7671 int ret = PTR_ERR(ctx->cq_ev_fd);
7672 ctx->cq_ev_fd = NULL;
7673 return ret;
7674 }
7675
7676 return 0;
7677 }
7678
7679 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
7680 {
7681 if (ctx->cq_ev_fd) {
7682 eventfd_ctx_put(ctx->cq_ev_fd);
7683 ctx->cq_ev_fd = NULL;
7684 return 0;
7685 }
7686
7687 return -ENXIO;
7688 }
7689
7690 static int __io_destroy_buffers(int id, void *p, void *data)
7691 {
7692 struct io_ring_ctx *ctx = data;
7693 struct io_buffer *buf = p;
7694
7695 __io_remove_buffers(ctx, buf, id, -1U);
7696 return 0;
7697 }
7698
7699 static void io_destroy_buffers(struct io_ring_ctx *ctx)
7700 {
7701 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
7702 idr_destroy(&ctx->io_buffer_idr);
7703 }
7704
7705 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
7706 {
7707 io_finish_async(ctx);
7708 io_sqe_buffer_unregister(ctx);
7709 if (ctx->sqo_mm) {
7710 mmdrop(ctx->sqo_mm);
7711 ctx->sqo_mm = NULL;
7712 }
7713
7714 io_sqe_files_unregister(ctx);
7715 io_eventfd_unregister(ctx);
7716 io_destroy_buffers(ctx);
7717 idr_destroy(&ctx->personality_idr);
7718
7719 #if defined(CONFIG_UNIX)
7720 if (ctx->ring_sock) {
7721 ctx->ring_sock->file = NULL; /* so that iput() is called */
7722 sock_release(ctx->ring_sock);
7723 }
7724 #endif
7725
7726 io_mem_free(ctx->rings);
7727 io_mem_free(ctx->sq_sqes);
7728
7729 percpu_ref_exit(&ctx->refs);
7730 free_uid(ctx->user);
7731 put_cred(ctx->creds);
7732 kfree(ctx->cancel_hash);
7733 kmem_cache_free(req_cachep, ctx->fallback_req);
7734 kfree(ctx);
7735 }
7736
7737 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
7738 {
7739 struct io_ring_ctx *ctx = file->private_data;
7740 __poll_t mask = 0;
7741
7742 poll_wait(file, &ctx->cq_wait, wait);
7743 /*
7744 * synchronizes with barrier from wq_has_sleeper call in
7745 * io_commit_cqring
7746 */
7747 smp_rmb();
7748 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
7749 ctx->rings->sq_ring_entries)
7750 mask |= EPOLLOUT | EPOLLWRNORM;
7751 if (io_cqring_events(ctx, false))
7752 mask |= EPOLLIN | EPOLLRDNORM;
7753
7754 return mask;
7755 }
7756
7757 static int io_uring_fasync(int fd, struct file *file, int on)
7758 {
7759 struct io_ring_ctx *ctx = file->private_data;
7760
7761 return fasync_helper(fd, file, on, &ctx->cq_fasync);
7762 }
7763
7764 static int io_remove_personalities(int id, void *p, void *data)
7765 {
7766 struct io_ring_ctx *ctx = data;
7767 const struct cred *cred;
7768
7769 cred = idr_remove(&ctx->personality_idr, id);
7770 if (cred)
7771 put_cred(cred);
7772 return 0;
7773 }
7774
7775 static void io_ring_exit_work(struct work_struct *work)
7776 {
7777 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
7778 exit_work);
7779
7780 /*
7781 * If we're doing polled IO and requests end up being
7782 * submitted async (out-of-line), then completions can come in while
7783 * we're waiting for refs to drop. We need to reap these manually,
7784 * as nobody else will be looking for them.
7785 */
7786 do {
7787 if (ctx->rings)
7788 io_cqring_overflow_flush(ctx, true);
7789 io_iopoll_try_reap_events(ctx);
7790 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
7791 io_ring_ctx_free(ctx);
7792 }
7793
7794 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
7795 {
7796 mutex_lock(&ctx->uring_lock);
7797 percpu_ref_kill(&ctx->refs);
7798 mutex_unlock(&ctx->uring_lock);
7799
7800 io_kill_timeouts(ctx);
7801 io_poll_remove_all(ctx);
7802
7803 if (ctx->io_wq)
7804 io_wq_cancel_all(ctx->io_wq);
7805
7806 /* if we failed setting up the ctx, we might not have any rings */
7807 if (ctx->rings)
7808 io_cqring_overflow_flush(ctx, true);
7809 io_iopoll_try_reap_events(ctx);
7810 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
7811
7812 /*
7813 * Do this upfront, so we won't have a grace period where the ring
7814 * is closed but resources aren't reaped yet. This can cause
7815 * spurious failure in setting up a new ring.
7816 */
7817 io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
7818 ACCT_LOCKED);
7819
7820 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
7821 queue_work(system_wq, &ctx->exit_work);
7822 }
7823
7824 static int io_uring_release(struct inode *inode, struct file *file)
7825 {
7826 struct io_ring_ctx *ctx = file->private_data;
7827
7828 file->private_data = NULL;
7829 io_ring_ctx_wait_and_kill(ctx);
7830 return 0;
7831 }
7832
7833 static bool io_wq_files_match(struct io_wq_work *work, void *data)
7834 {
7835 struct files_struct *files = data;
7836
7837 return work->files == files;
7838 }
7839
7840 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
7841 struct files_struct *files)
7842 {
7843 if (list_empty_careful(&ctx->inflight_list))
7844 return;
7845
7846 /* cancel all at once, should be faster than doing it one by one */
7847 io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
7848
7849 while (!list_empty_careful(&ctx->inflight_list)) {
7850 struct io_kiocb *cancel_req = NULL, *req;
7851 DEFINE_WAIT(wait);
7852
7853 spin_lock_irq(&ctx->inflight_lock);
7854 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
7855 if (req->work.files != files)
7856 continue;
7857 /* req is being completed, ignore */
7858 if (!refcount_inc_not_zero(&req->refs))
7859 continue;
7860 cancel_req = req;
7861 break;
7862 }
7863 if (cancel_req)
7864 prepare_to_wait(&ctx->inflight_wait, &wait,
7865 TASK_UNINTERRUPTIBLE);
7866 spin_unlock_irq(&ctx->inflight_lock);
7867
7868 /* We need to keep going until we don't find a matching req */
7869 if (!cancel_req)
7870 break;
7871
7872 if (cancel_req->flags & REQ_F_OVERFLOW) {
7873 spin_lock_irq(&ctx->completion_lock);
7874 list_del(&cancel_req->compl.list);
7875 cancel_req->flags &= ~REQ_F_OVERFLOW;
7876
7877 io_cqring_mark_overflow(ctx);
7878 WRITE_ONCE(ctx->rings->cq_overflow,
7879 atomic_inc_return(&ctx->cached_cq_overflow));
7880 io_commit_cqring(ctx);
7881 spin_unlock_irq(&ctx->completion_lock);
7882
7883 /*
7884 * Put inflight ref and overflow ref. If that's
7885 * all we had, then we're done with this request.
7886 */
7887 if (refcount_sub_and_test(2, &cancel_req->refs)) {
7888 io_free_req(cancel_req);
7889 finish_wait(&ctx->inflight_wait, &wait);
7890 continue;
7891 }
7892 } else {
7893 io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
7894 io_put_req(cancel_req);
7895 }
7896
7897 schedule();
7898 finish_wait(&ctx->inflight_wait, &wait);
7899 }
7900 }
7901
7902 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
7903 {
7904 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7905 struct task_struct *task = data;
7906
7907 return req->task == task;
7908 }
7909
7910 static int io_uring_flush(struct file *file, void *data)
7911 {
7912 struct io_ring_ctx *ctx = file->private_data;
7913
7914 io_uring_cancel_files(ctx, data);
7915
7916 /*
7917 * If the task is going away, cancel work it may have pending
7918 */
7919 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
7920 io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, current, true);
7921
7922 return 0;
7923 }
7924
7925 static void *io_uring_validate_mmap_request(struct file *file,
7926 loff_t pgoff, size_t sz)
7927 {
7928 struct io_ring_ctx *ctx = file->private_data;
7929 loff_t offset = pgoff << PAGE_SHIFT;
7930 struct page *page;
7931 void *ptr;
7932
7933 switch (offset) {
7934 case IORING_OFF_SQ_RING:
7935 case IORING_OFF_CQ_RING:
7936 ptr = ctx->rings;
7937 break;
7938 case IORING_OFF_SQES:
7939 ptr = ctx->sq_sqes;
7940 break;
7941 default:
7942 return ERR_PTR(-EINVAL);
7943 }
7944
7945 page = virt_to_head_page(ptr);
7946 if (sz > page_size(page))
7947 return ERR_PTR(-EINVAL);
7948
7949 return ptr;
7950 }
7951
7952 #ifdef CONFIG_MMU
7953
7954 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7955 {
7956 size_t sz = vma->vm_end - vma->vm_start;
7957 unsigned long pfn;
7958 void *ptr;
7959
7960 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
7961 if (IS_ERR(ptr))
7962 return PTR_ERR(ptr);
7963
7964 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
7965 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
7966 }
7967
7968 #else /* !CONFIG_MMU */
7969
7970 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
7971 {
7972 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
7973 }
7974
7975 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
7976 {
7977 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
7978 }
7979
7980 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
7981 unsigned long addr, unsigned long len,
7982 unsigned long pgoff, unsigned long flags)
7983 {
7984 void *ptr;
7985
7986 ptr = io_uring_validate_mmap_request(file, pgoff, len);
7987 if (IS_ERR(ptr))
7988 return PTR_ERR(ptr);
7989
7990 return (unsigned long) ptr;
7991 }
7992
7993 #endif /* !CONFIG_MMU */
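/*
 * Userspace mapping sketch for the offsets handled above (illustrative,
 * not part of this file): the SQ and CQ rings share one allocation
 * (IORING_FEAT_SINGLE_MMAP), the sqe array is mapped separately.
 *
 *	rings = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	sqes  = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 *
 * ring_sz and sqes_sz are derived from the sq_off/cq_off values that
 * io_uring_setup() returns (see io_uring_create() below).
 */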
7994
7995 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
7996 u32, min_complete, u32, flags, const sigset_t __user *, sig,
7997 size_t, sigsz)
7998 {
7999 struct io_ring_ctx *ctx;
8000 long ret = -EBADF;
8001 int submitted = 0;
8002 struct fd f;
8003
8004 io_run_task_work();
8005
8006 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
8007 return -EINVAL;
8008
8009 f = fdget(fd);
8010 if (!f.file)
8011 return -EBADF;
8012
8013 ret = -EOPNOTSUPP;
8014 if (f.file->f_op != &io_uring_fops)
8015 goto out_fput;
8016
8017 ret = -ENXIO;
8018 ctx = f.file->private_data;
8019 if (!percpu_ref_tryget(&ctx->refs))
8020 goto out_fput;
8021
8022 /*
8023 * For SQ polling, the thread will do all submissions and completions.
8024 * Just return the requested submit count, and wake the thread if
8025 * we were asked to.
8026 */
8027 ret = 0;
8028 if (ctx->flags & IORING_SETUP_SQPOLL) {
8029 if (!list_empty_careful(&ctx->cq_overflow_list))
8030 io_cqring_overflow_flush(ctx, false);
8031 if (flags & IORING_ENTER_SQ_WAKEUP)
8032 wake_up(&ctx->sqo_wait);
8033 submitted = to_submit;
8034 } else if (to_submit) {
8035 mutex_lock(&ctx->uring_lock);
8036 submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
8037 mutex_unlock(&ctx->uring_lock);
8038
8039 if (submitted != to_submit)
8040 goto out;
8041 }
8042 if (flags & IORING_ENTER_GETEVENTS) {
8043 min_complete = min(min_complete, ctx->cq_entries);
8044
8045 /*
8046 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
8047 * space applications don't need to poll for io completion
8048 * events themselves; they can rely on io_sq_thread to do the
8049 * polling work, which reduces cpu usage and uring_lock contention.
8050 */
8051 if (ctx->flags & IORING_SETUP_IOPOLL &&
8052 !(ctx->flags & IORING_SETUP_SQPOLL)) {
8053 ret = io_iopoll_check(ctx, min_complete);
8054 } else {
8055 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
8056 }
8057 }
8058
8059 out:
8060 percpu_ref_put(&ctx->refs);
8061 out_fput:
8062 fdput(f);
8063 return submitted ? submitted : ret;
8064 }
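/*
 * Typical call from userspace (illustrative, not part of this file),
 * submitting the sqes published so far and waiting for at least one
 * completion; there is no libc wrapper, so the raw syscall (or liburing)
 * is used:
 *
 *	syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *		IORING_ENTER_GETEVENTS, NULL, 0);
 */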
8065
8066 #ifdef CONFIG_PROC_FS
8067 static int io_uring_show_cred(int id, void *p, void *data)
8068 {
8069 const struct cred *cred = p;
8070 struct seq_file *m = data;
8071 struct user_namespace *uns = seq_user_ns(m);
8072 struct group_info *gi;
8073 kernel_cap_t cap;
8074 unsigned __capi;
8075 int g;
8076
8077 seq_printf(m, "%5d\n", id);
8078 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
8079 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
8080 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
8081 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
8082 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
8083 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
8084 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
8085 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
8086 seq_puts(m, "\n\tGroups:\t");
8087 gi = cred->group_info;
8088 for (g = 0; g < gi->ngroups; g++) {
8089 seq_put_decimal_ull(m, g ? " " : "",
8090 from_kgid_munged(uns, gi->gid[g]));
8091 }
8092 seq_puts(m, "\n\tCapEff:\t");
8093 cap = cred->cap_effective;
8094 CAP_FOR_EACH_U32(__capi)
8095 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
8096 seq_putc(m, '\n');
8097 return 0;
8098 }
8099
8100 static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
8101 {
8102 int i;
8103
8104 mutex_lock(&ctx->uring_lock);
8105 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
8106 for (i = 0; i < ctx->nr_user_files; i++) {
8107 struct fixed_file_table *table;
8108 struct file *f;
8109
8110 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
8111 f = table->files[i & IORING_FILE_TABLE_MASK];
8112 if (f)
8113 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
8114 else
8115 seq_printf(m, "%5u: <none>\n", i);
8116 }
8117 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
8118 for (i = 0; i < ctx->nr_user_bufs; i++) {
8119 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
8120
8121 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
8122 (unsigned int) buf->len);
8123 }
8124 if (!idr_is_empty(&ctx->personality_idr)) {
8125 seq_printf(m, "Personalities:\n");
8126 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
8127 }
8128 seq_printf(m, "PollList:\n");
8129 spin_lock_irq(&ctx->completion_lock);
8130 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
8131 struct hlist_head *list = &ctx->cancel_hash[i];
8132 struct io_kiocb *req;
8133
8134 hlist_for_each_entry(req, list, hash_node)
8135 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
8136 req->task->task_works != NULL);
8137 }
8138 spin_unlock_irq(&ctx->completion_lock);
8139 mutex_unlock(&ctx->uring_lock);
8140 }
8141
8142 static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
8143 {
8144 struct io_ring_ctx *ctx = f->private_data;
8145
8146 if (percpu_ref_tryget(&ctx->refs)) {
8147 __io_uring_show_fdinfo(ctx, m);
8148 percpu_ref_put(&ctx->refs);
8149 }
8150 }
8151 #endif
8152
8153 static const struct file_operations io_uring_fops = {
8154 .release = io_uring_release,
8155 .flush = io_uring_flush,
8156 .mmap = io_uring_mmap,
8157 #ifndef CONFIG_MMU
8158 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
8159 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
8160 #endif
8161 .poll = io_uring_poll,
8162 .fasync = io_uring_fasync,
8163 #ifdef CONFIG_PROC_FS
8164 .show_fdinfo = io_uring_show_fdinfo,
8165 #endif
8166 };
8167
8168 static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
8169 struct io_uring_params *p)
8170 {
8171 struct io_rings *rings;
8172 size_t size, sq_array_offset;
8173
8174 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
8175 if (size == SIZE_MAX)
8176 return -EOVERFLOW;
8177
8178 rings = io_mem_alloc(size);
8179 if (!rings)
8180 return -ENOMEM;
8181
8182 ctx->rings = rings;
8183 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
8184 rings->sq_ring_mask = p->sq_entries - 1;
8185 rings->cq_ring_mask = p->cq_entries - 1;
8186 rings->sq_ring_entries = p->sq_entries;
8187 rings->cq_ring_entries = p->cq_entries;
8188 ctx->sq_mask = rings->sq_ring_mask;
8189 ctx->cq_mask = rings->cq_ring_mask;
8190 ctx->sq_entries = rings->sq_ring_entries;
8191 ctx->cq_entries = rings->cq_ring_entries;
8192
8193 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
8194 if (size == SIZE_MAX) {
8195 io_mem_free(ctx->rings);
8196 ctx->rings = NULL;
8197 return -EOVERFLOW;
8198 }
8199
8200 ctx->sq_sqes = io_mem_alloc(size);
8201 if (!ctx->sq_sqes) {
8202 io_mem_free(ctx->rings);
8203 ctx->rings = NULL;
8204 return -ENOMEM;
8205 }
8206
8207 return 0;
8208 }
8209
8210 /*
8211 * Allocate an anonymous fd; this is what constitutes the application-
8212 * visible backing of an io_uring instance. The application mmaps this
8213 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
8214 * we have to tie this fd to a socket for file garbage collection purposes.
8215 */
8216 static int io_uring_get_fd(struct io_ring_ctx *ctx)
8217 {
8218 struct file *file;
8219 int ret;
8220
8221 #if defined(CONFIG_UNIX)
8222 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
8223 &ctx->ring_sock);
8224 if (ret)
8225 return ret;
8226 #endif
8227
8228 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
8229 if (ret < 0)
8230 goto err;
8231
8232 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
8233 O_RDWR | O_CLOEXEC);
8234 if (IS_ERR(file)) {
8235 put_unused_fd(ret);
8236 ret = PTR_ERR(file);
8237 goto err;
8238 }
8239
8240 #if defined(CONFIG_UNIX)
8241 ctx->ring_sock->file = file;
8242 #endif
8243 fd_install(ret, file);
8244 return ret;
8245 err:
8246 #if defined(CONFIG_UNIX)
8247 sock_release(ctx->ring_sock);
8248 ctx->ring_sock = NULL;
8249 #endif
8250 return ret;
8251 }
8252
8253 static int io_uring_create(unsigned entries, struct io_uring_params *p,
8254 struct io_uring_params __user *params)
8255 {
8256 struct user_struct *user = NULL;
8257 struct io_ring_ctx *ctx;
8258 bool limit_mem;
8259 int ret;
8260
8261 if (!entries)
8262 return -EINVAL;
8263 if (entries > IORING_MAX_ENTRIES) {
8264 if (!(p->flags & IORING_SETUP_CLAMP))
8265 return -EINVAL;
8266 entries = IORING_MAX_ENTRIES;
8267 }
8268
8269 /*
8270 * Use twice as many entries for the CQ ring. It's possible for the
8271 * application to drive a higher depth than the size of the SQ ring,
8272 * since the sqes are only used at submission time. This allows for
8273 * some flexibility in overcommitting a bit. If the application has
8274 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
8275 * of CQ ring entries manually.
8276 */
8277 p->sq_entries = roundup_pow_of_two(entries);
8278 if (p->flags & IORING_SETUP_CQSIZE) {
8279 /*
8280 * If IORING_SETUP_CQSIZE is set, we do the same roundup
8281 * to a power-of-two, if it isn't already. We do NOT impose
8282 * any cq vs sq ring sizing.
8283 */
8284 if (p->cq_entries < p->sq_entries)
8285 return -EINVAL;
8286 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
8287 if (!(p->flags & IORING_SETUP_CLAMP))
8288 return -EINVAL;
8289 p->cq_entries = IORING_MAX_CQ_ENTRIES;
8290 }
8291 p->cq_entries = roundup_pow_of_two(p->cq_entries);
8292 } else {
8293 p->cq_entries = 2 * p->sq_entries;
8294 }
8295
8296 user = get_uid(current_user());
8297 limit_mem = !capable(CAP_IPC_LOCK);
8298
8299 if (limit_mem) {
8300 ret = __io_account_mem(user,
8301 ring_pages(p->sq_entries, p->cq_entries));
8302 if (ret) {
8303 free_uid(user);
8304 return ret;
8305 }
8306 }
8307
8308 ctx = io_ring_ctx_alloc(p);
8309 if (!ctx) {
8310 if (limit_mem)
8311 __io_unaccount_mem(user, ring_pages(p->sq_entries,
8312 p->cq_entries));
8313 free_uid(user);
8314 return -ENOMEM;
8315 }
8316 ctx->compat = in_compat_syscall();
8317 ctx->user = user;
8318 ctx->creds = get_current_cred();
8319
8320 ret = io_allocate_scq_urings(ctx, p);
8321 if (ret)
8322 goto err;
8323
8324 ret = io_sq_offload_start(ctx, p);
8325 if (ret)
8326 goto err;
8327
8328 memset(&p->sq_off, 0, sizeof(p->sq_off));
8329 p->sq_off.head = offsetof(struct io_rings, sq.head);
8330 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
8331 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
8332 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
8333 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
8334 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
8335 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
8336
8337 memset(&p->cq_off, 0, sizeof(p->cq_off));
8338 p->cq_off.head = offsetof(struct io_rings, cq.head);
8339 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
8340 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
8341 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
8342 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
8343 p->cq_off.cqes = offsetof(struct io_rings, cqes);
8344 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
8345
8346 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
8347 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
8348 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
8349 IORING_FEAT_POLL_32BITS;
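
/*
 * Editor's note, not part of the kernel sources: the sq_off/cq_off values
 * filled in above are byte offsets into the ring mappings. A sketch of how
 * userspace might turn them into pointers; sq_ring_view and init_sq_view()
 * are hypothetical names, and sq_ring_ptr is assumed to come from an
 * mmap(..., IORING_OFF_SQ_RING) of the ring fd. When IORING_FEAT_SINGLE_MMAP
 * is advertised, the CQ ring fields live in that same mapping (via cq_off);
 * otherwise a second mmap at IORING_OFF_CQ_RING is required.
 *
 *    #include <linux/io_uring.h>
 *
 *    struct sq_ring_view {
 *        unsigned *head, *tail, *ring_mask, *ring_entries;
 *        unsigned *flags, *dropped, *array;
 *    };
 *
 *    static void init_sq_view(void *sq_ring_ptr,
 *                             const struct io_uring_params *p,
 *                             struct sq_ring_view *v)
 *    {
 *        char *base = sq_ring_ptr;
 *
 *        v->head         = (unsigned *)(base + p->sq_off.head);
 *        v->tail         = (unsigned *)(base + p->sq_off.tail);
 *        v->ring_mask    = (unsigned *)(base + p->sq_off.ring_mask);
 *        v->ring_entries = (unsigned *)(base + p->sq_off.ring_entries);
 *        v->flags        = (unsigned *)(base + p->sq_off.flags);
 *        v->dropped      = (unsigned *)(base + p->sq_off.dropped);
 *        v->array        = (unsigned *)(base + p->sq_off.array);
 *    }
 */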
8350
8351 if (copy_to_user(params, p, sizeof(*p))) {
8352 ret = -EFAULT;
8353 goto err;
8354 }
8355
8356 /*
8357 * Account memory _before_ installing the file descriptor. Once
8358 * the descriptor is installed, it can get closed at any time.
8359 */
8360 io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
8361 ACCT_LOCKED);
8362 ctx->limit_mem = limit_mem;
8363
8364 /*
8365 * Install ring fd as the very last thing, so we don't risk someone
8366 * having closed it before we finish setup.
8367 */
8368 ret = io_uring_get_fd(ctx);
8369 if (ret < 0)
8370 goto err;
8371
8372 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
8373 return ret;
8374 err:
8375 io_ring_ctx_wait_and_kill(ctx);
8376 return ret;
8377 }
8378
8379 /*
8380 * Sets up an io_uring context, and returns the fd. The application asks for a
8381 * ring size; we return the actual sq/cq ring sizes (among other things) in the
8382 * params structure passed in.
8383 */
8384 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
8385 {
8386 struct io_uring_params p;
8387 int i;
8388
8389 if (copy_from_user(&p, params, sizeof(p)))
8390 return -EFAULT;
8391 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
8392 if (p.resv[i])
8393 return -EINVAL;
8394 }
8395
8396 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
8397 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
8398 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
8399 return -EINVAL;
8400
8401 return io_uring_create(entries, &p, params);
8402 }
8403
8404 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
8405 struct io_uring_params __user *, params)
8406 {
8407 return io_uring_setup(entries, params);
8408 }
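
/*
 * Editor's note, not part of the kernel sources: libc has traditionally not
 * shipped a wrapper for this syscall, so applications (or liburing) invoke it
 * via syscall(2). A minimal sketch, assuming headers that define
 * __NR_io_uring_setup; note that the params structure must be zero-initialised
 * so the reserved-field check above passes.
 *
 *    #include <string.h>
 *    #include <sys/syscall.h>
 *    #include <unistd.h>
 *    #include <linux/io_uring.h>
 *
 *    static int ring_setup(unsigned entries, unsigned flags,
 *                          struct io_uring_params *p)
 *    {
 *        memset(p, 0, sizeof(*p));
 *        p->flags = flags;
 *        return (int) syscall(__NR_io_uring_setup, entries, p);
 *    }
 */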
8409
8410 static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
8411 {
8412 struct io_uring_probe *p;
8413 size_t size;
8414 int i, ret;
8415
8416 size = struct_size(p, ops, nr_args);
8417 if (size == SIZE_MAX)
8418 return -EOVERFLOW;
8419 p = kzalloc(size, GFP_KERNEL);
8420 if (!p)
8421 return -ENOMEM;
8422
8423 ret = -EFAULT;
8424 if (copy_from_user(p, arg, size))
8425 goto out;
8426 ret = -EINVAL;
8427 if (memchr_inv(p, 0, size))
8428 goto out;
8429
8430 p->last_op = IORING_OP_LAST - 1;
8431 if (nr_args > IORING_OP_LAST)
8432 nr_args = IORING_OP_LAST;
8433
8434 for (i = 0; i < nr_args; i++) {
8435 p->ops[i].op = i;
8436 if (!io_op_defs[i].not_supported)
8437 p->ops[i].flags = IO_URING_OP_SUPPORTED;
8438 }
8439 p->ops_len = i;
8440
8441 ret = 0;
8442 if (copy_to_user(arg, p, size))
8443 ret = -EFAULT;
8444 out:
8445 kfree(p);
8446 return ret;
8447 }
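
/*
 * Editor's note, not part of the kernel sources: IORING_REGISTER_PROBE lets
 * an application discover which opcodes this kernel supports. A sketch,
 * assuming a ring fd from io_uring_setup(2); opcode_supported() is a
 * hypothetical helper. The probe buffer must be zero-filled, or the
 * memchr_inv() check above returns -EINVAL.
 *
 *    #include <stdlib.h>
 *    #include <sys/syscall.h>
 *    #include <unistd.h>
 *    #include <linux/io_uring.h>
 *
 *    static int opcode_supported(int ring_fd, unsigned opcode)
 *    {
 *        unsigned nr = 256;    // maximum accepted by the register path
 *        struct io_uring_probe *probe;
 *        int ret;
 *
 *        // must be zero-filled to pass the memchr_inv() check above
 *        probe = calloc(1, sizeof(*probe) +
 *                          nr * sizeof(struct io_uring_probe_op));
 *        if (!probe)
 *            return -1;
 *        ret = (int) syscall(__NR_io_uring_register, ring_fd,
 *                            IORING_REGISTER_PROBE, probe, nr);
 *        if (ret == 0)
 *            ret = opcode <= probe->last_op &&
 *                  (probe->ops[opcode].flags & IO_URING_OP_SUPPORTED);
 *        else
 *            ret = -1;
 *        free(probe);
 *        return ret;           // 1, 0, or -1 on failure
 *    }
 */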
8448
8449 static int io_register_personality(struct io_ring_ctx *ctx)
8450 {
8451 const struct cred *creds = get_current_cred();
8452 int id;
8453
8454 id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
8455 USHRT_MAX, GFP_KERNEL);
8456 if (id < 0)
8457 put_cred(creds);
8458 return id;
8459 }
8460
8461 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
8462 {
8463 const struct cred *old_creds;
8464
8465 old_creds = idr_remove(&ctx->personality_idr, id);
8466 if (old_creds) {
8467 put_cred(old_creds);
8468 return 0;
8469 }
8470
8471 return -EINVAL;
8472 }
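
/*
 * Editor's note, not part of the kernel sources: registering a personality
 * snapshots the caller's credentials and hands back an id, which a submission
 * can later reference through the sqe->personality field (its offset is
 * pinned by the BUILD_BUG_SQE_ELEM() checks at the bottom of this file). As
 * the checks below show, REGISTER takes no argument and UNREGISTER passes the
 * id in nr_args. A sketch with hypothetical helper names:
 *
 *    #include <sys/syscall.h>
 *    #include <unistd.h>
 *    #include <linux/io_uring.h>
 *
 *    static int personality_register(int ring_fd)
 *    {
 *        // returns the new id (> 0) or -1 with errno set
 *        return (int) syscall(__NR_io_uring_register, ring_fd,
 *                             IORING_REGISTER_PERSONALITY, NULL, 0);
 *    }
 *
 *    static int personality_unregister(int ring_fd, unsigned id)
 *    {
 *        return (int) syscall(__NR_io_uring_register, ring_fd,
 *                             IORING_UNREGISTER_PERSONALITY, NULL, id);
 *    }
 */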
8473
8474 static bool io_register_op_must_quiesce(int op)
8475 {
8476 switch (op) {
8477 case IORING_UNREGISTER_FILES:
8478 case IORING_REGISTER_FILES_UPDATE:
8479 case IORING_REGISTER_PROBE:
8480 case IORING_REGISTER_PERSONALITY:
8481 case IORING_UNREGISTER_PERSONALITY:
8482 return false;
8483 default:
8484 return true;
8485 }
8486 }
8487
8488 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
8489 void __user *arg, unsigned nr_args)
8490 __releases(ctx->uring_lock)
8491 __acquires(ctx->uring_lock)
8492 {
8493 int ret;
8494
8495 /*
8496 * We're inside the ring mutex; if the ref is already dying, then
8497 * someone else killed the ctx or is already going through
8498 * io_uring_register().
8499 */
8500 if (percpu_ref_is_dying(&ctx->refs))
8501 return -ENXIO;
8502
8503 if (io_register_op_must_quiesce(opcode)) {
8504 percpu_ref_kill(&ctx->refs);
8505
8506 /*
8507 * Drop uring mutex before waiting for references to exit. If
8508 * another thread is currently inside io_uring_enter() it might
8509 * need to grab the uring_lock to make progress. If we hold it
8510 * here across the drain wait, then we can deadlock. It's safe
8511 * to drop the mutex here, since no new references will come in
8512 * after we've killed the percpu ref.
8513 */
8514 mutex_unlock(&ctx->uring_lock);
8515 ret = wait_for_completion_interruptible(&ctx->ref_comp);
8516 mutex_lock(&ctx->uring_lock);
8517 if (ret) {
8518 percpu_ref_resurrect(&ctx->refs);
8519 ret = -EINTR;
8520 goto out;
8521 }
8522 }
8523
8524 switch (opcode) {
8525 case IORING_REGISTER_BUFFERS:
8526 ret = io_sqe_buffer_register(ctx, arg, nr_args);
8527 break;
8528 case IORING_UNREGISTER_BUFFERS:
8529 ret = -EINVAL;
8530 if (arg || nr_args)
8531 break;
8532 ret = io_sqe_buffer_unregister(ctx);
8533 break;
8534 case IORING_REGISTER_FILES:
8535 ret = io_sqe_files_register(ctx, arg, nr_args);
8536 break;
8537 case IORING_UNREGISTER_FILES:
8538 ret = -EINVAL;
8539 if (arg || nr_args)
8540 break;
8541 ret = io_sqe_files_unregister(ctx);
8542 break;
8543 case IORING_REGISTER_FILES_UPDATE:
8544 ret = io_sqe_files_update(ctx, arg, nr_args);
8545 break;
8546 case IORING_REGISTER_EVENTFD:
8547 case IORING_REGISTER_EVENTFD_ASYNC:
8548 ret = -EINVAL;
8549 if (nr_args != 1)
8550 break;
8551 ret = io_eventfd_register(ctx, arg);
8552 if (ret)
8553 break;
8554 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
8555 ctx->eventfd_async = 1;
8556 else
8557 ctx->eventfd_async = 0;
8558 break;
8559 case IORING_UNREGISTER_EVENTFD:
8560 ret = -EINVAL;
8561 if (arg || nr_args)
8562 break;
8563 ret = io_eventfd_unregister(ctx);
8564 break;
8565 case IORING_REGISTER_PROBE:
8566 ret = -EINVAL;
8567 if (!arg || nr_args > 256)
8568 break;
8569 ret = io_probe(ctx, arg, nr_args);
8570 break;
8571 case IORING_REGISTER_PERSONALITY:
8572 ret = -EINVAL;
8573 if (arg || nr_args)
8574 break;
8575 ret = io_register_personality(ctx);
8576 break;
8577 case IORING_UNREGISTER_PERSONALITY:
8578 ret = -EINVAL;
8579 if (arg)
8580 break;
8581 ret = io_unregister_personality(ctx, nr_args);
8582 break;
8583 default:
8584 ret = -EINVAL;
8585 break;
8586 }
8587
8588 if (io_register_op_must_quiesce(opcode)) {
8589 /* bring the ctx back to life */
8590 percpu_ref_reinit(&ctx->refs);
8591 out:
8592 reinit_completion(&ctx->ref_comp);
8593 }
8594 return ret;
8595 }
8596
8597 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
8598 void __user *, arg, unsigned int, nr_args)
8599 {
8600 struct io_ring_ctx *ctx;
8601 long ret = -EBADF;
8602 struct fd f;
8603
8604 f = fdget(fd);
8605 if (!f.file)
8606 return -EBADF;
8607
8608 ret = -EOPNOTSUPP;
8609 if (f.file->f_op != &io_uring_fops)
8610 goto out_fput;
8611
8612 ctx = f.file->private_data;
8613
8614 mutex_lock(&ctx->uring_lock);
8615 ret = __io_uring_register(ctx, opcode, arg, nr_args);
8616 mutex_unlock(&ctx->uring_lock);
8617 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
8618 ctx->cq_ev_fd != NULL, ret);
8619 out_fput:
8620 fdput(f);
8621 return ret;
8622 }
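
/*
 * Editor's note, not part of the kernel sources: for the opcodes that quiesce
 * the ring, the interruptible wait in __io_uring_register() can make this
 * syscall fail with EINTR, in which case it can simply be retried. A minimal
 * raw-syscall wrapper sketch; ring_register() is a hypothetical name.
 *
 *    #include <errno.h>
 *    #include <sys/syscall.h>
 *    #include <unistd.h>
 *
 *    static int ring_register(int ring_fd, unsigned opcode,
 *                             void *arg, unsigned nr_args)
 *    {
 *        int ret;
 *
 *        do {
 *            ret = (int) syscall(__NR_io_uring_register, ring_fd,
 *                                opcode, arg, nr_args);
 *        } while (ret < 0 && errno == EINTR);
 *        return ret;
 *    }
 */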
8623
8624 static int __init io_uring_init(void)
8625 {
8626 #define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
8627 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
8628 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
8629 } while (0)
8630
8631 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
8632 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
8633 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
8634 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
8635 BUILD_BUG_SQE_ELEM(1, __u8, flags);
8636 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
8637 BUILD_BUG_SQE_ELEM(4, __s32, fd);
8638 BUILD_BUG_SQE_ELEM(8, __u64, off);
8639 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
8640 BUILD_BUG_SQE_ELEM(16, __u64, addr);
8641 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
8642 BUILD_BUG_SQE_ELEM(24, __u32, len);
8643 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
8644 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
8645 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
8646 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
8647 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
8648 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
8649 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
8650 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
8651 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
8652 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
8653 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
8654 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
8655 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
8656 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
8657 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
8658 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
8659 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
8660 BUILD_BUG_SQE_ELEM(42, __u16, personality);
8661 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
8662
8663 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
8664 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
8665 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
8666 return 0;
8667 }
8668 __initcall(io_uring_init);
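
/*
 * Editor's note, not part of the kernel sources: the BUILD_BUG_ON() and
 * BUILD_BUG_SQE_ELEM() checks above pin the SQE layout as userspace ABI. A
 * userspace consumer can mirror a couple of them with C11 static_assert,
 * for example:
 *
 *    #include <assert.h>
 *    #include <stddef.h>
 *    #include <linux/io_uring.h>
 *
 *    static_assert(sizeof(struct io_uring_sqe) == 64,
 *                  "io_uring_sqe ABI size changed");
 *    static_assert(offsetof(struct io_uring_sqe, user_data) == 32,
 *                  "user_data moved");
 *    static_assert(offsetof(struct io_uring_sqe, personality) == 42,
 *                  "personality moved");
 */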