fs/io_uring.c (git.proxmox.com / mirror_ubuntu-jammy-kernel.git, at commit "io_uring: don't do async setup for links' heads")
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
29 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes and to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
41 */
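/*
 * Illustrative application-side sketch of the CQ reaping protocol described
 * above. This is not part of the kernel build; it assumes cq_khead, cq_ktail,
 * cq_kring_mask and cqes were obtained by mmap()ing IORING_OFF_CQ_RING and
 * applying struct io_cqring_offsets, and handle_cqe() is a placeholder:
 *
 *	unsigned head = *cq_khead;	// CQ head is owned by the application
 *	unsigned tail = __atomic_load_n(cq_ktail, __ATOMIC_ACQUIRE);
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_kring_mask];
 *		handle_cqe(cqe);	// consume before publishing the new head
 *		head++;
 *	}
 *	// pairs with the kernel-side head load, per the barrier notes above
 *	__atomic_store_n(cq_khead, head, __ATOMIC_RELEASE);
 */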
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <linux/compat.h>
47 #include <net/compat.h>
48 #include <linux/refcount.h>
49 #include <linux/uio.h>
50 #include <linux/bits.h>
51
52 #include <linux/sched/signal.h>
53 #include <linux/fs.h>
54 #include <linux/file.h>
55 #include <linux/fdtable.h>
56 #include <linux/mm.h>
57 #include <linux/mman.h>
58 #include <linux/percpu.h>
59 #include <linux/slab.h>
60 #include <linux/kthread.h>
61 #include <linux/blkdev.h>
62 #include <linux/bvec.h>
63 #include <linux/net.h>
64 #include <net/sock.h>
65 #include <net/af_unix.h>
66 #include <net/scm.h>
67 #include <linux/anon_inodes.h>
68 #include <linux/sched/mm.h>
69 #include <linux/uaccess.h>
70 #include <linux/nospec.h>
71 #include <linux/sizes.h>
72 #include <linux/hugetlb.h>
73 #include <linux/highmem.h>
74 #include <linux/namei.h>
75 #include <linux/fsnotify.h>
76 #include <linux/fadvise.h>
77 #include <linux/eventpoll.h>
78 #include <linux/fs_struct.h>
79 #include <linux/splice.h>
80 #include <linux/task_work.h>
81 #include <linux/pagemap.h>
82 #include <linux/io_uring.h>
83 #include <linux/blk-cgroup.h>
84 #include <linux/audit.h>
85
86 #define CREATE_TRACE_POINTS
87 #include <trace/events/io_uring.h>
88
89 #include <uapi/linux/io_uring.h>
90
91 #include "internal.h"
92 #include "io-wq.h"
93
94 #define IORING_MAX_ENTRIES 32768
95 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
96
97 /*
98 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
99 */
100 #define IORING_FILE_TABLE_SHIFT 9
101 #define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
102 #define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
103 #define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
104 #define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
105 IORING_REGISTER_LAST + IORING_OP_LAST)
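/*
 * Sketch of how the two-level fixed file table is indexed with the macros
 * above (this is the lookup used by io_file_from_index() further down in
 * this file): a registered file index i resolves as
 *
 *	table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 *	file  = table->files[i & IORING_FILE_TABLE_MASK];
 *
 * so each second-level table holds 512 file pointers (512 * 8 bytes is one
 * 4K page on 64-bit), and 64 such tables give IORING_MAX_FIXED_FILES.
 */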
106
107 #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
108 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
109 IOSQE_BUFFER_SELECT)
110
111 struct io_uring {
112 u32 head ____cacheline_aligned_in_smp;
113 u32 tail ____cacheline_aligned_in_smp;
114 };
115
116 /*
117 * This data is shared with the application through the mmap at offsets
118 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
119 *
120 * The offsets to the member fields are published through struct
121 * io_sqring_offsets when calling io_uring_setup.
122 */
123 struct io_rings {
124 /*
125 * Head and tail offsets into the ring; the offsets need to be
126 * masked to get valid indices.
127 *
128 * The kernel controls head of the sq ring and the tail of the cq ring,
129 * and the application controls tail of the sq ring and the head of the
130 * cq ring.
131 */
132 struct io_uring sq, cq;
133 /*
134 * Bitmasks to apply to head and tail offsets (constant, equals
135 * ring_entries - 1)
136 */
137 u32 sq_ring_mask, cq_ring_mask;
138 /* Ring sizes (constant, power of 2) */
139 u32 sq_ring_entries, cq_ring_entries;
140 /*
141 * Number of invalid entries dropped by the kernel due to
142 * invalid index stored in array
143 *
144 * Written by the kernel, shouldn't be modified by the
145 * application (i.e. get number of "new events" by comparing to
146 * cached value).
147 *
148 * After a new SQ head value was read by the application this
149 * counter includes all submissions that were dropped reaching
150 * the new SQ head (and possibly more).
151 */
152 u32 sq_dropped;
153 /*
154 * Runtime SQ flags
155 *
156 * Written by the kernel, shouldn't be modified by the
157 * application.
158 *
159 * The application needs a full memory barrier before checking
160 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
161 */
162 u32 sq_flags;
163 /*
164 * Runtime CQ flags
165 *
166 * Written by the application, shouldn't be modified by the
167 * kernel.
168 */
169 u32 cq_flags;
170 /*
171 * Number of completion events lost because the queue was full;
172 * this should be avoided by the application by making sure
173 * there are not more requests pending than there is space in
174 * the completion queue.
175 *
176 * Written by the kernel, shouldn't be modified by the
177 * application (i.e. get number of "new events" by comparing to
178 * cached value).
179 *
180 * As completion events come in out of order this counter is not
181 * ordered with any other data.
182 */
183 u32 cq_overflow;
184 /*
185 * Ring buffer of completion events.
186 *
187 * The kernel writes completion events fresh every time they are
188 * produced, so the application is allowed to modify pending
189 * entries.
190 */
191 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
192 };
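/*
 * Minimal application-side sketch of how these fields are reached and how an
 * SQE is published, tying together the offset and barrier comments above.
 * Illustrative only: p is the struct io_uring_params filled in by
 * io_uring_setup(), ring_fd is the returned fd, sqe_index is assumed to name
 * an already filled-in slot of the SQE array mapped at IORING_OFF_SQES, and
 * io_uring_enter() stands for the raw system call:
 *
 *	char *sq = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			ring_fd, IORING_OFF_SQ_RING);
 *	unsigned *ktail = (unsigned *)(sq + p.sq_off.tail);
 *	unsigned *kmask = (unsigned *)(sq + p.sq_off.ring_mask);
 *	unsigned *kflags = (unsigned *)(sq + p.sq_off.flags);
 *	__u32 *array = (__u32 *)(sq + p.sq_off.array);
 *
 *	unsigned tail = *ktail;
 *	array[tail & *kmask] = sqe_index;
 *	__atomic_store_n(ktail, tail + 1, __ATOMIC_RELEASE);
 *	__atomic_thread_fence(__ATOMIC_SEQ_CST);	// full barrier before the flags check
 *	if (*kflags & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 */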
193
194 enum io_uring_cmd_flags {
195 IO_URING_F_NONBLOCK = 1,
196 IO_URING_F_COMPLETE_DEFER = 2,
197 };
198
199 struct io_mapped_ubuf {
200 u64 ubuf;
201 size_t len;
202 struct bio_vec *bvec;
203 unsigned int nr_bvecs;
204 unsigned long acct_pages;
205 };
206
207 struct io_ring_ctx;
208
209 struct io_rsrc_put {
210 struct list_head list;
211 union {
212 void *rsrc;
213 struct file *file;
214 };
215 };
216
217 struct fixed_rsrc_table {
218 struct file **files;
219 };
220
221 struct fixed_rsrc_ref_node {
222 struct percpu_ref refs;
223 struct list_head node;
224 struct list_head rsrc_list;
225 struct fixed_rsrc_data *rsrc_data;
226 void (*rsrc_put)(struct io_ring_ctx *ctx,
227 struct io_rsrc_put *prsrc);
228 struct llist_node llist;
229 bool done;
230 };
231
232 struct fixed_rsrc_data {
233 struct fixed_rsrc_table *table;
234 struct io_ring_ctx *ctx;
235
236 struct fixed_rsrc_ref_node *node;
237 struct percpu_ref refs;
238 struct completion done;
239 };
240
241 struct io_buffer {
242 struct list_head list;
243 __u64 addr;
244 __s32 len;
245 __u16 bid;
246 };
247
248 struct io_restriction {
249 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
250 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
251 u8 sqe_flags_allowed;
252 u8 sqe_flags_required;
253 bool registered;
254 };
255
256 struct io_sq_data {
257 refcount_t refs;
258 struct mutex lock;
259
260 /* ctx's that are using this sqd */
261 struct list_head ctx_list;
262 struct list_head ctx_new_list;
263 struct mutex ctx_lock;
264
265 struct task_struct *thread;
266 struct wait_queue_head wait;
267
268 unsigned sq_thread_idle;
269 };
270
271 #define IO_IOPOLL_BATCH 8
272 #define IO_COMPL_BATCH 32
273 #define IO_REQ_CACHE_SIZE 32
274 #define IO_REQ_ALLOC_BATCH 8
275
276 struct io_comp_state {
277 struct io_kiocb *reqs[IO_COMPL_BATCH];
278 unsigned int nr;
279 unsigned int locked_free_nr;
280 /* inline/task_work completion list, under ->uring_lock */
281 struct list_head free_list;
282 /* IRQ completion list, under ->completion_lock */
283 struct list_head locked_free_list;
284 };
285
286 struct io_submit_link {
287 struct io_kiocb *head;
288 struct io_kiocb *last;
289 };
290
291 struct io_submit_state {
292 struct blk_plug plug;
293 struct io_submit_link link;
294
295 /*
296 * io_kiocb alloc cache
297 */
298 void *reqs[IO_REQ_CACHE_SIZE];
299 unsigned int free_reqs;
300
301 bool plug_started;
302
303 /*
304 * Batch completion logic
305 */
306 struct io_comp_state comp;
307
308 /*
309 * File reference cache
310 */
311 struct file *file;
312 unsigned int fd;
313 unsigned int file_refs;
314 unsigned int ios_left;
315 };
316
317 struct io_ring_ctx {
318 struct {
319 struct percpu_ref refs;
320 } ____cacheline_aligned_in_smp;
321
322 struct {
323 unsigned int flags;
324 unsigned int compat: 1;
325 unsigned int limit_mem: 1;
326 unsigned int cq_overflow_flushed: 1;
327 unsigned int drain_next: 1;
328 unsigned int eventfd_async: 1;
329 unsigned int restricted: 1;
330 unsigned int sqo_dead: 1;
331
332 /*
333 * Ring buffer of indices into array of io_uring_sqe, which is
334 * mmapped by the application using the IORING_OFF_SQES offset.
335 *
336 * This indirection could e.g. be used to assign fixed
337 * io_uring_sqe entries to operations and only submit them to
338 * the queue when needed.
339 *
340 * The kernel modifies neither the indices array nor the entries
341 * array.
342 */
343 u32 *sq_array;
344 unsigned cached_sq_head;
345 unsigned sq_entries;
346 unsigned sq_mask;
347 unsigned sq_thread_idle;
348 unsigned cached_sq_dropped;
349 unsigned cached_cq_overflow;
350 unsigned long sq_check_overflow;
351
352 struct list_head defer_list;
353 struct list_head timeout_list;
354 struct list_head cq_overflow_list;
355
356 struct io_uring_sqe *sq_sqes;
357 } ____cacheline_aligned_in_smp;
358
359 struct {
360 struct mutex uring_lock;
361 wait_queue_head_t wait;
362 } ____cacheline_aligned_in_smp;
363
364 struct io_submit_state submit_state;
365
366 struct io_rings *rings;
367
368 /* IO offload */
369 struct io_wq *io_wq;
370
371 /*
372 * For SQPOLL usage - we hold a reference to the parent task, so we
373 * have access to the ->files
374 */
375 struct task_struct *sqo_task;
376
377 /* Only used for accounting purposes */
378 struct mm_struct *mm_account;
379
380 #ifdef CONFIG_BLK_CGROUP
381 struct cgroup_subsys_state *sqo_blkcg_css;
382 #endif
383
384 struct io_sq_data *sq_data; /* if using sq thread polling */
385
386 struct wait_queue_head sqo_sq_wait;
387 struct list_head sqd_list;
388
389 /*
390 * If used, fixed file set. Writers must ensure that ->refs is dead,
391 * readers must ensure that ->refs is alive as long as the file* is
392 * used. Only updated through io_uring_register(2).
393 */
394 struct fixed_rsrc_data *file_data;
395 unsigned nr_user_files;
396
397 /* if used, fixed mapped user buffers */
398 unsigned nr_user_bufs;
399 struct io_mapped_ubuf *user_bufs;
400
401 struct user_struct *user;
402
403 const struct cred *creds;
404
405 #ifdef CONFIG_AUDIT
406 kuid_t loginuid;
407 unsigned int sessionid;
408 #endif
409
410 struct completion ref_comp;
411 struct completion sq_thread_comp;
412
413 #if defined(CONFIG_UNIX)
414 struct socket *ring_sock;
415 #endif
416
417 struct idr io_buffer_idr;
418
419 struct idr personality_idr;
420
421 struct {
422 unsigned cached_cq_tail;
423 unsigned cq_entries;
424 unsigned cq_mask;
425 atomic_t cq_timeouts;
426 unsigned cq_last_tm_flush;
427 unsigned long cq_check_overflow;
428 struct wait_queue_head cq_wait;
429 struct fasync_struct *cq_fasync;
430 struct eventfd_ctx *cq_ev_fd;
431 } ____cacheline_aligned_in_smp;
432
433 struct {
434 spinlock_t completion_lock;
435
436 /*
437 * ->iopoll_list is protected by the ctx->uring_lock for
438 * io_uring instances that don't use IORING_SETUP_SQPOLL.
439 * For SQPOLL, only the single threaded io_sq_thread() will
440 * manipulate the list, hence no extra locking is needed there.
441 */
442 struct list_head iopoll_list;
443 struct hlist_head *cancel_hash;
444 unsigned cancel_hash_bits;
445 bool poll_multi_file;
446
447 spinlock_t inflight_lock;
448 struct list_head inflight_list;
449 } ____cacheline_aligned_in_smp;
450
451 struct delayed_work rsrc_put_work;
452 struct llist_head rsrc_put_llist;
453 struct list_head rsrc_ref_list;
454 spinlock_t rsrc_ref_lock;
455
456 struct io_restriction restrictions;
457
458 /* Keep this last, we don't need it for the fast path */
459 struct work_struct exit_work;
460 };
461
462 /*
463 * First field must be the file pointer in all the
464 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
465 */
466 struct io_poll_iocb {
467 struct file *file;
468 struct wait_queue_head *head;
469 __poll_t events;
470 bool done;
471 bool canceled;
472 struct wait_queue_entry wait;
473 };
474
475 struct io_poll_remove {
476 struct file *file;
477 u64 addr;
478 };
479
480 struct io_close {
481 struct file *file;
482 int fd;
483 };
484
485 struct io_timeout_data {
486 struct io_kiocb *req;
487 struct hrtimer timer;
488 struct timespec64 ts;
489 enum hrtimer_mode mode;
490 };
491
492 struct io_accept {
493 struct file *file;
494 struct sockaddr __user *addr;
495 int __user *addr_len;
496 int flags;
497 unsigned long nofile;
498 };
499
500 struct io_sync {
501 struct file *file;
502 loff_t len;
503 loff_t off;
504 int flags;
505 int mode;
506 };
507
508 struct io_cancel {
509 struct file *file;
510 u64 addr;
511 };
512
513 struct io_timeout {
514 struct file *file;
515 u32 off;
516 u32 target_seq;
517 struct list_head list;
518 /* head of the link, used by linked timeouts only */
519 struct io_kiocb *head;
520 };
521
522 struct io_timeout_rem {
523 struct file *file;
524 u64 addr;
525
526 /* timeout update */
527 struct timespec64 ts;
528 u32 flags;
529 };
530
531 struct io_rw {
532 /* NOTE: kiocb has the file as the first member, so don't do it here */
533 struct kiocb kiocb;
534 u64 addr;
535 u64 len;
536 };
537
538 struct io_connect {
539 struct file *file;
540 struct sockaddr __user *addr;
541 int addr_len;
542 };
543
544 struct io_sr_msg {
545 struct file *file;
546 union {
547 struct user_msghdr __user *umsg;
548 void __user *buf;
549 };
550 int msg_flags;
551 int bgid;
552 size_t len;
553 struct io_buffer *kbuf;
554 };
555
556 struct io_open {
557 struct file *file;
558 int dfd;
559 struct filename *filename;
560 struct open_how how;
561 unsigned long nofile;
562 };
563
564 struct io_rsrc_update {
565 struct file *file;
566 u64 arg;
567 u32 nr_args;
568 u32 offset;
569 };
570
571 struct io_fadvise {
572 struct file *file;
573 u64 offset;
574 u32 len;
575 u32 advice;
576 };
577
578 struct io_madvise {
579 struct file *file;
580 u64 addr;
581 u32 len;
582 u32 advice;
583 };
584
585 struct io_epoll {
586 struct file *file;
587 int epfd;
588 int op;
589 int fd;
590 struct epoll_event event;
591 };
592
593 struct io_splice {
594 struct file *file_out;
595 struct file *file_in;
596 loff_t off_out;
597 loff_t off_in;
598 u64 len;
599 unsigned int flags;
600 };
601
602 struct io_provide_buf {
603 struct file *file;
604 __u64 addr;
605 __s32 len;
606 __u32 bgid;
607 __u16 nbufs;
608 __u16 bid;
609 };
610
611 struct io_statx {
612 struct file *file;
613 int dfd;
614 unsigned int mask;
615 unsigned int flags;
616 const char __user *filename;
617 struct statx __user *buffer;
618 };
619
620 struct io_shutdown {
621 struct file *file;
622 int how;
623 };
624
625 struct io_rename {
626 struct file *file;
627 int old_dfd;
628 int new_dfd;
629 struct filename *oldpath;
630 struct filename *newpath;
631 int flags;
632 };
633
634 struct io_unlink {
635 struct file *file;
636 int dfd;
637 int flags;
638 struct filename *filename;
639 };
640
641 struct io_completion {
642 struct file *file;
643 struct list_head list;
644 int cflags;
645 };
646
647 struct io_async_connect {
648 struct sockaddr_storage address;
649 };
650
651 struct io_async_msghdr {
652 struct iovec fast_iov[UIO_FASTIOV];
653 /* points to an allocated iov, if NULL we use fast_iov instead */
654 struct iovec *free_iov;
655 struct sockaddr __user *uaddr;
656 struct msghdr msg;
657 struct sockaddr_storage addr;
658 };
659
660 struct io_async_rw {
661 struct iovec fast_iov[UIO_FASTIOV];
662 const struct iovec *free_iovec;
663 struct iov_iter iter;
664 size_t bytes_done;
665 struct wait_page_queue wpq;
666 };
667
668 enum {
669 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
670 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
671 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
672 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
673 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
674 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
675
676 REQ_F_FAIL_LINK_BIT,
677 REQ_F_INFLIGHT_BIT,
678 REQ_F_CUR_POS_BIT,
679 REQ_F_NOWAIT_BIT,
680 REQ_F_LINK_TIMEOUT_BIT,
681 REQ_F_ISREG_BIT,
682 REQ_F_NEED_CLEANUP_BIT,
683 REQ_F_POLLED_BIT,
684 REQ_F_BUFFER_SELECTED_BIT,
685 REQ_F_NO_FILE_TABLE_BIT,
686 REQ_F_WORK_INITIALIZED_BIT,
687 REQ_F_LTIMEOUT_ACTIVE_BIT,
688 REQ_F_COMPLETE_INLINE_BIT,
689
690 /* not a real bit, just to check we're not overflowing the space */
691 __REQ_F_LAST_BIT,
692 };
693
694 enum {
695 /* ctx owns file */
696 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
697 /* drain existing IO first */
698 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
699 /* linked sqes */
700 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
701 /* doesn't sever on completion < 0 */
702 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
703 /* IOSQE_ASYNC */
704 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
705 /* IOSQE_BUFFER_SELECT */
706 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
707
708 /* fail rest of links */
709 REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
710 /* on inflight list */
711 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
712 /* read/write uses file position */
713 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
714 /* must not punt to workers */
715 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
716 /* has or had linked timeout */
717 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
718 /* regular file */
719 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
720 /* needs cleanup */
721 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
722 /* already went through poll handler */
723 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
724 /* buffer already selected */
725 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
726 /* doesn't need file table for this request */
727 REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
728 /* io_wq_work is initialized */
729 REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
730 /* linked timeout is active, i.e. prepared by link's head */
731 REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
732 /* completion is deferred through io_comp_state */
733 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
734 };
735
736 struct async_poll {
737 struct io_poll_iocb poll;
738 struct io_poll_iocb *double_poll;
739 };
740
741 struct io_task_work {
742 struct io_wq_work_node node;
743 task_work_func_t func;
744 };
745
746 /*
747 * NOTE! Each of the iocb union members has the file pointer
748 * as the first entry in their struct definition. So you can
749 * access the file pointer through any of the sub-structs,
750 * or directly as just 'file' in this struct.
751 */
752 struct io_kiocb {
753 union {
754 struct file *file;
755 struct io_rw rw;
756 struct io_poll_iocb poll;
757 struct io_poll_remove poll_remove;
758 struct io_accept accept;
759 struct io_sync sync;
760 struct io_cancel cancel;
761 struct io_timeout timeout;
762 struct io_timeout_rem timeout_rem;
763 struct io_connect connect;
764 struct io_sr_msg sr_msg;
765 struct io_open open;
766 struct io_close close;
767 struct io_rsrc_update rsrc_update;
768 struct io_fadvise fadvise;
769 struct io_madvise madvise;
770 struct io_epoll epoll;
771 struct io_splice splice;
772 struct io_provide_buf pbuf;
773 struct io_statx statx;
774 struct io_shutdown shutdown;
775 struct io_rename rename;
776 struct io_unlink unlink;
777 /* use only after cleaning per-op data, see io_clean_op() */
778 struct io_completion compl;
779 };
780
781 /* opcode allocated if it needs to store data for async defer */
782 void *async_data;
783 u8 opcode;
784 /* polled IO has completed */
785 u8 iopoll_completed;
786
787 u16 buf_index;
788 u32 result;
789
790 struct io_ring_ctx *ctx;
791 unsigned int flags;
792 refcount_t refs;
793 struct task_struct *task;
794 u64 user_data;
795
796 struct io_kiocb *link;
797 struct percpu_ref *fixed_rsrc_refs;
798
799 /*
800 * 1. used with ctx->iopoll_list with reads/writes
801 * 2. to track reqs with ->files (see io_op_def::file_table)
802 */
803 struct list_head inflight_entry;
804 union {
805 struct io_task_work io_task_work;
806 struct callback_head task_work;
807 };
808 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
809 struct hlist_node hash_node;
810 struct async_poll *apoll;
811 struct io_wq_work work;
812 };
813
814 struct io_defer_entry {
815 struct list_head list;
816 struct io_kiocb *req;
817 u32 seq;
818 };
819
820 struct io_op_def {
821 /* needs req->file assigned */
822 unsigned needs_file : 1;
823 /* hash wq insertion if file is a regular file */
824 unsigned hash_reg_file : 1;
825 /* unbound wq insertion if file is a non-regular file */
826 unsigned unbound_nonreg_file : 1;
827 /* opcode is not supported by this kernel */
828 unsigned not_supported : 1;
829 /* set if opcode supports polled "wait" */
830 unsigned pollin : 1;
831 unsigned pollout : 1;
832 /* op supports buffer selection */
833 unsigned buffer_select : 1;
834 /* must always have async data allocated */
835 unsigned needs_async_data : 1;
836 /* should block plug */
837 unsigned plug : 1;
838 /* size of async data needed, if any */
839 unsigned short async_size;
840 unsigned work_flags;
841 };
842
843 static const struct io_op_def io_op_defs[] = {
844 [IORING_OP_NOP] = {},
845 [IORING_OP_READV] = {
846 .needs_file = 1,
847 .unbound_nonreg_file = 1,
848 .pollin = 1,
849 .buffer_select = 1,
850 .needs_async_data = 1,
851 .plug = 1,
852 .async_size = sizeof(struct io_async_rw),
853 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
854 },
855 [IORING_OP_WRITEV] = {
856 .needs_file = 1,
857 .hash_reg_file = 1,
858 .unbound_nonreg_file = 1,
859 .pollout = 1,
860 .needs_async_data = 1,
861 .plug = 1,
862 .async_size = sizeof(struct io_async_rw),
863 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
864 IO_WQ_WORK_FSIZE,
865 },
866 [IORING_OP_FSYNC] = {
867 .needs_file = 1,
868 .work_flags = IO_WQ_WORK_BLKCG,
869 },
870 [IORING_OP_READ_FIXED] = {
871 .needs_file = 1,
872 .unbound_nonreg_file = 1,
873 .pollin = 1,
874 .plug = 1,
875 .async_size = sizeof(struct io_async_rw),
876 .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
877 },
878 [IORING_OP_WRITE_FIXED] = {
879 .needs_file = 1,
880 .hash_reg_file = 1,
881 .unbound_nonreg_file = 1,
882 .pollout = 1,
883 .plug = 1,
884 .async_size = sizeof(struct io_async_rw),
885 .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
886 IO_WQ_WORK_MM,
887 },
888 [IORING_OP_POLL_ADD] = {
889 .needs_file = 1,
890 .unbound_nonreg_file = 1,
891 },
892 [IORING_OP_POLL_REMOVE] = {},
893 [IORING_OP_SYNC_FILE_RANGE] = {
894 .needs_file = 1,
895 .work_flags = IO_WQ_WORK_BLKCG,
896 },
897 [IORING_OP_SENDMSG] = {
898 .needs_file = 1,
899 .unbound_nonreg_file = 1,
900 .pollout = 1,
901 .needs_async_data = 1,
902 .async_size = sizeof(struct io_async_msghdr),
903 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
904 },
905 [IORING_OP_RECVMSG] = {
906 .needs_file = 1,
907 .unbound_nonreg_file = 1,
908 .pollin = 1,
909 .buffer_select = 1,
910 .needs_async_data = 1,
911 .async_size = sizeof(struct io_async_msghdr),
912 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
913 },
914 [IORING_OP_TIMEOUT] = {
915 .needs_async_data = 1,
916 .async_size = sizeof(struct io_timeout_data),
917 .work_flags = IO_WQ_WORK_MM,
918 },
919 [IORING_OP_TIMEOUT_REMOVE] = {
920 /* used by timeout updates' prep() */
921 .work_flags = IO_WQ_WORK_MM,
922 },
923 [IORING_OP_ACCEPT] = {
924 .needs_file = 1,
925 .unbound_nonreg_file = 1,
926 .pollin = 1,
927 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
928 },
929 [IORING_OP_ASYNC_CANCEL] = {},
930 [IORING_OP_LINK_TIMEOUT] = {
931 .needs_async_data = 1,
932 .async_size = sizeof(struct io_timeout_data),
933 .work_flags = IO_WQ_WORK_MM,
934 },
935 [IORING_OP_CONNECT] = {
936 .needs_file = 1,
937 .unbound_nonreg_file = 1,
938 .pollout = 1,
939 .needs_async_data = 1,
940 .async_size = sizeof(struct io_async_connect),
941 .work_flags = IO_WQ_WORK_MM,
942 },
943 [IORING_OP_FALLOCATE] = {
944 .needs_file = 1,
945 .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
946 },
947 [IORING_OP_OPENAT] = {
948 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
949 IO_WQ_WORK_FS | IO_WQ_WORK_MM,
950 },
951 [IORING_OP_CLOSE] = {
952 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
953 },
954 [IORING_OP_FILES_UPDATE] = {
955 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
956 },
957 [IORING_OP_STATX] = {
958 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
959 IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
960 },
961 [IORING_OP_READ] = {
962 .needs_file = 1,
963 .unbound_nonreg_file = 1,
964 .pollin = 1,
965 .buffer_select = 1,
966 .plug = 1,
967 .async_size = sizeof(struct io_async_rw),
968 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
969 },
970 [IORING_OP_WRITE] = {
971 .needs_file = 1,
972 .unbound_nonreg_file = 1,
973 .pollout = 1,
974 .plug = 1,
975 .async_size = sizeof(struct io_async_rw),
976 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
977 IO_WQ_WORK_FSIZE,
978 },
979 [IORING_OP_FADVISE] = {
980 .needs_file = 1,
981 .work_flags = IO_WQ_WORK_BLKCG,
982 },
983 [IORING_OP_MADVISE] = {
984 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
985 },
986 [IORING_OP_SEND] = {
987 .needs_file = 1,
988 .unbound_nonreg_file = 1,
989 .pollout = 1,
990 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
991 },
992 [IORING_OP_RECV] = {
993 .needs_file = 1,
994 .unbound_nonreg_file = 1,
995 .pollin = 1,
996 .buffer_select = 1,
997 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
998 },
999 [IORING_OP_OPENAT2] = {
1000 .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
1001 IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
1002 },
1003 [IORING_OP_EPOLL_CTL] = {
1004 .unbound_nonreg_file = 1,
1005 .work_flags = IO_WQ_WORK_FILES,
1006 },
1007 [IORING_OP_SPLICE] = {
1008 .needs_file = 1,
1009 .hash_reg_file = 1,
1010 .unbound_nonreg_file = 1,
1011 .work_flags = IO_WQ_WORK_BLKCG,
1012 },
1013 [IORING_OP_PROVIDE_BUFFERS] = {},
1014 [IORING_OP_REMOVE_BUFFERS] = {},
1015 [IORING_OP_TEE] = {
1016 .needs_file = 1,
1017 .hash_reg_file = 1,
1018 .unbound_nonreg_file = 1,
1019 },
1020 [IORING_OP_SHUTDOWN] = {
1021 .needs_file = 1,
1022 },
1023 [IORING_OP_RENAMEAT] = {
1024 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
1025 IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
1026 },
1027 [IORING_OP_UNLINKAT] = {
1028 .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
1029 IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
1030 },
1031 };
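/*
 * Sketch of how the table above is consulted when a request is prepared for
 * async execution (see io_prep_async_work() and
 * __io_sq_thread_acquire_mm_files() below); illustrative only:
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_file)
 *		...assign req->file...
 *	if (def->work_flags & IO_WQ_WORK_MM)
 *		...grab a reference to the submitter's mm...
 */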
1032
1033 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1034 struct task_struct *task,
1035 struct files_struct *files);
1036 static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
1037 static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
1038 struct io_ring_ctx *ctx);
1039 static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
1040 struct fixed_rsrc_ref_node *ref_node);
1041
1042 static bool io_rw_reissue(struct io_kiocb *req);
1043 static void io_cqring_fill_event(struct io_kiocb *req, long res);
1044 static void io_put_req(struct io_kiocb *req);
1045 static void io_put_req_deferred(struct io_kiocb *req, int nr);
1046 static void io_double_put_req(struct io_kiocb *req);
1047 static void io_dismantle_req(struct io_kiocb *req);
1048 static void io_put_task(struct task_struct *task, int nr);
1049 static void io_queue_next(struct io_kiocb *req);
1050 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
1051 static void __io_queue_linked_timeout(struct io_kiocb *req);
1052 static void io_queue_linked_timeout(struct io_kiocb *req);
1053 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
1054 struct io_uring_rsrc_update *ip,
1055 unsigned nr_args);
1056 static void __io_clean_op(struct io_kiocb *req);
1057 static struct file *io_file_get(struct io_submit_state *state,
1058 struct io_kiocb *req, int fd, bool fixed);
1059 static void __io_queue_sqe(struct io_kiocb *req);
1060 static void io_rsrc_put_work(struct work_struct *work);
1061
1062 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
1063 struct iov_iter *iter, bool needs_lock);
1064 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
1065 const struct iovec *fast_iov,
1066 struct iov_iter *iter, bool force);
1067 static void io_req_task_queue(struct io_kiocb *req);
1068 static void io_submit_flush_completions(struct io_comp_state *cs,
1069 struct io_ring_ctx *ctx);
1070
1071 static struct kmem_cache *req_cachep;
1072
1073 static const struct file_operations io_uring_fops;
1074
1075 struct sock *io_uring_get_socket(struct file *file)
1076 {
1077 #if defined(CONFIG_UNIX)
1078 if (file->f_op == &io_uring_fops) {
1079 struct io_ring_ctx *ctx = file->private_data;
1080
1081 return ctx->ring_sock->sk;
1082 }
1083 #endif
1084 return NULL;
1085 }
1086 EXPORT_SYMBOL(io_uring_get_socket);
1087
1088 #define io_for_each_link(pos, head) \
1089 for (pos = (head); pos; pos = pos->link)
1090
1091 static inline void io_clean_op(struct io_kiocb *req)
1092 {
1093 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
1094 __io_clean_op(req);
1095 }
1096
1097 static inline void io_set_resource_node(struct io_kiocb *req)
1098 {
1099 struct io_ring_ctx *ctx = req->ctx;
1100
1101 if (!req->fixed_rsrc_refs) {
1102 req->fixed_rsrc_refs = &ctx->file_data->node->refs;
1103 percpu_ref_get(req->fixed_rsrc_refs);
1104 }
1105 }
1106
1107 static bool io_match_task(struct io_kiocb *head,
1108 struct task_struct *task,
1109 struct files_struct *files)
1110 {
1111 struct io_kiocb *req;
1112
1113 if (task && head->task != task) {
1114 /* in terms of cancelation, always match if req task is dead */
1115 if (head->task->flags & PF_EXITING)
1116 return true;
1117 return false;
1118 }
1119 if (!files)
1120 return true;
1121
1122 io_for_each_link(req, head) {
1123 if (!(req->flags & REQ_F_WORK_INITIALIZED))
1124 continue;
1125 if (req->file && req->file->f_op == &io_uring_fops)
1126 return true;
1127 if ((req->work.flags & IO_WQ_WORK_FILES) &&
1128 req->work.identity->files == files)
1129 return true;
1130 }
1131 return false;
1132 }
1133
1134 static void io_sq_thread_drop_mm_files(void)
1135 {
1136 struct files_struct *files = current->files;
1137 struct mm_struct *mm = current->mm;
1138
1139 if (mm) {
1140 kthread_unuse_mm(mm);
1141 mmput(mm);
1142 current->mm = NULL;
1143 }
1144 if (files) {
1145 struct nsproxy *nsproxy = current->nsproxy;
1146
1147 task_lock(current);
1148 current->files = NULL;
1149 current->nsproxy = NULL;
1150 task_unlock(current);
1151 put_files_struct(files);
1152 put_nsproxy(nsproxy);
1153 }
1154 }
1155
1156 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
1157 {
1158 if (!current->files) {
1159 struct files_struct *files;
1160 struct nsproxy *nsproxy;
1161
1162 task_lock(ctx->sqo_task);
1163 files = ctx->sqo_task->files;
1164 if (!files) {
1165 task_unlock(ctx->sqo_task);
1166 return -EOWNERDEAD;
1167 }
1168 atomic_inc(&files->count);
1169 get_nsproxy(ctx->sqo_task->nsproxy);
1170 nsproxy = ctx->sqo_task->nsproxy;
1171 task_unlock(ctx->sqo_task);
1172
1173 task_lock(current);
1174 current->files = files;
1175 current->nsproxy = nsproxy;
1176 task_unlock(current);
1177 }
1178 return 0;
1179 }
1180
1181 static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
1182 {
1183 struct mm_struct *mm;
1184
1185 if (current->mm)
1186 return 0;
1187
1188 task_lock(ctx->sqo_task);
1189 mm = ctx->sqo_task->mm;
1190 if (unlikely(!mm || !mmget_not_zero(mm)))
1191 mm = NULL;
1192 task_unlock(ctx->sqo_task);
1193
1194 if (mm) {
1195 kthread_use_mm(mm);
1196 return 0;
1197 }
1198
1199 return -EFAULT;
1200 }
1201
1202 static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
1203 struct io_kiocb *req)
1204 {
1205 const struct io_op_def *def = &io_op_defs[req->opcode];
1206 int ret;
1207
1208 if (def->work_flags & IO_WQ_WORK_MM) {
1209 ret = __io_sq_thread_acquire_mm(ctx);
1210 if (unlikely(ret))
1211 return ret;
1212 }
1213
1214 if (def->needs_file || (def->work_flags & IO_WQ_WORK_FILES)) {
1215 ret = __io_sq_thread_acquire_files(ctx);
1216 if (unlikely(ret))
1217 return ret;
1218 }
1219
1220 return 0;
1221 }
1222
1223 static inline int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
1224 struct io_kiocb *req)
1225 {
1226 if (!(ctx->flags & IORING_SETUP_SQPOLL))
1227 return 0;
1228 return __io_sq_thread_acquire_mm_files(ctx, req);
1229 }
1230
1231 static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
1232 struct cgroup_subsys_state **cur_css)
1233
1234 {
1235 #ifdef CONFIG_BLK_CGROUP
1236 /* puts the old one when swapping */
1237 if (*cur_css != ctx->sqo_blkcg_css) {
1238 kthread_associate_blkcg(ctx->sqo_blkcg_css);
1239 *cur_css = ctx->sqo_blkcg_css;
1240 }
1241 #endif
1242 }
1243
1244 static void io_sq_thread_unassociate_blkcg(void)
1245 {
1246 #ifdef CONFIG_BLK_CGROUP
1247 kthread_associate_blkcg(NULL);
1248 #endif
1249 }
1250
1251 static inline void req_set_fail_links(struct io_kiocb *req)
1252 {
1253 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1254 req->flags |= REQ_F_FAIL_LINK;
1255 }
1256
1257 /*
1258 * None of these are dereferenced, they are simply used to check if any of
1259 * them have changed. If we're under current and check they are still the
1260 * same, we're fine to grab references to them for actual out-of-line use.
1261 */
1262 static void io_init_identity(struct io_identity *id)
1263 {
1264 id->files = current->files;
1265 id->mm = current->mm;
1266 #ifdef CONFIG_BLK_CGROUP
1267 rcu_read_lock();
1268 id->blkcg_css = blkcg_css();
1269 rcu_read_unlock();
1270 #endif
1271 id->creds = current_cred();
1272 id->nsproxy = current->nsproxy;
1273 id->fs = current->fs;
1274 id->fsize = rlimit(RLIMIT_FSIZE);
1275 #ifdef CONFIG_AUDIT
1276 id->loginuid = current->loginuid;
1277 id->sessionid = current->sessionid;
1278 #endif
1279 refcount_set(&id->count, 1);
1280 }
1281
1282 static inline void __io_req_init_async(struct io_kiocb *req)
1283 {
1284 memset(&req->work, 0, sizeof(req->work));
1285 req->flags |= REQ_F_WORK_INITIALIZED;
1286 }
1287
1288 /*
1289 * Note: io_req_init_async() must be called before the first time you
1290 * touch any members of io_wq_work.
1291 */
1292 static inline void io_req_init_async(struct io_kiocb *req)
1293 {
1294 struct io_uring_task *tctx = current->io_uring;
1295
1296 if (req->flags & REQ_F_WORK_INITIALIZED)
1297 return;
1298
1299 __io_req_init_async(req);
1300
1301 /* Grab a ref if this isn't our static identity */
1302 req->work.identity = tctx->identity;
1303 if (tctx->identity != &tctx->__identity)
1304 refcount_inc(&req->work.identity->count);
1305 }
1306
1307 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1308 {
1309 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1310
1311 complete(&ctx->ref_comp);
1312 }
1313
1314 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1315 {
1316 return !req->timeout.off;
1317 }
1318
1319 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1320 {
1321 struct io_ring_ctx *ctx;
1322 int hash_bits;
1323
1324 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1325 if (!ctx)
1326 return NULL;
1327
1328 /*
1329 * Use 5 bits less than the max cq entries; that should give us around
1330 * 32 entries per hash list if totally full and uniformly spread.
1331 */
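/*
 * Worked example (illustrative numbers only): cq_entries == 4096 gives
 * ilog2() == 12, minus 5 is 7 bits, i.e. 128 hash buckets, or
 * 4096 / 128 == 32 entries per bucket when completely full.
 */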
1332 hash_bits = ilog2(p->cq_entries);
1333 hash_bits -= 5;
1334 if (hash_bits <= 0)
1335 hash_bits = 1;
1336 ctx->cancel_hash_bits = hash_bits;
1337 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1338 GFP_KERNEL);
1339 if (!ctx->cancel_hash)
1340 goto err;
1341 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1342
1343 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
1344 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1345 goto err;
1346
1347 ctx->flags = p->flags;
1348 init_waitqueue_head(&ctx->sqo_sq_wait);
1349 INIT_LIST_HEAD(&ctx->sqd_list);
1350 init_waitqueue_head(&ctx->cq_wait);
1351 INIT_LIST_HEAD(&ctx->cq_overflow_list);
1352 init_completion(&ctx->ref_comp);
1353 init_completion(&ctx->sq_thread_comp);
1354 idr_init(&ctx->io_buffer_idr);
1355 idr_init(&ctx->personality_idr);
1356 mutex_init(&ctx->uring_lock);
1357 init_waitqueue_head(&ctx->wait);
1358 spin_lock_init(&ctx->completion_lock);
1359 INIT_LIST_HEAD(&ctx->iopoll_list);
1360 INIT_LIST_HEAD(&ctx->defer_list);
1361 INIT_LIST_HEAD(&ctx->timeout_list);
1362 spin_lock_init(&ctx->inflight_lock);
1363 INIT_LIST_HEAD(&ctx->inflight_list);
1364 spin_lock_init(&ctx->rsrc_ref_lock);
1365 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
1366 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1367 init_llist_head(&ctx->rsrc_put_llist);
1368 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
1369 INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
1370 return ctx;
1371 err:
1372 kfree(ctx->cancel_hash);
1373 kfree(ctx);
1374 return NULL;
1375 }
1376
1377 static bool req_need_defer(struct io_kiocb *req, u32 seq)
1378 {
1379 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1380 struct io_ring_ctx *ctx = req->ctx;
1381
1382 return seq != ctx->cached_cq_tail
1383 + READ_ONCE(ctx->cached_cq_overflow);
1384 }
1385
1386 return false;
1387 }
1388
1389 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
1390 {
1391 if (req->work.identity == &tctx->__identity)
1392 return;
1393 if (refcount_dec_and_test(&req->work.identity->count))
1394 kfree(req->work.identity);
1395 }
1396
1397 static void io_req_clean_work(struct io_kiocb *req)
1398 {
1399 if (!(req->flags & REQ_F_WORK_INITIALIZED))
1400 return;
1401
1402 if (req->work.flags & IO_WQ_WORK_MM)
1403 mmdrop(req->work.identity->mm);
1404 #ifdef CONFIG_BLK_CGROUP
1405 if (req->work.flags & IO_WQ_WORK_BLKCG)
1406 css_put(req->work.identity->blkcg_css);
1407 #endif
1408 if (req->work.flags & IO_WQ_WORK_CREDS)
1409 put_cred(req->work.identity->creds);
1410 if (req->work.flags & IO_WQ_WORK_FS) {
1411 struct fs_struct *fs = req->work.identity->fs;
1412
1413 spin_lock(&req->work.identity->fs->lock);
1414 if (--fs->users)
1415 fs = NULL;
1416 spin_unlock(&req->work.identity->fs->lock);
1417 if (fs)
1418 free_fs_struct(fs);
1419 }
1420 if (req->work.flags & IO_WQ_WORK_FILES) {
1421 put_files_struct(req->work.identity->files);
1422 put_nsproxy(req->work.identity->nsproxy);
1423 }
1424 if (req->flags & REQ_F_INFLIGHT) {
1425 struct io_ring_ctx *ctx = req->ctx;
1426 struct io_uring_task *tctx = req->task->io_uring;
1427 unsigned long flags;
1428
1429 spin_lock_irqsave(&ctx->inflight_lock, flags);
1430 list_del(&req->inflight_entry);
1431 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1432 req->flags &= ~REQ_F_INFLIGHT;
1433 if (atomic_read(&tctx->in_idle))
1434 wake_up(&tctx->wait);
1435 }
1436
1437 req->flags &= ~REQ_F_WORK_INITIALIZED;
1438 req->work.flags &= ~(IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | IO_WQ_WORK_FS |
1439 IO_WQ_WORK_CREDS | IO_WQ_WORK_FILES);
1440 io_put_identity(req->task->io_uring, req);
1441 }
1442
1443 /*
1444 * Create a private copy of io_identity, since some fields don't match
1445 * the current context.
1446 */
1447 static bool io_identity_cow(struct io_kiocb *req)
1448 {
1449 struct io_uring_task *tctx = current->io_uring;
1450 const struct cred *creds = NULL;
1451 struct io_identity *id;
1452
1453 if (req->work.flags & IO_WQ_WORK_CREDS)
1454 creds = req->work.identity->creds;
1455
1456 id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
1457 if (unlikely(!id)) {
1458 req->work.flags |= IO_WQ_WORK_CANCEL;
1459 return false;
1460 }
1461
1462 /*
1463 * We can safely just re-init the creds we copied. Either the field
1464 * matches the current one, or we haven't grabbed it yet. The only
1465 * exception is ->creds, through registered personalities, so handle
1466 * that one separately.
1467 */
1468 io_init_identity(id);
1469 if (creds)
1470 id->creds = creds;
1471
1472 /* add one for this request */
1473 refcount_inc(&id->count);
1474
1475 /* drop tctx and req identity references, if needed */
1476 if (tctx->identity != &tctx->__identity &&
1477 refcount_dec_and_test(&tctx->identity->count))
1478 kfree(tctx->identity);
1479 if (req->work.identity != &tctx->__identity &&
1480 refcount_dec_and_test(&req->work.identity->count))
1481 kfree(req->work.identity);
1482
1483 req->work.identity = id;
1484 tctx->identity = id;
1485 return true;
1486 }
1487
1488 static void io_req_track_inflight(struct io_kiocb *req)
1489 {
1490 struct io_ring_ctx *ctx = req->ctx;
1491
1492 if (!(req->flags & REQ_F_INFLIGHT)) {
1493 io_req_init_async(req);
1494 req->flags |= REQ_F_INFLIGHT;
1495
1496 spin_lock_irq(&ctx->inflight_lock);
1497 list_add(&req->inflight_entry, &ctx->inflight_list);
1498 spin_unlock_irq(&ctx->inflight_lock);
1499 }
1500 }
1501
1502 static bool io_grab_identity(struct io_kiocb *req)
1503 {
1504 const struct io_op_def *def = &io_op_defs[req->opcode];
1505 struct io_identity *id = req->work.identity;
1506
1507 if (def->work_flags & IO_WQ_WORK_FSIZE) {
1508 if (id->fsize != rlimit(RLIMIT_FSIZE))
1509 return false;
1510 req->work.flags |= IO_WQ_WORK_FSIZE;
1511 }
1512 #ifdef CONFIG_BLK_CGROUP
1513 if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
1514 (def->work_flags & IO_WQ_WORK_BLKCG)) {
1515 rcu_read_lock();
1516 if (id->blkcg_css != blkcg_css()) {
1517 rcu_read_unlock();
1518 return false;
1519 }
1520 /*
1521 * This should be rare: either the cgroup is dying or the task
1522 * is moving cgroups. Just punt to root for the handful of ios.
1523 */
1524 if (css_tryget_online(id->blkcg_css))
1525 req->work.flags |= IO_WQ_WORK_BLKCG;
1526 rcu_read_unlock();
1527 }
1528 #endif
1529 if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
1530 if (id->creds != current_cred())
1531 return false;
1532 get_cred(id->creds);
1533 req->work.flags |= IO_WQ_WORK_CREDS;
1534 }
1535 #ifdef CONFIG_AUDIT
1536 if (!uid_eq(current->loginuid, id->loginuid) ||
1537 current->sessionid != id->sessionid)
1538 return false;
1539 #endif
1540 if (!(req->work.flags & IO_WQ_WORK_FS) &&
1541 (def->work_flags & IO_WQ_WORK_FS)) {
1542 if (current->fs != id->fs)
1543 return false;
1544 spin_lock(&id->fs->lock);
1545 if (!id->fs->in_exec) {
1546 id->fs->users++;
1547 req->work.flags |= IO_WQ_WORK_FS;
1548 } else {
1549 req->work.flags |= IO_WQ_WORK_CANCEL;
1550 }
1551 spin_unlock(&id->fs->lock);
1552 }
1553 if (!(req->work.flags & IO_WQ_WORK_FILES) &&
1554 (def->work_flags & IO_WQ_WORK_FILES) &&
1555 !(req->flags & REQ_F_NO_FILE_TABLE)) {
1556 if (id->files != current->files ||
1557 id->nsproxy != current->nsproxy)
1558 return false;
1559 atomic_inc(&id->files->count);
1560 get_nsproxy(id->nsproxy);
1561 req->work.flags |= IO_WQ_WORK_FILES;
1562 io_req_track_inflight(req);
1563 }
1564 if (!(req->work.flags & IO_WQ_WORK_MM) &&
1565 (def->work_flags & IO_WQ_WORK_MM)) {
1566 if (id->mm != current->mm)
1567 return false;
1568 mmgrab(id->mm);
1569 req->work.flags |= IO_WQ_WORK_MM;
1570 }
1571
1572 return true;
1573 }
1574
1575 static void io_prep_async_work(struct io_kiocb *req)
1576 {
1577 const struct io_op_def *def = &io_op_defs[req->opcode];
1578 struct io_ring_ctx *ctx = req->ctx;
1579
1580 io_req_init_async(req);
1581
1582 if (req->flags & REQ_F_FORCE_ASYNC)
1583 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1584
1585 if (req->flags & REQ_F_ISREG) {
1586 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1587 io_wq_hash_work(&req->work, file_inode(req->file));
1588 } else {
1589 if (def->unbound_nonreg_file)
1590 req->work.flags |= IO_WQ_WORK_UNBOUND;
1591 }
1592
1593 /* if we fail grabbing identity, we must COW, regrab, and retry */
1594 if (io_grab_identity(req))
1595 return;
1596
1597 if (!io_identity_cow(req))
1598 return;
1599
1600 /* can't fail at this point */
1601 if (!io_grab_identity(req))
1602 WARN_ON(1);
1603 }
1604
1605 static void io_prep_async_link(struct io_kiocb *req)
1606 {
1607 struct io_kiocb *cur;
1608
1609 io_for_each_link(cur, req)
1610 io_prep_async_work(cur);
1611 }
1612
1613 static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
1614 {
1615 struct io_ring_ctx *ctx = req->ctx;
1616 struct io_kiocb *link = io_prep_linked_timeout(req);
1617
1618 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1619 &req->work, req->flags);
1620 io_wq_enqueue(ctx->io_wq, &req->work);
1621 return link;
1622 }
1623
1624 static void io_queue_async_work(struct io_kiocb *req)
1625 {
1626 struct io_kiocb *link;
1627
1628 /* init ->work of the whole link before punting */
1629 io_prep_async_link(req);
1630 link = __io_queue_async_work(req);
1631
1632 if (link)
1633 io_queue_linked_timeout(link);
1634 }
1635
1636 static void io_kill_timeout(struct io_kiocb *req)
1637 {
1638 struct io_timeout_data *io = req->async_data;
1639 int ret;
1640
1641 ret = hrtimer_try_to_cancel(&io->timer);
1642 if (ret != -1) {
1643 atomic_set(&req->ctx->cq_timeouts,
1644 atomic_read(&req->ctx->cq_timeouts) + 1);
1645 list_del_init(&req->timeout.list);
1646 io_cqring_fill_event(req, 0);
1647 io_put_req_deferred(req, 1);
1648 }
1649 }
1650
1651 /*
1652 * Returns true if we found and killed one or more timeouts
1653 */
1654 static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
1655 struct files_struct *files)
1656 {
1657 struct io_kiocb *req, *tmp;
1658 int canceled = 0;
1659
1660 spin_lock_irq(&ctx->completion_lock);
1661 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
1662 if (io_match_task(req, tsk, files)) {
1663 io_kill_timeout(req);
1664 canceled++;
1665 }
1666 }
1667 spin_unlock_irq(&ctx->completion_lock);
1668 return canceled != 0;
1669 }
1670
1671 static void __io_queue_deferred(struct io_ring_ctx *ctx)
1672 {
1673 do {
1674 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1675 struct io_defer_entry, list);
1676
1677 if (req_need_defer(de->req, de->seq))
1678 break;
1679 list_del_init(&de->list);
1680 io_req_task_queue(de->req);
1681 kfree(de);
1682 } while (!list_empty(&ctx->defer_list));
1683 }
1684
1685 static void io_flush_timeouts(struct io_ring_ctx *ctx)
1686 {
1687 u32 seq;
1688
1689 if (list_empty(&ctx->timeout_list))
1690 return;
1691
1692 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1693
1694 do {
1695 u32 events_needed, events_got;
1696 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
1697 struct io_kiocb, timeout.list);
1698
1699 if (io_is_timeout_noseq(req))
1700 break;
1701
1702 /*
1703 * Since seq can easily wrap around over time, subtract
1704 * the last seq at which timeouts were flushed before comparing.
1705 * Assuming not more than 2^31-1 events have happened since,
1706 * these subtractions won't have wrapped, so we can check if
1707 * target is in [last_seq, current_seq] by comparing the two.
1708 */
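/*
 * Worked example (illustrative numbers only): with cq_last_tm_flush at
 * 0xfffffffe, target_seq == 1 and seq == 3, events_needed == 3 and
 * events_got == 5, so the comparison still sees the target inside the
 * window despite the raw counters having wrapped.
 */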
1709 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1710 events_got = seq - ctx->cq_last_tm_flush;
1711 if (events_got < events_needed)
1712 break;
1713
1714 list_del_init(&req->timeout.list);
1715 io_kill_timeout(req);
1716 } while (!list_empty(&ctx->timeout_list));
1717
1718 ctx->cq_last_tm_flush = seq;
1719 }
1720
1721 static void io_commit_cqring(struct io_ring_ctx *ctx)
1722 {
1723 io_flush_timeouts(ctx);
1724
1725 /* order cqe stores with ring update */
1726 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
1727
1728 if (unlikely(!list_empty(&ctx->defer_list)))
1729 __io_queue_deferred(ctx);
1730 }
1731
1732 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1733 {
1734 struct io_rings *r = ctx->rings;
1735
1736 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1737 }
1738
1739 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1740 {
1741 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1742 }
1743
1744 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1745 {
1746 struct io_rings *rings = ctx->rings;
1747 unsigned tail;
1748
1749 /*
1750 * writes to the cq entry need to come after reading head; the
1751 * control dependency is enough as we're using WRITE_ONCE to
1752 * fill the cq entry
1753 */
1754 if (__io_cqring_events(ctx) == rings->cq_ring_entries)
1755 return NULL;
1756
1757 tail = ctx->cached_cq_tail++;
1758 return &rings->cqes[tail & ctx->cq_mask];
1759 }
1760
1761 static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1762 {
1763 if (!ctx->cq_ev_fd)
1764 return false;
1765 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1766 return false;
1767 if (!ctx->eventfd_async)
1768 return true;
1769 return io_wq_current_is_worker();
1770 }
1771
1772 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1773 {
1774 /* see waitqueue_active() comment */
1775 smp_mb();
1776
1777 if (waitqueue_active(&ctx->wait))
1778 wake_up(&ctx->wait);
1779 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1780 wake_up(&ctx->sq_data->wait);
1781 if (io_should_trigger_evfd(ctx))
1782 eventfd_signal(ctx->cq_ev_fd, 1);
1783 if (waitqueue_active(&ctx->cq_wait)) {
1784 wake_up_interruptible(&ctx->cq_wait);
1785 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1786 }
1787 }
1788
1789 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1790 {
1791 /* see waitqueue_active() comment */
1792 smp_mb();
1793
1794 if (ctx->flags & IORING_SETUP_SQPOLL) {
1795 if (waitqueue_active(&ctx->wait))
1796 wake_up(&ctx->wait);
1797 }
1798 if (io_should_trigger_evfd(ctx))
1799 eventfd_signal(ctx->cq_ev_fd, 1);
1800 if (waitqueue_active(&ctx->cq_wait)) {
1801 wake_up_interruptible(&ctx->cq_wait);
1802 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1803 }
1804 }
1805
1806 /* Returns true if there are no backlogged entries after the flush */
1807 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1808 struct task_struct *tsk,
1809 struct files_struct *files)
1810 {
1811 struct io_rings *rings = ctx->rings;
1812 struct io_kiocb *req, *tmp;
1813 struct io_uring_cqe *cqe;
1814 unsigned long flags;
1815 bool all_flushed, posted;
1816 LIST_HEAD(list);
1817
1818 if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
1819 return false;
1820
1821 posted = false;
1822 spin_lock_irqsave(&ctx->completion_lock, flags);
1823 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
1824 if (!io_match_task(req, tsk, files))
1825 continue;
1826
1827 cqe = io_get_cqring(ctx);
1828 if (!cqe && !force)
1829 break;
1830
1831 list_move(&req->compl.list, &list);
1832 if (cqe) {
1833 WRITE_ONCE(cqe->user_data, req->user_data);
1834 WRITE_ONCE(cqe->res, req->result);
1835 WRITE_ONCE(cqe->flags, req->compl.cflags);
1836 } else {
1837 ctx->cached_cq_overflow++;
1838 WRITE_ONCE(ctx->rings->cq_overflow,
1839 ctx->cached_cq_overflow);
1840 }
1841 posted = true;
1842 }
1843
1844 all_flushed = list_empty(&ctx->cq_overflow_list);
1845 if (all_flushed) {
1846 clear_bit(0, &ctx->sq_check_overflow);
1847 clear_bit(0, &ctx->cq_check_overflow);
1848 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1849 }
1850
1851 if (posted)
1852 io_commit_cqring(ctx);
1853 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1854 if (posted)
1855 io_cqring_ev_posted(ctx);
1856
1857 while (!list_empty(&list)) {
1858 req = list_first_entry(&list, struct io_kiocb, compl.list);
1859 list_del(&req->compl.list);
1860 io_put_req(req);
1861 }
1862
1863 return all_flushed;
1864 }
1865
1866 static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1867 struct task_struct *tsk,
1868 struct files_struct *files)
1869 {
1870 if (test_bit(0, &ctx->cq_check_overflow)) {
1871 /* iopoll syncs against uring_lock, not completion_lock */
1872 if (ctx->flags & IORING_SETUP_IOPOLL)
1873 mutex_lock(&ctx->uring_lock);
1874 __io_cqring_overflow_flush(ctx, force, tsk, files);
1875 if (ctx->flags & IORING_SETUP_IOPOLL)
1876 mutex_unlock(&ctx->uring_lock);
1877 }
1878 }
1879
1880 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
1881 {
1882 struct io_ring_ctx *ctx = req->ctx;
1883 struct io_uring_cqe *cqe;
1884
1885 trace_io_uring_complete(ctx, req->user_data, res);
1886
1887 /*
1888 * If we can't get a cq entry, userspace overflowed the
1889 * submission (by quite a lot). Increment the overflow count in
1890 * the ring.
1891 */
1892 cqe = io_get_cqring(ctx);
1893 if (likely(cqe)) {
1894 WRITE_ONCE(cqe->user_data, req->user_data);
1895 WRITE_ONCE(cqe->res, res);
1896 WRITE_ONCE(cqe->flags, cflags);
1897 } else if (ctx->cq_overflow_flushed ||
1898 atomic_read(&req->task->io_uring->in_idle)) {
1899 /*
1900 * If we're in ring overflow flush mode, or in task cancel mode,
1901 * then we cannot store the request for later flushing; we need
1902 * to drop it on the floor.
1903 */
1904 ctx->cached_cq_overflow++;
1905 WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
1906 } else {
1907 if (list_empty(&ctx->cq_overflow_list)) {
1908 set_bit(0, &ctx->sq_check_overflow);
1909 set_bit(0, &ctx->cq_check_overflow);
1910 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
1911 }
1912 io_clean_op(req);
1913 req->result = res;
1914 req->compl.cflags = cflags;
1915 refcount_inc(&req->refs);
1916 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
1917 }
1918 }
1919
1920 static void io_cqring_fill_event(struct io_kiocb *req, long res)
1921 {
1922 __io_cqring_fill_event(req, res, 0);
1923 }
1924
1925 static inline void io_req_complete_post(struct io_kiocb *req, long res,
1926 unsigned int cflags)
1927 {
1928 struct io_ring_ctx *ctx = req->ctx;
1929 unsigned long flags;
1930
1931 spin_lock_irqsave(&ctx->completion_lock, flags);
1932 __io_cqring_fill_event(req, res, cflags);
1933 io_commit_cqring(ctx);
1934 /*
1935 * If we're the last reference to this request, add to our locked
1936 * free_list cache.
1937 */
1938 if (refcount_dec_and_test(&req->refs)) {
1939 struct io_comp_state *cs = &ctx->submit_state.comp;
1940
1941 io_dismantle_req(req);
1942 io_put_task(req->task, 1);
1943 list_add(&req->compl.list, &cs->locked_free_list);
1944 cs->locked_free_nr++;
1945 } else
1946 req = NULL;
1947 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1948
1949 io_cqring_ev_posted(ctx);
1950 if (req) {
1951 io_queue_next(req);
1952 percpu_ref_put(&ctx->refs);
1953 }
1954 }
1955
1956 static void io_req_complete_state(struct io_kiocb *req, long res,
1957 unsigned int cflags)
1958 {
1959 io_clean_op(req);
1960 req->result = res;
1961 req->compl.cflags = cflags;
1962 req->flags |= REQ_F_COMPLETE_INLINE;
1963 }
1964
1965 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1966 long res, unsigned cflags)
1967 {
1968 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1969 io_req_complete_state(req, res, cflags);
1970 else
1971 io_req_complete_post(req, res, cflags);
1972 }
1973
1974 static inline void io_req_complete(struct io_kiocb *req, long res)
1975 {
1976 __io_req_complete(req, 0, res, 0);
1977 }
1978
1979 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
1980 {
1981 struct io_submit_state *state = &ctx->submit_state;
1982 struct io_comp_state *cs = &state->comp;
1983 struct io_kiocb *req = NULL;
1984
1985 /*
1986 * If we have more than a batch's worth of requests in our IRQ side
1987 * locked cache, grab the lock and move them over to our submission
1988 * side cache.
1989 */
1990 if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
1991 spin_lock_irq(&ctx->completion_lock);
1992 list_splice_init(&cs->locked_free_list, &cs->free_list);
1993 cs->locked_free_nr = 0;
1994 spin_unlock_irq(&ctx->completion_lock);
1995 }
1996
1997 while (!list_empty(&cs->free_list)) {
1998 req = list_first_entry(&cs->free_list, struct io_kiocb,
1999 compl.list);
2000 list_del(&req->compl.list);
2001 state->reqs[state->free_reqs++] = req;
2002 if (state->free_reqs == ARRAY_SIZE(state->reqs))
2003 break;
2004 }
2005
2006 return req != NULL;
2007 }
2008
2009 static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
2010 {
2011 struct io_submit_state *state = &ctx->submit_state;
2012
2013 BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
2014
2015 if (!state->free_reqs) {
2016 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
2017 int ret;
2018
2019 if (io_flush_cached_reqs(ctx))
2020 goto got_req;
2021
2022 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
2023 state->reqs);
2024
2025 /*
2026 * Bulk alloc is all-or-nothing. If we fail to get a batch,
2027 * retry single alloc to be on the safe side.
2028 */
2029 if (unlikely(ret <= 0)) {
2030 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
2031 if (!state->reqs[0])
2032 return NULL;
2033 ret = 1;
2034 }
2035 state->free_reqs = ret;
2036 }
2037 got_req:
2038 state->free_reqs--;
2039 return state->reqs[state->free_reqs];
2040 }
2041
2042 static inline void io_put_file(struct io_kiocb *req, struct file *file,
2043 bool fixed)
2044 {
2045 if (!fixed)
2046 fput(file);
2047 }
2048
2049 static void io_dismantle_req(struct io_kiocb *req)
2050 {
2051 io_clean_op(req);
2052
2053 if (req->async_data)
2054 kfree(req->async_data);
2055 if (req->file)
2056 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
2057 if (req->fixed_rsrc_refs)
2058 percpu_ref_put(req->fixed_rsrc_refs);
2059 io_req_clean_work(req);
2060 }
2061
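/*
 * Drop @nr inflight-request references held on @task: update the per-task
 * inflight counter, wake anyone waiting for the task to go idle, and release
 * the task_struct references those requests held.
 */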
2062 static inline void io_put_task(struct task_struct *task, int nr)
2063 {
2064 struct io_uring_task *tctx = task->io_uring;
2065
2066 percpu_counter_sub(&tctx->inflight, nr);
2067 if (unlikely(atomic_read(&tctx->in_idle)))
2068 wake_up(&tctx->wait);
2069 put_task_struct_many(task, nr);
2070 }
2071
2072 static void __io_free_req(struct io_kiocb *req)
2073 {
2074 struct io_ring_ctx *ctx = req->ctx;
2075
2076 io_dismantle_req(req);
2077 io_put_task(req->task, 1);
2078
2079 kmem_cache_free(req_cachep, req);
2080 percpu_ref_put(&ctx->refs);
2081 }
2082
2083 static inline void io_remove_next_linked(struct io_kiocb *req)
2084 {
2085 struct io_kiocb *nxt = req->link;
2086
2087 req->link = nxt->link;
2088 nxt->link = NULL;
2089 }
2090
2091 static void io_kill_linked_timeout(struct io_kiocb *req)
2092 {
2093 struct io_ring_ctx *ctx = req->ctx;
2094 struct io_kiocb *link;
2095 bool cancelled = false;
2096 unsigned long flags;
2097
2098 spin_lock_irqsave(&ctx->completion_lock, flags);
2099 link = req->link;
2100
2101 /*
2102 * Can happen if a linked timeout fired and the link had been like
2103 * req -> link t-out -> link t-out [-> ...]
2104 */
2105 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
2106 struct io_timeout_data *io = link->async_data;
2107 int ret;
2108
2109 io_remove_next_linked(req);
2110 link->timeout.head = NULL;
2111 ret = hrtimer_try_to_cancel(&io->timer);
2112 if (ret != -1) {
2113 io_cqring_fill_event(link, -ECANCELED);
2114 io_commit_cqring(ctx);
2115 cancelled = true;
2116 }
2117 }
2118 req->flags &= ~REQ_F_LINK_TIMEOUT;
2119 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2120
2121 if (cancelled) {
2122 io_cqring_ev_posted(ctx);
2123 io_put_req(link);
2124 }
2125 }
2126
2128 static void io_fail_links(struct io_kiocb *req)
2129 {
2130 struct io_kiocb *link, *nxt;
2131 struct io_ring_ctx *ctx = req->ctx;
2132 unsigned long flags;
2133
2134 spin_lock_irqsave(&ctx->completion_lock, flags);
2135 link = req->link;
2136 req->link = NULL;
2137
2138 while (link) {
2139 nxt = link->link;
2140 link->link = NULL;
2141
2142 trace_io_uring_fail_link(req, link);
2143 io_cqring_fill_event(link, -ECANCELED);
2144
2145 /*
2146 * It's ok to free under spinlock as they're not linked anymore,
2147 * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
2148 * work.fs->lock.
2149 */
2150 if (link->flags & REQ_F_WORK_INITIALIZED)
2151 io_put_req_deferred(link, 2);
2152 else
2153 io_double_put_req(link);
2154 link = nxt;
2155 }
2156 io_commit_cqring(ctx);
2157 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2158
2159 io_cqring_ev_posted(ctx);
2160 }
2161
2162 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
2163 {
2164 if (req->flags & REQ_F_LINK_TIMEOUT)
2165 io_kill_linked_timeout(req);
2166
2167 /*
2168 * If LINK is set, we have dependent requests in this chain. If we
2169 * didn't fail this request, queue the first one up, moving any other
2170 * dependencies to the next request. In case of failure, fail the rest
2171 * of the chain.
2172 */
2173 if (likely(!(req->flags & REQ_F_FAIL_LINK))) {
2174 struct io_kiocb *nxt = req->link;
2175
2176 req->link = NULL;
2177 return nxt;
2178 }
2179 io_fail_links(req);
2180 return NULL;
2181 }
2182
2183 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
2184 {
2185 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
2186 return NULL;
2187 return __io_req_find_next(req);
2188 }
2189
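/*
 * Run one batch of task_work queued on this io_uring task context: splice
 * the list under ->task_lock, invoke each request's callback, and flush any
 * deferred completions per ring (taking ->uring_lock) whenever the ctx
 * changes and once at the end. Returns true if any work was processed.
 */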
2190 static bool __tctx_task_work(struct io_uring_task *tctx)
2191 {
2192 struct io_ring_ctx *ctx = NULL;
2193 struct io_wq_work_list list;
2194 struct io_wq_work_node *node;
2195
2196 if (wq_list_empty(&tctx->task_list))
2197 return false;
2198
2199 spin_lock_irq(&tctx->task_lock);
2200 list = tctx->task_list;
2201 INIT_WQ_LIST(&tctx->task_list);
2202 spin_unlock_irq(&tctx->task_lock);
2203
2204 node = list.first;
2205 while (node) {
2206 struct io_wq_work_node *next = node->next;
2207 struct io_ring_ctx *this_ctx;
2208 struct io_kiocb *req;
2209
2210 req = container_of(node, struct io_kiocb, io_task_work.node);
2211 this_ctx = req->ctx;
2212 req->task_work.func(&req->task_work);
2213 node = next;
2214
2215 if (!ctx) {
2216 ctx = this_ctx;
2217 } else if (ctx != this_ctx) {
2218 mutex_lock(&ctx->uring_lock);
2219 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
2220 mutex_unlock(&ctx->uring_lock);
2221 ctx = this_ctx;
2222 }
2223 }
2224
2225 if (ctx && ctx->submit_state.comp.nr) {
2226 mutex_lock(&ctx->uring_lock);
2227 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
2228 mutex_unlock(&ctx->uring_lock);
2229 }
2230
2231 return list.first != NULL;
2232 }
2233
2234 static void tctx_task_work(struct callback_head *cb)
2235 {
2236 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
2237
2238 while (__tctx_task_work(tctx))
2239 cond_resched();
2240
2241 clear_bit(0, &tctx->task_state);
2242 }
2243
2244 static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
2245 enum task_work_notify_mode notify)
2246 {
2247 struct io_uring_task *tctx = tsk->io_uring;
2248 struct io_wq_work_node *node, *prev;
2249 unsigned long flags;
2250 int ret;
2251
2252 WARN_ON_ONCE(!tctx);
2253
2254 spin_lock_irqsave(&tctx->task_lock, flags);
2255 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
2256 spin_unlock_irqrestore(&tctx->task_lock, flags);
2257
2258 /* task_work already pending, we're done */
2259 if (test_bit(0, &tctx->task_state) ||
2260 test_and_set_bit(0, &tctx->task_state))
2261 return 0;
2262
2263 if (!task_work_add(tsk, &tctx->task_work, notify))
2264 return 0;
2265
2266 /*
2267 * Slow path - we failed to add the task_work; find and delete it. If
2268 * the work is not in the list, it already ran and we're fine.
2269 */
2270 ret = 0;
2271 spin_lock_irqsave(&tctx->task_lock, flags);
2272 wq_list_for_each(node, prev, &tctx->task_list) {
2273 if (&req->io_task_work.node == node) {
2274 wq_list_del(&tctx->task_list, node, prev);
2275 ret = 1;
2276 break;
2277 }
2278 }
2279 spin_unlock_irqrestore(&tctx->task_lock, flags);
2280 clear_bit(0, &tctx->task_state);
2281 return ret;
2282 }
2283
2284 static int io_req_task_work_add(struct io_kiocb *req)
2285 {
2286 struct task_struct *tsk = req->task;
2287 struct io_ring_ctx *ctx = req->ctx;
2288 enum task_work_notify_mode notify;
2289 int ret;
2290
2291 if (tsk->flags & PF_EXITING)
2292 return -ESRCH;
2293
2294 /*
2295 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2296 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2297 * processing task_work. There's no reliable way to tell if TWA_RESUME
2298 * will do the job.
2299 */
2300 notify = TWA_NONE;
2301 if (!(ctx->flags & IORING_SETUP_SQPOLL))
2302 notify = TWA_SIGNAL;
2303
2304 ret = io_task_work_add(tsk, req, notify);
2305 if (!ret)
2306 wake_up_process(tsk);
2307
2308 return ret;
2309 }
2310
2311 static void io_req_task_work_add_fallback(struct io_kiocb *req,
2312 task_work_func_t cb)
2313 {
2314 struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
2315
2316 init_task_work(&req->task_work, cb);
2317 task_work_add(tsk, &req->task_work, TWA_NONE);
2318 wake_up_process(tsk);
2319 }
2320
2321 static void __io_req_task_cancel(struct io_kiocb *req, int error)
2322 {
2323 struct io_ring_ctx *ctx = req->ctx;
2324
2325 spin_lock_irq(&ctx->completion_lock);
2326 io_cqring_fill_event(req, error);
2327 io_commit_cqring(ctx);
2328 spin_unlock_irq(&ctx->completion_lock);
2329
2330 io_cqring_ev_posted(ctx);
2331 req_set_fail_links(req);
2332 io_double_put_req(req);
2333 }
2334
2335 static void io_req_task_cancel(struct callback_head *cb)
2336 {
2337 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2338 struct io_ring_ctx *ctx = req->ctx;
2339
2340 __io_req_task_cancel(req, -ECANCELED);
2341 percpu_ref_put(&ctx->refs);
2342 }
2343
2344 static void __io_req_task_submit(struct io_kiocb *req)
2345 {
2346 struct io_ring_ctx *ctx = req->ctx;
2347
2348 /* ctx stays valid until unlock, even if we drop all our ctx->refs */
2349 mutex_lock(&ctx->uring_lock);
2350 if (!ctx->sqo_dead && !(current->flags & PF_EXITING) &&
2351 !io_sq_thread_acquire_mm_files(ctx, req))
2352 __io_queue_sqe(req);
2353 else
2354 __io_req_task_cancel(req, -EFAULT);
2355 mutex_unlock(&ctx->uring_lock);
2356 }
2357
2358 static void io_req_task_submit(struct callback_head *cb)
2359 {
2360 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2361
2362 __io_req_task_submit(req);
2363 }
2364
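/*
 * Arrange for the request to be issued from task context via task_work; if
 * that fails (e.g. the task is exiting), fall back to cancelling the request
 * from the io-wq task instead.
 */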
2365 static void io_req_task_queue(struct io_kiocb *req)
2366 {
2367 int ret;
2368
2369 req->task_work.func = io_req_task_submit;
2370 ret = io_req_task_work_add(req);
2371 if (unlikely(ret)) {
2372 percpu_ref_get(&req->ctx->refs);
2373 io_req_task_work_add_fallback(req, io_req_task_cancel);
2374 }
2375 }
2376
2377 static inline void io_queue_next(struct io_kiocb *req)
2378 {
2379 struct io_kiocb *nxt = io_req_find_next(req);
2380
2381 if (nxt)
2382 io_req_task_queue(nxt);
2383 }
2384
2385 static void io_free_req(struct io_kiocb *req)
2386 {
2387 io_queue_next(req);
2388 __io_free_req(req);
2389 }
2390
2391 struct req_batch {
2392 struct task_struct *task;
2393 int task_refs;
2394 int ctx_refs;
2395 };
2396
2397 static inline void io_init_req_batch(struct req_batch *rb)
2398 {
2399 rb->task_refs = 0;
2400 rb->ctx_refs = 0;
2401 rb->task = NULL;
2402 }
2403
2404 static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2405 struct req_batch *rb)
2406 {
2407 if (rb->task)
2408 io_put_task(rb->task, rb->task_refs);
2409 if (rb->ctx_refs)
2410 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
2411 }
2412
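/*
 * Batched variant of freeing a request: queue up any linked request, account
 * the task and ctx references to drop in bulk later, then dismantle the
 * request and recycle it into the submission-side cache (or the free list if
 * the cache array is full).
 */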
2413 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2414 struct io_submit_state *state)
2415 {
2416 io_queue_next(req);
2417
2418 if (req->task != rb->task) {
2419 if (rb->task)
2420 io_put_task(rb->task, rb->task_refs);
2421 rb->task = req->task;
2422 rb->task_refs = 0;
2423 }
2424 rb->task_refs++;
2425 rb->ctx_refs++;
2426
2427 io_dismantle_req(req);
2428 if (state->free_reqs != ARRAY_SIZE(state->reqs))
2429 state->reqs[state->free_reqs++] = req;
2430 else
2431 list_add(&req->compl.list, &state->comp.free_list);
2432 }
2433
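/*
 * Flush the completion batch: post CQEs for all requests in @cs under the
 * completion lock, signal eventfd/waiters, then drop each request's
 * submission and completion references, recycling those that hit zero
 * through a req_batch.
 */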
2434 static void io_submit_flush_completions(struct io_comp_state *cs,
2435 struct io_ring_ctx *ctx)
2436 {
2437 int i, nr = cs->nr;
2438 struct io_kiocb *req;
2439 struct req_batch rb;
2440
2441 io_init_req_batch(&rb);
2442 spin_lock_irq(&ctx->completion_lock);
2443 for (i = 0; i < nr; i++) {
2444 req = cs->reqs[i];
2445 __io_cqring_fill_event(req, req->result, req->compl.cflags);
2446 }
2447 io_commit_cqring(ctx);
2448 spin_unlock_irq(&ctx->completion_lock);
2449
2450 io_cqring_ev_posted(ctx);
2451 for (i = 0; i < nr; i++) {
2452 req = cs->reqs[i];
2453
2454 /* submission and completion refs */
2455 if (refcount_sub_and_test(2, &req->refs))
2456 io_req_free_batch(&rb, req, &ctx->submit_state);
2457 }
2458
2459 io_req_free_batch_finish(ctx, &rb);
2460 cs->nr = 0;
2461 }
2462
2463 /*
2464 * Drop a reference to the request; if it was the last one, return the
2465 * next request in the chain (if there is one).
2466 */
2467 static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
2468 {
2469 struct io_kiocb *nxt = NULL;
2470
2471 if (refcount_dec_and_test(&req->refs)) {
2472 nxt = io_req_find_next(req);
2473 __io_free_req(req);
2474 }
2475 return nxt;
2476 }
2477
2478 static void io_put_req(struct io_kiocb *req)
2479 {
2480 if (refcount_dec_and_test(&req->refs))
2481 io_free_req(req);
2482 }
2483
2484 static void io_put_req_deferred_cb(struct callback_head *cb)
2485 {
2486 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2487
2488 io_free_req(req);
2489 }
2490
2491 static void io_free_req_deferred(struct io_kiocb *req)
2492 {
2493 int ret;
2494
2495 req->task_work.func = io_put_req_deferred_cb;
2496 ret = io_req_task_work_add(req);
2497 if (unlikely(ret))
2498 io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
2499 }
2500
2501 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2502 {
2503 if (refcount_sub_and_test(refs, &req->refs))
2504 io_free_req_deferred(req);
2505 }
2506
2507 static void io_double_put_req(struct io_kiocb *req)
2508 {
2509 /* drop both submit and complete references */
2510 if (refcount_sub_and_test(2, &req->refs))
2511 io_free_req(req);
2512 }
2513
2514 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
2515 {
2516 /* See comment at the top of this file */
2517 smp_rmb();
2518 return __io_cqring_events(ctx);
2519 }
2520
2521 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2522 {
2523 struct io_rings *rings = ctx->rings;
2524
2525 /* make sure SQ entry isn't read before tail */
2526 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2527 }
2528
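/*
 * Release a selected buffer and return the CQE flags that describe it: the
 * buffer ID goes into the upper bits (IORING_CQE_BUFFER_SHIFT) along with
 * IORING_CQE_F_BUFFER, and the kernel-side io_buffer is freed.
 */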
2529 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
2530 {
2531 unsigned int cflags;
2532
2533 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2534 cflags |= IORING_CQE_F_BUFFER;
2535 req->flags &= ~REQ_F_BUFFER_SELECTED;
2536 kfree(kbuf);
2537 return cflags;
2538 }
2539
2540 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2541 {
2542 struct io_buffer *kbuf;
2543
2544 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2545 return io_put_kbuf(req, kbuf);
2546 }
2547
2548 static inline bool io_run_task_work(void)
2549 {
2550 /*
2551 * Not safe to run on exiting task, and the task_work handling will
2552 * not add work to such a task.
2553 */
2554 if (unlikely(current->flags & PF_EXITING))
2555 return false;
2556 if (current->task_works) {
2557 __set_current_state(TASK_RUNNING);
2558 task_work_run();
2559 return true;
2560 }
2561
2562 return false;
2563 }
2564
2565 /*
2566 * Find and free completed poll iocbs
2567 */
2568 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2569 struct list_head *done)
2570 {
2571 struct req_batch rb;
2572 struct io_kiocb *req;
2573
2574 /* order with ->result store in io_complete_rw_iopoll() */
2575 smp_rmb();
2576
2577 io_init_req_batch(&rb);
2578 while (!list_empty(done)) {
2579 int cflags = 0;
2580
2581 req = list_first_entry(done, struct io_kiocb, inflight_entry);
2582 list_del(&req->inflight_entry);
2583
2584 if (READ_ONCE(req->result) == -EAGAIN) {
2585 req->iopoll_completed = 0;
2586 if (io_rw_reissue(req))
2587 continue;
2588 }
2589
2590 if (req->flags & REQ_F_BUFFER_SELECTED)
2591 cflags = io_put_rw_kbuf(req);
2592
2593 __io_cqring_fill_event(req, req->result, cflags);
2594 (*nr_events)++;
2595
2596 if (refcount_dec_and_test(&req->refs))
2597 io_req_free_batch(&rb, req, &ctx->submit_state);
2598 }
2599
2600 io_commit_cqring(ctx);
2601 io_cqring_ev_posted_iopoll(ctx);
2602 io_req_free_batch_finish(ctx, &rb);
2603 }
2604
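/*
 * One pass over the iopoll list: move requests that have already completed
 * to a local done list, poll the rest via ->iopoll (optionally spinning),
 * and post completions for everything that finished. Returns a negative
 * error from ->iopoll, otherwise 0.
 */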
2605 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2606 long min)
2607 {
2608 struct io_kiocb *req, *tmp;
2609 LIST_HEAD(done);
2610 bool spin;
2611 int ret;
2612
2613 /*
2614 * Only spin for completions if we don't have multiple devices hanging
2615 * off our complete list, and we're under the requested amount.
2616 */
2617 spin = !ctx->poll_multi_file && *nr_events < min;
2618
2619 ret = 0;
2620 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
2621 struct kiocb *kiocb = &req->rw.kiocb;
2622
2623 /*
2624 * Move completed and retryable entries to our local lists.
2625 * If we find a request that requires polling, break out
2626 * and complete those lists first, if we have entries there.
2627 */
2628 if (READ_ONCE(req->iopoll_completed)) {
2629 list_move_tail(&req->inflight_entry, &done);
2630 continue;
2631 }
2632 if (!list_empty(&done))
2633 break;
2634
2635 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2636 if (ret < 0)
2637 break;
2638
2639 /* iopoll may have completed current req */
2640 if (READ_ONCE(req->iopoll_completed))
2641 list_move_tail(&req->inflight_entry, &done);
2642
2643 if (ret && spin)
2644 spin = false;
2645 ret = 0;
2646 }
2647
2648 if (!list_empty(&done))
2649 io_iopoll_complete(ctx, nr_events, &done);
2650
2651 return ret;
2652 }
2653
2654 /*
2655 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
2656 * non-spinning poll check - we'll still enter the driver poll loop, but only
2657 * as a non-spinning completion check.
2658 */
2659 static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2660 long min)
2661 {
2662 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
2663 int ret;
2664
2665 ret = io_do_iopoll(ctx, nr_events, min);
2666 if (ret < 0)
2667 return ret;
2668 if (*nr_events >= min)
2669 return 0;
2670 }
2671
2672 return 1;
2673 }
2674
2675 /*
2676 * We can't just wait for polled events to come to us, we have to actively
2677 * find and complete them.
2678 */
2679 static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
2680 {
2681 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2682 return;
2683
2684 mutex_lock(&ctx->uring_lock);
2685 while (!list_empty(&ctx->iopoll_list)) {
2686 unsigned int nr_events = 0;
2687
2688 io_do_iopoll(ctx, &nr_events, 0);
2689
2690 /* let it sleep and repeat later if can't complete a request */
2691 if (nr_events == 0)
2692 break;
2693 /*
2694 * Ensure we allow local-to-the-CPU processing to take place; in this
2695 * case we need to ensure that we reap all events. Also let task_work,
2696 * etc. make progress by releasing the mutex.
2697 */
2698 if (need_resched()) {
2699 mutex_unlock(&ctx->uring_lock);
2700 cond_resched();
2701 mutex_lock(&ctx->uring_lock);
2702 }
2703 }
2704 mutex_unlock(&ctx->uring_lock);
2705 }
2706
2707 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
2708 {
2709 unsigned int nr_events = 0;
2710 int iters = 0, ret = 0;
2711
2712 /*
2713 * We disallow the app entering submit/complete with polling, but we
2714 * still need to lock the ring to prevent racing with polled issue
2715 * that got punted to a workqueue.
2716 */
2717 mutex_lock(&ctx->uring_lock);
2718 do {
2719 /*
2720 * Don't enter poll loop if we already have events pending.
2721 * If we do, we can potentially be spinning for commands that
2722 * already triggered a CQE (e.g. in error).
2723 */
2724 if (test_bit(0, &ctx->cq_check_overflow))
2725 __io_cqring_overflow_flush(ctx, false, NULL, NULL);
2726 if (io_cqring_events(ctx))
2727 break;
2728
2729 /*
2730 * If a submit got punted to a workqueue, we can have the
2731 * application entering polling for a command before it gets
2732 * issued. That app will hold the uring_lock for the duration
2733 * of the poll right here, so we need to take a breather every
2734 * now and then to ensure that the issue has a chance to add
2735 * the poll to the issued list. Otherwise we can spin here
2736 * forever, while the workqueue is stuck trying to acquire the
2737 * very same mutex.
2738 */
2739 if (!(++iters & 7)) {
2740 mutex_unlock(&ctx->uring_lock);
2741 io_run_task_work();
2742 mutex_lock(&ctx->uring_lock);
2743 }
2744
2745 ret = io_iopoll_getevents(ctx, &nr_events, min);
2746 if (ret <= 0)
2747 break;
2748 ret = 0;
2749 } while (min && !nr_events && !need_resched());
2750
2751 mutex_unlock(&ctx->uring_lock);
2752 return ret;
2753 }
2754
2755 static void kiocb_end_write(struct io_kiocb *req)
2756 {
2757 /*
2758 * Tell lockdep we inherited freeze protection from submission
2759 * thread.
2760 */
2761 if (req->flags & REQ_F_ISREG) {
2762 struct inode *inode = file_inode(req->file);
2763
2764 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2765 }
2766 file_end_write(req->file);
2767 }
2768
2769 #ifdef CONFIG_BLOCK
2770 static bool io_resubmit_prep(struct io_kiocb *req)
2771 {
2772 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
2773 int rw, ret;
2774 struct iov_iter iter;
2775
2776 /* already prepared */
2777 if (req->async_data)
2778 return true;
2779
2780 switch (req->opcode) {
2781 case IORING_OP_READV:
2782 case IORING_OP_READ_FIXED:
2783 case IORING_OP_READ:
2784 rw = READ;
2785 break;
2786 case IORING_OP_WRITEV:
2787 case IORING_OP_WRITE_FIXED:
2788 case IORING_OP_WRITE:
2789 rw = WRITE;
2790 break;
2791 default:
2792 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2793 req->opcode);
2794 return false;
2795 }
2796
2797 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2798 if (ret < 0)
2799 return false;
2800 return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
2801 }
2802 #endif
2803
2804 static bool io_rw_reissue(struct io_kiocb *req)
2805 {
2806 #ifdef CONFIG_BLOCK
2807 umode_t mode = file_inode(req->file)->i_mode;
2808 int ret;
2809
2810 if (!S_ISBLK(mode) && !S_ISREG(mode))
2811 return false;
2812 if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
2813 return false;
2814
2815 lockdep_assert_held(&req->ctx->uring_lock);
2816
2817 ret = io_sq_thread_acquire_mm_files(req->ctx, req);
2818
2819 if (!ret && io_resubmit_prep(req)) {
2820 refcount_inc(&req->refs);
2821 io_queue_async_work(req);
2822 return true;
2823 }
2824 req_set_fail_links(req);
2825 #endif
2826 return false;
2827 }
2828
2829 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2830 unsigned int issue_flags)
2831 {
2832 int cflags = 0;
2833
2834 if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
2835 return;
2836 if (res != req->result)
2837 req_set_fail_links(req);
2838
2839 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2840 kiocb_end_write(req);
2841 if (req->flags & REQ_F_BUFFER_SELECTED)
2842 cflags = io_put_rw_kbuf(req);
2843 __io_req_complete(req, issue_flags, res, cflags);
2844 }
2845
2846 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2847 {
2848 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2849
2850 __io_complete_rw(req, res, res2, 0);
2851 }
2852
2853 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2854 {
2855 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2856
2857 if (kiocb->ki_flags & IOCB_WRITE)
2858 kiocb_end_write(req);
2859
2860 if (res != -EAGAIN && res != req->result)
2861 req_set_fail_links(req);
2862
2863 WRITE_ONCE(req->result, res);
2864 /* order with io_poll_complete() checking ->result */
2865 smp_wmb();
2866 WRITE_ONCE(req->iopoll_completed, 1);
2867 }
2868
2869 /*
2870 * After the iocb has been issued, it's safe to be found on the poll list.
2871 * Adding the kiocb to the list AFTER submission ensures that we don't
2872 * find it from an io_iopoll_getevents() thread before the issuer is done
2873 * accessing the kiocb cookie.
2874 */
2875 static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
2876 {
2877 struct io_ring_ctx *ctx = req->ctx;
2878
2879 /*
2880 * Track whether we have multiple files in our lists. This will impact
2881 * how we do polling later on: we won't spin if we're on potentially
2882 * different devices.
2883 */
2884 if (list_empty(&ctx->iopoll_list)) {
2885 ctx->poll_multi_file = false;
2886 } else if (!ctx->poll_multi_file) {
2887 struct io_kiocb *list_req;
2888
2889 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
2890 inflight_entry);
2891 if (list_req->file != req->file)
2892 ctx->poll_multi_file = true;
2893 }
2894
2895 /*
2896 * For fast devices, IO may have already completed. If it has, add
2897 * it to the front so we find it first.
2898 */
2899 if (READ_ONCE(req->iopoll_completed))
2900 list_add(&req->inflight_entry, &ctx->iopoll_list);
2901 else
2902 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
2903
2904 /*
2905 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
2906 * task context or in io worker task context. If current task context is
2907 * sq thread, we don't need to check whether we should wake up the sq thread.
2908 */
2909 if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
2910 wq_has_sleeper(&ctx->sq_data->wait))
2911 wake_up(&ctx->sq_data->wait);
2912 }
2913
2914 static inline void io_state_file_put(struct io_submit_state *state)
2915 {
2916 if (state->file_refs) {
2917 fput_many(state->file, state->file_refs);
2918 state->file_refs = 0;
2919 }
2920 }
2921
2922 /*
2923 * Get as many references to a file as we have IOs left in this submission,
2924 * assuming most submissions are for one file, or at least that each file
2925 * has more than one submission.
2926 */
2927 static struct file *__io_file_get(struct io_submit_state *state, int fd)
2928 {
2929 if (!state)
2930 return fget(fd);
2931
2932 if (state->file_refs) {
2933 if (state->fd == fd) {
2934 state->file_refs--;
2935 return state->file;
2936 }
2937 io_state_file_put(state);
2938 }
2939 state->file = fget_many(fd, state->ios_left);
2940 if (unlikely(!state->file))
2941 return NULL;
2942
2943 state->fd = fd;
2944 state->file_refs = state->ios_left - 1;
2945 return state->file;
2946 }
2947
2948 static bool io_bdev_nowait(struct block_device *bdev)
2949 {
2950 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
2951 }
2952
2953 /*
2954 * If we tracked the file through the SCM inflight mechanism, we could support
2955 * any file. For now, just ensure that anything potentially problematic is done
2956 * inline.
2957 */
2958 static bool io_file_supports_async(struct file *file, int rw)
2959 {
2960 umode_t mode = file_inode(file)->i_mode;
2961
2962 if (S_ISBLK(mode)) {
2963 if (IS_ENABLED(CONFIG_BLOCK) &&
2964 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
2965 return true;
2966 return false;
2967 }
2968 if (S_ISCHR(mode) || S_ISSOCK(mode))
2969 return true;
2970 if (S_ISREG(mode)) {
2971 if (IS_ENABLED(CONFIG_BLOCK) &&
2972 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2973 file->f_op != &io_uring_fops)
2974 return true;
2975 return false;
2976 }
2977
2978 /* any ->read/write should understand O_NONBLOCK */
2979 if (file->f_flags & O_NONBLOCK)
2980 return true;
2981
2982 if (!(file->f_mode & FMODE_NOWAIT))
2983 return false;
2984
2985 if (rw == READ)
2986 return file->f_op->read_iter != NULL;
2987
2988 return file->f_op->write_iter != NULL;
2989 }
2990
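/*
 * Common read/write preparation: pull the file offset, rw flags and ioprio
 * from the SQE into the kiocb, flag O_NONBLOCK/RWF_NOWAIT requests so they
 * are never punted async, and pick the completion handler depending on
 * whether the ring is in IOPOLL mode.
 */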
2991 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2992 {
2993 struct io_ring_ctx *ctx = req->ctx;
2994 struct kiocb *kiocb = &req->rw.kiocb;
2995 struct file *file = req->file;
2996 unsigned ioprio;
2997 int ret;
2998
2999 if (S_ISREG(file_inode(file)->i_mode))
3000 req->flags |= REQ_F_ISREG;
3001
3002 kiocb->ki_pos = READ_ONCE(sqe->off);
3003 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
3004 req->flags |= REQ_F_CUR_POS;
3005 kiocb->ki_pos = file->f_pos;
3006 }
3007 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
3008 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
3009 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
3010 if (unlikely(ret))
3011 return ret;
3012
3013 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
3014 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
3015 req->flags |= REQ_F_NOWAIT;
3016
3017 ioprio = READ_ONCE(sqe->ioprio);
3018 if (ioprio) {
3019 ret = ioprio_check_cap(ioprio);
3020 if (ret)
3021 return ret;
3022
3023 kiocb->ki_ioprio = ioprio;
3024 } else
3025 kiocb->ki_ioprio = get_current_ioprio();
3026
3027 if (ctx->flags & IORING_SETUP_IOPOLL) {
3028 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
3029 !kiocb->ki_filp->f_op->iopoll)
3030 return -EOPNOTSUPP;
3031
3032 kiocb->ki_flags |= IOCB_HIPRI;
3033 kiocb->ki_complete = io_complete_rw_iopoll;
3034 req->iopoll_completed = 0;
3035 } else {
3036 if (kiocb->ki_flags & IOCB_HIPRI)
3037 return -EINVAL;
3038 kiocb->ki_complete = io_complete_rw;
3039 }
3040
3041 req->rw.addr = READ_ONCE(sqe->addr);
3042 req->rw.len = READ_ONCE(sqe->len);
3043 req->buf_index = READ_ONCE(sqe->buf_index);
3044 return 0;
3045 }
3046
3047 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
3048 {
3049 switch (ret) {
3050 case -EIOCBQUEUED:
3051 break;
3052 case -ERESTARTSYS:
3053 case -ERESTARTNOINTR:
3054 case -ERESTARTNOHAND:
3055 case -ERESTART_RESTARTBLOCK:
3056 /*
3057 * We can't just restart the syscall, since previously
3058 * submitted sqes may already be in progress. Just fail this
3059 * IO with EINTR.
3060 */
3061 ret = -EINTR;
3062 fallthrough;
3063 default:
3064 kiocb->ki_complete(kiocb, ret, 0);
3065 }
3066 }
3067
3068 static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
3069 unsigned int issue_flags)
3070 {
3071 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
3072 struct io_async_rw *io = req->async_data;
3073
3074 /* add previously done IO, if any */
3075 if (io && io->bytes_done > 0) {
3076 if (ret < 0)
3077 ret = io->bytes_done;
3078 else
3079 ret += io->bytes_done;
3080 }
3081
3082 if (req->flags & REQ_F_CUR_POS)
3083 req->file->f_pos = kiocb->ki_pos;
3084 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
3085 __io_complete_rw(req, ret, 0, issue_flags);
3086 else
3087 io_rw_done(kiocb, ret);
3088 }
3089
3090 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
3091 {
3092 struct io_ring_ctx *ctx = req->ctx;
3093 size_t len = req->rw.len;
3094 struct io_mapped_ubuf *imu;
3095 u16 index, buf_index = req->buf_index;
3096 size_t offset;
3097 u64 buf_addr;
3098
3099 if (unlikely(buf_index >= ctx->nr_user_bufs))
3100 return -EFAULT;
3101 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
3102 imu = &ctx->user_bufs[index];
3103 buf_addr = req->rw.addr;
3104
3105 /* overflow */
3106 if (buf_addr + len < buf_addr)
3107 return -EFAULT;
3108 /* not inside the mapped region */
3109 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
3110 return -EFAULT;
3111
3112 /*
3113 * May not be the start of the buffer; set the size appropriately
3114 * and advance us to the beginning.
3115 */
3116 offset = buf_addr - imu->ubuf;
3117 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
3118
3119 if (offset) {
3120 /*
3121 * Don't use iov_iter_advance() here, as it's really slow for
3122 * using the latter parts of a big fixed buffer - it iterates
3123 * over each segment manually. We can cheat a bit here, because
3124 * we know that:
3125 *
3126 * 1) it's a BVEC iter, we set it up
3127 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3128 * first and last bvec
3129 *
3130 * So just find our index, and adjust the iterator afterwards.
3131 * If the offset is within the first bvec (or the whole first
3132 * bvec), just use iov_iter_advance(). This makes it easier
3133 * since we can just skip the first segment, which may not
3134 * be PAGE_SIZE aligned.
3135 */
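		/*
		 * Illustrative example, assuming 4 KiB pages and a first bvec
		 * spanning a full page: offset == 9000 skips the 4096-byte
		 * first bvec, leaving 4904; seg_skip = 1 + (4904 >> 12) = 2,
		 * and the iterator starts 4904 & ~PAGE_MASK = 808 bytes into
		 * the third bvec.
		 */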
3136 const struct bio_vec *bvec = imu->bvec;
3137
3138 if (offset <= bvec->bv_len) {
3139 iov_iter_advance(iter, offset);
3140 } else {
3141 unsigned long seg_skip;
3142
3143 /* skip first vec */
3144 offset -= bvec->bv_len;
3145 seg_skip = 1 + (offset >> PAGE_SHIFT);
3146
3147 iter->bvec = bvec + seg_skip;
3148 iter->nr_segs -= seg_skip;
3149 iter->count -= bvec->bv_len + offset;
3150 iter->iov_offset = offset & ~PAGE_MASK;
3151 }
3152 }
3153
3154 return 0;
3155 }
3156
3157 static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3158 {
3159 if (needs_lock)
3160 mutex_unlock(&ctx->uring_lock);
3161 }
3162
3163 static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3164 {
3165 /*
3166 * "Normal" inline submissions always hold the uring_lock, since we
3167 * grab it from the system call. Same is true for the SQPOLL offload.
3168 * The only exception is when we've detached the request and issue it
3169 * from an async worker thread; grab the lock for that case.
3170 */
3171 if (needs_lock)
3172 mutex_lock(&ctx->uring_lock);
3173 }
3174
3175 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3176 int bgid, struct io_buffer *kbuf,
3177 bool needs_lock)
3178 {
3179 struct io_buffer *head;
3180
3181 if (req->flags & REQ_F_BUFFER_SELECTED)
3182 return kbuf;
3183
3184 io_ring_submit_lock(req->ctx, needs_lock);
3185
3186 lockdep_assert_held(&req->ctx->uring_lock);
3187
3188 head = idr_find(&req->ctx->io_buffer_idr, bgid);
3189 if (head) {
3190 if (!list_empty(&head->list)) {
3191 kbuf = list_last_entry(&head->list, struct io_buffer,
3192 list);
3193 list_del(&kbuf->list);
3194 } else {
3195 kbuf = head;
3196 idr_remove(&req->ctx->io_buffer_idr, bgid);
3197 }
3198 if (*len > kbuf->len)
3199 *len = kbuf->len;
3200 } else {
3201 kbuf = ERR_PTR(-ENOBUFS);
3202 }
3203
3204 io_ring_submit_unlock(req->ctx, needs_lock);
3205
3206 return kbuf;
3207 }
3208
3209 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3210 bool needs_lock)
3211 {
3212 struct io_buffer *kbuf;
3213 u16 bgid;
3214
3215 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3216 bgid = req->buf_index;
3217 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3218 if (IS_ERR(kbuf))
3219 return kbuf;
3220 req->rw.addr = (u64) (unsigned long) kbuf;
3221 req->flags |= REQ_F_BUFFER_SELECTED;
3222 return u64_to_user_ptr(kbuf->addr);
3223 }
3224
3225 #ifdef CONFIG_COMPAT
3226 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3227 bool needs_lock)
3228 {
3229 struct compat_iovec __user *uiov;
3230 compat_ssize_t clen;
3231 void __user *buf;
3232 ssize_t len;
3233
3234 uiov = u64_to_user_ptr(req->rw.addr);
3235 if (!access_ok(uiov, sizeof(*uiov)))
3236 return -EFAULT;
3237 if (__get_user(clen, &uiov->iov_len))
3238 return -EFAULT;
3239 if (clen < 0)
3240 return -EINVAL;
3241
3242 len = clen;
3243 buf = io_rw_buffer_select(req, &len, needs_lock);
3244 if (IS_ERR(buf))
3245 return PTR_ERR(buf);
3246 iov[0].iov_base = buf;
3247 iov[0].iov_len = (compat_size_t) len;
3248 return 0;
3249 }
3250 #endif
3251
3252 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3253 bool needs_lock)
3254 {
3255 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3256 void __user *buf;
3257 ssize_t len;
3258
3259 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3260 return -EFAULT;
3261
3262 len = iov[0].iov_len;
3263 if (len < 0)
3264 return -EINVAL;
3265 buf = io_rw_buffer_select(req, &len, needs_lock);
3266 if (IS_ERR(buf))
3267 return PTR_ERR(buf);
3268 iov[0].iov_base = buf;
3269 iov[0].iov_len = len;
3270 return 0;
3271 }
3272
3273 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3274 bool needs_lock)
3275 {
3276 if (req->flags & REQ_F_BUFFER_SELECTED) {
3277 struct io_buffer *kbuf;
3278
3279 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3280 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3281 iov[0].iov_len = kbuf->len;
3282 return 0;
3283 }
3284 if (req->rw.len != 1)
3285 return -EINVAL;
3286
3287 #ifdef CONFIG_COMPAT
3288 if (req->ctx->compat)
3289 return io_compat_import(req, iov, needs_lock);
3290 #endif
3291
3292 return __io_iov_buffer_select(req, iov, needs_lock);
3293 }
3294
3295 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3296 struct iov_iter *iter, bool needs_lock)
3297 {
3298 void __user *buf = u64_to_user_ptr(req->rw.addr);
3299 size_t sqe_len = req->rw.len;
3300 u8 opcode = req->opcode;
3301 ssize_t ret;
3302
3303 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
3304 *iovec = NULL;
3305 return io_import_fixed(req, rw, iter);
3306 }
3307
3308 /* buffer index only valid with fixed read/write, or buffer select */
3309 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
3310 return -EINVAL;
3311
3312 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
3313 if (req->flags & REQ_F_BUFFER_SELECT) {
3314 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
3315 if (IS_ERR(buf))
3316 return PTR_ERR(buf);
3317 req->rw.len = sqe_len;
3318 }
3319
3320 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3321 *iovec = NULL;
3322 return ret;
3323 }
3324
3325 if (req->flags & REQ_F_BUFFER_SELECT) {
3326 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3327 if (!ret)
3328 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
3329 *iovec = NULL;
3330 return ret;
3331 }
3332
3333 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3334 req->ctx->compat);
3335 }
3336
3337 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3338 {
3339 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
3340 }
3341
3342 /*
3343 * For files that don't have ->read_iter() and ->write_iter(), handle them
3344 * by looping over ->read() or ->write() manually.
3345 */
3346 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
3347 {
3348 struct kiocb *kiocb = &req->rw.kiocb;
3349 struct file *file = req->file;
3350 ssize_t ret = 0;
3351
3352 /*
3353 * Don't support polled IO through this interface, and we can't
3354 * support non-blocking either. For the latter, this just causes
3355 * the kiocb to be handled from an async context.
3356 */
3357 if (kiocb->ki_flags & IOCB_HIPRI)
3358 return -EOPNOTSUPP;
3359 if (kiocb->ki_flags & IOCB_NOWAIT)
3360 return -EAGAIN;
3361
3362 while (iov_iter_count(iter)) {
3363 struct iovec iovec;
3364 ssize_t nr;
3365
3366 if (!iov_iter_is_bvec(iter)) {
3367 iovec = iov_iter_iovec(iter);
3368 } else {
3369 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3370 iovec.iov_len = req->rw.len;
3371 }
3372
3373 if (rw == READ) {
3374 nr = file->f_op->read(file, iovec.iov_base,
3375 iovec.iov_len, io_kiocb_ppos(kiocb));
3376 } else {
3377 nr = file->f_op->write(file, iovec.iov_base,
3378 iovec.iov_len, io_kiocb_ppos(kiocb));
3379 }
3380
3381 if (nr < 0) {
3382 if (!ret)
3383 ret = nr;
3384 break;
3385 }
3386 ret += nr;
3387 if (nr != iovec.iov_len)
3388 break;
3389 req->rw.len -= nr;
3390 req->rw.addr += nr;
3391 iov_iter_advance(iter, nr);
3392 }
3393
3394 return ret;
3395 }
3396
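/*
 * Stash the iterator state in the request's async data so the IO can be
 * retried from another context later. If the iovec was the caller's on-stack
 * fast_iov, copy it into the request's own fast_iov; a heap-allocated iovec
 * is kept as-is and marked for cleanup.
 */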
3397 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3398 const struct iovec *fast_iov, struct iov_iter *iter)
3399 {
3400 struct io_async_rw *rw = req->async_data;
3401
3402 memcpy(&rw->iter, iter, sizeof(*iter));
3403 rw->free_iovec = iovec;
3404 rw->bytes_done = 0;
3405 /* can only be fixed buffers, no need to do anything */
3406 if (iov_iter_is_bvec(iter))
3407 return;
3408 if (!iovec) {
3409 unsigned iov_off = 0;
3410
3411 rw->iter.iov = rw->fast_iov;
3412 if (iter->iov != fast_iov) {
3413 iov_off = iter->iov - fast_iov;
3414 rw->iter.iov += iov_off;
3415 }
3416 if (rw->fast_iov != fast_iov)
3417 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
3418 sizeof(struct iovec) * iter->nr_segs);
3419 } else {
3420 req->flags |= REQ_F_NEED_CLEANUP;
3421 }
3422 }
3423
3424 static inline int __io_alloc_async_data(struct io_kiocb *req)
3425 {
3426 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3427 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3428 return req->async_data == NULL;
3429 }
3430
3431 static int io_alloc_async_data(struct io_kiocb *req)
3432 {
3433 if (!io_op_defs[req->opcode].needs_async_data)
3434 return 0;
3435
3436 return __io_alloc_async_data(req);
3437 }
3438
3439 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3440 const struct iovec *fast_iov,
3441 struct iov_iter *iter, bool force)
3442 {
3443 if (!force && !io_op_defs[req->opcode].needs_async_data)
3444 return 0;
3445 if (!req->async_data) {
3446 if (__io_alloc_async_data(req)) {
3447 kfree(iovec);
3448 return -ENOMEM;
3449 }
3450
3451 io_req_map_rw(req, iovec, fast_iov, iter);
3452 }
3453 return 0;
3454 }
3455
3456 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
3457 {
3458 struct io_async_rw *iorw = req->async_data;
3459 struct iovec *iov = iorw->fast_iov;
3460 int ret;
3461
3462 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
3463 if (unlikely(ret < 0))
3464 return ret;
3465
3466 iorw->bytes_done = 0;
3467 iorw->free_iovec = iov;
3468 if (iov)
3469 req->flags |= REQ_F_NEED_CLEANUP;
3470 return 0;
3471 }
3472
3473 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3474 {
3475 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3476 return -EBADF;
3477 return io_prep_rw(req, sqe);
3478 }
3479
3480 /*
3481 * This is our waitqueue callback handler, registered through lock_page_async()
3482 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3483 * This gets called when the page is unlocked, and we generally expect that to
3484 * happen when the page IO is completed and the page is now uptodate. This will
3485 * queue a task_work based retry of the operation, attempting to copy the data
3486 * again. If the latter fails because the page was NOT uptodate, then we will
3487 * do a thread based blocking retry of the operation. That's the unexpected
3488 * slow path.
3489 */
3490 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3491 int sync, void *arg)
3492 {
3493 struct wait_page_queue *wpq;
3494 struct io_kiocb *req = wait->private;
3495 struct wait_page_key *key = arg;
3496
3497 wpq = container_of(wait, struct wait_page_queue, wait);
3498
3499 if (!wake_page_match(wpq, key))
3500 return 0;
3501
3502 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
3503 list_del_init(&wait->entry);
3504
3505 /* submit ref gets dropped, acquire a new one */
3506 refcount_inc(&req->refs);
3507 io_req_task_queue(req);
3508 return 1;
3509 }
3510
3511 /*
3512 * This controls whether a given IO request should be armed for async page
3513 * based retry. If we return false here, the request is handed to the async
3514 * worker threads for retry. If we're doing buffered reads on a regular file,
3515 * we prepare a private wait_page_queue entry and retry the operation. This
3516 * will either succeed because the page is now uptodate and unlocked, or it
3517 * will register a callback when the page is unlocked at IO completion. Through
3518 * that callback, io_uring uses task_work to setup a retry of the operation.
3519 * That retry will attempt the buffered read again. The retry will generally
3520 * succeed, or in rare cases where it fails, we then fall back to using the
3521 * async worker threads for a blocking retry.
3522 */
3523 static bool io_rw_should_retry(struct io_kiocb *req)
3524 {
3525 struct io_async_rw *rw = req->async_data;
3526 struct wait_page_queue *wait = &rw->wpq;
3527 struct kiocb *kiocb = &req->rw.kiocb;
3528
3529 /* never retry for NOWAIT, we just complete with -EAGAIN */
3530 if (req->flags & REQ_F_NOWAIT)
3531 return false;
3532
3533 /* Only for buffered IO */
3534 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
3535 return false;
3536
3537 /*
3538 * Just use poll if we can, and don't attempt if the fs doesn't
3539 * support callback-based unlocks.
3540 */
3541 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3542 return false;
3543
3544 wait->wait.func = io_async_buf_func;
3545 wait->wait.private = req;
3546 wait->wait.flags = 0;
3547 INIT_LIST_HEAD(&wait->wait.entry);
3548 kiocb->ki_flags |= IOCB_WAITQ;
3549 kiocb->ki_flags &= ~IOCB_NOWAIT;
3550 kiocb->ki_waitq = wait;
3551 return true;
3552 }
3553
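/*
 * Issue the read through ->read_iter when the file provides it, otherwise
 * fall back to emulating it with ->read via loop_rw_iter(); -EINVAL if the
 * file supports neither.
 */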
3554 static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3555 {
3556 if (req->file->f_op->read_iter)
3557 return call_read_iter(req->file, &req->rw.kiocb, iter);
3558 else if (req->file->f_op->read)
3559 return loop_rw_iter(READ, req, iter);
3560 else
3561 return -EINVAL;
3562 }
3563
3564 static int io_read(struct io_kiocb *req, unsigned int issue_flags)
3565 {
3566 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3567 struct kiocb *kiocb = &req->rw.kiocb;
3568 struct iov_iter __iter, *iter = &__iter;
3569 struct io_async_rw *rw = req->async_data;
3570 ssize_t io_size, ret, ret2;
3571 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3572
3573 if (rw) {
3574 iter = &rw->iter;
3575 iovec = NULL;
3576 } else {
3577 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3578 if (ret < 0)
3579 return ret;
3580 }
3581 io_size = iov_iter_count(iter);
3582 req->result = io_size;
3583
3584 /* Ensure we clear previously set non-block flag */
3585 if (!force_nonblock)
3586 kiocb->ki_flags &= ~IOCB_NOWAIT;
3587 else
3588 kiocb->ki_flags |= IOCB_NOWAIT;
3589
3590 /* If the file doesn't support async, just async punt */
3591 if (force_nonblock && !io_file_supports_async(req->file, READ)) {
3592 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3593 return ret ?: -EAGAIN;
3594 }
3595
3596 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
3597 if (unlikely(ret)) {
3598 kfree(iovec);
3599 return ret;
3600 }
3601
3602 ret = io_iter_do_read(req, iter);
3603
3604 if (ret == -EIOCBQUEUED) {
3605 goto out_free;
3606 } else if (ret == -EAGAIN) {
3607 /* IOPOLL retry should happen for io-wq threads */
3608 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
3609 goto done;
3610 /* no retry on NONBLOCK nor RWF_NOWAIT */
3611 if (req->flags & REQ_F_NOWAIT)
3612 goto done;
3613 /* some cases will consume bytes even on error returns */
3614 iov_iter_revert(iter, io_size - iov_iter_count(iter));
3615 ret = 0;
3616 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
3617 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
3618 /* read all, failed, already did sync or don't want to retry */
3619 goto done;
3620 }
3621
3622 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3623 if (ret2)
3624 return ret2;
3625
3626 iovec = NULL;
3627 rw = req->async_data;
3628 /* now use our persistent iterator, if we aren't already */
3629 iter = &rw->iter;
3630
3631 do {
3632 io_size -= ret;
3633 rw->bytes_done += ret;
3634 /* if we can retry, do so with the callbacks armed */
3635 if (!io_rw_should_retry(req)) {
3636 kiocb->ki_flags &= ~IOCB_WAITQ;
3637 return -EAGAIN;
3638 }
3639
3640 /*
3641 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3642 * we get -EIOCBQUEUED, then we'll get a notification when the
3643 * desired page gets unlocked. We can also get a partial read
3644 * here, and if we do, then just retry at the new offset.
3645 */
3646 ret = io_iter_do_read(req, iter);
3647 if (ret == -EIOCBQUEUED)
3648 return 0;
3649 /* we got some bytes, but not all. retry. */
3650 } while (ret > 0 && ret < io_size);
3651 done:
3652 kiocb_done(kiocb, ret, issue_flags);
3653 out_free:
3654 /* it's faster to check here than to delegate to kfree */
3655 if (iovec)
3656 kfree(iovec);
3657 return 0;
3658 }
3659
3660 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3661 {
3662 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3663 return -EBADF;
3664 return io_prep_rw(req, sqe);
3665 }
3666
3667 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
3668 {
3669 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3670 struct kiocb *kiocb = &req->rw.kiocb;
3671 struct iov_iter __iter, *iter = &__iter;
3672 struct io_async_rw *rw = req->async_data;
3673 ssize_t ret, ret2, io_size;
3674 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
3675
3676 if (rw) {
3677 iter = &rw->iter;
3678 iovec = NULL;
3679 } else {
3680 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3681 if (ret < 0)
3682 return ret;
3683 }
3684 io_size = iov_iter_count(iter);
3685 req->result = io_size;
3686
3687 /* Ensure we clear previously set non-block flag */
3688 if (!force_nonblock)
3689 kiocb->ki_flags &= ~IOCB_NOWAIT;
3690 else
3691 kiocb->ki_flags |= IOCB_NOWAIT;
3692
3693 /* If the file doesn't support async, just async punt */
3694 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
3695 goto copy_iov;
3696
3697 /* file path doesn't support NOWAIT for non-direct IO */
3698 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3699 (req->flags & REQ_F_ISREG))
3700 goto copy_iov;
3701
3702 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
3703 if (unlikely(ret))
3704 goto out_free;
3705
3706 /*
3707 * Open-code file_start_write here to grab freeze protection,
3708 * which will be released by another thread in
3709 * io_complete_rw(). Fool lockdep by telling it the lock got
3710 * released so that it doesn't complain about the held lock when
3711 * we return to userspace.
3712 */
3713 if (req->flags & REQ_F_ISREG) {
3714 sb_start_write(file_inode(req->file)->i_sb);
3715 __sb_writers_release(file_inode(req->file)->i_sb,
3716 SB_FREEZE_WRITE);
3717 }
3718 kiocb->ki_flags |= IOCB_WRITE;
3719
3720 if (req->file->f_op->write_iter)
3721 ret2 = call_write_iter(req->file, kiocb, iter);
3722 else if (req->file->f_op->write)
3723 ret2 = loop_rw_iter(WRITE, req, iter);
3724 else
3725 ret2 = -EINVAL;
3726
3727 /*
3728 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3729 * retry them without IOCB_NOWAIT.
3730 */
3731 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3732 ret2 = -EAGAIN;
3733 /* no retry on NONBLOCK nor RWF_NOWAIT */
3734 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
3735 goto done;
3736 if (!force_nonblock || ret2 != -EAGAIN) {
3737 /* IOPOLL retry should happen for io-wq threads */
3738 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3739 goto copy_iov;
3740 done:
3741 kiocb_done(kiocb, ret2, issue_flags);
3742 } else {
3743 copy_iov:
3744 /* some cases will consume bytes even on error returns */
3745 iov_iter_revert(iter, io_size - iov_iter_count(iter));
3746 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
3747 return ret ?: -EAGAIN;
3748 }
3749 out_free:
3750 /* it's reportedly faster than delegating the null check to kfree() */
3751 if (iovec)
3752 kfree(iovec);
3753 return ret;
3754 }
3755
3756 static int io_renameat_prep(struct io_kiocb *req,
3757 const struct io_uring_sqe *sqe)
3758 {
3759 struct io_rename *ren = &req->rename;
3760 const char __user *oldf, *newf;
3761
3762 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3763 return -EBADF;
3764
3765 ren->old_dfd = READ_ONCE(sqe->fd);
3766 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3767 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3768 ren->new_dfd = READ_ONCE(sqe->len);
3769 ren->flags = READ_ONCE(sqe->rename_flags);
3770
3771 ren->oldpath = getname(oldf);
3772 if (IS_ERR(ren->oldpath))
3773 return PTR_ERR(ren->oldpath);
3774
3775 ren->newpath = getname(newf);
3776 if (IS_ERR(ren->newpath)) {
3777 putname(ren->oldpath);
3778 return PTR_ERR(ren->newpath);
3779 }
3780
3781 req->flags |= REQ_F_NEED_CLEANUP;
3782 return 0;
3783 }
3784
3785 static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
3786 {
3787 struct io_rename *ren = &req->rename;
3788 int ret;
3789
3790 if (issue_flags & IO_URING_F_NONBLOCK)
3791 return -EAGAIN;
3792
3793 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3794 ren->newpath, ren->flags);
3795
3796 req->flags &= ~REQ_F_NEED_CLEANUP;
3797 if (ret < 0)
3798 req_set_fail_links(req);
3799 io_req_complete(req, ret);
3800 return 0;
3801 }
3802
3803 static int io_unlinkat_prep(struct io_kiocb *req,
3804 const struct io_uring_sqe *sqe)
3805 {
3806 struct io_unlink *un = &req->unlink;
3807 const char __user *fname;
3808
3809 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3810 return -EBADF;
3811
3812 un->dfd = READ_ONCE(sqe->fd);
3813
3814 un->flags = READ_ONCE(sqe->unlink_flags);
3815 if (un->flags & ~AT_REMOVEDIR)
3816 return -EINVAL;
3817
3818 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3819 un->filename = getname(fname);
3820 if (IS_ERR(un->filename))
3821 return PTR_ERR(un->filename);
3822
3823 req->flags |= REQ_F_NEED_CLEANUP;
3824 return 0;
3825 }
3826
3827 static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
3828 {
3829 struct io_unlink *un = &req->unlink;
3830 int ret;
3831
3832 if (issue_flags & IO_URING_F_NONBLOCK)
3833 return -EAGAIN;
3834
3835 if (un->flags & AT_REMOVEDIR)
3836 ret = do_rmdir(un->dfd, un->filename);
3837 else
3838 ret = do_unlinkat(un->dfd, un->filename);
3839
3840 req->flags &= ~REQ_F_NEED_CLEANUP;
3841 if (ret < 0)
3842 req_set_fail_links(req);
3843 io_req_complete(req, ret);
3844 return 0;
3845 }
3846
3847 static int io_shutdown_prep(struct io_kiocb *req,
3848 const struct io_uring_sqe *sqe)
3849 {
3850 #if defined(CONFIG_NET)
3851 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3852 return -EINVAL;
3853 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3854 sqe->buf_index)
3855 return -EINVAL;
3856
3857 req->shutdown.how = READ_ONCE(sqe->len);
3858 return 0;
3859 #else
3860 return -EOPNOTSUPP;
3861 #endif
3862 }
3863
3864 static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
3865 {
3866 #if defined(CONFIG_NET)
3867 struct socket *sock;
3868 int ret;
3869
3870 if (issue_flags & IO_URING_F_NONBLOCK)
3871 return -EAGAIN;
3872
3873 sock = sock_from_file(req->file);
3874 if (unlikely(!sock))
3875 return -ENOTSOCK;
3876
3877 ret = __sys_shutdown_sock(sock, req->shutdown.how);
3878 if (ret < 0)
3879 req_set_fail_links(req);
3880 io_req_complete(req, ret);
3881 return 0;
3882 #else
3883 return -EOPNOTSUPP;
3884 #endif
3885 }
3886
3887 static int __io_splice_prep(struct io_kiocb *req,
3888 const struct io_uring_sqe *sqe)
3889 {
3890 struct io_splice *sp = &req->splice;
3891 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
3892
3893 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3894 return -EINVAL;
3895
3896 sp->file_in = NULL;
3897 sp->len = READ_ONCE(sqe->len);
3898 sp->flags = READ_ONCE(sqe->splice_flags);
3899
3900 if (unlikely(sp->flags & ~valid_flags))
3901 return -EINVAL;
3902
3903 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3904 (sp->flags & SPLICE_F_FD_IN_FIXED));
3905 if (!sp->file_in)
3906 return -EBADF;
3907 req->flags |= REQ_F_NEED_CLEANUP;
3908
3909 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3910 /*
3911 * The splice operation will be punted async, and we need to modify
3912 * io_wq_work.flags here, so initialize io_wq_work first.
3913 */
3914 io_req_init_async(req);
3915 req->work.flags |= IO_WQ_WORK_UNBOUND;
3916 }
3917
3918 return 0;
3919 }
3920
3921 static int io_tee_prep(struct io_kiocb *req,
3922 const struct io_uring_sqe *sqe)
3923 {
3924 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3925 return -EINVAL;
3926 return __io_splice_prep(req, sqe);
3927 }
3928
3929 static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
3930 {
3931 struct io_splice *sp = &req->splice;
3932 struct file *in = sp->file_in;
3933 struct file *out = sp->file_out;
3934 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3935 long ret = 0;
3936
3937 if (issue_flags & IO_URING_F_NONBLOCK)
3938 return -EAGAIN;
3939 if (sp->len)
3940 ret = do_tee(in, out, sp->len, flags);
3941
3942 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3943 req->flags &= ~REQ_F_NEED_CLEANUP;
3944
3945 if (ret != sp->len)
3946 req_set_fail_links(req);
3947 io_req_complete(req, ret);
3948 return 0;
3949 }
3950
3951 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3952 {
3953 struct io_splice *sp = &req->splice;
3954
3955 sp->off_in = READ_ONCE(sqe->splice_off_in);
3956 sp->off_out = READ_ONCE(sqe->off);
3957 return __io_splice_prep(req, sqe);
3958 }
3959
3960 static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
3961 {
3962 struct io_splice *sp = &req->splice;
3963 struct file *in = sp->file_in;
3964 struct file *out = sp->file_out;
3965 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3966 loff_t *poff_in, *poff_out;
3967 long ret = 0;
3968
3969 if (issue_flags & IO_URING_F_NONBLOCK)
3970 return -EAGAIN;
3971
3972 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3973 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
3974
3975 if (sp->len)
3976 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
3977
3978 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3979 req->flags &= ~REQ_F_NEED_CLEANUP;
3980
3981 if (ret != sp->len)
3982 req_set_fail_links(req);
3983 io_req_complete(req, ret);
3984 return 0;
3985 }
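/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * driving IORING_OP_SPLICE from an application, assuming liburing. An offset
 * of -1 maps to the NULL poff_in/poff_out case above (required for pipes),
 * and SPLICE_F_FD_IN_FIXED makes splice_fd_in refer to a registered file.
 * Error handling is trimmed for brevity.
 *
 *	#include <liburing.h>
 *
 *	static int splice_copy(int fd_in, int fd_out, unsigned int nbytes)
 *	{
 *		struct io_uring ring;
 *		struct io_uring_sqe *sqe;
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		io_uring_queue_init(4, &ring, 0);
 *		sqe = io_uring_get_sqe(&ring);
 *		// -1 offsets: use the files' current positions
 *		io_uring_prep_splice(sqe, fd_in, -1, fd_out, -1, nbytes, 0);
 *		io_uring_submit(&ring);
 *		io_uring_wait_cqe(&ring, &cqe);
 *		ret = cqe->res;		// bytes spliced, or -errno
 *		io_uring_cqe_seen(&ring, cqe);
 *		io_uring_queue_exit(&ring);
 *		return ret;
 *	}
 */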
3986
3987 /*
3988 * IORING_OP_NOP just posts a completion event, nothing else.
3989 */
3990 static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
3991 {
3992 struct io_ring_ctx *ctx = req->ctx;
3993
3994 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3995 return -EINVAL;
3996
3997 __io_req_complete(req, issue_flags, 0, 0);
3998 return 0;
3999 }
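/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * IORING_OP_NOP only posts a CQE, which makes it handy for measuring raw
 * ring overhead or sanity-checking a setup. Minimal usage, assuming
 * liburing:
 *
 *	#include <liburing.h>
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_nop(sqe);
 *	io_uring_sqe_set_data(sqe, (void *)0x1234);	// echoed in cqe->user_data
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);			// cqe->res == 0 on success
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */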
4000
4001 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4002 {
4003 struct io_ring_ctx *ctx = req->ctx;
4004
4005 if (!req->file)
4006 return -EBADF;
4007
4008 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4009 return -EINVAL;
4010 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4011 return -EINVAL;
4012
4013 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4014 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4015 return -EINVAL;
4016
4017 req->sync.off = READ_ONCE(sqe->off);
4018 req->sync.len = READ_ONCE(sqe->len);
4019 return 0;
4020 }
4021
4022 static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
4023 {
4024 loff_t end = req->sync.off + req->sync.len;
4025 int ret;
4026
4027 /* fsync always requires a blocking context */
4028 if (issue_flags & IO_URING_F_NONBLOCK)
4029 return -EAGAIN;
4030
4031 ret = vfs_fsync_range(req->file, req->sync.off,
4032 end > 0 ? end : LLONG_MAX,
4033 req->sync.flags & IORING_FSYNC_DATASYNC);
4034 if (ret < 0)
4035 req_set_fail_links(req);
4036 io_req_complete(req, ret);
4037 return 0;
4038 }
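/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * queueing an IORING_OP_FSYNC with liburing. IORING_FSYNC_DATASYNC gives
 * fdatasync() semantics; sqe->off/sqe->len (read as sync.off/sync.len above)
 * can optionally restrict the sync to a byte range, mirroring
 * vfs_fsync_range().
 *
 *	#include <liburing.h>
 *
 *	static int ring_fdatasync(struct io_uring *ring, int fd)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;			// 0 on success, -errno otherwise
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */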
4039
4040 static int io_fallocate_prep(struct io_kiocb *req,
4041 const struct io_uring_sqe *sqe)
4042 {
4043 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
4044 return -EINVAL;
4045 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4046 return -EINVAL;
4047
4048 req->sync.off = READ_ONCE(sqe->off);
4049 req->sync.len = READ_ONCE(sqe->addr);
4050 req->sync.mode = READ_ONCE(sqe->len);
4051 return 0;
4052 }
4053
4054 static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
4055 {
4056 int ret;
4057
4058 	/* fallocate always requires a blocking context */
4059 if (issue_flags & IO_URING_F_NONBLOCK)
4060 return -EAGAIN;
4061 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4062 req->sync.len);
4063 if (ret < 0)
4064 req_set_fail_links(req);
4065 io_req_complete(req, ret);
4066 return 0;
4067 }
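/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * note the slightly unusual SQE layout read by io_fallocate_prep(): sqe->off
 * is the file offset, sqe->addr carries the length and sqe->len carries the
 * fallocate mode. Assuming liburing, the prep helper packs the fields
 * accordingly:
 *
 *	#include <liburing.h>
 *
 *	static int ring_fallocate(struct io_uring *ring, int fd, long long len)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		// mode 0, offset 0, preallocate 'len' bytes
 *		io_uring_prep_fallocate(sqe, fd, 0, 0, len);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;			// 0 on success, -errno otherwise
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */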
4068
4069 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4070 {
4071 const char __user *fname;
4072 int ret;
4073
4074 if (unlikely(sqe->ioprio || sqe->buf_index))
4075 return -EINVAL;
4076 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4077 return -EBADF;
4078
4079 	/* open.how should already be initialised */
4080 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
4081 req->open.how.flags |= O_LARGEFILE;
4082
4083 req->open.dfd = READ_ONCE(sqe->fd);
4084 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4085 req->open.filename = getname(fname);
4086 if (IS_ERR(req->open.filename)) {
4087 ret = PTR_ERR(req->open.filename);
4088 req->open.filename = NULL;
4089 return ret;
4090 }
4091 req->open.nofile = rlimit(RLIMIT_NOFILE);
4092 req->flags |= REQ_F_NEED_CLEANUP;
4093 return 0;
4094 }
4095
4096 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4097 {
4098 u64 flags, mode;
4099
4100 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4101 return -EINVAL;
4102 mode = READ_ONCE(sqe->len);
4103 flags = READ_ONCE(sqe->open_flags);
4104 req->open.how = build_open_how(flags, mode);
4105 return __io_openat_prep(req, sqe);
4106 }
4107
4108 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4109 {
4110 struct open_how __user *how;
4111 size_t len;
4112 int ret;
4113
4114 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4115 return -EINVAL;
4116 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4117 len = READ_ONCE(sqe->len);
4118 if (len < OPEN_HOW_SIZE_VER0)
4119 return -EINVAL;
4120
4121 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4122 len);
4123 if (ret)
4124 return ret;
4125
4126 return __io_openat_prep(req, sqe);
4127 }
4128
4129 static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
4130 {
4131 struct open_flags op;
4132 struct file *file;
4133 bool nonblock_set;
4134 bool resolve_nonblock;
4135 int ret;
4136
4137 ret = build_open_flags(&req->open.how, &op);
4138 if (ret)
4139 goto err;
4140 nonblock_set = op.open_flag & O_NONBLOCK;
4141 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
4142 if (issue_flags & IO_URING_F_NONBLOCK) {
4143 /*
4144 		 * Don't bother trying for an O_TRUNC, O_CREAT, or O_TMPFILE open;
4145 		 * it will always return -EAGAIN
4146 */
4147 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4148 return -EAGAIN;
4149 op.lookup_flags |= LOOKUP_CACHED;
4150 op.open_flag |= O_NONBLOCK;
4151 }
4152
4153 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4154 if (ret < 0)
4155 goto err;
4156
4157 file = do_filp_open(req->open.dfd, req->open.filename, &op);
4158 /* only retry if RESOLVE_CACHED wasn't already set by application */
4159 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
4160 file == ERR_PTR(-EAGAIN)) {
4161 /*
4162 		 * We could hang on to this 'fd', but it seems like a marginal
4163 		 * gain for something that is now known to be a slower path.
4164 * So just put it, and we'll get a new one when we retry.
4165 */
4166 put_unused_fd(ret);
4167 return -EAGAIN;
4168 }
4169
4170 if (IS_ERR(file)) {
4171 put_unused_fd(ret);
4172 ret = PTR_ERR(file);
4173 } else {
4174 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
4175 file->f_flags &= ~O_NONBLOCK;
4176 fsnotify_open(file);
4177 fd_install(ret, file);
4178 }
4179 err:
4180 putname(req->open.filename);
4181 req->flags &= ~REQ_F_NEED_CLEANUP;
4182 if (ret < 0)
4183 req_set_fail_links(req);
4184 io_req_complete(req, ret);
4185 return 0;
4186 }
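/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * IORING_OP_OPENAT2 takes a struct open_how via sqe->addr2/sqe->len. If the
 * application sets RESOLVE_CACHED itself, the kernel won't fall back to a
 * blocking retry (see the resolve_nonblock check above) and the open fails
 * with -EAGAIN when the dcache can't satisfy it. Assumes liburing and uapi
 * headers that provide <linux/openat2.h>.
 *
 *	#include <liburing.h>
 *	#include <linux/openat2.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	static int ring_open(struct io_uring *ring, const char *path)
 *	{
 *		struct open_how how;
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int fd;
 *
 *		memset(&how, 0, sizeof(how));
 *		how.flags = O_RDONLY;
 *		io_uring_prep_openat2(sqe, AT_FDCWD, path, &how);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		fd = cqe->res;		// new fd on success, -errno on failure
 *		io_uring_cqe_seen(ring, cqe);
 *		return fd;
 *	}
 */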
4187
4188 static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
4189 {
4190 return io_openat2(req, issue_flags & IO_URING_F_NONBLOCK);
4191 }
4192
4193 static int io_remove_buffers_prep(struct io_kiocb *req,
4194 const struct io_uring_sqe *sqe)
4195 {
4196 struct io_provide_buf *p = &req->pbuf;
4197 u64 tmp;
4198
4199 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
4200 return -EINVAL;
4201
4202 tmp = READ_ONCE(sqe->fd);
4203 if (!tmp || tmp > USHRT_MAX)
4204 return -EINVAL;
4205
4206 memset(p, 0, sizeof(*p));
4207 p->nbufs = tmp;
4208 p->bgid = READ_ONCE(sqe->buf_group);
4209 return 0;
4210 }
4211
4212 static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4213 int bgid, unsigned nbufs)
4214 {
4215 unsigned i = 0;
4216
4217 /* shouldn't happen */
4218 if (!nbufs)
4219 return 0;
4220
4221 /* the head kbuf is the list itself */
4222 while (!list_empty(&buf->list)) {
4223 struct io_buffer *nxt;
4224
4225 nxt = list_first_entry(&buf->list, struct io_buffer, list);
4226 list_del(&nxt->list);
4227 kfree(nxt);
4228 if (++i == nbufs)
4229 return i;
4230 }
4231 i++;
4232 kfree(buf);
4233 idr_remove(&ctx->io_buffer_idr, bgid);
4234
4235 return i;
4236 }
4237
4238 static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
4239 {
4240 struct io_provide_buf *p = &req->pbuf;
4241 struct io_ring_ctx *ctx = req->ctx;
4242 struct io_buffer *head;
4243 int ret = 0;
4244 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4245
4246 io_ring_submit_lock(ctx, !force_nonblock);
4247
4248 lockdep_assert_held(&ctx->uring_lock);
4249
4250 ret = -ENOENT;
4251 head = idr_find(&ctx->io_buffer_idr, p->bgid);
4252 if (head)
4253 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
4254 if (ret < 0)
4255 req_set_fail_links(req);
4256
4257 /* need to hold the lock to complete IOPOLL requests */
4258 if (ctx->flags & IORING_SETUP_IOPOLL) {
4259 __io_req_complete(req, issue_flags, ret, 0);
4260 io_ring_submit_unlock(ctx, !force_nonblock);
4261 } else {
4262 io_ring_submit_unlock(ctx, !force_nonblock);
4263 __io_req_complete(req, issue_flags, ret, 0);
4264 }
4265 return 0;
4266 }
4267
4268 static int io_provide_buffers_prep(struct io_kiocb *req,
4269 const struct io_uring_sqe *sqe)
4270 {
4271 struct io_provide_buf *p = &req->pbuf;
4272 u64 tmp;
4273
4274 if (sqe->ioprio || sqe->rw_flags)
4275 return -EINVAL;
4276
4277 tmp = READ_ONCE(sqe->fd);
4278 if (!tmp || tmp > USHRT_MAX)
4279 return -E2BIG;
4280 p->nbufs = tmp;
4281 p->addr = READ_ONCE(sqe->addr);
4282 p->len = READ_ONCE(sqe->len);
4283
4284 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
4285 return -EFAULT;
4286
4287 p->bgid = READ_ONCE(sqe->buf_group);
4288 tmp = READ_ONCE(sqe->off);
4289 if (tmp > USHRT_MAX)
4290 return -E2BIG;
4291 p->bid = tmp;
4292 return 0;
4293 }
4294
4295 static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4296 {
4297 struct io_buffer *buf;
4298 u64 addr = pbuf->addr;
4299 int i, bid = pbuf->bid;
4300
4301 for (i = 0; i < pbuf->nbufs; i++) {
4302 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4303 if (!buf)
4304 break;
4305
4306 buf->addr = addr;
4307 buf->len = pbuf->len;
4308 buf->bid = bid;
4309 addr += pbuf->len;
4310 bid++;
4311 if (!*head) {
4312 INIT_LIST_HEAD(&buf->list);
4313 *head = buf;
4314 } else {
4315 list_add_tail(&buf->list, &(*head)->list);
4316 }
4317 }
4318
4319 return i ? i : -ENOMEM;
4320 }
4321
4322 static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
4323 {
4324 struct io_provide_buf *p = &req->pbuf;
4325 struct io_ring_ctx *ctx = req->ctx;
4326 struct io_buffer *head, *list;
4327 int ret = 0;
4328 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4329
4330 io_ring_submit_lock(ctx, !force_nonblock);
4331
4332 lockdep_assert_held(&ctx->uring_lock);
4333
4334 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
4335
4336 ret = io_add_buffers(p, &head);
4337 if (ret < 0)
4338 goto out;
4339
4340 if (!list) {
4341 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
4342 GFP_KERNEL);
4343 if (ret < 0) {
4344 __io_remove_buffers(ctx, head, p->bgid, -1U);
4345 goto out;
4346 }
4347 }
4348 out:
4349 if (ret < 0)
4350 req_set_fail_links(req);
4351
4352 /* need to hold the lock to complete IOPOLL requests */
4353 if (ctx->flags & IORING_SETUP_IOPOLL) {
4354 __io_req_complete(req, issue_flags, ret, 0);
4355 io_ring_submit_unlock(ctx, !force_nonblock);
4356 } else {
4357 io_ring_submit_unlock(ctx, !force_nonblock);
4358 __io_req_complete(req, issue_flags, ret, 0);
4359 }
4360 return 0;
4361 }
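/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * registering a group of provided buffers that requests with
 * IOSQE_BUFFER_SELECT can later pick from. nbufs and the starting bid travel
 * in sqe->fd/sqe->off as parsed by io_provide_buffers_prep() above. Assumes
 * liburing; error handling trimmed.
 *
 *	#include <liburing.h>
 *	#include <stdlib.h>
 *
 *	#define NR_BUFS		16
 *	#define BUF_LEN		4096
 *	#define BGID		7	// arbitrary buffer group id
 *
 *	static int provide_bufs(struct io_uring *ring)
 *	{
 *		void *mem = malloc((size_t)NR_BUFS * BUF_LEN);
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		// buffer ids 0..NR_BUFS-1 in group BGID, each BUF_LEN bytes
 *		io_uring_prep_provide_buffers(sqe, mem, BUF_LEN, NR_BUFS, BGID, 0);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;		// number of buffers added, or -errno
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */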
4362
4363 static int io_epoll_ctl_prep(struct io_kiocb *req,
4364 const struct io_uring_sqe *sqe)
4365 {
4366 #if defined(CONFIG_EPOLL)
4367 if (sqe->ioprio || sqe->buf_index)
4368 return -EINVAL;
4369 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
4370 return -EINVAL;
4371
4372 req->epoll.epfd = READ_ONCE(sqe->fd);
4373 req->epoll.op = READ_ONCE(sqe->len);
4374 req->epoll.fd = READ_ONCE(sqe->off);
4375
4376 if (ep_op_has_event(req->epoll.op)) {
4377 struct epoll_event __user *ev;
4378
4379 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4380 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4381 return -EFAULT;
4382 }
4383
4384 return 0;
4385 #else
4386 return -EOPNOTSUPP;
4387 #endif
4388 }
4389
4390 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
4391 {
4392 #if defined(CONFIG_EPOLL)
4393 struct io_epoll *ie = &req->epoll;
4394 int ret;
4395 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4396
4397 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4398 if (force_nonblock && ret == -EAGAIN)
4399 return -EAGAIN;
4400
4401 if (ret < 0)
4402 req_set_fail_links(req);
4403 __io_req_complete(req, issue_flags, ret, 0);
4404 return 0;
4405 #else
4406 return -EOPNOTSUPP;
4407 #endif
4408 }
4409
4410 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4411 {
4412 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4413 if (sqe->ioprio || sqe->buf_index || sqe->off)
4414 return -EINVAL;
4415 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4416 return -EINVAL;
4417
4418 req->madvise.addr = READ_ONCE(sqe->addr);
4419 req->madvise.len = READ_ONCE(sqe->len);
4420 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4421 return 0;
4422 #else
4423 return -EOPNOTSUPP;
4424 #endif
4425 }
4426
4427 static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
4428 {
4429 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4430 struct io_madvise *ma = &req->madvise;
4431 int ret;
4432
4433 if (issue_flags & IO_URING_F_NONBLOCK)
4434 return -EAGAIN;
4435
4436 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
4437 if (ret < 0)
4438 req_set_fail_links(req);
4439 io_req_complete(req, ret);
4440 return 0;
4441 #else
4442 return -EOPNOTSUPP;
4443 #endif
4444 }
4445
4446 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4447 {
4448 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4449 return -EINVAL;
4450 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4451 return -EINVAL;
4452
4453 req->fadvise.offset = READ_ONCE(sqe->off);
4454 req->fadvise.len = READ_ONCE(sqe->len);
4455 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4456 return 0;
4457 }
4458
4459 static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
4460 {
4461 struct io_fadvise *fa = &req->fadvise;
4462 int ret;
4463
4464 if (issue_flags & IO_URING_F_NONBLOCK) {
4465 switch (fa->advice) {
4466 case POSIX_FADV_NORMAL:
4467 case POSIX_FADV_RANDOM:
4468 case POSIX_FADV_SEQUENTIAL:
4469 break;
4470 default:
4471 return -EAGAIN;
4472 }
4473 }
4474
4475 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4476 if (ret < 0)
4477 req_set_fail_links(req);
4478 io_req_complete(req, ret);
4479 return 0;
4480 }
4481
4482 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4483 {
4484 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
4485 return -EINVAL;
4486 if (sqe->ioprio || sqe->buf_index)
4487 return -EINVAL;
4488 if (req->flags & REQ_F_FIXED_FILE)
4489 return -EBADF;
4490
4491 req->statx.dfd = READ_ONCE(sqe->fd);
4492 req->statx.mask = READ_ONCE(sqe->len);
4493 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
4494 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4495 req->statx.flags = READ_ONCE(sqe->statx_flags);
4496
4497 return 0;
4498 }
4499
4500 static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
4501 {
4502 struct io_statx *ctx = &req->statx;
4503 int ret;
4504
4505 if (issue_flags & IO_URING_F_NONBLOCK) {
4506 /* only need file table for an actual valid fd */
4507 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4508 req->flags |= REQ_F_NO_FILE_TABLE;
4509 return -EAGAIN;
4510 }
4511
4512 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4513 ctx->buffer);
4514
4515 if (ret < 0)
4516 req_set_fail_links(req);
4517 io_req_complete(req, ret);
4518 return 0;
4519 }
4520
4521 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4522 {
4523 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4524 return -EINVAL;
4525 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4526 sqe->rw_flags || sqe->buf_index)
4527 return -EINVAL;
4528 if (req->flags & REQ_F_FIXED_FILE)
4529 return -EBADF;
4530
4531 req->close.fd = READ_ONCE(sqe->fd);
4532 return 0;
4533 }
4534
4535 static int io_close(struct io_kiocb *req, unsigned int issue_flags)
4536 {
4537 struct files_struct *files = current->files;
4538 struct io_close *close = &req->close;
4539 struct fdtable *fdt;
4540 struct file *file;
4541 int ret;
4542
4543 file = NULL;
4544 ret = -EBADF;
4545 spin_lock(&files->file_lock);
4546 fdt = files_fdtable(files);
4547 if (close->fd >= fdt->max_fds) {
4548 spin_unlock(&files->file_lock);
4549 goto err;
4550 }
4551 file = fdt->fd[close->fd];
4552 if (!file) {
4553 spin_unlock(&files->file_lock);
4554 goto err;
4555 }
4556
4557 if (file->f_op == &io_uring_fops) {
4558 spin_unlock(&files->file_lock);
4559 file = NULL;
4560 goto err;
4561 }
4562
4563 /* if the file has a flush method, be safe and punt to async */
4564 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
4565 spin_unlock(&files->file_lock);
4566 return -EAGAIN;
4567 }
4568
4569 ret = __close_fd_get_file(close->fd, &file);
4570 spin_unlock(&files->file_lock);
4571 if (ret < 0) {
4572 if (ret == -ENOENT)
4573 ret = -EBADF;
4574 goto err;
4575 }
4576
4577 /* No ->flush() or already async, safely close from here */
4578 ret = filp_close(file, current->files);
4579 err:
4580 if (ret < 0)
4581 req_set_fail_links(req);
4582 if (file)
4583 fput(file);
4584 __io_req_complete(req, issue_flags, ret, 0);
4585 return 0;
4586 }
4587
4588 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4589 {
4590 struct io_ring_ctx *ctx = req->ctx;
4591
4592 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4593 return -EINVAL;
4594 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4595 return -EINVAL;
4596
4597 req->sync.off = READ_ONCE(sqe->off);
4598 req->sync.len = READ_ONCE(sqe->len);
4599 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
4600 return 0;
4601 }
4602
4603 static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
4604 {
4605 int ret;
4606
4607 /* sync_file_range always requires a blocking context */
4608 if (issue_flags & IO_URING_F_NONBLOCK)
4609 return -EAGAIN;
4610
4611 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
4612 req->sync.flags);
4613 if (ret < 0)
4614 req_set_fail_links(req);
4615 io_req_complete(req, ret);
4616 return 0;
4617 }
4618
4619 #if defined(CONFIG_NET)
4620 static int io_setup_async_msg(struct io_kiocb *req,
4621 struct io_async_msghdr *kmsg)
4622 {
4623 struct io_async_msghdr *async_msg = req->async_data;
4624
4625 if (async_msg)
4626 return -EAGAIN;
4627 if (io_alloc_async_data(req)) {
4628 kfree(kmsg->free_iov);
4629 return -ENOMEM;
4630 }
4631 async_msg = req->async_data;
4632 req->flags |= REQ_F_NEED_CLEANUP;
4633 memcpy(async_msg, kmsg, sizeof(*kmsg));
4634 async_msg->msg.msg_name = &async_msg->addr;
4635 	/* if we're using fast_iov, set it to the new one */
4636 if (!async_msg->free_iov)
4637 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4638
4639 return -EAGAIN;
4640 }
4641
4642 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4643 struct io_async_msghdr *iomsg)
4644 {
4645 iomsg->msg.msg_name = &iomsg->addr;
4646 iomsg->free_iov = iomsg->fast_iov;
4647 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4648 req->sr_msg.msg_flags, &iomsg->free_iov);
4649 }
4650
4651 static int io_sendmsg_prep_async(struct io_kiocb *req)
4652 {
4653 int ret;
4654
4655 if (!io_op_defs[req->opcode].needs_async_data)
4656 return 0;
4657 ret = io_sendmsg_copy_hdr(req, req->async_data);
4658 if (!ret)
4659 req->flags |= REQ_F_NEED_CLEANUP;
4660 return ret;
4661 }
4662
4663 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4664 {
4665 struct io_sr_msg *sr = &req->sr_msg;
4666
4667 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4668 return -EINVAL;
4669
4670 sr->msg_flags = READ_ONCE(sqe->msg_flags);
4671 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4672 sr->len = READ_ONCE(sqe->len);
4673
4674 #ifdef CONFIG_COMPAT
4675 if (req->ctx->compat)
4676 sr->msg_flags |= MSG_CMSG_COMPAT;
4677 #endif
4678 return 0;
4679 }
4680
4681 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
4682 {
4683 struct io_async_msghdr iomsg, *kmsg;
4684 struct socket *sock;
4685 unsigned flags;
4686 int ret;
4687
4688 sock = sock_from_file(req->file);
4689 if (unlikely(!sock))
4690 return -ENOTSOCK;
4691
4692 kmsg = req->async_data;
4693 if (!kmsg) {
4694 ret = io_sendmsg_copy_hdr(req, &iomsg);
4695 if (ret)
4696 return ret;
4697 kmsg = &iomsg;
4698 }
4699
4700 flags = req->sr_msg.msg_flags;
4701 if (flags & MSG_DONTWAIT)
4702 req->flags |= REQ_F_NOWAIT;
4703 else if (issue_flags & IO_URING_F_NONBLOCK)
4704 flags |= MSG_DONTWAIT;
4705
4706 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4707 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
4708 return io_setup_async_msg(req, kmsg);
4709 if (ret == -ERESTARTSYS)
4710 ret = -EINTR;
4711
4712 /* fast path, check for non-NULL to avoid function call */
4713 if (kmsg->free_iov)
4714 kfree(kmsg->free_iov);
4715 req->flags &= ~REQ_F_NEED_CLEANUP;
4716 if (ret < 0)
4717 req_set_fail_links(req);
4718 __io_req_complete(req, issue_flags, ret, 0);
4719 return 0;
4720 }
4721
4722 static int io_send(struct io_kiocb *req, unsigned int issue_flags)
4723 {
4724 struct io_sr_msg *sr = &req->sr_msg;
4725 struct msghdr msg;
4726 struct iovec iov;
4727 struct socket *sock;
4728 unsigned flags;
4729 int ret;
4730
4731 sock = sock_from_file(req->file);
4732 if (unlikely(!sock))
4733 return -ENOTSOCK;
4734
4735 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4736 if (unlikely(ret))
4737 return ret;
4738
4739 msg.msg_name = NULL;
4740 msg.msg_control = NULL;
4741 msg.msg_controllen = 0;
4742 msg.msg_namelen = 0;
4743
4744 flags = req->sr_msg.msg_flags;
4745 if (flags & MSG_DONTWAIT)
4746 req->flags |= REQ_F_NOWAIT;
4747 else if (issue_flags & IO_URING_F_NONBLOCK)
4748 flags |= MSG_DONTWAIT;
4749
4750 msg.msg_flags = flags;
4751 ret = sock_sendmsg(sock, &msg);
4752 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
4753 return -EAGAIN;
4754 if (ret == -ERESTARTSYS)
4755 ret = -EINTR;
4756
4757 if (ret < 0)
4758 req_set_fail_links(req);
4759 __io_req_complete(req, issue_flags, ret, 0);
4760 return 0;
4761 }
4762
4763 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4764 struct io_async_msghdr *iomsg)
4765 {
4766 struct io_sr_msg *sr = &req->sr_msg;
4767 struct iovec __user *uiov;
4768 size_t iov_len;
4769 int ret;
4770
4771 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4772 &iomsg->uaddr, &uiov, &iov_len);
4773 if (ret)
4774 return ret;
4775
4776 if (req->flags & REQ_F_BUFFER_SELECT) {
4777 if (iov_len > 1)
4778 return -EINVAL;
4779 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
4780 return -EFAULT;
4781 sr->len = iomsg->fast_iov[0].iov_len;
4782 iomsg->free_iov = NULL;
4783 } else {
4784 iomsg->free_iov = iomsg->fast_iov;
4785 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4786 &iomsg->free_iov, &iomsg->msg.msg_iter,
4787 false);
4788 if (ret > 0)
4789 ret = 0;
4790 }
4791
4792 return ret;
4793 }
4794
4795 #ifdef CONFIG_COMPAT
4796 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
4797 struct io_async_msghdr *iomsg)
4798 {
4799 struct compat_msghdr __user *msg_compat;
4800 struct io_sr_msg *sr = &req->sr_msg;
4801 struct compat_iovec __user *uiov;
4802 compat_uptr_t ptr;
4803 compat_size_t len;
4804 int ret;
4805
4806 msg_compat = (struct compat_msghdr __user *) sr->umsg;
4807 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
4808 &ptr, &len);
4809 if (ret)
4810 return ret;
4811
4812 uiov = compat_ptr(ptr);
4813 if (req->flags & REQ_F_BUFFER_SELECT) {
4814 compat_ssize_t clen;
4815
4816 if (len > 1)
4817 return -EINVAL;
4818 if (!access_ok(uiov, sizeof(*uiov)))
4819 return -EFAULT;
4820 if (__get_user(clen, &uiov->iov_len))
4821 return -EFAULT;
4822 if (clen < 0)
4823 return -EINVAL;
4824 sr->len = clen;
4825 iomsg->free_iov = NULL;
4826 } else {
4827 iomsg->free_iov = iomsg->fast_iov;
4828 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
4829 UIO_FASTIOV, &iomsg->free_iov,
4830 &iomsg->msg.msg_iter, true);
4831 if (ret < 0)
4832 return ret;
4833 }
4834
4835 return 0;
4836 }
4837 #endif
4838
4839 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4840 struct io_async_msghdr *iomsg)
4841 {
4842 iomsg->msg.msg_name = &iomsg->addr;
4843
4844 #ifdef CONFIG_COMPAT
4845 if (req->ctx->compat)
4846 return __io_compat_recvmsg_copy_hdr(req, iomsg);
4847 #endif
4848
4849 return __io_recvmsg_copy_hdr(req, iomsg);
4850 }
4851
4852 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4853 bool needs_lock)
4854 {
4855 struct io_sr_msg *sr = &req->sr_msg;
4856 struct io_buffer *kbuf;
4857
4858 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4859 if (IS_ERR(kbuf))
4860 return kbuf;
4861
4862 sr->kbuf = kbuf;
4863 req->flags |= REQ_F_BUFFER_SELECTED;
4864 return kbuf;
4865 }
4866
4867 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4868 {
4869 return io_put_kbuf(req, req->sr_msg.kbuf);
4870 }
4871
4872 static int io_recvmsg_prep_async(struct io_kiocb *req)
4873 {
4874 int ret;
4875
4876 if (!io_op_defs[req->opcode].needs_async_data)
4877 return 0;
4878 ret = io_recvmsg_copy_hdr(req, req->async_data);
4879 if (!ret)
4880 req->flags |= REQ_F_NEED_CLEANUP;
4881 return ret;
4882 }
4883
4884 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4885 {
4886 struct io_sr_msg *sr = &req->sr_msg;
4887
4888 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4889 return -EINVAL;
4890
4891 sr->msg_flags = READ_ONCE(sqe->msg_flags);
4892 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4893 sr->len = READ_ONCE(sqe->len);
4894 sr->bgid = READ_ONCE(sqe->buf_group);
4895
4896 #ifdef CONFIG_COMPAT
4897 if (req->ctx->compat)
4898 sr->msg_flags |= MSG_CMSG_COMPAT;
4899 #endif
4900 return 0;
4901 }
4902
4903 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
4904 {
4905 struct io_async_msghdr iomsg, *kmsg;
4906 struct socket *sock;
4907 struct io_buffer *kbuf;
4908 unsigned flags;
4909 int ret, cflags = 0;
4910 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4911
4912 sock = sock_from_file(req->file);
4913 if (unlikely(!sock))
4914 return -ENOTSOCK;
4915
4916 kmsg = req->async_data;
4917 if (!kmsg) {
4918 ret = io_recvmsg_copy_hdr(req, &iomsg);
4919 if (ret)
4920 return ret;
4921 kmsg = &iomsg;
4922 }
4923
4924 if (req->flags & REQ_F_BUFFER_SELECT) {
4925 kbuf = io_recv_buffer_select(req, !force_nonblock);
4926 if (IS_ERR(kbuf))
4927 return PTR_ERR(kbuf);
4928 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4929 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4930 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
4931 1, req->sr_msg.len);
4932 }
4933
4934 flags = req->sr_msg.msg_flags;
4935 if (flags & MSG_DONTWAIT)
4936 req->flags |= REQ_F_NOWAIT;
4937 else if (force_nonblock)
4938 flags |= MSG_DONTWAIT;
4939
4940 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4941 kmsg->uaddr, flags);
4942 if (force_nonblock && ret == -EAGAIN)
4943 return io_setup_async_msg(req, kmsg);
4944 if (ret == -ERESTARTSYS)
4945 ret = -EINTR;
4946
4947 if (req->flags & REQ_F_BUFFER_SELECTED)
4948 cflags = io_put_recv_kbuf(req);
4949 /* fast path, check for non-NULL to avoid function call */
4950 if (kmsg->free_iov)
4951 kfree(kmsg->free_iov);
4952 req->flags &= ~REQ_F_NEED_CLEANUP;
4953 if (ret < 0)
4954 req_set_fail_links(req);
4955 __io_req_complete(req, issue_flags, ret, cflags);
4956 return 0;
4957 }
4958
4959 static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
4960 {
4961 struct io_buffer *kbuf;
4962 struct io_sr_msg *sr = &req->sr_msg;
4963 struct msghdr msg;
4964 void __user *buf = sr->buf;
4965 struct socket *sock;
4966 struct iovec iov;
4967 unsigned flags;
4968 int ret, cflags = 0;
4969 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
4970
4971 sock = sock_from_file(req->file);
4972 if (unlikely(!sock))
4973 return -ENOTSOCK;
4974
4975 if (req->flags & REQ_F_BUFFER_SELECT) {
4976 kbuf = io_recv_buffer_select(req, !force_nonblock);
4977 if (IS_ERR(kbuf))
4978 return PTR_ERR(kbuf);
4979 buf = u64_to_user_ptr(kbuf->addr);
4980 }
4981
4982 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
4983 if (unlikely(ret))
4984 goto out_free;
4985
4986 msg.msg_name = NULL;
4987 msg.msg_control = NULL;
4988 msg.msg_controllen = 0;
4989 msg.msg_namelen = 0;
4990 msg.msg_iocb = NULL;
4991 msg.msg_flags = 0;
4992
4993 flags = req->sr_msg.msg_flags;
4994 if (flags & MSG_DONTWAIT)
4995 req->flags |= REQ_F_NOWAIT;
4996 else if (force_nonblock)
4997 flags |= MSG_DONTWAIT;
4998
4999 ret = sock_recvmsg(sock, &msg, flags);
5000 if (force_nonblock && ret == -EAGAIN)
5001 return -EAGAIN;
5002 if (ret == -ERESTARTSYS)
5003 ret = -EINTR;
5004 out_free:
5005 if (req->flags & REQ_F_BUFFER_SELECTED)
5006 cflags = io_put_recv_kbuf(req);
5007 if (ret < 0)
5008 req_set_fail_links(req);
5009 __io_req_complete(req, issue_flags, ret, cflags);
5010 return 0;
5011 }
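/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * pairing IORING_OP_RECV with the provided-buffer machinery. With
 * IOSQE_BUFFER_SELECT set, sqe->addr stays NULL and the kernel picks a
 * buffer from sqe->buf_group; the chosen buffer id comes back in cqe->flags.
 * BUF_LEN/BGID are the values from the provide-buffers sketch further up;
 * liburing assumed.
 *
 *	#include <liburing.h>
 *
 *	static int recv_selected(struct io_uring *ring, int sockfd,
 *				 unsigned int *bid)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		io_uring_prep_recv(sqe, sockfd, NULL, BUF_LEN, 0);
 *		io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
 *		sqe->buf_group = BGID;		// group registered earlier
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;			// bytes received, or -errno
 *		if (ret >= 0 && (cqe->flags & IORING_CQE_F_BUFFER))
 *			*bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */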
5012
5013 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5014 {
5015 struct io_accept *accept = &req->accept;
5016
5017 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5018 return -EINVAL;
5019 if (sqe->ioprio || sqe->len || sqe->buf_index)
5020 return -EINVAL;
5021
5022 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5023 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5024 accept->flags = READ_ONCE(sqe->accept_flags);
5025 accept->nofile = rlimit(RLIMIT_NOFILE);
5026 return 0;
5027 }
5028
5029 static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
5030 {
5031 struct io_accept *accept = &req->accept;
5032 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
5033 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
5034 int ret;
5035
5036 if (req->file->f_flags & O_NONBLOCK)
5037 req->flags |= REQ_F_NOWAIT;
5038
5039 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
5040 accept->addr_len, accept->flags,
5041 accept->nofile);
5042 if (ret == -EAGAIN && force_nonblock)
5043 return -EAGAIN;
5044 if (ret < 0) {
5045 if (ret == -ERESTARTSYS)
5046 ret = -EINTR;
5047 req_set_fail_links(req);
5048 }
5049 __io_req_complete(req, issue_flags, ret, 0);
5050 return 0;
5051 }
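/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * IORING_OP_ACCEPT mirrors accept4(); sqe->addr/addr2 carry the optional
 * sockaddr and length pointers read by io_accept_prep() above. Assumes
 * liburing.
 *
 *	#include <liburing.h>
 *	#include <sys/socket.h>
 *
 *	static int ring_accept(struct io_uring *ring, int listen_fd)
 *	{
 *		struct sockaddr_storage ss;
 *		socklen_t len = sizeof(ss);
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int fd;
 *
 *		io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&ss,
 *				     &len, SOCK_CLOEXEC);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		fd = cqe->res;		// accepted fd, or -errno
 *		io_uring_cqe_seen(ring, cqe);
 *		return fd;
 *	}
 */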
5052
5053 static int io_connect_prep_async(struct io_kiocb *req)
5054 {
5055 struct io_async_connect *io = req->async_data;
5056 struct io_connect *conn = &req->connect;
5057
5058 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
5059 }
5060
5061 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5062 {
5063 struct io_connect *conn = &req->connect;
5064
5065 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5066 return -EINVAL;
5067 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
5068 return -EINVAL;
5069
5070 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5071 conn->addr_len = READ_ONCE(sqe->addr2);
5072 return 0;
5073 }
5074
5075 static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
5076 {
5077 struct io_async_connect __io, *io;
5078 unsigned file_flags;
5079 int ret;
5080 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
5081
5082 if (req->async_data) {
5083 io = req->async_data;
5084 } else {
5085 ret = move_addr_to_kernel(req->connect.addr,
5086 req->connect.addr_len,
5087 &__io.address);
5088 if (ret)
5089 goto out;
5090 io = &__io;
5091 }
5092
5093 file_flags = force_nonblock ? O_NONBLOCK : 0;
5094
5095 ret = __sys_connect_file(req->file, &io->address,
5096 req->connect.addr_len, file_flags);
5097 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
5098 if (req->async_data)
5099 return -EAGAIN;
5100 if (io_alloc_async_data(req)) {
5101 ret = -ENOMEM;
5102 goto out;
5103 }
5104 io = req->async_data;
5105 memcpy(req->async_data, &__io, sizeof(__io));
5106 return -EAGAIN;
5107 }
5108 if (ret == -ERESTARTSYS)
5109 ret = -EINTR;
5110 out:
5111 if (ret < 0)
5112 req_set_fail_links(req);
5113 __io_req_complete(req, issue_flags, ret, 0);
5114 return 0;
5115 }
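/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * IORING_OP_CONNECT takes the sockaddr pointer in sqe->addr and its length
 * in sqe->addr2, matching io_connect_prep() above. Assumes liburing.
 *
 *	#include <liburing.h>
 *	#include <netinet/in.h>
 *
 *	static int ring_connect(struct io_uring *ring, int sockfd,
 *				const struct sockaddr_in *dst)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		io_uring_prep_connect(sqe, sockfd, (const struct sockaddr *)dst,
 *				      sizeof(*dst));
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;		// 0 on success, -errno on failure
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */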
5116 #else /* !CONFIG_NET */
5117 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5118 {
5119 return -EOPNOTSUPP;
5120 }
5121
5122 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
5123 {
5124 return -EOPNOTSUPP;
5125 }
5126
5127 static int io_send(struct io_kiocb *req, unsigned int issue_flags)
5128 {
5129 return -EOPNOTSUPP;
5130 }
5131
5132 static int io_recvmsg_prep(struct io_kiocb *req,
5133 const struct io_uring_sqe *sqe)
5134 {
5135 return -EOPNOTSUPP;
5136 }
5137
5138 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
5139 {
5140 return -EOPNOTSUPP;
5141 }
5142
5143 static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
5144 {
5145 return -EOPNOTSUPP;
5146 }
5147
5148 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5149 {
5150 return -EOPNOTSUPP;
5151 }
5152
5153 static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
5154 {
5155 return -EOPNOTSUPP;
5156 }
5157
5158 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5159 {
5160 return -EOPNOTSUPP;
5161 }
5162
5163 static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
5164 {
5165 return -EOPNOTSUPP;
5166 }
5167 #endif /* CONFIG_NET */
5168
5169 struct io_poll_table {
5170 struct poll_table_struct pt;
5171 struct io_kiocb *req;
5172 int error;
5173 };
5174
5175 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
5176 __poll_t mask, task_work_func_t func)
5177 {
5178 int ret;
5179
5180 	/* for instances that support it, check for an event match first: */
5181 if (mask && !(mask & poll->events))
5182 return 0;
5183
5184 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5185
5186 list_del_init(&poll->wait.entry);
5187
5188 req->result = mask;
5189 req->task_work.func = func;
5190 percpu_ref_get(&req->ctx->refs);
5191
5192 /*
5193 * If this fails, then the task is exiting. When a task exits, the
5194 * work gets canceled, so just cancel this request as well instead
5195 	 * of executing it. We can't safely execute it anyway, as we may not
5196 	 * have the state needed for it.
5197 */
5198 ret = io_req_task_work_add(req);
5199 if (unlikely(ret)) {
5200 WRITE_ONCE(poll->canceled, true);
5201 io_req_task_work_add_fallback(req, func);
5202 }
5203 return 1;
5204 }
5205
5206 static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
5207 __acquires(&req->ctx->completion_lock)
5208 {
5209 struct io_ring_ctx *ctx = req->ctx;
5210
5211 if (!req->result && !READ_ONCE(poll->canceled)) {
5212 struct poll_table_struct pt = { ._key = poll->events };
5213
5214 req->result = vfs_poll(req->file, &pt) & poll->events;
5215 }
5216
5217 spin_lock_irq(&ctx->completion_lock);
5218 if (!req->result && !READ_ONCE(poll->canceled)) {
5219 add_wait_queue(poll->head, &poll->wait);
5220 return true;
5221 }
5222
5223 return false;
5224 }
5225
5226 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
5227 {
5228 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
5229 if (req->opcode == IORING_OP_POLL_ADD)
5230 return req->async_data;
5231 return req->apoll->double_poll;
5232 }
5233
5234 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5235 {
5236 if (req->opcode == IORING_OP_POLL_ADD)
5237 return &req->poll;
5238 return &req->apoll->poll;
5239 }
5240
5241 static void io_poll_remove_double(struct io_kiocb *req)
5242 {
5243 struct io_poll_iocb *poll = io_poll_get_double(req);
5244
5245 lockdep_assert_held(&req->ctx->completion_lock);
5246
5247 if (poll && poll->head) {
5248 struct wait_queue_head *head = poll->head;
5249
5250 spin_lock(&head->lock);
5251 list_del_init(&poll->wait.entry);
5252 if (poll->wait.private)
5253 refcount_dec(&req->refs);
5254 poll->head = NULL;
5255 spin_unlock(&head->lock);
5256 }
5257 }
5258
5259 static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
5260 {
5261 struct io_ring_ctx *ctx = req->ctx;
5262
5263 io_poll_remove_double(req);
5264 req->poll.done = true;
5265 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
5266 io_commit_cqring(ctx);
5267 }
5268
5269 static void io_poll_task_func(struct callback_head *cb)
5270 {
5271 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5272 struct io_ring_ctx *ctx = req->ctx;
5273 struct io_kiocb *nxt;
5274
5275 if (io_poll_rewait(req, &req->poll)) {
5276 spin_unlock_irq(&ctx->completion_lock);
5277 } else {
5278 hash_del(&req->hash_node);
5279 io_poll_complete(req, req->result, 0);
5280 spin_unlock_irq(&ctx->completion_lock);
5281
5282 nxt = io_put_req_find_next(req);
5283 io_cqring_ev_posted(ctx);
5284 if (nxt)
5285 __io_req_task_submit(nxt);
5286 }
5287
5288 percpu_ref_put(&ctx->refs);
5289 }
5290
5291 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
5292 int sync, void *key)
5293 {
5294 struct io_kiocb *req = wait->private;
5295 struct io_poll_iocb *poll = io_poll_get_single(req);
5296 __poll_t mask = key_to_poll(key);
5297
5298 	/* for instances that support it, check for an event match first: */
5299 if (mask && !(mask & poll->events))
5300 return 0;
5301
5302 list_del_init(&wait->entry);
5303
5304 if (poll && poll->head) {
5305 bool done;
5306
5307 spin_lock(&poll->head->lock);
5308 done = list_empty(&poll->wait.entry);
5309 if (!done)
5310 list_del_init(&poll->wait.entry);
5311 /* make sure double remove sees this as being gone */
5312 wait->private = NULL;
5313 spin_unlock(&poll->head->lock);
5314 if (!done) {
5315 			/* use the wait func handler, so it matches the request type */
5316 poll->wait.func(&poll->wait, mode, sync, key);
5317 }
5318 }
5319 refcount_dec(&req->refs);
5320 return 1;
5321 }
5322
5323 static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5324 wait_queue_func_t wake_func)
5325 {
5326 poll->head = NULL;
5327 poll->done = false;
5328 poll->canceled = false;
5329 poll->events = events;
5330 INIT_LIST_HEAD(&poll->wait.entry);
5331 init_waitqueue_func_entry(&poll->wait, wake_func);
5332 }
5333
5334 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
5335 struct wait_queue_head *head,
5336 struct io_poll_iocb **poll_ptr)
5337 {
5338 struct io_kiocb *req = pt->req;
5339
5340 /*
5341 * If poll->head is already set, it's because the file being polled
5342 	 * uses multiple waitqueues for poll handling (e.g. one for read, one
5343 	 * for write). Set up a separate io_poll_iocb if this happens.
5344 */
5345 if (unlikely(poll->head)) {
5346 struct io_poll_iocb *poll_one = poll;
5347
5348 /* already have a 2nd entry, fail a third attempt */
5349 if (*poll_ptr) {
5350 pt->error = -EINVAL;
5351 return;
5352 }
5353 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5354 if (!poll) {
5355 pt->error = -ENOMEM;
5356 return;
5357 }
5358 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
5359 refcount_inc(&req->refs);
5360 poll->wait.private = req;
5361 *poll_ptr = poll;
5362 }
5363
5364 pt->error = 0;
5365 poll->head = head;
5366
5367 if (poll->events & EPOLLEXCLUSIVE)
5368 add_wait_queue_exclusive(head, &poll->wait);
5369 else
5370 add_wait_queue(head, &poll->wait);
5371 }
5372
5373 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5374 struct poll_table_struct *p)
5375 {
5376 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5377 struct async_poll *apoll = pt->req->apoll;
5378
5379 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
5380 }
5381
5382 static void io_async_task_func(struct callback_head *cb)
5383 {
5384 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5385 struct async_poll *apoll = req->apoll;
5386 struct io_ring_ctx *ctx = req->ctx;
5387
5388 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5389
5390 if (io_poll_rewait(req, &apoll->poll)) {
5391 spin_unlock_irq(&ctx->completion_lock);
5392 percpu_ref_put(&ctx->refs);
5393 return;
5394 }
5395
5396 /* If req is still hashed, it cannot have been canceled. Don't check. */
5397 if (hash_hashed(&req->hash_node))
5398 hash_del(&req->hash_node);
5399
5400 io_poll_remove_double(req);
5401 spin_unlock_irq(&ctx->completion_lock);
5402
5403 if (!READ_ONCE(apoll->poll.canceled))
5404 __io_req_task_submit(req);
5405 else
5406 __io_req_task_cancel(req, -ECANCELED);
5407
5408 percpu_ref_put(&ctx->refs);
5409 kfree(apoll->double_poll);
5410 kfree(apoll);
5411 }
5412
5413 static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5414 void *key)
5415 {
5416 struct io_kiocb *req = wait->private;
5417 struct io_poll_iocb *poll = &req->apoll->poll;
5418
5419 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5420 key_to_poll(key));
5421
5422 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5423 }
5424
5425 static void io_poll_req_insert(struct io_kiocb *req)
5426 {
5427 struct io_ring_ctx *ctx = req->ctx;
5428 struct hlist_head *list;
5429
5430 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5431 hlist_add_head(&req->hash_node, list);
5432 }
5433
5434 static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5435 struct io_poll_iocb *poll,
5436 struct io_poll_table *ipt, __poll_t mask,
5437 wait_queue_func_t wake_func)
5438 __acquires(&ctx->completion_lock)
5439 {
5440 struct io_ring_ctx *ctx = req->ctx;
5441 bool cancel = false;
5442
5443 INIT_HLIST_NODE(&req->hash_node);
5444 io_init_poll_iocb(poll, mask, wake_func);
5445 poll->file = req->file;
5446 poll->wait.private = req;
5447
5448 ipt->pt._key = mask;
5449 ipt->req = req;
5450 ipt->error = -EINVAL;
5451
5452 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5453
5454 spin_lock_irq(&ctx->completion_lock);
5455 if (likely(poll->head)) {
5456 spin_lock(&poll->head->lock);
5457 if (unlikely(list_empty(&poll->wait.entry))) {
5458 if (ipt->error)
5459 cancel = true;
5460 ipt->error = 0;
5461 mask = 0;
5462 }
5463 if (mask || ipt->error)
5464 list_del_init(&poll->wait.entry);
5465 else if (cancel)
5466 WRITE_ONCE(poll->canceled, true);
5467 else if (!poll->done) /* actually waiting for an event */
5468 io_poll_req_insert(req);
5469 spin_unlock(&poll->head->lock);
5470 }
5471
5472 return mask;
5473 }
5474
5475 static bool io_arm_poll_handler(struct io_kiocb *req)
5476 {
5477 const struct io_op_def *def = &io_op_defs[req->opcode];
5478 struct io_ring_ctx *ctx = req->ctx;
5479 struct async_poll *apoll;
5480 struct io_poll_table ipt;
5481 __poll_t mask, ret;
5482 int rw;
5483
5484 if (!req->file || !file_can_poll(req->file))
5485 return false;
5486 if (req->flags & REQ_F_POLLED)
5487 return false;
5488 if (def->pollin)
5489 rw = READ;
5490 else if (def->pollout)
5491 rw = WRITE;
5492 else
5493 return false;
5494 	/* if we can't do a nonblocking try, no point in arming a poll handler */
5495 if (!io_file_supports_async(req->file, rw))
5496 return false;
5497
5498 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5499 if (unlikely(!apoll))
5500 return false;
5501 apoll->double_poll = NULL;
5502
5503 req->flags |= REQ_F_POLLED;
5504 req->apoll = apoll;
5505
5506 mask = 0;
5507 if (def->pollin)
5508 mask |= POLLIN | POLLRDNORM;
5509 if (def->pollout)
5510 mask |= POLLOUT | POLLWRNORM;
5511
5512 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5513 if ((req->opcode == IORING_OP_RECVMSG) &&
5514 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5515 mask &= ~POLLIN;
5516
5517 mask |= POLLERR | POLLPRI;
5518
5519 ipt.pt._qproc = io_async_queue_proc;
5520
5521 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5522 io_async_wake);
5523 if (ret || ipt.error) {
5524 io_poll_remove_double(req);
5525 spin_unlock_irq(&ctx->completion_lock);
5526 kfree(apoll->double_poll);
5527 kfree(apoll);
5528 return false;
5529 }
5530 spin_unlock_irq(&ctx->completion_lock);
5531 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5532 apoll->poll.events);
5533 return true;
5534 }
5535
5536 static bool __io_poll_remove_one(struct io_kiocb *req,
5537 struct io_poll_iocb *poll)
5538 {
5539 bool do_complete = false;
5540
5541 spin_lock(&poll->head->lock);
5542 WRITE_ONCE(poll->canceled, true);
5543 if (!list_empty(&poll->wait.entry)) {
5544 list_del_init(&poll->wait.entry);
5545 do_complete = true;
5546 }
5547 spin_unlock(&poll->head->lock);
5548 hash_del(&req->hash_node);
5549 return do_complete;
5550 }
5551
5552 static bool io_poll_remove_one(struct io_kiocb *req)
5553 {
5554 bool do_complete;
5555
5556 io_poll_remove_double(req);
5557
5558 if (req->opcode == IORING_OP_POLL_ADD) {
5559 do_complete = __io_poll_remove_one(req, &req->poll);
5560 } else {
5561 struct async_poll *apoll = req->apoll;
5562
5563 /* non-poll requests have submit ref still */
5564 do_complete = __io_poll_remove_one(req, &apoll->poll);
5565 if (do_complete) {
5566 io_put_req(req);
5567 kfree(apoll->double_poll);
5568 kfree(apoll);
5569 }
5570 }
5571
5572 if (do_complete) {
5573 io_cqring_fill_event(req, -ECANCELED);
5574 io_commit_cqring(req->ctx);
5575 req_set_fail_links(req);
5576 io_put_req_deferred(req, 1);
5577 }
5578
5579 return do_complete;
5580 }
5581
5582 /*
5583 * Returns true if we found and killed one or more poll requests
5584 */
5585 static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5586 struct files_struct *files)
5587 {
5588 struct hlist_node *tmp;
5589 struct io_kiocb *req;
5590 int posted = 0, i;
5591
5592 spin_lock_irq(&ctx->completion_lock);
5593 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5594 struct hlist_head *list;
5595
5596 list = &ctx->cancel_hash[i];
5597 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5598 if (io_match_task(req, tsk, files))
5599 posted += io_poll_remove_one(req);
5600 }
5601 }
5602 spin_unlock_irq(&ctx->completion_lock);
5603
5604 if (posted)
5605 io_cqring_ev_posted(ctx);
5606
5607 return posted != 0;
5608 }
5609
5610 static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5611 {
5612 struct hlist_head *list;
5613 struct io_kiocb *req;
5614
5615 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5616 hlist_for_each_entry(req, list, hash_node) {
5617 if (sqe_addr != req->user_data)
5618 continue;
5619 if (io_poll_remove_one(req))
5620 return 0;
5621 return -EALREADY;
5622 }
5623
5624 return -ENOENT;
5625 }
5626
5627 static int io_poll_remove_prep(struct io_kiocb *req,
5628 const struct io_uring_sqe *sqe)
5629 {
5630 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5631 return -EINVAL;
5632 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5633 sqe->poll_events)
5634 return -EINVAL;
5635
5636 req->poll_remove.addr = READ_ONCE(sqe->addr);
5637 return 0;
5638 }
5639
5640 /*
5641 * Find a running poll command that matches one specified in sqe->addr,
5642 * and remove it if found.
5643 */
5644 static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
5645 {
5646 struct io_ring_ctx *ctx = req->ctx;
5647 int ret;
5648
5649 spin_lock_irq(&ctx->completion_lock);
5650 ret = io_poll_cancel(ctx, req->poll_remove.addr);
5651 spin_unlock_irq(&ctx->completion_lock);
5652
5653 if (ret < 0)
5654 req_set_fail_links(req);
5655 io_req_complete(req, ret);
5656 return 0;
5657 }
5658
5659 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5660 void *key)
5661 {
5662 struct io_kiocb *req = wait->private;
5663 struct io_poll_iocb *poll = &req->poll;
5664
5665 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
5666 }
5667
5668 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5669 struct poll_table_struct *p)
5670 {
5671 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5672
5673 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
5674 }
5675
5676 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5677 {
5678 struct io_poll_iocb *poll = &req->poll;
5679 u32 events;
5680
5681 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5682 return -EINVAL;
5683 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5684 return -EINVAL;
5685
5686 events = READ_ONCE(sqe->poll32_events);
5687 #ifdef __BIG_ENDIAN
5688 events = swahw32(events);
5689 #endif
5690 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5691 (events & EPOLLEXCLUSIVE);
5692 return 0;
5693 }
5694
5695 static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
5696 {
5697 struct io_poll_iocb *poll = &req->poll;
5698 struct io_ring_ctx *ctx = req->ctx;
5699 struct io_poll_table ipt;
5700 __poll_t mask;
5701
5702 ipt.pt._qproc = io_poll_queue_proc;
5703
5704 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5705 io_poll_wake);
5706
5707 if (mask) { /* no async, we'd stolen it */
5708 ipt.error = 0;
5709 io_poll_complete(req, mask, 0);
5710 }
5711 spin_unlock_irq(&ctx->completion_lock);
5712
5713 if (mask) {
5714 io_cqring_ev_posted(ctx);
5715 io_put_req(req);
5716 }
5717 return ipt.error;
5718 }
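/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * IORING_OP_POLL_ADD is a one-shot poll; the CQE carries the triggered mask
 * (mangled back to the userspace poll bits by io_poll_complete() above).
 * Assumes liburing.
 *
 *	#include <liburing.h>
 *	#include <poll.h>
 *
 *	static int wait_readable(struct io_uring *ring, int fd)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int revents;
 *
 *		io_uring_prep_poll_add(sqe, fd, POLLIN);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		revents = cqe->res;	// triggered events, or -errno
 *		io_uring_cqe_seen(ring, cqe);
 *		return revents;
 *	}
 */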
5719
5720 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5721 {
5722 struct io_timeout_data *data = container_of(timer,
5723 struct io_timeout_data, timer);
5724 struct io_kiocb *req = data->req;
5725 struct io_ring_ctx *ctx = req->ctx;
5726 unsigned long flags;
5727
5728 spin_lock_irqsave(&ctx->completion_lock, flags);
5729 list_del_init(&req->timeout.list);
5730 atomic_set(&req->ctx->cq_timeouts,
5731 atomic_read(&req->ctx->cq_timeouts) + 1);
5732
5733 io_cqring_fill_event(req, -ETIME);
5734 io_commit_cqring(ctx);
5735 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5736
5737 io_cqring_ev_posted(ctx);
5738 req_set_fail_links(req);
5739 io_put_req(req);
5740 return HRTIMER_NORESTART;
5741 }
5742
5743 static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5744 __u64 user_data)
5745 {
5746 struct io_timeout_data *io;
5747 struct io_kiocb *req;
5748 int ret = -ENOENT;
5749
5750 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5751 if (user_data == req->user_data) {
5752 ret = 0;
5753 break;
5754 }
5755 }
5756
5757 if (ret == -ENOENT)
5758 return ERR_PTR(ret);
5759
5760 io = req->async_data;
5761 ret = hrtimer_try_to_cancel(&io->timer);
5762 if (ret == -1)
5763 return ERR_PTR(-EALREADY);
5764 list_del_init(&req->timeout.list);
5765 return req;
5766 }
5767
5768 static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5769 {
5770 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5771
5772 if (IS_ERR(req))
5773 return PTR_ERR(req);
5774
5775 req_set_fail_links(req);
5776 io_cqring_fill_event(req, -ECANCELED);
5777 io_put_req_deferred(req, 1);
5778 return 0;
5779 }
5780
5781 static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5782 struct timespec64 *ts, enum hrtimer_mode mode)
5783 {
5784 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5785 struct io_timeout_data *data;
5786
5787 if (IS_ERR(req))
5788 return PTR_ERR(req);
5789
5790 req->timeout.off = 0; /* noseq */
5791 data = req->async_data;
5792 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5793 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5794 data->timer.function = io_timeout_fn;
5795 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5796 return 0;
5797 }
5798
5799 static int io_timeout_remove_prep(struct io_kiocb *req,
5800 const struct io_uring_sqe *sqe)
5801 {
5802 struct io_timeout_rem *tr = &req->timeout_rem;
5803
5804 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5805 return -EINVAL;
5806 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5807 return -EINVAL;
5808 if (sqe->ioprio || sqe->buf_index || sqe->len)
5809 return -EINVAL;
5810
5811 tr->addr = READ_ONCE(sqe->addr);
5812 tr->flags = READ_ONCE(sqe->timeout_flags);
5813 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5814 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5815 return -EINVAL;
5816 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5817 return -EFAULT;
5818 } else if (tr->flags) {
5819 /* timeout removal doesn't support flags */
5820 return -EINVAL;
5821 }
5822
5823 return 0;
5824 }
5825
5826 static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5827 {
5828 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5829 : HRTIMER_MODE_REL;
5830 }
5831
5832 /*
5833 * Remove or update an existing timeout command
5834 */
5835 static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
5836 {
5837 struct io_timeout_rem *tr = &req->timeout_rem;
5838 struct io_ring_ctx *ctx = req->ctx;
5839 int ret;
5840
5841 spin_lock_irq(&ctx->completion_lock);
5842 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
5843 ret = io_timeout_cancel(ctx, tr->addr);
5844 else
5845 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5846 io_translate_timeout_mode(tr->flags));
5847
5848 io_cqring_fill_event(req, ret);
5849 io_commit_cqring(ctx);
5850 spin_unlock_irq(&ctx->completion_lock);
5851 io_cqring_ev_posted(ctx);
5852 if (ret < 0)
5853 req_set_fail_links(req);
5854 io_put_req(req);
5855 return 0;
5856 }
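/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * cancelling or re-arming a pending timeout by its user_data. Plain removal
 * must not pass any flags (see the prep check above); IORING_TIMEOUT_UPDATE
 * additionally points sqe->addr2 at a new timespec. Assumes a liburing
 * version that exposes these helpers, and a ring/tag set up elsewhere.
 *
 *	#include <liburing.h>
 *
 *	// cancel the timeout that was submitted with user_data == tag
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_timeout_remove(sqe, tag, 0);
 *
 *	// ...or push it out to 5 seconds from now instead
 *	struct __kernel_timespec ts = { .tv_sec = 5 };
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_timeout_update(sqe, &ts, tag, 0);
 *	io_uring_submit(&ring);
 */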
5857
5858 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5859 bool is_timeout_link)
5860 {
5861 struct io_timeout_data *data;
5862 unsigned flags;
5863 u32 off = READ_ONCE(sqe->off);
5864
5865 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5866 return -EINVAL;
5867 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
5868 return -EINVAL;
5869 if (off && is_timeout_link)
5870 return -EINVAL;
5871 flags = READ_ONCE(sqe->timeout_flags);
5872 if (flags & ~IORING_TIMEOUT_ABS)
5873 return -EINVAL;
5874
5875 req->timeout.off = off;
5876
5877 if (!req->async_data && io_alloc_async_data(req))
5878 return -ENOMEM;
5879
5880 data = req->async_data;
5881 data->req = req;
5882
5883 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5884 return -EFAULT;
5885
5886 data->mode = io_translate_timeout_mode(flags);
5887 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5888 return 0;
5889 }
5890
5891 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
5892 {
5893 struct io_ring_ctx *ctx = req->ctx;
5894 struct io_timeout_data *data = req->async_data;
5895 struct list_head *entry;
5896 u32 tail, off = req->timeout.off;
5897
5898 spin_lock_irq(&ctx->completion_lock);
5899
5900 /*
5901 	 * sqe->off holds how many events need to occur for this
5902 	 * timeout event to be satisfied. If it isn't set, then this is
5903 	 * a pure timeout request and the sequence isn't used.
5904 */
5905 if (io_is_timeout_noseq(req)) {
5906 entry = ctx->timeout_list.prev;
5907 goto add;
5908 }
5909
5910 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5911 req->timeout.target_seq = tail + off;
5912
5913 /* Update the last seq here in case io_flush_timeouts() hasn't.
5914 * This is safe because ->completion_lock is held, and submissions
5915 * and completions are never mixed in the same ->completion_lock section.
5916 */
5917 ctx->cq_last_tm_flush = tail;
5918
5919 /*
5920 * Insertion sort, ensuring the first entry in the list is always
5921 * the one we need first.
5922 */
5923 list_for_each_prev(entry, &ctx->timeout_list) {
5924 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5925 timeout.list);
5926
5927 if (io_is_timeout_noseq(nxt))
5928 continue;
5929 /* nxt.seq is behind @tail, otherwise would've been completed */
5930 if (off >= nxt->timeout.target_seq - tail)
5931 break;
5932 }
5933 add:
5934 list_add(&req->timeout.list, entry);
5935 data->timer.function = io_timeout_fn;
5936 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5937 spin_unlock_irq(&ctx->completion_lock);
5938 return 0;
5939 }
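/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * arming an IORING_OP_TIMEOUT with liburing. The "count" argument lands in
 * sqe->off and is the number of completions to wait for before firing; 0
 * makes it a pure (noseq) timeout, which completes with -ETIME.
 *
 *	#include <liburing.h>
 *
 *	static void arm_timeout(struct io_uring *ring, unsigned int secs)
 *	{
 *		struct __kernel_timespec ts = { .tv_sec = secs };
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *
 *		// count == 0: pure timeout; pass IORING_TIMEOUT_ABS for absolute
 *		io_uring_prep_timeout(sqe, &ts, 0, 0);
 *		io_uring_submit(ring);	// ts is read at submit time (see prep above)
 *	}
 */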
5940
5941 static bool io_cancel_cb(struct io_wq_work *work, void *data)
5942 {
5943 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5944
5945 return req->user_data == (unsigned long) data;
5946 }
5947
5948 static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
5949 {
5950 enum io_wq_cancel cancel_ret;
5951 int ret = 0;
5952
5953 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
5954 switch (cancel_ret) {
5955 case IO_WQ_CANCEL_OK:
5956 ret = 0;
5957 break;
5958 case IO_WQ_CANCEL_RUNNING:
5959 ret = -EALREADY;
5960 break;
5961 case IO_WQ_CANCEL_NOTFOUND:
5962 ret = -ENOENT;
5963 break;
5964 }
5965
5966 return ret;
5967 }
5968
5969 static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5970 struct io_kiocb *req, __u64 sqe_addr,
5971 int success_ret)
5972 {
5973 unsigned long flags;
5974 int ret;
5975
5976 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5977 if (ret != -ENOENT) {
5978 spin_lock_irqsave(&ctx->completion_lock, flags);
5979 goto done;
5980 }
5981
5982 spin_lock_irqsave(&ctx->completion_lock, flags);
5983 ret = io_timeout_cancel(ctx, sqe_addr);
5984 if (ret != -ENOENT)
5985 goto done;
5986 ret = io_poll_cancel(ctx, sqe_addr);
5987 done:
5988 if (!ret)
5989 ret = success_ret;
5990 io_cqring_fill_event(req, ret);
5991 io_commit_cqring(ctx);
5992 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5993 io_cqring_ev_posted(ctx);
5994
5995 if (ret < 0)
5996 req_set_fail_links(req);
5997 io_put_req(req);
5998 }
5999
6000 static int io_async_cancel_prep(struct io_kiocb *req,
6001 const struct io_uring_sqe *sqe)
6002 {
6003 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6004 return -EINVAL;
6005 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6006 return -EINVAL;
6007 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
6008 return -EINVAL;
6009
6010 req->cancel.addr = READ_ONCE(sqe->addr);
6011 return 0;
6012 }
6013
6014 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
6015 {
6016 struct io_ring_ctx *ctx = req->ctx;
6017
6018 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
6019 return 0;
6020 }
6021
6022 static int io_rsrc_update_prep(struct io_kiocb *req,
6023 const struct io_uring_sqe *sqe)
6024 {
6025 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
6026 return -EINVAL;
6027 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6028 return -EINVAL;
6029 if (sqe->ioprio || sqe->rw_flags)
6030 return -EINVAL;
6031
6032 req->rsrc_update.offset = READ_ONCE(sqe->off);
6033 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6034 if (!req->rsrc_update.nr_args)
6035 return -EINVAL;
6036 req->rsrc_update.arg = READ_ONCE(sqe->addr);
6037 return 0;
6038 }
6039
6040 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
6041 {
6042 struct io_ring_ctx *ctx = req->ctx;
6043 struct io_uring_rsrc_update up;
6044 int ret;
6045
6046 if (issue_flags & IO_URING_F_NONBLOCK)
6047 return -EAGAIN;
6048
6049 up.offset = req->rsrc_update.offset;
6050 up.data = req->rsrc_update.arg;
6051
6052 mutex_lock(&ctx->uring_lock);
6053 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
6054 mutex_unlock(&ctx->uring_lock);
6055
6056 if (ret < 0)
6057 req_set_fail_links(req);
6058 __io_req_complete(req, issue_flags, ret, 0);
6059 return 0;
6060 }
6061
6062 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6063 {
6064 switch (req->opcode) {
6065 case IORING_OP_NOP:
6066 return 0;
6067 case IORING_OP_READV:
6068 case IORING_OP_READ_FIXED:
6069 case IORING_OP_READ:
6070 return io_read_prep(req, sqe);
6071 case IORING_OP_WRITEV:
6072 case IORING_OP_WRITE_FIXED:
6073 case IORING_OP_WRITE:
6074 return io_write_prep(req, sqe);
6075 case IORING_OP_POLL_ADD:
6076 return io_poll_add_prep(req, sqe);
6077 case IORING_OP_POLL_REMOVE:
6078 return io_poll_remove_prep(req, sqe);
6079 case IORING_OP_FSYNC:
6080 return io_fsync_prep(req, sqe);
6081 case IORING_OP_SYNC_FILE_RANGE:
6082 return io_sfr_prep(req, sqe);
6083 case IORING_OP_SENDMSG:
6084 case IORING_OP_SEND:
6085 return io_sendmsg_prep(req, sqe);
6086 case IORING_OP_RECVMSG:
6087 case IORING_OP_RECV:
6088 return io_recvmsg_prep(req, sqe);
6089 case IORING_OP_CONNECT:
6090 return io_connect_prep(req, sqe);
6091 case IORING_OP_TIMEOUT:
6092 return io_timeout_prep(req, sqe, false);
6093 case IORING_OP_TIMEOUT_REMOVE:
6094 return io_timeout_remove_prep(req, sqe);
6095 case IORING_OP_ASYNC_CANCEL:
6096 return io_async_cancel_prep(req, sqe);
6097 case IORING_OP_LINK_TIMEOUT:
6098 return io_timeout_prep(req, sqe, true);
6099 case IORING_OP_ACCEPT:
6100 return io_accept_prep(req, sqe);
6101 case IORING_OP_FALLOCATE:
6102 return io_fallocate_prep(req, sqe);
6103 case IORING_OP_OPENAT:
6104 return io_openat_prep(req, sqe);
6105 case IORING_OP_CLOSE:
6106 return io_close_prep(req, sqe);
6107 case IORING_OP_FILES_UPDATE:
6108 return io_rsrc_update_prep(req, sqe);
6109 case IORING_OP_STATX:
6110 return io_statx_prep(req, sqe);
6111 case IORING_OP_FADVISE:
6112 return io_fadvise_prep(req, sqe);
6113 case IORING_OP_MADVISE:
6114 return io_madvise_prep(req, sqe);
6115 case IORING_OP_OPENAT2:
6116 return io_openat2_prep(req, sqe);
6117 case IORING_OP_EPOLL_CTL:
6118 return io_epoll_ctl_prep(req, sqe);
6119 case IORING_OP_SPLICE:
6120 return io_splice_prep(req, sqe);
6121 case IORING_OP_PROVIDE_BUFFERS:
6122 return io_provide_buffers_prep(req, sqe);
6123 case IORING_OP_REMOVE_BUFFERS:
6124 return io_remove_buffers_prep(req, sqe);
6125 case IORING_OP_TEE:
6126 return io_tee_prep(req, sqe);
6127 case IORING_OP_SHUTDOWN:
6128 return io_shutdown_prep(req, sqe);
6129 case IORING_OP_RENAMEAT:
6130 return io_renameat_prep(req, sqe);
6131 case IORING_OP_UNLINKAT:
6132 return io_unlinkat_prep(req, sqe);
6133 }
6134
6135 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6136 req->opcode);
6137 return -EINVAL;
6138 }
6139
6140 static int io_req_prep_async(struct io_kiocb *req)
6141 {
6142 switch (req->opcode) {
6143 case IORING_OP_READV:
6144 case IORING_OP_READ_FIXED:
6145 case IORING_OP_READ:
6146 return io_rw_prep_async(req, READ);
6147 case IORING_OP_WRITEV:
6148 case IORING_OP_WRITE_FIXED:
6149 case IORING_OP_WRITE:
6150 return io_rw_prep_async(req, WRITE);
6151 case IORING_OP_SENDMSG:
6152 case IORING_OP_SEND:
6153 return io_sendmsg_prep_async(req);
6154 case IORING_OP_RECVMSG:
6155 case IORING_OP_RECV:
6156 return io_recvmsg_prep_async(req);
6157 case IORING_OP_CONNECT:
6158 return io_connect_prep_async(req);
6159 }
6160 return 0;
6161 }
6162
6163 static int io_req_defer_prep(struct io_kiocb *req)
6164 {
6165 if (!io_op_defs[req->opcode].needs_async_data)
6166 return 0;
6167 /* some opcodes init it during the initial prep */
6168 if (req->async_data)
6169 return 0;
6170 if (__io_alloc_async_data(req))
6171 return -EAGAIN;
6172 return io_req_prep_async(req);
6173 }
6174
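/*
 * Sequence number at which this request's link started: the number of
 * valid SQEs consumed so far minus the requests in the link. Used by the
 * drain/defer logic.
 */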
6175 static u32 io_get_sequence(struct io_kiocb *req)
6176 {
6177 struct io_kiocb *pos;
6178 struct io_ring_ctx *ctx = req->ctx;
6179 u32 total_submitted, nr_reqs = 0;
6180
6181 io_for_each_link(pos, req)
6182 nr_reqs++;
6183
6184 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
6185 return total_submitted - nr_reqs;
6186 }
6187
6188 static int io_req_defer(struct io_kiocb *req)
6189 {
6190 struct io_ring_ctx *ctx = req->ctx;
6191 struct io_defer_entry *de;
6192 int ret;
6193 u32 seq;
6194
6195 /* Still need to defer if there is a pending req in the defer list. */
6196 if (likely(list_empty_careful(&ctx->defer_list) &&
6197 !(req->flags & REQ_F_IO_DRAIN)))
6198 return 0;
6199
6200 seq = io_get_sequence(req);
6201 /* Still a chance to pass the sequence check */
6202 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
6203 return 0;
6204
6205 ret = io_req_defer_prep(req);
6206 if (ret)
6207 return ret;
6208 io_prep_async_link(req);
6209 de = kmalloc(sizeof(*de), GFP_KERNEL);
6210 if (!de)
6211 return -ENOMEM;
6212
6213 spin_lock_irq(&ctx->completion_lock);
6214 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
6215 spin_unlock_irq(&ctx->completion_lock);
6216 kfree(de);
6217 io_queue_async_work(req);
6218 return -EIOCBQUEUED;
6219 }
6220
6221 trace_io_uring_defer(ctx, req, req->user_data);
6222 de->req = req;
6223 de->seq = seq;
6224 list_add_tail(&de->list, &ctx->defer_list);
6225 spin_unlock_irq(&ctx->completion_lock);
6226 return -EIOCBQUEUED;
6227 }
6228
6229 static void __io_clean_op(struct io_kiocb *req)
6230 {
6231 if (req->flags & REQ_F_BUFFER_SELECTED) {
6232 switch (req->opcode) {
6233 case IORING_OP_READV:
6234 case IORING_OP_READ_FIXED:
6235 case IORING_OP_READ:
6236 kfree((void *)(unsigned long)req->rw.addr);
6237 break;
6238 case IORING_OP_RECVMSG:
6239 case IORING_OP_RECV:
6240 kfree(req->sr_msg.kbuf);
6241 break;
6242 }
6243 req->flags &= ~REQ_F_BUFFER_SELECTED;
6244 }
6245
6246 if (req->flags & REQ_F_NEED_CLEANUP) {
6247 switch (req->opcode) {
6248 case IORING_OP_READV:
6249 case IORING_OP_READ_FIXED:
6250 case IORING_OP_READ:
6251 case IORING_OP_WRITEV:
6252 case IORING_OP_WRITE_FIXED:
6253 case IORING_OP_WRITE: {
6254 struct io_async_rw *io = req->async_data;
6255 if (io->free_iovec)
6256 kfree(io->free_iovec);
6257 break;
6258 }
6259 case IORING_OP_RECVMSG:
6260 case IORING_OP_SENDMSG: {
6261 struct io_async_msghdr *io = req->async_data;
6262
6263 kfree(io->free_iov);
6264 break;
6265 }
6266 case IORING_OP_SPLICE:
6267 case IORING_OP_TEE:
6268 io_put_file(req, req->splice.file_in,
6269 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
6270 break;
6271 case IORING_OP_OPENAT:
6272 case IORING_OP_OPENAT2:
6273 if (req->open.filename)
6274 putname(req->open.filename);
6275 break;
6276 case IORING_OP_RENAMEAT:
6277 putname(req->rename.oldpath);
6278 putname(req->rename.newpath);
6279 break;
6280 case IORING_OP_UNLINKAT:
6281 putname(req->unlink.filename);
6282 break;
6283 }
6284 req->flags &= ~REQ_F_NEED_CLEANUP;
6285 }
6286 }
6287
6288 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
6289 {
6290 struct io_ring_ctx *ctx = req->ctx;
6291 int ret;
6292
6293 switch (req->opcode) {
6294 case IORING_OP_NOP:
6295 ret = io_nop(req, issue_flags);
6296 break;
6297 case IORING_OP_READV:
6298 case IORING_OP_READ_FIXED:
6299 case IORING_OP_READ:
6300 ret = io_read(req, issue_flags);
6301 break;
6302 case IORING_OP_WRITEV:
6303 case IORING_OP_WRITE_FIXED:
6304 case IORING_OP_WRITE:
6305 ret = io_write(req, issue_flags);
6306 break;
6307 case IORING_OP_FSYNC:
6308 ret = io_fsync(req, issue_flags);
6309 break;
6310 case IORING_OP_POLL_ADD:
6311 ret = io_poll_add(req, issue_flags);
6312 break;
6313 case IORING_OP_POLL_REMOVE:
6314 ret = io_poll_remove(req, issue_flags);
6315 break;
6316 case IORING_OP_SYNC_FILE_RANGE:
6317 ret = io_sync_file_range(req, issue_flags);
6318 break;
6319 case IORING_OP_SENDMSG:
6320 ret = io_sendmsg(req, issue_flags);
6321 break;
6322 case IORING_OP_SEND:
6323 ret = io_send(req, issue_flags);
6324 break;
6325 case IORING_OP_RECVMSG:
6326 ret = io_recvmsg(req, issue_flags);
6327 break;
6328 case IORING_OP_RECV:
6329 ret = io_recv(req, issue_flags);
6330 break;
6331 case IORING_OP_TIMEOUT:
6332 ret = io_timeout(req, issue_flags);
6333 break;
6334 case IORING_OP_TIMEOUT_REMOVE:
6335 ret = io_timeout_remove(req, issue_flags);
6336 break;
6337 case IORING_OP_ACCEPT:
6338 ret = io_accept(req, issue_flags);
6339 break;
6340 case IORING_OP_CONNECT:
6341 ret = io_connect(req, issue_flags);
6342 break;
6343 case IORING_OP_ASYNC_CANCEL:
6344 ret = io_async_cancel(req, issue_flags);
6345 break;
6346 case IORING_OP_FALLOCATE:
6347 ret = io_fallocate(req, issue_flags);
6348 break;
6349 case IORING_OP_OPENAT:
6350 ret = io_openat(req, issue_flags);
6351 break;
6352 case IORING_OP_CLOSE:
6353 ret = io_close(req, issue_flags);
6354 break;
6355 case IORING_OP_FILES_UPDATE:
6356 ret = io_files_update(req, issue_flags);
6357 break;
6358 case IORING_OP_STATX:
6359 ret = io_statx(req, issue_flags);
6360 break;
6361 case IORING_OP_FADVISE:
6362 ret = io_fadvise(req, issue_flags);
6363 break;
6364 case IORING_OP_MADVISE:
6365 ret = io_madvise(req, issue_flags);
6366 break;
6367 case IORING_OP_OPENAT2:
6368 ret = io_openat2(req, issue_flags);
6369 break;
6370 case IORING_OP_EPOLL_CTL:
6371 ret = io_epoll_ctl(req, issue_flags);
6372 break;
6373 case IORING_OP_SPLICE:
6374 ret = io_splice(req, issue_flags);
6375 break;
6376 case IORING_OP_PROVIDE_BUFFERS:
6377 ret = io_provide_buffers(req, issue_flags);
6378 break;
6379 case IORING_OP_REMOVE_BUFFERS:
6380 ret = io_remove_buffers(req, issue_flags);
6381 break;
6382 case IORING_OP_TEE:
6383 ret = io_tee(req, issue_flags);
6384 break;
6385 case IORING_OP_SHUTDOWN:
6386 ret = io_shutdown(req, issue_flags);
6387 break;
6388 case IORING_OP_RENAMEAT:
6389 ret = io_renameat(req, issue_flags);
6390 break;
6391 case IORING_OP_UNLINKAT:
6392 ret = io_unlinkat(req, issue_flags);
6393 break;
6394 default:
6395 ret = -EINVAL;
6396 break;
6397 }
6398
6399 if (ret)
6400 return ret;
6401
6402 /* If the op doesn't have a file, we're not polling for it */
6403 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
6404 const bool in_async = io_wq_current_is_worker();
6405
6406 /* workqueue context doesn't hold uring_lock, grab it now */
6407 if (in_async)
6408 mutex_lock(&ctx->uring_lock);
6409
6410 io_iopoll_req_issued(req, in_async);
6411
6412 if (in_async)
6413 mutex_unlock(&ctx->uring_lock);
6414 }
6415
6416 return 0;
6417 }
6418
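/*
 * io-wq worker callback: arm any linked timeout, then issue the request
 * synchronously, retrying on -EAGAIN. On failure, complete the request,
 * taking uring_lock for IOPOLL rings.
 */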
6419 static void io_wq_submit_work(struct io_wq_work *work)
6420 {
6421 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6422 struct io_kiocb *timeout;
6423 int ret = 0;
6424
6425 timeout = io_prep_linked_timeout(req);
6426 if (timeout)
6427 io_queue_linked_timeout(timeout);
6428
6429 if (work->flags & IO_WQ_WORK_CANCEL)
6430 ret = -ECANCELED;
6431
6432 if (!ret) {
6433 do {
6434 ret = io_issue_sqe(req, 0);
6435 /*
6436 * We can get EAGAIN for polled IO even though we're
6437 * forcing a sync submission from here, since we can't
6438 * wait for request slots on the block side.
6439 */
6440 if (ret != -EAGAIN)
6441 break;
6442 cond_resched();
6443 } while (1);
6444 }
6445
6446 if (ret) {
6447 struct io_ring_ctx *lock_ctx = NULL;
6448
6449 if (req->ctx->flags & IORING_SETUP_IOPOLL)
6450 lock_ctx = req->ctx;
6451
6452 /*
6453 * io_iopoll_complete() does not hold completion_lock while
6454 * completing polled IO, so for polled IO we cannot call
6455 * io_req_complete() directly; otherwise there may be concurrent
6456 * access to the cqring, defer_list, etc., which is not safe.
6457 * Since io_iopoll_complete() is always called under uring_lock,
6458 * take uring_lock here as well so that polled IO is completed
6459 * under the same lock.
6460 */
6461 if (lock_ctx)
6462 mutex_lock(&lock_ctx->uring_lock);
6463
6464 req_set_fail_links(req);
6465 io_req_complete(req, ret);
6466
6467 if (lock_ctx)
6468 mutex_unlock(&lock_ctx->uring_lock);
6469 }
6470 }
6471
6472 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6473 int index)
6474 {
6475 struct fixed_rsrc_table *table;
6476
6477 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
6478 return table->files[index & IORING_FILE_TABLE_MASK];
6479 }
6480
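/*
 * Resolve the file for a request, either from the registered (fixed) file
 * table or via a normal fd lookup. io_uring files are tracked as inflight.
 */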
6481 static struct file *io_file_get(struct io_submit_state *state,
6482 struct io_kiocb *req, int fd, bool fixed)
6483 {
6484 struct io_ring_ctx *ctx = req->ctx;
6485 struct file *file;
6486
6487 if (fixed) {
6488 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6489 return NULL;
6490 fd = array_index_nospec(fd, ctx->nr_user_files);
6491 file = io_file_from_index(ctx, fd);
6492 io_set_resource_node(req);
6493 } else {
6494 trace_io_uring_file_get(ctx, fd);
6495 file = __io_file_get(state, fd);
6496 }
6497
6498 if (file && unlikely(file->f_op == &io_uring_fops))
6499 io_req_track_inflight(req);
6500 return file;
6501 }
6502
6503 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6504 {
6505 struct io_timeout_data *data = container_of(timer,
6506 struct io_timeout_data, timer);
6507 struct io_kiocb *prev, *req = data->req;
6508 struct io_ring_ctx *ctx = req->ctx;
6509 unsigned long flags;
6510
6511 spin_lock_irqsave(&ctx->completion_lock, flags);
6512 prev = req->timeout.head;
6513 req->timeout.head = NULL;
6514
6515 /*
6516 * We don't expect the list to be empty; that will only happen if we
6517 * race with the completion of the linked work.
6518 */
6519 if (prev && refcount_inc_not_zero(&prev->refs))
6520 io_remove_next_linked(prev);
6521 else
6522 prev = NULL;
6523 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6524
6525 if (prev) {
6526 req_set_fail_links(prev);
6527 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
6528 io_put_req_deferred(prev, 1);
6529 } else {
6530 io_req_complete_post(req, -ETIME, 0);
6531 io_put_req_deferred(req, 1);
6532 }
6533 return HRTIMER_NORESTART;
6534 }
6535
6536 static void __io_queue_linked_timeout(struct io_kiocb *req)
6537 {
6538 /*
6539 * If the back reference is NULL, then our linked request finished
6540 * before we got a chance to set up the timer.
6541 */
6542 if (req->timeout.head) {
6543 struct io_timeout_data *data = req->async_data;
6544
6545 data->timer.function = io_link_timeout_fn;
6546 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6547 data->mode);
6548 }
6549 }
6550
6551 static void io_queue_linked_timeout(struct io_kiocb *req)
6552 {
6553 struct io_ring_ctx *ctx = req->ctx;
6554
6555 spin_lock_irq(&ctx->completion_lock);
6556 __io_queue_linked_timeout(req);
6557 spin_unlock_irq(&ctx->completion_lock);
6558
6559 /* drop submission reference */
6560 io_put_req(req);
6561 }
6562
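/*
 * If the next request in the link is IORING_OP_LINK_TIMEOUT, mark it as
 * this request's linked timeout so it can be armed when issuing starts.
 */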
6563 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
6564 {
6565 struct io_kiocb *nxt = req->link;
6566
6567 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6568 nxt->opcode != IORING_OP_LINK_TIMEOUT)
6569 return NULL;
6570
6571 nxt->timeout.head = req;
6572 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
6573 req->flags |= REQ_F_LINK_TIMEOUT;
6574 return nxt;
6575 }
6576
6577 static void __io_queue_sqe(struct io_kiocb *req)
6578 {
6579 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
6580 const struct cred *old_creds = NULL;
6581 int ret;
6582
6583 if ((req->flags & REQ_F_WORK_INITIALIZED) &&
6584 (req->work.flags & IO_WQ_WORK_CREDS) &&
6585 req->work.identity->creds != current_cred())
6586 old_creds = override_creds(req->work.identity->creds);
6587
6588 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
6589
6590 if (old_creds)
6591 revert_creds(old_creds);
6592
6593 /*
6594 * We async punt it if the file wasn't marked NOWAIT, or if the file
6595 * doesn't support non-blocking read/write attempts
6596 */
6597 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
6598 if (!io_arm_poll_handler(req)) {
6599 /*
6600 * Queued up for async execution, worker will release
6601 * submit reference when the iocb is actually submitted.
6602 */
6603 io_queue_async_work(req);
6604 }
6605 } else if (likely(!ret)) {
6606 /* drop submission reference */
6607 if (req->flags & REQ_F_COMPLETE_INLINE) {
6608 struct io_ring_ctx *ctx = req->ctx;
6609 struct io_comp_state *cs = &ctx->submit_state.comp;
6610
6611 cs->reqs[cs->nr++] = req;
6612 if (cs->nr == ARRAY_SIZE(cs->reqs))
6613 io_submit_flush_completions(cs, ctx);
6614 } else {
6615 io_put_req(req);
6616 }
6617 } else {
6618 req_set_fail_links(req);
6619 io_put_req(req);
6620 io_req_complete(req, ret);
6621 }
6622 if (linked_timeout)
6623 io_queue_linked_timeout(linked_timeout);
6624 }
6625
6626 static void io_queue_sqe(struct io_kiocb *req)
6627 {
6628 int ret;
6629
6630 ret = io_req_defer(req);
6631 if (ret) {
6632 if (ret != -EIOCBQUEUED) {
6633 fail_req:
6634 req_set_fail_links(req);
6635 io_put_req(req);
6636 io_req_complete(req, ret);
6637 }
6638 } else if (req->flags & REQ_F_FORCE_ASYNC) {
6639 ret = io_req_defer_prep(req);
6640 if (unlikely(ret))
6641 goto fail_req;
6642 io_queue_async_work(req);
6643 } else {
6644 __io_queue_sqe(req);
6645 }
6646 }
6647
6648 static inline void io_queue_link_head(struct io_kiocb *req)
6649 {
6650 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
6651 io_put_req(req);
6652 io_req_complete(req, -ECANCELED);
6653 } else
6654 io_queue_sqe(req);
6655 }
6656
6657 /*
6658 * Check SQE restrictions (opcode and flags).
6659 *
6660 * Returns 'true' if SQE is allowed, 'false' otherwise.
6661 */
6662 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6663 struct io_kiocb *req,
6664 unsigned int sqe_flags)
6665 {
6666 if (!ctx->restricted)
6667 return true;
6668
6669 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6670 return false;
6671
6672 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6673 ctx->restrictions.sqe_flags_required)
6674 return false;
6675
6676 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6677 ctx->restrictions.sqe_flags_required))
6678 return false;
6679
6680 return true;
6681 }
6682
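/*
 * Initialize a freshly allocated request from an SQE: copy opcode, flags
 * and user_data, validate flags/opcode/restrictions, apply any requested
 * personality, start plugging when useful, and resolve the file if the
 * opcode needs one.
 */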
6683 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6684 const struct io_uring_sqe *sqe)
6685 {
6686 struct io_submit_state *state;
6687 unsigned int sqe_flags;
6688 int id, ret = 0;
6689
6690 req->opcode = READ_ONCE(sqe->opcode);
6691 /* same numerical values as the corresponding REQ_F_*, safe to copy */
6692 req->flags = sqe_flags = READ_ONCE(sqe->flags);
6693 req->user_data = READ_ONCE(sqe->user_data);
6694 req->async_data = NULL;
6695 req->file = NULL;
6696 req->ctx = ctx;
6697 req->link = NULL;
6698 req->fixed_rsrc_refs = NULL;
6699 /* one is dropped after submission, the other at completion */
6700 refcount_set(&req->refs, 2);
6701 req->task = current;
6702 req->result = 0;
6703
6704 /* enforce forwards compatibility on users */
6705 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6706 return -EINVAL;
6707
6708 if (unlikely(req->opcode >= IORING_OP_LAST))
6709 return -EINVAL;
6710
6711 if (unlikely(io_sq_thread_acquire_mm_files(ctx, req)))
6712 return -EFAULT;
6713
6714 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6715 return -EACCES;
6716
6717 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6718 !io_op_defs[req->opcode].buffer_select)
6719 return -EOPNOTSUPP;
6720
6721 id = READ_ONCE(sqe->personality);
6722 if (id) {
6723 struct io_identity *iod;
6724
6725 iod = idr_find(&ctx->personality_idr, id);
6726 if (unlikely(!iod))
6727 return -EINVAL;
6728 refcount_inc(&iod->count);
6729
6730 __io_req_init_async(req);
6731 get_cred(iod->creds);
6732 req->work.identity = iod;
6733 req->work.flags |= IO_WQ_WORK_CREDS;
6734 }
6735
6736 state = &ctx->submit_state;
6737
6738 /*
6739 * Plug now if we have more than 1 IO left after this, and the target
6740 * is potentially a read/write to block based storage.
6741 */
6742 if (!state->plug_started && state->ios_left > 1 &&
6743 io_op_defs[req->opcode].plug) {
6744 blk_start_plug(&state->plug);
6745 state->plug_started = true;
6746 }
6747
6748 if (io_op_defs[req->opcode].needs_file) {
6749 bool fixed = req->flags & REQ_F_FIXED_FILE;
6750
6751 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
6752 if (unlikely(!req->file))
6753 ret = -EBADF;
6754 }
6755
6756 state->ios_left--;
6757 return ret;
6758 }
6759
6760 static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
6761 const struct io_uring_sqe *sqe)
6762 {
6763 struct io_submit_link *link = &ctx->submit_state.link;
6764 int ret;
6765
6766 ret = io_init_req(ctx, req, sqe);
6767 if (unlikely(ret)) {
6768 fail_req:
6769 io_put_req(req);
6770 io_req_complete(req, ret);
6771 /* fail even hard links since we don't submit */
6772 if (link->head)
6773 link->head->flags |= REQ_F_FAIL_LINK;
6774 return ret;
6775 }
6776 ret = io_req_prep(req, sqe);
6777 if (unlikely(ret))
6778 goto fail_req;
6779
6780 /* don't need @sqe from now on */
6781 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6782 true, ctx->flags & IORING_SETUP_SQPOLL);
6783
6784 /*
6785 * If we already have a head request, queue this one for async
6786 * submittal once the head completes. If we don't have a head but
6787 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6788 * submitted sync once the chain is complete. If none of those
6789 * conditions are true (normal request), then just queue it.
6790 */
6791 if (link->head) {
6792 struct io_kiocb *head = link->head;
6793
6794 /*
6795 * A link executes sequentially, so draining both sides of the
6796 * link also fulfils IOSQE_IO_DRAIN semantics for all requests
6797 * in the link: it drains the head and the request that follows
6798 * the link. The latter is handled via the drain_next flag to
6799 * persist the effect across calls.
6800 */
6801 if (req->flags & REQ_F_IO_DRAIN) {
6802 head->flags |= REQ_F_IO_DRAIN;
6803 ctx->drain_next = 1;
6804 }
6805 ret = io_req_defer_prep(req);
6806 if (unlikely(ret))
6807 goto fail_req;
6808 trace_io_uring_link(ctx, req, head);
6809 link->last->link = req;
6810 link->last = req;
6811
6812 /* last request of a link, enqueue the link */
6813 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
6814 io_queue_link_head(head);
6815 link->head = NULL;
6816 }
6817 } else {
6818 if (unlikely(ctx->drain_next)) {
6819 req->flags |= REQ_F_IO_DRAIN;
6820 ctx->drain_next = 0;
6821 }
6822 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
6823 link->head = req;
6824 link->last = req;
6825 } else {
6826 io_queue_sqe(req);
6827 }
6828 }
6829
6830 return 0;
6831 }
6832
6833 /*
6834 * Batched submission is done; ensure local IO is flushed out.
6835 */
6836 static void io_submit_state_end(struct io_submit_state *state,
6837 struct io_ring_ctx *ctx)
6838 {
6839 if (state->link.head)
6840 io_queue_link_head(state->link.head);
6841 if (state->comp.nr)
6842 io_submit_flush_completions(&state->comp, ctx);
6843 if (state->plug_started)
6844 blk_finish_plug(&state->plug);
6845 io_state_file_put(state);
6846 }
6847
6848 /*
6849 * Start submission side cache.
6850 */
6851 static void io_submit_state_start(struct io_submit_state *state,
6852 unsigned int max_ios)
6853 {
6854 state->plug_started = false;
6855 state->ios_left = max_ios;
6856 /* set only head, no need to init link_last in advance */
6857 state->link.head = NULL;
6858 }
6859
6860 static void io_commit_sqring(struct io_ring_ctx *ctx)
6861 {
6862 struct io_rings *rings = ctx->rings;
6863
6864 /*
6865 * Ensure any loads from the SQEs are done at this point,
6866 * since once we write the new head, the application could
6867 * write new data to them.
6868 */
6869 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
6870 }
6871
6872 /*
6873 * Fetch an sqe, if one is available. Note that the returned sqe points to memory
6874 * that is mapped by userspace. This means that care needs to be taken to
6875 * ensure that reads are stable, as we cannot rely on userspace always
6876 * being a good citizen. If members of the sqe are validated and then later
6877 * used, it's important that those reads are done through READ_ONCE() to
6878 * prevent a re-load down the line.
6879 */
6880 static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
6881 {
6882 u32 *sq_array = ctx->sq_array;
6883 unsigned head;
6884
6885 /*
6886 * The cached sq head (or cq tail) serves two purposes:
6887 *
6888 * 1) allows us to batch the cost of updating the user visible
6889 * head.
6890 * 2) allows the kernel side to track the head on its own, even
6891 * though the application is the one updating it.
6892 */
6893 head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
6894 if (likely(head < ctx->sq_entries))
6895 return &ctx->sq_sqes[head];
6896
6897 /* drop invalid entries */
6898 ctx->cached_sq_dropped++;
6899 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
6900 return NULL;
6901 }
6902
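/*
 * Consume up to @nr SQEs from the SQ ring and submit them. Returns the
 * number of SQEs consumed, or a negative error if nothing was submitted.
 */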
6903 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
6904 {
6905 int submitted = 0;
6906
6907 /* if we have a backlog and couldn't flush it all, return BUSY */
6908 if (test_bit(0, &ctx->sq_check_overflow)) {
6909 if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
6910 return -EBUSY;
6911 }
6912
6913 /* make sure SQ entry isn't read before tail */
6914 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
6915
6916 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6917 return -EAGAIN;
6918
6919 percpu_counter_add(&current->io_uring->inflight, nr);
6920 refcount_add(nr, &current->usage);
6921 io_submit_state_start(&ctx->submit_state, nr);
6922
6923 while (submitted < nr) {
6924 const struct io_uring_sqe *sqe;
6925 struct io_kiocb *req;
6926
6927 req = io_alloc_req(ctx);
6928 if (unlikely(!req)) {
6929 if (!submitted)
6930 submitted = -EAGAIN;
6931 break;
6932 }
6933 sqe = io_get_sqe(ctx);
6934 if (unlikely(!sqe)) {
6935 kmem_cache_free(req_cachep, req);
6936 break;
6937 }
6938 /* will complete beyond this point, count as submitted */
6939 submitted++;
6940 if (io_submit_sqe(ctx, req, sqe))
6941 break;
6942 }
6943
6944 if (unlikely(submitted != nr)) {
6945 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
6946 struct io_uring_task *tctx = current->io_uring;
6947 int unused = nr - ref_used;
6948
6949 percpu_ref_put_many(&ctx->refs, unused);
6950 percpu_counter_sub(&tctx->inflight, unused);
6951 put_task_struct_many(current, unused);
6952 }
6953
6954 io_submit_state_end(&ctx->submit_state, ctx);
6955 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6956 io_commit_sqring(ctx);
6957
6958 return submitted;
6959 }
6960
6961 static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6962 {
6963 /* Tell userspace we may need a wakeup call */
6964 spin_lock_irq(&ctx->completion_lock);
6965 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6966 spin_unlock_irq(&ctx->completion_lock);
6967 }
6968
6969 static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6970 {
6971 spin_lock_irq(&ctx->completion_lock);
6972 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6973 spin_unlock_irq(&ctx->completion_lock);
6974 }
6975
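/*
 * One SQPOLL pass over a single ring: run pending iopoll completions and
 * submit new SQEs, capping the batch for fairness when the thread serves
 * multiple rings.
 */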
6976 static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
6977 {
6978 unsigned int to_submit;
6979 int ret = 0;
6980
6981 to_submit = io_sqring_entries(ctx);
6982 /* if we're handling multiple rings, cap submit size for fairness */
6983 if (cap_entries && to_submit > 8)
6984 to_submit = 8;
6985
6986 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6987 unsigned nr_events = 0;
6988
6989 mutex_lock(&ctx->uring_lock);
6990 if (!list_empty(&ctx->iopoll_list))
6991 io_do_iopoll(ctx, &nr_events, 0);
6992
6993 if (to_submit && !ctx->sqo_dead &&
6994 likely(!percpu_ref_is_dying(&ctx->refs)))
6995 ret = io_submit_sqes(ctx, to_submit);
6996 mutex_unlock(&ctx->uring_lock);
6997 }
6998
6999 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
7000 wake_up(&ctx->sqo_sq_wait);
7001
7002 return ret;
7003 }
7004
7005 static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
7006 {
7007 struct io_ring_ctx *ctx;
7008 unsigned sq_thread_idle = 0;
7009
7010 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7011 if (sq_thread_idle < ctx->sq_thread_idle)
7012 sq_thread_idle = ctx->sq_thread_idle;
7013 }
7014
7015 sqd->sq_thread_idle = sq_thread_idle;
7016 }
7017
7018 static void io_sqd_init_new(struct io_sq_data *sqd)
7019 {
7020 struct io_ring_ctx *ctx;
7021
7022 while (!list_empty(&sqd->ctx_new_list)) {
7023 ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
7024 list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
7025 complete(&ctx->sq_thread_comp);
7026 }
7027
7028 io_sqd_update_thread_idle(sqd);
7029 }
7030
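/*
 * Main loop of the SQPOLL kernel thread: drive submission for every ctx
 * attached to this io_sq_data, spin while there is work, and sleep with
 * IORING_SQ_NEED_WAKEUP set once the idle timeout expires.
 */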
7031 static int io_sq_thread(void *data)
7032 {
7033 struct cgroup_subsys_state *cur_css = NULL;
7034 struct files_struct *old_files = current->files;
7035 struct nsproxy *old_nsproxy = current->nsproxy;
7036 const struct cred *old_cred = NULL;
7037 struct io_sq_data *sqd = data;
7038 struct io_ring_ctx *ctx;
7039 unsigned long timeout = 0;
7040 DEFINE_WAIT(wait);
7041
7042 task_lock(current);
7043 current->files = NULL;
7044 current->nsproxy = NULL;
7045 task_unlock(current);
7046
7047 while (!kthread_should_stop()) {
7048 int ret;
7049 bool cap_entries, sqt_spin, needs_sched;
7050
7051 /*
7052 * Any changes to the sqd lists are synchronized through the
7053 * kthread parking. This synchronizes the thread with its users;
7054 * the users themselves are synchronized on the sqd->ctx_lock.
7055 */
7056 if (kthread_should_park()) {
7057 kthread_parkme();
7058 /*
7059 * When the sq thread is unparked, the previous park operation
7060 * may have come from io_put_sq_data(), which means the thread is
7061 * about to be stopped, so check for that here.
7062 */
7063 if (kthread_should_stop())
7064 break;
7065 }
7066
7067 if (unlikely(!list_empty(&sqd->ctx_new_list))) {
7068 io_sqd_init_new(sqd);
7069 timeout = jiffies + sqd->sq_thread_idle;
7070 }
7071
7072 sqt_spin = false;
7073 cap_entries = !list_is_singular(&sqd->ctx_list);
7074 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7075 if (current->cred != ctx->creds) {
7076 if (old_cred)
7077 revert_creds(old_cred);
7078 old_cred = override_creds(ctx->creds);
7079 }
7080 io_sq_thread_associate_blkcg(ctx, &cur_css);
7081 #ifdef CONFIG_AUDIT
7082 current->loginuid = ctx->loginuid;
7083 current->sessionid = ctx->sessionid;
7084 #endif
7085
7086 ret = __io_sq_thread(ctx, cap_entries);
7087 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
7088 sqt_spin = true;
7089
7090 io_sq_thread_drop_mm_files();
7091 }
7092
7093 if (sqt_spin || !time_after(jiffies, timeout)) {
7094 io_run_task_work();
7095 io_sq_thread_drop_mm_files();
7096 cond_resched();
7097 if (sqt_spin)
7098 timeout = jiffies + sqd->sq_thread_idle;
7099 continue;
7100 }
7101
7102 needs_sched = true;
7103 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
7104 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7105 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
7106 !list_empty_careful(&ctx->iopoll_list)) {
7107 needs_sched = false;
7108 break;
7109 }
7110 if (io_sqring_entries(ctx)) {
7111 needs_sched = false;
7112 break;
7113 }
7114 }
7115
7116 if (needs_sched && !kthread_should_park()) {
7117 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7118 io_ring_set_wakeup_flag(ctx);
7119
7120 schedule();
7121 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7122 io_ring_clear_wakeup_flag(ctx);
7123 }
7124
7125 finish_wait(&sqd->wait, &wait);
7126 timeout = jiffies + sqd->sq_thread_idle;
7127 }
7128
7129 io_run_task_work();
7130 io_sq_thread_drop_mm_files();
7131
7132 if (cur_css)
7133 io_sq_thread_unassociate_blkcg();
7134 if (old_cred)
7135 revert_creds(old_cred);
7136
7137 task_lock(current);
7138 current->files = old_files;
7139 current->nsproxy = old_nsproxy;
7140 task_unlock(current);
7141
7142 kthread_parkme();
7143
7144 return 0;
7145 }
7146
7147 struct io_wait_queue {
7148 struct wait_queue_entry wq;
7149 struct io_ring_ctx *ctx;
7150 unsigned to_wait;
7151 unsigned nr_timeouts;
7152 };
7153
7154 static inline bool io_should_wake(struct io_wait_queue *iowq)
7155 {
7156 struct io_ring_ctx *ctx = iowq->ctx;
7157
7158 /*
7159 * Wake up if we have enough events, or if a timeout occurred since we
7160 * started waiting. For timeouts, we always want to return to userspace,
7161 * regardless of event count.
7162 */
7163 return io_cqring_events(ctx) >= iowq->to_wait ||
7164 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
7165 }
7166
7167 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7168 int wake_flags, void *key)
7169 {
7170 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7171 wq);
7172
7173 /*
7174 * Cannot safely flush overflowed CQEs from here; ensure we wake up
7175 * the task, and the next invocation will do it.
7176 */
7177 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
7178 return autoremove_wake_function(curr, mode, wake_flags, key);
7179 return -1;
7180 }
7181
7182 static int io_run_task_work_sig(void)
7183 {
7184 if (io_run_task_work())
7185 return 1;
7186 if (!signal_pending(current))
7187 return 0;
7188 if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
7189 return -ERESTARTSYS;
7190 return -EINTR;
7191 }
7192
7193 /* when this returns > 0, the caller should retry */
7194 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7195 struct io_wait_queue *iowq,
7196 signed long *timeout)
7197 {
7198 int ret;
7199
7200 /* make sure we run task_work before checking for signals */
7201 ret = io_run_task_work_sig();
7202 if (ret || io_should_wake(iowq))
7203 return ret;
7204 /* let the caller flush overflows, retry */
7205 if (test_bit(0, &ctx->cq_check_overflow))
7206 return 1;
7207
7208 *timeout = schedule_timeout(*timeout);
7209 return !*timeout ? -ETIME : 1;
7210 }
7211
7212 /*
7213 * Wait until events become available, if we don't already have some. The
7214 * application must reap them itself, as they reside on the shared cq ring.
7215 */
7216 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
7217 const sigset_t __user *sig, size_t sigsz,
7218 struct __kernel_timespec __user *uts)
7219 {
7220 struct io_wait_queue iowq = {
7221 .wq = {
7222 .private = current,
7223 .func = io_wake_function,
7224 .entry = LIST_HEAD_INIT(iowq.wq.entry),
7225 },
7226 .ctx = ctx,
7227 .to_wait = min_events,
7228 };
7229 struct io_rings *rings = ctx->rings;
7230 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7231 int ret;
7232
7233 do {
7234 io_cqring_overflow_flush(ctx, false, NULL, NULL);
7235 if (io_cqring_events(ctx) >= min_events)
7236 return 0;
7237 if (!io_run_task_work())
7238 break;
7239 } while (1);
7240
7241 if (sig) {
7242 #ifdef CONFIG_COMPAT
7243 if (in_compat_syscall())
7244 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
7245 sigsz);
7246 else
7247 #endif
7248 ret = set_user_sigmask(sig, sigsz);
7249
7250 if (ret)
7251 return ret;
7252 }
7253
7254 if (uts) {
7255 struct timespec64 ts;
7256
7257 if (get_timespec64(&ts, uts))
7258 return -EFAULT;
7259 timeout = timespec64_to_jiffies(&ts);
7260 }
7261
7262 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
7263 trace_io_uring_cqring_wait(ctx, min_events);
7264 do {
7265 io_cqring_overflow_flush(ctx, false, NULL, NULL);
7266 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7267 TASK_INTERRUPTIBLE);
7268 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
7269 finish_wait(&ctx->wait, &iowq.wq);
7270 } while (ret > 0);
7271
7272 restore_saved_sigmask_unless(ret == -EINTR);
7273
7274 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
7275 }
7276
7277 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7278 {
7279 #if defined(CONFIG_UNIX)
7280 if (ctx->ring_sock) {
7281 struct sock *sock = ctx->ring_sock->sk;
7282 struct sk_buff *skb;
7283
7284 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7285 kfree_skb(skb);
7286 }
7287 #else
7288 int i;
7289
7290 for (i = 0; i < ctx->nr_user_files; i++) {
7291 struct file *file;
7292
7293 file = io_file_from_index(ctx, i);
7294 if (file)
7295 fput(file);
7296 }
7297 #endif
7298 }
7299
7300 static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
7301 {
7302 struct fixed_rsrc_data *data;
7303
7304 data = container_of(ref, struct fixed_rsrc_data, refs);
7305 complete(&data->done);
7306 }
7307
7308 static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
7309 {
7310 spin_lock_bh(&ctx->rsrc_ref_lock);
7311 }
7312
7313 static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
7314 {
7315 spin_unlock_bh(&ctx->rsrc_ref_lock);
7316 }
7317
7318 static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
7319 struct fixed_rsrc_data *rsrc_data,
7320 struct fixed_rsrc_ref_node *ref_node)
7321 {
7322 io_rsrc_ref_lock(ctx);
7323 rsrc_data->node = ref_node;
7324 list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
7325 io_rsrc_ref_unlock(ctx);
7326 percpu_ref_get(&rsrc_data->refs);
7327 }
7328
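/*
 * Kill the current rsrc ref node and wait for all outstanding references
 * to drop. If interrupted by a signal, resurrect the refs and install the
 * backup node so the table remains usable.
 */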
7329 static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
7330 struct io_ring_ctx *ctx,
7331 struct fixed_rsrc_ref_node *backup_node)
7332 {
7333 struct fixed_rsrc_ref_node *ref_node;
7334 int ret;
7335
7336 io_rsrc_ref_lock(ctx);
7337 ref_node = data->node;
7338 io_rsrc_ref_unlock(ctx);
7339 if (ref_node)
7340 percpu_ref_kill(&ref_node->refs);
7341
7342 percpu_ref_kill(&data->refs);
7343
7344 /* wait for all refs nodes to complete */
7345 flush_delayed_work(&ctx->rsrc_put_work);
7346 do {
7347 ret = wait_for_completion_interruptible(&data->done);
7348 if (!ret)
7349 break;
7350 ret = io_run_task_work_sig();
7351 if (ret < 0) {
7352 percpu_ref_resurrect(&data->refs);
7353 reinit_completion(&data->done);
7354 io_sqe_rsrc_set_node(ctx, data, backup_node);
7355 return ret;
7356 }
7357 } while (1);
7358
7359 destroy_fixed_rsrc_ref_node(backup_node);
7360 return 0;
7361 }
7362
7363 static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
7364 {
7365 struct fixed_rsrc_data *data;
7366
7367 data = kzalloc(sizeof(*data), GFP_KERNEL);
7368 if (!data)
7369 return NULL;
7370
7371 if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
7372 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
7373 kfree(data);
7374 return NULL;
7375 }
7376 data->ctx = ctx;
7377 init_completion(&data->done);
7378 return data;
7379 }
7380
7381 static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
7382 {
7383 percpu_ref_exit(&data->refs);
7384 kfree(data->table);
7385 kfree(data);
7386 }
7387
7388 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7389 {
7390 struct fixed_rsrc_data *data = ctx->file_data;
7391 struct fixed_rsrc_ref_node *backup_node;
7392 unsigned nr_tables, i;
7393 int ret;
7394
7395 if (!data)
7396 return -ENXIO;
7397 backup_node = alloc_fixed_rsrc_ref_node(ctx);
7398 if (!backup_node)
7399 return -ENOMEM;
7400 init_fixed_file_ref_node(ctx, backup_node);
7401
7402 ret = io_rsrc_ref_quiesce(data, ctx, backup_node);
7403 if (ret)
7404 return ret;
7405
7406 __io_sqe_files_unregister(ctx);
7407 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7408 for (i = 0; i < nr_tables; i++)
7409 kfree(data->table[i].files);
7410 free_fixed_rsrc_data(data);
7411 ctx->file_data = NULL;
7412 ctx->nr_user_files = 0;
7413 return 0;
7414 }
7415
7416 static void io_put_sq_data(struct io_sq_data *sqd)
7417 {
7418 if (refcount_dec_and_test(&sqd->refs)) {
7419 /*
7420 * The park is a bit of a workaround; without it we get
7421 * warning spew on shutdown with SQPOLL set and affinity
7422 * set to a single CPU.
7423 */
7424 if (sqd->thread) {
7425 kthread_park(sqd->thread);
7426 kthread_stop(sqd->thread);
7427 }
7428
7429 kfree(sqd);
7430 }
7431 }
7432
7433 static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7434 {
7435 struct io_ring_ctx *ctx_attach;
7436 struct io_sq_data *sqd;
7437 struct fd f;
7438
7439 f = fdget(p->wq_fd);
7440 if (!f.file)
7441 return ERR_PTR(-ENXIO);
7442 if (f.file->f_op != &io_uring_fops) {
7443 fdput(f);
7444 return ERR_PTR(-EINVAL);
7445 }
7446
7447 ctx_attach = f.file->private_data;
7448 sqd = ctx_attach->sq_data;
7449 if (!sqd) {
7450 fdput(f);
7451 return ERR_PTR(-EINVAL);
7452 }
7453
7454 refcount_inc(&sqd->refs);
7455 fdput(f);
7456 return sqd;
7457 }
7458
7459 static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7460 {
7461 struct io_sq_data *sqd;
7462
7463 if (p->flags & IORING_SETUP_ATTACH_WQ)
7464 return io_attach_sq_data(p);
7465
7466 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7467 if (!sqd)
7468 return ERR_PTR(-ENOMEM);
7469
7470 refcount_set(&sqd->refs, 1);
7471 INIT_LIST_HEAD(&sqd->ctx_list);
7472 INIT_LIST_HEAD(&sqd->ctx_new_list);
7473 mutex_init(&sqd->ctx_lock);
7474 mutex_init(&sqd->lock);
7475 init_waitqueue_head(&sqd->wait);
7476 return sqd;
7477 }
7478
7479 static void io_sq_thread_unpark(struct io_sq_data *sqd)
7480 __releases(&sqd->lock)
7481 {
7482 if (!sqd->thread)
7483 return;
7484 kthread_unpark(sqd->thread);
7485 mutex_unlock(&sqd->lock);
7486 }
7487
7488 static void io_sq_thread_park(struct io_sq_data *sqd)
7489 __acquires(&sqd->lock)
7490 {
7491 if (!sqd->thread)
7492 return;
7493 mutex_lock(&sqd->lock);
7494 kthread_park(sqd->thread);
7495 }
7496
7497 static void io_sq_thread_stop(struct io_ring_ctx *ctx)
7498 {
7499 struct io_sq_data *sqd = ctx->sq_data;
7500
7501 if (sqd) {
7502 if (sqd->thread) {
7503 /*
7504 * We may arrive here from the error branch in
7505 * io_sq_offload_create() where the kthread is created
7506 * without being woken up, so wake it up now to make
7507 * sure the wait completes.
7508 */
7509 wake_up_process(sqd->thread);
7510 wait_for_completion(&ctx->sq_thread_comp);
7511
7512 io_sq_thread_park(sqd);
7513 }
7514
7515 mutex_lock(&sqd->ctx_lock);
7516 list_del(&ctx->sqd_list);
7517 io_sqd_update_thread_idle(sqd);
7518 mutex_unlock(&sqd->ctx_lock);
7519
7520 if (sqd->thread)
7521 io_sq_thread_unpark(sqd);
7522
7523 io_put_sq_data(sqd);
7524 ctx->sq_data = NULL;
7525 }
7526 }
7527
7528 static void io_finish_async(struct io_ring_ctx *ctx)
7529 {
7530 io_sq_thread_stop(ctx);
7531
7532 if (ctx->io_wq) {
7533 io_wq_destroy(ctx->io_wq);
7534 ctx->io_wq = NULL;
7535 }
7536 }
7537
7538 #if defined(CONFIG_UNIX)
7539 /*
7540 * Ensure the UNIX gc is aware of our file set, so we are certain that
7541 * the io_uring can be safely unregistered on process exit, even if we have
7542 * loops in the file referencing.
7543 */
7544 static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7545 {
7546 struct sock *sk = ctx->ring_sock->sk;
7547 struct scm_fp_list *fpl;
7548 struct sk_buff *skb;
7549 int i, nr_files;
7550
7551 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7552 if (!fpl)
7553 return -ENOMEM;
7554
7555 skb = alloc_skb(0, GFP_KERNEL);
7556 if (!skb) {
7557 kfree(fpl);
7558 return -ENOMEM;
7559 }
7560
7561 skb->sk = sk;
7562
7563 nr_files = 0;
7564 fpl->user = get_uid(ctx->user);
7565 for (i = 0; i < nr; i++) {
7566 struct file *file = io_file_from_index(ctx, i + offset);
7567
7568 if (!file)
7569 continue;
7570 fpl->fp[nr_files] = get_file(file);
7571 unix_inflight(fpl->user, fpl->fp[nr_files]);
7572 nr_files++;
7573 }
7574
7575 if (nr_files) {
7576 fpl->max = SCM_MAX_FD;
7577 fpl->count = nr_files;
7578 UNIXCB(skb).fp = fpl;
7579 skb->destructor = unix_destruct_scm;
7580 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7581 skb_queue_head(&sk->sk_receive_queue, skb);
7582
7583 for (i = 0; i < nr_files; i++)
7584 fput(fpl->fp[i]);
7585 } else {
7586 kfree_skb(skb);
7587 kfree(fpl);
7588 }
7589
7590 return 0;
7591 }
7592
7593 /*
7594 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7595 * causes regular reference counting to break down. We rely on the UNIX
7596 * garbage collection to take care of this problem for us.
7597 */
7598 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7599 {
7600 unsigned left, total;
7601 int ret = 0;
7602
7603 total = 0;
7604 left = ctx->nr_user_files;
7605 while (left) {
7606 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
7607
7608 ret = __io_sqe_files_scm(ctx, this_files, total);
7609 if (ret)
7610 break;
7611 left -= this_files;
7612 total += this_files;
7613 }
7614
7615 if (!ret)
7616 return 0;
7617
7618 while (total < ctx->nr_user_files) {
7619 struct file *file = io_file_from_index(ctx, total);
7620
7621 if (file)
7622 fput(file);
7623 total++;
7624 }
7625
7626 return ret;
7627 }
7628 #else
7629 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7630 {
7631 return 0;
7632 }
7633 #endif
7634
7635 static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
7636 unsigned nr_tables, unsigned nr_files)
7637 {
7638 int i;
7639
7640 for (i = 0; i < nr_tables; i++) {
7641 struct fixed_rsrc_table *table = &file_data->table[i];
7642 unsigned this_files;
7643
7644 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7645 table->files = kcalloc(this_files, sizeof(struct file *),
7646 GFP_KERNEL);
7647 if (!table->files)
7648 break;
7649 nr_files -= this_files;
7650 }
7651
7652 if (i == nr_tables)
7653 return 0;
7654
7655 for (i = 0; i < nr_tables; i++) {
7656 struct fixed_rsrc_table *table = &file_data->table[i];
7657 kfree(table->files);
7658 }
7659 return 1;
7660 }
7661
7662 static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
7663 {
7664 struct file *file = prsrc->file;
7665 #if defined(CONFIG_UNIX)
7666 struct sock *sock = ctx->ring_sock->sk;
7667 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7668 struct sk_buff *skb;
7669 int i;
7670
7671 __skb_queue_head_init(&list);
7672
7673 /*
7674 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7675 * remove this entry and rearrange the file array.
7676 */
7677 skb = skb_dequeue(head);
7678 while (skb) {
7679 struct scm_fp_list *fp;
7680
7681 fp = UNIXCB(skb).fp;
7682 for (i = 0; i < fp->count; i++) {
7683 int left;
7684
7685 if (fp->fp[i] != file)
7686 continue;
7687
7688 unix_notinflight(fp->user, fp->fp[i]);
7689 left = fp->count - 1 - i;
7690 if (left) {
7691 memmove(&fp->fp[i], &fp->fp[i + 1],
7692 left * sizeof(struct file *));
7693 }
7694 fp->count--;
7695 if (!fp->count) {
7696 kfree_skb(skb);
7697 skb = NULL;
7698 } else {
7699 __skb_queue_tail(&list, skb);
7700 }
7701 fput(file);
7702 file = NULL;
7703 break;
7704 }
7705
7706 if (!file)
7707 break;
7708
7709 __skb_queue_tail(&list, skb);
7710
7711 skb = skb_dequeue(head);
7712 }
7713
7714 if (skb_peek(&list)) {
7715 spin_lock_irq(&head->lock);
7716 while ((skb = __skb_dequeue(&list)) != NULL)
7717 __skb_queue_tail(head, skb);
7718 spin_unlock_irq(&head->lock);
7719 }
7720 #else
7721 fput(file);
7722 #endif
7723 }
7724
7725 static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
7726 {
7727 struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
7728 struct io_ring_ctx *ctx = rsrc_data->ctx;
7729 struct io_rsrc_put *prsrc, *tmp;
7730
7731 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7732 list_del(&prsrc->list);
7733 ref_node->rsrc_put(ctx, prsrc);
7734 kfree(prsrc);
7735 }
7736
7737 percpu_ref_exit(&ref_node->refs);
7738 kfree(ref_node);
7739 percpu_ref_put(&rsrc_data->refs);
7740 }
7741
7742 static void io_rsrc_put_work(struct work_struct *work)
7743 {
7744 struct io_ring_ctx *ctx;
7745 struct llist_node *node;
7746
7747 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7748 node = llist_del_all(&ctx->rsrc_put_llist);
7749
7750 while (node) {
7751 struct fixed_rsrc_ref_node *ref_node;
7752 struct llist_node *next = node->next;
7753
7754 ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
7755 __io_rsrc_put_work(ref_node);
7756 node = next;
7757 }
7758 }
7759
7760 static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
7761 unsigned i)
7762 {
7763 struct fixed_rsrc_table *table;
7764
7765 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7766 return &table->files[i & IORING_FILE_TABLE_MASK];
7767 }
7768
7769 static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
7770 {
7771 struct fixed_rsrc_ref_node *ref_node;
7772 struct fixed_rsrc_data *data;
7773 struct io_ring_ctx *ctx;
7774 bool first_add = false;
7775 int delay = HZ;
7776
7777 ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
7778 data = ref_node->rsrc_data;
7779 ctx = data->ctx;
7780
7781 io_rsrc_ref_lock(ctx);
7782 ref_node->done = true;
7783
7784 while (!list_empty(&ctx->rsrc_ref_list)) {
7785 ref_node = list_first_entry(&ctx->rsrc_ref_list,
7786 struct fixed_rsrc_ref_node, node);
7787 /* recycle ref nodes in order */
7788 if (!ref_node->done)
7789 break;
7790 list_del(&ref_node->node);
7791 first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
7792 }
7793 io_rsrc_ref_unlock(ctx);
7794
7795 if (percpu_ref_is_dying(&data->refs))
7796 delay = 0;
7797
7798 if (!delay)
7799 mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
7800 else if (first_add)
7801 queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
7802 }
7803
7804 static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
7805 struct io_ring_ctx *ctx)
7806 {
7807 struct fixed_rsrc_ref_node *ref_node;
7808
7809 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7810 if (!ref_node)
7811 return NULL;
7812
7813 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
7814 0, GFP_KERNEL)) {
7815 kfree(ref_node);
7816 return NULL;
7817 }
7818 INIT_LIST_HEAD(&ref_node->node);
7819 INIT_LIST_HEAD(&ref_node->rsrc_list);
7820 ref_node->done = false;
7821 return ref_node;
7822 }
7823
7824 static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
7825 struct fixed_rsrc_ref_node *ref_node)
7826 {
7827 ref_node->rsrc_data = ctx->file_data;
7828 ref_node->rsrc_put = io_ring_file_put;
7829 }
7830
7831 static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
7832 {
7833 percpu_ref_exit(&ref_node->refs);
7834 kfree(ref_node);
7835 }
7836
7837
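/*
 * Register a fixed file set: build the file tables from the user-supplied
 * fd array (sparse -1 entries are allowed), account the files with the
 * UNIX GC, and install the initial rsrc ref node.
 */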
7838 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7839 unsigned nr_args)
7840 {
7841 __s32 __user *fds = (__s32 __user *) arg;
7842 unsigned nr_tables, i;
7843 struct file *file;
7844 int fd, ret = -ENOMEM;
7845 struct fixed_rsrc_ref_node *ref_node;
7846 struct fixed_rsrc_data *file_data;
7847
7848 if (ctx->file_data)
7849 return -EBUSY;
7850 if (!nr_args)
7851 return -EINVAL;
7852 if (nr_args > IORING_MAX_FIXED_FILES)
7853 return -EMFILE;
7854
7855 file_data = alloc_fixed_rsrc_data(ctx);
7856 if (!file_data)
7857 return -ENOMEM;
7858 ctx->file_data = file_data;
7859
7860 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
7861 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
7862 GFP_KERNEL);
7863 if (!file_data->table)
7864 goto out_free;
7865
7866 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
7867 goto out_free;
7868
7869 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
7870 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7871 ret = -EFAULT;
7872 goto out_fput;
7873 }
7874 /* allow sparse sets */
7875 if (fd == -1)
7876 continue;
7877
7878 file = fget(fd);
7879 ret = -EBADF;
7880 if (!file)
7881 goto out_fput;
7882
7883 /*
7884 * Don't allow io_uring instances to be registered. If UNIX
7885 * isn't enabled, then this causes a reference cycle and this
7886 * instance can never get freed. If UNIX is enabled we'll
7887 * handle it just fine, but there's still no point in allowing
7888 * a ring fd as it doesn't support regular read/write anyway.
7889 */
7890 if (file->f_op == &io_uring_fops) {
7891 fput(file);
7892 goto out_fput;
7893 }
7894 *io_fixed_file_slot(file_data, i) = file;
7895 }
7896
7897 ret = io_sqe_files_scm(ctx);
7898 if (ret) {
7899 io_sqe_files_unregister(ctx);
7900 return ret;
7901 }
7902
7903 ref_node = alloc_fixed_rsrc_ref_node(ctx);
7904 if (!ref_node) {
7905 io_sqe_files_unregister(ctx);
7906 return -ENOMEM;
7907 }
7908 init_fixed_file_ref_node(ctx, ref_node);
7909
7910 io_sqe_rsrc_set_node(ctx, file_data, ref_node);
7911 return ret;
7912 out_fput:
7913 for (i = 0; i < ctx->nr_user_files; i++) {
7914 file = io_file_from_index(ctx, i);
7915 if (file)
7916 fput(file);
7917 }
7918 for (i = 0; i < nr_tables; i++)
7919 kfree(file_data->table[i].files);
7920 ctx->nr_user_files = 0;
7921 out_free:
7922 free_fixed_rsrc_data(ctx->file_data);
7923 ctx->file_data = NULL;
7924 return ret;
7925 }
7926
7927 static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7928 int index)
7929 {
7930 #if defined(CONFIG_UNIX)
7931 struct sock *sock = ctx->ring_sock->sk;
7932 struct sk_buff_head *head = &sock->sk_receive_queue;
7933 struct sk_buff *skb;
7934
7935 /*
7936 * See if we can merge this file into an existing skb SCM_RIGHTS
7937 * file set. If there's no room, fall back to allocating a new skb
7938 * and filling it in.
7939 */
7940 spin_lock_irq(&head->lock);
7941 skb = skb_peek(head);
7942 if (skb) {
7943 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7944
7945 if (fpl->count < SCM_MAX_FD) {
7946 __skb_unlink(skb, head);
7947 spin_unlock_irq(&head->lock);
7948 fpl->fp[fpl->count] = get_file(file);
7949 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7950 fpl->count++;
7951 spin_lock_irq(&head->lock);
7952 __skb_queue_head(head, skb);
7953 } else {
7954 skb = NULL;
7955 }
7956 }
7957 spin_unlock_irq(&head->lock);
7958
7959 if (skb) {
7960 fput(file);
7961 return 0;
7962 }
7963
7964 return __io_sqe_files_scm(ctx, 1, index);
7965 #else
7966 return 0;
7967 #endif
7968 }
7969
7970 static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
7971 {
7972 struct io_rsrc_put *prsrc;
7973 struct fixed_rsrc_ref_node *ref_node = data->node;
7974
7975 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7976 if (!prsrc)
7977 return -ENOMEM;
7978
7979 prsrc->rsrc = rsrc;
7980 list_add(&prsrc->list, &ref_node->rsrc_list);
7981
7982 return 0;
7983 }
7984
7985 static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
7986 struct file *file)
7987 {
7988 return io_queue_rsrc_removal(data, (void *)file);
7989 }
7990
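/*
 * Apply a fixed-file update: queue removal of replaced files on the
 * current ref node, install the new files, and if anything changed,
 * retire that node and switch to a freshly allocated one.
 */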
7991 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7992 struct io_uring_rsrc_update *up,
7993 unsigned nr_args)
7994 {
7995 struct fixed_rsrc_data *data = ctx->file_data;
7996 struct fixed_rsrc_ref_node *ref_node;
7997 struct file *file, **file_slot;
7998 __s32 __user *fds;
7999 int fd, i, err;
8000 __u32 done;
8001 bool needs_switch = false;
8002
8003 if (check_add_overflow(up->offset, nr_args, &done))
8004 return -EOVERFLOW;
8005 if (done > ctx->nr_user_files)
8006 return -EINVAL;
8007
8008 ref_node = alloc_fixed_rsrc_ref_node(ctx);
8009 if (!ref_node)
8010 return -ENOMEM;
8011 init_fixed_file_ref_node(ctx, ref_node);
8012
8013 fds = u64_to_user_ptr(up->data);
8014 for (done = 0; done < nr_args; done++) {
8015 err = 0;
8016 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
8017 err = -EFAULT;
8018 break;
8019 }
8020 if (fd == IORING_REGISTER_FILES_SKIP)
8021 continue;
8022
8023 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
8024 file_slot = io_fixed_file_slot(ctx->file_data, i);
8025
8026 if (*file_slot) {
8027 err = io_queue_file_removal(data, *file_slot);
8028 if (err)
8029 break;
8030 *file_slot = NULL;
8031 needs_switch = true;
8032 }
8033 if (fd != -1) {
8034 file = fget(fd);
8035 if (!file) {
8036 err = -EBADF;
8037 break;
8038 }
8039 /*
8040 * Don't allow io_uring instances to be registered. If
8041 * UNIX isn't enabled, then this causes a reference
8042 * cycle and this instance can never get freed. If UNIX
8043 * is enabled we'll handle it just fine, but there's
8044 * still no point in allowing a ring fd as it doesn't
8045 * support regular read/write anyway.
8046 */
8047 if (file->f_op == &io_uring_fops) {
8048 fput(file);
8049 err = -EBADF;
8050 break;
8051 }
8052 *file_slot = file;
8053 err = io_sqe_file_register(ctx, file, i);
8054 if (err) {
8055 *file_slot = NULL;
8056 fput(file);
8057 break;
8058 }
8059 }
8060 }
8061
8062 if (needs_switch) {
8063 percpu_ref_kill(&data->node->refs);
8064 io_sqe_rsrc_set_node(ctx, data, ref_node);
8065 } else
8066 destroy_fixed_rsrc_ref_node(ref_node);
8067
8068 return done ? done : err;
8069 }
8070
8071 static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
8072 unsigned nr_args)
8073 {
8074 struct io_uring_rsrc_update up;
8075
8076 if (!ctx->file_data)
8077 return -ENXIO;
8078 if (!nr_args)
8079 return -EINVAL;
8080 if (copy_from_user(&up, arg, sizeof(up)))
8081 return -EFAULT;
8082 if (up.resv)
8083 return -EINVAL;
8084
8085 return __io_sqe_files_update(ctx, &up, nr_args);
8086 }
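
/*
 * Editor's illustration, not part of this file: a hedged userspace sketch of
 * the update path handled by __io_sqe_files_update() above. It assumes the
 * UAPI struct io_uring_files_update layout (offset/resv/fds) from
 * <linux/io_uring.h>; an fd of -1 clears a slot and IORING_REGISTER_FILES_SKIP
 * leaves a slot untouched.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Replace fixed-file slot 3 with @new_fd, clear slot 4, skip slot 5. */
static int example_update_files(int ring_fd, int new_fd)
{
	__s32 fds[3] = { new_fd, -1, IORING_REGISTER_FILES_SKIP };
	struct io_uring_files_update up = {
		.offset = 3,
		.fds = (unsigned long long)(uintptr_t)fds,
	};

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES_UPDATE, &up, 3);
}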
8087
8088 static struct io_wq_work *io_free_work(struct io_wq_work *work)
8089 {
8090 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8091
8092 req = io_put_req_find_next(req);
8093 return req ? &req->work : NULL;
8094 }
8095
8096 static int io_init_wq_offload(struct io_ring_ctx *ctx,
8097 struct io_uring_params *p)
8098 {
8099 struct io_wq_data data;
8100 struct fd f;
8101 struct io_ring_ctx *ctx_attach;
8102 unsigned int concurrency;
8103 int ret = 0;
8104
8105 data.user = ctx->user;
8106 data.free_work = io_free_work;
8107 data.do_work = io_wq_submit_work;
8108
8109 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
8110 /* Do QD, or 4 * CPUs, whichever is smaller */
8111 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
8112
8113 ctx->io_wq = io_wq_create(concurrency, &data);
8114 if (IS_ERR(ctx->io_wq)) {
8115 ret = PTR_ERR(ctx->io_wq);
8116 ctx->io_wq = NULL;
8117 }
8118 return ret;
8119 }
8120
8121 f = fdget(p->wq_fd);
8122 if (!f.file)
8123 return -EBADF;
8124
8125 if (f.file->f_op != &io_uring_fops) {
8126 ret = -EINVAL;
8127 goto out_fput;
8128 }
8129
8130 ctx_attach = f.file->private_data;
8131 /* @io_wq is protected by holding the fd */
8132 if (!io_wq_get(ctx_attach->io_wq, &data)) {
8133 ret = -EINVAL;
8134 goto out_fput;
8135 }
8136
8137 ctx->io_wq = ctx_attach->io_wq;
8138 out_fput:
8139 fdput(f);
8140 return ret;
8141 }
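
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * IORING_SETUP_ATTACH_WQ path handled above, where a second ring attaches to
 * the io-wq backend of an existing ring instead of creating its own. Assumes
 * the raw io_uring_setup syscall and <linux/io_uring.h>.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Create a ring that shares the async backend of @existing_ring_fd. */
static int example_attach_wq(int existing_ring_fd)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_ATTACH_WQ;
	p.wq_fd = existing_ring_fd;

	/* Returns the new ring fd, or -1 with errno set. */
	return syscall(__NR_io_uring_setup, 64, &p);
}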
8142
8143 static int io_uring_alloc_task_context(struct task_struct *task)
8144 {
8145 struct io_uring_task *tctx;
8146 int ret;
8147
8148 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
8149 if (unlikely(!tctx))
8150 return -ENOMEM;
8151
8152 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
8153 if (unlikely(ret)) {
8154 kfree(tctx);
8155 return ret;
8156 }
8157
8158 xa_init(&tctx->xa);
8159 init_waitqueue_head(&tctx->wait);
8160 tctx->last = NULL;
8161 atomic_set(&tctx->in_idle, 0);
8162 tctx->sqpoll = false;
8163 io_init_identity(&tctx->__identity);
8164 tctx->identity = &tctx->__identity;
8165 task->io_uring = tctx;
8166 spin_lock_init(&tctx->task_lock);
8167 INIT_WQ_LIST(&tctx->task_list);
8168 tctx->task_state = 0;
8169 init_task_work(&tctx->task_work, tctx_task_work);
8170 return 0;
8171 }
8172
8173 void __io_uring_free(struct task_struct *tsk)
8174 {
8175 struct io_uring_task *tctx = tsk->io_uring;
8176
8177 WARN_ON_ONCE(!xa_empty(&tctx->xa));
8178 WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
8179 if (tctx->identity != &tctx->__identity)
8180 kfree(tctx->identity);
8181 percpu_counter_destroy(&tctx->inflight);
8182 kfree(tctx);
8183 tsk->io_uring = NULL;
8184 }
8185
8186 static int io_sq_offload_create(struct io_ring_ctx *ctx,
8187 struct io_uring_params *p)
8188 {
8189 int ret;
8190
8191 if (ctx->flags & IORING_SETUP_SQPOLL) {
8192 struct io_sq_data *sqd;
8193
8194 ret = -EPERM;
8195 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
8196 goto err;
8197
8198 sqd = io_get_sq_data(p);
8199 if (IS_ERR(sqd)) {
8200 ret = PTR_ERR(sqd);
8201 goto err;
8202 }
8203
8204 ctx->sq_data = sqd;
8205 io_sq_thread_park(sqd);
8206 mutex_lock(&sqd->ctx_lock);
8207 list_add(&ctx->sqd_list, &sqd->ctx_new_list);
8208 mutex_unlock(&sqd->ctx_lock);
8209 io_sq_thread_unpark(sqd);
8210
8211 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8212 if (!ctx->sq_thread_idle)
8213 ctx->sq_thread_idle = HZ;
8214
8215 if (sqd->thread)
8216 goto done;
8217
8218 if (p->flags & IORING_SETUP_SQ_AFF) {
8219 int cpu = p->sq_thread_cpu;
8220
8221 ret = -EINVAL;
8222 if (cpu >= nr_cpu_ids)
8223 goto err;
8224 if (!cpu_online(cpu))
8225 goto err;
8226
8227 sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
8228 cpu, "io_uring-sq");
8229 } else {
8230 sqd->thread = kthread_create(io_sq_thread, sqd,
8231 "io_uring-sq");
8232 }
8233 if (IS_ERR(sqd->thread)) {
8234 ret = PTR_ERR(sqd->thread);
8235 sqd->thread = NULL;
8236 goto err;
8237 }
8238 ret = io_uring_alloc_task_context(sqd->thread);
8239 if (ret)
8240 goto err;
8241 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8242 /* Can't have SQ_AFF without SQPOLL */
8243 ret = -EINVAL;
8244 goto err;
8245 }
8246
8247 done:
8248 ret = io_init_wq_offload(ctx, p);
8249 if (ret)
8250 goto err;
8251
8252 return 0;
8253 err:
8254 io_finish_async(ctx);
8255 return ret;
8256 }
8257
8258 static void io_sq_offload_start(struct io_ring_ctx *ctx)
8259 {
8260 struct io_sq_data *sqd = ctx->sq_data;
8261
8262 if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
8263 wake_up_process(sqd->thread);
8264 }
8265
8266 static inline void __io_unaccount_mem(struct user_struct *user,
8267 unsigned long nr_pages)
8268 {
8269 atomic_long_sub(nr_pages, &user->locked_vm);
8270 }
8271
8272 static inline int __io_account_mem(struct user_struct *user,
8273 unsigned long nr_pages)
8274 {
8275 unsigned long page_limit, cur_pages, new_pages;
8276
8277 /* Don't allow more pages than we can safely lock */
8278 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8279
8280 do {
8281 cur_pages = atomic_long_read(&user->locked_vm);
8282 new_pages = cur_pages + nr_pages;
8283 if (new_pages > page_limit)
8284 return -ENOMEM;
8285 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8286 new_pages) != cur_pages);
8287
8288 return 0;
8289 }
8290
8291 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
8292 {
8293 if (ctx->limit_mem)
8294 __io_unaccount_mem(ctx->user, nr_pages);
8295
8296 if (ctx->mm_account)
8297 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
8298 }
8299
8300 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
8301 {
8302 int ret;
8303
8304 if (ctx->limit_mem) {
8305 ret = __io_account_mem(ctx->user, nr_pages);
8306 if (ret)
8307 return ret;
8308 }
8309
8310 if (ctx->mm_account)
8311 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
8312
8313 return 0;
8314 }
8315
8316 static void io_mem_free(void *ptr)
8317 {
8318 struct page *page;
8319
8320 if (!ptr)
8321 return;
8322
8323 page = virt_to_head_page(ptr);
8324 if (put_page_testzero(page))
8325 free_compound_page(page);
8326 }
8327
8328 static void *io_mem_alloc(size_t size)
8329 {
8330 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
8331 __GFP_NORETRY | __GFP_ACCOUNT;
8332
8333 return (void *) __get_free_pages(gfp_flags, get_order(size));
8334 }
8335
8336 static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8337 size_t *sq_offset)
8338 {
8339 struct io_rings *rings;
8340 size_t off, sq_array_size;
8341
8342 off = struct_size(rings, cqes, cq_entries);
8343 if (off == SIZE_MAX)
8344 return SIZE_MAX;
8345
8346 #ifdef CONFIG_SMP
8347 off = ALIGN(off, SMP_CACHE_BYTES);
8348 if (off == 0)
8349 return SIZE_MAX;
8350 #endif
8351
8352 if (sq_offset)
8353 *sq_offset = off;
8354
8355 sq_array_size = array_size(sizeof(u32), sq_entries);
8356 if (sq_array_size == SIZE_MAX)
8357 return SIZE_MAX;
8358
8359 if (check_add_overflow(off, sq_array_size, &off))
8360 return SIZE_MAX;
8361
8362 return off;
8363 }
8364
8365 static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8366 {
8367 int i, j;
8368
8369 if (!ctx->user_bufs)
8370 return -ENXIO;
8371
8372 for (i = 0; i < ctx->nr_user_bufs; i++) {
8373 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8374
8375 for (j = 0; j < imu->nr_bvecs; j++)
8376 unpin_user_page(imu->bvec[j].bv_page);
8377
8378 if (imu->acct_pages)
8379 io_unaccount_mem(ctx, imu->acct_pages);
8380 kvfree(imu->bvec);
8381 imu->nr_bvecs = 0;
8382 }
8383
8384 kfree(ctx->user_bufs);
8385 ctx->user_bufs = NULL;
8386 ctx->nr_user_bufs = 0;
8387 return 0;
8388 }
8389
8390 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8391 void __user *arg, unsigned index)
8392 {
8393 struct iovec __user *src;
8394
8395 #ifdef CONFIG_COMPAT
8396 if (ctx->compat) {
8397 struct compat_iovec __user *ciovs;
8398 struct compat_iovec ciov;
8399
8400 ciovs = (struct compat_iovec __user *) arg;
8401 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8402 return -EFAULT;
8403
8404 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
8405 dst->iov_len = ciov.iov_len;
8406 return 0;
8407 }
8408 #endif
8409 src = (struct iovec __user *) arg;
8410 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8411 return -EFAULT;
8412 return 0;
8413 }
8414
8415 /*
8416 * Not super efficient, but this only happens at registration time. And we do cache
8417 * the last compound head, so generally we'll only do a full search if we don't
8418 * match that one.
8419 *
8420 * We check if the given compound head page has already been accounted, to
8421 * avoid double accounting it. This allows us to account the full size of the
8422 * page, not just the constituent pages of a huge page.
8423 */
8424 static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8425 int nr_pages, struct page *hpage)
8426 {
8427 int i, j;
8428
8429 /* check current page array */
8430 for (i = 0; i < nr_pages; i++) {
8431 if (!PageCompound(pages[i]))
8432 continue;
8433 if (compound_head(pages[i]) == hpage)
8434 return true;
8435 }
8436
8437 /* check previously registered pages */
8438 for (i = 0; i < ctx->nr_user_bufs; i++) {
8439 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8440
8441 for (j = 0; j < imu->nr_bvecs; j++) {
8442 if (!PageCompound(imu->bvec[j].bv_page))
8443 continue;
8444 if (compound_head(imu->bvec[j].bv_page) == hpage)
8445 return true;
8446 }
8447 }
8448
8449 return false;
8450 }
8451
8452 static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8453 int nr_pages, struct io_mapped_ubuf *imu,
8454 struct page **last_hpage)
8455 {
8456 int i, ret;
8457
8458 for (i = 0; i < nr_pages; i++) {
8459 if (!PageCompound(pages[i])) {
8460 imu->acct_pages++;
8461 } else {
8462 struct page *hpage;
8463
8464 hpage = compound_head(pages[i]);
8465 if (hpage == *last_hpage)
8466 continue;
8467 *last_hpage = hpage;
8468 if (headpage_already_acct(ctx, pages, i, hpage))
8469 continue;
8470 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8471 }
8472 }
8473
8474 if (!imu->acct_pages)
8475 return 0;
8476
8477 ret = io_account_mem(ctx, imu->acct_pages);
8478 if (ret)
8479 imu->acct_pages = 0;
8480 return ret;
8481 }
8482
8483 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8484 struct io_mapped_ubuf *imu,
8485 struct page **last_hpage)
8486 {
8487 struct vm_area_struct **vmas = NULL;
8488 struct page **pages = NULL;
8489 unsigned long off, start, end, ubuf;
8490 size_t size;
8491 int ret, pret, nr_pages, i;
8492
8493 ubuf = (unsigned long) iov->iov_base;
8494 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8495 start = ubuf >> PAGE_SHIFT;
8496 nr_pages = end - start;
8497
8498 ret = -ENOMEM;
8499
8500 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8501 if (!pages)
8502 goto done;
8503
8504 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8505 GFP_KERNEL);
8506 if (!vmas)
8507 goto done;
8508
8509 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8510 GFP_KERNEL);
8511 if (!imu->bvec)
8512 goto done;
8513
8514 ret = 0;
8515 mmap_read_lock(current->mm);
8516 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8517 pages, vmas);
8518 if (pret == nr_pages) {
8519 /* don't support file backed memory */
8520 for (i = 0; i < nr_pages; i++) {
8521 struct vm_area_struct *vma = vmas[i];
8522
8523 if (vma->vm_file &&
8524 !is_file_hugepages(vma->vm_file)) {
8525 ret = -EOPNOTSUPP;
8526 break;
8527 }
8528 }
8529 } else {
8530 ret = pret < 0 ? pret : -EFAULT;
8531 }
8532 mmap_read_unlock(current->mm);
8533 if (ret) {
8534 /*
8535 * If we did a partial map, or found file-backed vmas,
8536 * release any pages we did get.
8537 */
8538 if (pret > 0)
8539 unpin_user_pages(pages, pret);
8540 kvfree(imu->bvec);
8541 goto done;
8542 }
8543
8544 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8545 if (ret) {
8546 unpin_user_pages(pages, pret);
8547 kvfree(imu->bvec);
8548 goto done;
8549 }
8550
8551 off = ubuf & ~PAGE_MASK;
8552 size = iov->iov_len;
8553 for (i = 0; i < nr_pages; i++) {
8554 size_t vec_len;
8555
8556 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8557 imu->bvec[i].bv_page = pages[i];
8558 imu->bvec[i].bv_len = vec_len;
8559 imu->bvec[i].bv_offset = off;
8560 off = 0;
8561 size -= vec_len;
8562 }
8563 /* store original address for later verification */
8564 imu->ubuf = ubuf;
8565 imu->len = iov->iov_len;
8566 imu->nr_bvecs = nr_pages;
8567 ret = 0;
8568 done:
8569 kvfree(pages);
8570 kvfree(vmas);
8571 return ret;
8572 }
8573
8574 static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
8575 {
8576 if (ctx->user_bufs)
8577 return -EBUSY;
8578 if (!nr_args || nr_args > UIO_MAXIOV)
8579 return -EINVAL;
8580
8581 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8582 GFP_KERNEL);
8583 if (!ctx->user_bufs)
8584 return -ENOMEM;
8585
8586 return 0;
8587 }
8588
8589 static int io_buffer_validate(struct iovec *iov)
8590 {
8591 /*
8592 * Don't impose further limits on the size and buffer
8593 * constraints here, we'll -EINVAL later when IO is
8594 * submitted if they are wrong.
8595 */
8596 if (!iov->iov_base || !iov->iov_len)
8597 return -EFAULT;
8598
8599 /* arbitrary limit, but we need something */
8600 if (iov->iov_len > SZ_1G)
8601 return -EFAULT;
8602
8603 return 0;
8604 }
8605
8606 static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8607 unsigned int nr_args)
8608 {
8609 int i, ret;
8610 struct iovec iov;
8611 struct page *last_hpage = NULL;
8612
8613 ret = io_buffers_map_alloc(ctx, nr_args);
8614 if (ret)
8615 return ret;
8616
8617 for (i = 0; i < nr_args; i++) {
8618 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8619
8620 ret = io_copy_iov(ctx, &iov, arg, i);
8621 if (ret)
8622 break;
8623
8624 ret = io_buffer_validate(&iov);
8625 if (ret)
8626 break;
8627
8628 ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
8629 if (ret)
8630 break;
8631
8632 ctx->nr_user_bufs++;
8633 }
8634
8635 if (ret)
8636 io_sqe_buffers_unregister(ctx);
8637
8638 return ret;
8639 }
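
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * buffer registration validated and pinned above. Each iovec must point at
 * non-file-backed memory (hugetlbfs is the exception), is capped at 1GB, and
 * the pinned pages may be charged against RLIMIT_MEMLOCK unless the caller
 * has CAP_IPC_LOCK. Assumes the raw io_uring_register syscall.
 */
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <linux/io_uring.h>

/* Register one 64KB anonymous buffer as fixed buffer index 0. */
static int example_register_buffer(int ring_fd, void **out_buf)
{
	struct iovec iov;

	if (posix_memalign(out_buf, 4096, 65536))
		return -1;
	iov.iov_base = *out_buf;
	iov.iov_len = 65536;

	/* Sqes can now use IORING_OP_READ_FIXED/WRITE_FIXED with buf_index 0. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS, &iov, 1);
}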
8640
8641 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8642 {
8643 __s32 __user *fds = arg;
8644 int fd;
8645
8646 if (ctx->cq_ev_fd)
8647 return -EBUSY;
8648
8649 if (copy_from_user(&fd, fds, sizeof(*fds)))
8650 return -EFAULT;
8651
8652 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8653 if (IS_ERR(ctx->cq_ev_fd)) {
8654 int ret = PTR_ERR(ctx->cq_ev_fd);
8655 ctx->cq_ev_fd = NULL;
8656 return ret;
8657 }
8658
8659 return 0;
8660 }
8661
8662 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8663 {
8664 if (ctx->cq_ev_fd) {
8665 eventfd_ctx_put(ctx->cq_ev_fd);
8666 ctx->cq_ev_fd = NULL;
8667 return 0;
8668 }
8669
8670 return -ENXIO;
8671 }
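
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * eventfd registration consumed by io_eventfd_register() above. Once
 * registered, the kernel signals the eventfd when completions are posted
 * (or, with IORING_REGISTER_EVENTFD_ASYNC, only for completions posted from
 * async context).
 */
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int example_register_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
		close(efd);
		return -1;
	}
	return efd;	/* poll/read this fd to learn about new CQEs */
}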
8672
8673 static int __io_destroy_buffers(int id, void *p, void *data)
8674 {
8675 struct io_ring_ctx *ctx = data;
8676 struct io_buffer *buf = p;
8677
8678 __io_remove_buffers(ctx, buf, id, -1U);
8679 return 0;
8680 }
8681
8682 static void io_destroy_buffers(struct io_ring_ctx *ctx)
8683 {
8684 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
8685 idr_destroy(&ctx->io_buffer_idr);
8686 }
8687
8688 static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
8689 {
8690 struct io_kiocb *req, *nxt;
8691
8692 list_for_each_entry_safe(req, nxt, list, compl.list) {
8693 if (tsk && req->task != tsk)
8694 continue;
8695 list_del(&req->compl.list);
8696 kmem_cache_free(req_cachep, req);
8697 }
8698 }
8699
8700 static void io_req_caches_free(struct io_ring_ctx *ctx, struct task_struct *tsk)
8701 {
8702 struct io_submit_state *submit_state = &ctx->submit_state;
8703
8704 mutex_lock(&ctx->uring_lock);
8705
8706 if (submit_state->free_reqs)
8707 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8708 submit_state->reqs);
8709
8710 io_req_cache_free(&submit_state->comp.free_list, NULL);
8711
8712 spin_lock_irq(&ctx->completion_lock);
8713 io_req_cache_free(&submit_state->comp.locked_free_list, NULL);
8714 spin_unlock_irq(&ctx->completion_lock);
8715
8716 mutex_unlock(&ctx->uring_lock);
8717 }
8718
8719 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8720 {
8721 /*
8722 * Some tasks may still use the context even when all refs and requests
8723 * have been put, and they are free to do so while still holding
8724 * uring_lock; see __io_req_task_submit(). Wait for them to finish.
8725 */
8726 mutex_lock(&ctx->uring_lock);
8727 mutex_unlock(&ctx->uring_lock);
8728
8729 io_finish_async(ctx);
8730 io_sqe_buffers_unregister(ctx);
8731
8732 if (ctx->sqo_task) {
8733 put_task_struct(ctx->sqo_task);
8734 ctx->sqo_task = NULL;
8735 mmdrop(ctx->mm_account);
8736 ctx->mm_account = NULL;
8737 }
8738
8739 #ifdef CONFIG_BLK_CGROUP
8740 if (ctx->sqo_blkcg_css)
8741 css_put(ctx->sqo_blkcg_css);
8742 #endif
8743
8744 io_sqe_files_unregister(ctx);
8745 io_eventfd_unregister(ctx);
8746 io_destroy_buffers(ctx);
8747 idr_destroy(&ctx->personality_idr);
8748
8749 #if defined(CONFIG_UNIX)
8750 if (ctx->ring_sock) {
8751 ctx->ring_sock->file = NULL; /* so that iput() is called */
8752 sock_release(ctx->ring_sock);
8753 }
8754 #endif
8755
8756 io_mem_free(ctx->rings);
8757 io_mem_free(ctx->sq_sqes);
8758
8759 percpu_ref_exit(&ctx->refs);
8760 free_uid(ctx->user);
8761 put_cred(ctx->creds);
8762 io_req_caches_free(ctx, NULL);
8763 kfree(ctx->cancel_hash);
8764 kfree(ctx);
8765 }
8766
8767 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8768 {
8769 struct io_ring_ctx *ctx = file->private_data;
8770 __poll_t mask = 0;
8771
8772 poll_wait(file, &ctx->cq_wait, wait);
8773 /*
8774 * synchronizes with barrier from wq_has_sleeper call in
8775 * io_commit_cqring
8776 */
8777 smp_rmb();
8778 if (!io_sqring_full(ctx))
8779 mask |= EPOLLOUT | EPOLLWRNORM;
8780
8781 /*
8782 * Don't flush cqring overflow list here, just do a simple check.
8783 * Otherwise there could possibly be an ABBA deadlock:
8784 * CPU0 CPU1
8785 * ---- ----
8786 * lock(&ctx->uring_lock);
8787 * lock(&ep->mtx);
8788 * lock(&ctx->uring_lock);
8789 * lock(&ep->mtx);
8790 *
8791 * Users may get EPOLLIN while seeing nothing in the cqring; this
8792 * pushes them to do the flush.
8793 */
8794 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
8795 mask |= EPOLLIN | EPOLLRDNORM;
8796
8797 return mask;
8798 }
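
/*
 * Editor's illustration, not part of this file: a userspace sketch of what
 * io_uring_poll() above enables. The ring fd can be added to an epoll set;
 * EPOLLIN means CQEs (or overflowed CQEs) may be pending, EPOLLOUT means the
 * SQ ring has free space.
 */
#include <sys/epoll.h>

static int example_epoll_on_ring(int epoll_fd, int ring_fd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLOUT,
		.data.fd = ring_fd,
	};

	return epoll_ctl(epoll_fd, EPOLL_CTL_ADD, ring_fd, &ev);
}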
8799
8800 static int io_uring_fasync(int fd, struct file *file, int on)
8801 {
8802 struct io_ring_ctx *ctx = file->private_data;
8803
8804 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8805 }
8806
8807 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
8808 {
8809 struct io_identity *iod;
8810
8811 iod = idr_remove(&ctx->personality_idr, id);
8812 if (iod) {
8813 put_cred(iod->creds);
8814 if (refcount_dec_and_test(&iod->count))
8815 kfree(iod);
8816 return 0;
8817 }
8818
8819 return -EINVAL;
8820 }
8821
8822 static int io_remove_personalities(int id, void *p, void *data)
8823 {
8824 struct io_ring_ctx *ctx = data;
8825
8826 io_unregister_personality(ctx, id);
8827 return 0;
8828 }
8829
8830 static void io_ring_exit_work(struct work_struct *work)
8831 {
8832 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8833 exit_work);
8834
8835 /*
8836 * If we're doing polled IO and end up having requests being
8837 * submitted async (out-of-line), then completions can come in while
8838 * we're waiting for refs to drop. We need to reap these manually,
8839 * as nobody else will be looking for them.
8840 */
8841 do {
8842 io_uring_try_cancel_requests(ctx, NULL, NULL);
8843 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
8844 io_ring_ctx_free(ctx);
8845 }
8846
8847 static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8848 {
8849 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8850
8851 return req->ctx == data;
8852 }
8853
8854 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8855 {
8856 mutex_lock(&ctx->uring_lock);
8857 percpu_ref_kill(&ctx->refs);
8858
8859 if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
8860 ctx->sqo_dead = 1;
8861
8862 /* if force is set, the ring is going away. always drop after that */
8863 ctx->cq_overflow_flushed = 1;
8864 if (ctx->rings)
8865 __io_cqring_overflow_flush(ctx, true, NULL, NULL);
8866 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
8867 mutex_unlock(&ctx->uring_lock);
8868
8869 io_kill_timeouts(ctx, NULL, NULL);
8870 io_poll_remove_all(ctx, NULL, NULL);
8871
8872 if (ctx->io_wq)
8873 io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true);
8874
8875 /* if we failed setting up the ctx, we might not have any rings */
8876 io_iopoll_try_reap_events(ctx);
8877
8878 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
8879 /*
8880 * Use system_unbound_wq to avoid spawning tons of event kworkers
8881 * if we're exiting a ton of rings at the same time. It just adds
8882 * noise and overhead; there's no discernible change in runtime
8883 * over using system_wq.
8884 */
8885 queue_work(system_unbound_wq, &ctx->exit_work);
8886 }
8887
8888 static int io_uring_release(struct inode *inode, struct file *file)
8889 {
8890 struct io_ring_ctx *ctx = file->private_data;
8891
8892 file->private_data = NULL;
8893 io_ring_ctx_wait_and_kill(ctx);
8894 return 0;
8895 }
8896
8897 struct io_task_cancel {
8898 struct task_struct *task;
8899 struct files_struct *files;
8900 };
8901
8902 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
8903 {
8904 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8905 struct io_task_cancel *cancel = data;
8906 bool ret;
8907
8908 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
8909 unsigned long flags;
8910 struct io_ring_ctx *ctx = req->ctx;
8911
8912 /* protect against races with linked timeouts */
8913 spin_lock_irqsave(&ctx->completion_lock, flags);
8914 ret = io_match_task(req, cancel->task, cancel->files);
8915 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8916 } else {
8917 ret = io_match_task(req, cancel->task, cancel->files);
8918 }
8919 return ret;
8920 }
8921
8922 static void io_cancel_defer_files(struct io_ring_ctx *ctx,
8923 struct task_struct *task,
8924 struct files_struct *files)
8925 {
8926 struct io_defer_entry *de = NULL;
8927 LIST_HEAD(list);
8928
8929 spin_lock_irq(&ctx->completion_lock);
8930 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
8931 if (io_match_task(de->req, task, files)) {
8932 list_cut_position(&list, &ctx->defer_list, &de->list);
8933 break;
8934 }
8935 }
8936 spin_unlock_irq(&ctx->completion_lock);
8937
8938 while (!list_empty(&list)) {
8939 de = list_first_entry(&list, struct io_defer_entry, list);
8940 list_del_init(&de->list);
8941 req_set_fail_links(de->req);
8942 io_put_req(de->req);
8943 io_req_complete(de->req, -ECANCELED);
8944 kfree(de);
8945 }
8946 }
8947
8948 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8949 struct task_struct *task,
8950 struct files_struct *files)
8951 {
8952 struct io_task_cancel cancel = { .task = task, .files = files, };
8953
8954 while (1) {
8955 enum io_wq_cancel cret;
8956 bool ret = false;
8957
8958 if (ctx->io_wq) {
8959 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
8960 &cancel, true);
8961 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8962 }
8963
8964 /* SQPOLL thread does its own polling */
8965 if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
8966 while (!list_empty_careful(&ctx->iopoll_list)) {
8967 io_iopoll_try_reap_events(ctx);
8968 ret = true;
8969 }
8970 }
8971
8972 ret |= io_poll_remove_all(ctx, task, files);
8973 ret |= io_kill_timeouts(ctx, task, files);
8974 ret |= io_run_task_work();
8975 io_cqring_overflow_flush(ctx, true, task, files);
8976 if (!ret)
8977 break;
8978 cond_resched();
8979 }
8980 }
8981
8982 static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8983 struct task_struct *task,
8984 struct files_struct *files)
8985 {
8986 struct io_kiocb *req;
8987 int cnt = 0;
8988
8989 spin_lock_irq(&ctx->inflight_lock);
8990 list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8991 cnt += io_match_task(req, task, files);
8992 spin_unlock_irq(&ctx->inflight_lock);
8993 return cnt;
8994 }
8995
8996 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
8997 struct task_struct *task,
8998 struct files_struct *files)
8999 {
9000 while (!list_empty_careful(&ctx->inflight_list)) {
9001 DEFINE_WAIT(wait);
9002 int inflight;
9003
9004 inflight = io_uring_count_inflight(ctx, task, files);
9005 if (!inflight)
9006 break;
9007
9008 io_uring_try_cancel_requests(ctx, task, files);
9009
9010 if (ctx->sq_data)
9011 io_sq_thread_unpark(ctx->sq_data);
9012 prepare_to_wait(&task->io_uring->wait, &wait,
9013 TASK_UNINTERRUPTIBLE);
9014 if (inflight == io_uring_count_inflight(ctx, task, files))
9015 schedule();
9016 finish_wait(&task->io_uring->wait, &wait);
9017 if (ctx->sq_data)
9018 io_sq_thread_park(ctx->sq_data);
9019 }
9020 }
9021
9022 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
9023 {
9024 mutex_lock(&ctx->uring_lock);
9025 ctx->sqo_dead = 1;
9026 mutex_unlock(&ctx->uring_lock);
9027
9028 /* make sure callers enter the ring to get error */
9029 if (ctx->rings)
9030 io_ring_set_wakeup_flag(ctx);
9031 }
9032
9033 /*
9034 * We need to iteratively cancel requests, in case a request has dependent
9035 * hard links. These persist even when a cancelation fails, hence keep
9036 * looping until none are found.
9037 */
9038 static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
9039 struct files_struct *files)
9040 {
9041 struct task_struct *task = current;
9042
9043 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
9044 io_disable_sqo_submit(ctx);
9045 task = ctx->sq_data->thread;
9046 atomic_inc(&task->io_uring->in_idle);
9047 io_sq_thread_park(ctx->sq_data);
9048 }
9049
9050 io_cancel_defer_files(ctx, task, files);
9051
9052 io_uring_cancel_files(ctx, task, files);
9053 if (!files)
9054 io_uring_try_cancel_requests(ctx, task, NULL);
9055
9056 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
9057 atomic_dec(&task->io_uring->in_idle);
9058 /*
9059 * If the files that are going away are the ones in the thread
9060 * identity, clear them out.
9061 */
9062 if (task->io_uring->identity->files == files)
9063 task->io_uring->identity->files = NULL;
9064 io_sq_thread_unpark(ctx->sq_data);
9065 }
9066 }
9067
9068 /*
9069 * Note that this task has used io_uring. We use it for cancelation purposes.
9070 */
9071 static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
9072 {
9073 struct io_uring_task *tctx = current->io_uring;
9074 int ret;
9075
9076 if (unlikely(!tctx)) {
9077 ret = io_uring_alloc_task_context(current);
9078 if (unlikely(ret))
9079 return ret;
9080 tctx = current->io_uring;
9081 }
9082 if (tctx->last != file) {
9083 void *old = xa_load(&tctx->xa, (unsigned long)file);
9084
9085 if (!old) {
9086 get_file(file);
9087 ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
9088 file, GFP_KERNEL));
9089 if (ret) {
9090 fput(file);
9091 return ret;
9092 }
9093
9094 /* one and only SQPOLL file note, held by sqo_task */
9095 WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) &&
9096 current != ctx->sqo_task);
9097 }
9098 tctx->last = file;
9099 }
9100
9101 /*
9102 * This is race safe in that the task itself is doing this, hence it
9103 * cannot be going through the exit/cancel paths at the same time.
9104 * This cannot be modified while exit/cancel is running.
9105 */
9106 if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
9107 tctx->sqpoll = true;
9108
9109 return 0;
9110 }
9111
9112 /*
9113 * Remove this io_uring_file -> task mapping.
9114 */
9115 static void io_uring_del_task_file(struct file *file)
9116 {
9117 struct io_uring_task *tctx = current->io_uring;
9118
9119 if (tctx->last == file)
9120 tctx->last = NULL;
9121 file = xa_erase(&tctx->xa, (unsigned long)file);
9122 if (file)
9123 fput(file);
9124 }
9125
9126 static void io_uring_remove_task_files(struct io_uring_task *tctx)
9127 {
9128 struct file *file;
9129 unsigned long index;
9130
9131 xa_for_each(&tctx->xa, index, file)
9132 io_uring_del_task_file(file);
9133 }
9134
9135 void __io_uring_files_cancel(struct files_struct *files)
9136 {
9137 struct io_uring_task *tctx = current->io_uring;
9138 struct file *file;
9139 unsigned long index;
9140
9141 /* make sure overflow events are dropped */
9142 atomic_inc(&tctx->in_idle);
9143 xa_for_each(&tctx->xa, index, file)
9144 io_uring_cancel_task_requests(file->private_data, files);
9145 atomic_dec(&tctx->in_idle);
9146
9147 if (files)
9148 io_uring_remove_task_files(tctx);
9149 }
9150
9151 static s64 tctx_inflight(struct io_uring_task *tctx)
9152 {
9153 return percpu_counter_sum(&tctx->inflight);
9154 }
9155
9156 static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
9157 {
9158 struct io_uring_task *tctx;
9159 s64 inflight;
9160 DEFINE_WAIT(wait);
9161
9162 if (!ctx->sq_data)
9163 return;
9164 tctx = ctx->sq_data->thread->io_uring;
9165 io_disable_sqo_submit(ctx);
9166
9167 atomic_inc(&tctx->in_idle);
9168 do {
9169 /* read completions before cancelations */
9170 inflight = tctx_inflight(tctx);
9171 if (!inflight)
9172 break;
9173 io_uring_cancel_task_requests(ctx, NULL);
9174
9175 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9176 /*
9177 * If we've seen completions, retry without waiting. This
9178 * avoids a race where a completion comes in before we did
9179 * prepare_to_wait().
9180 */
9181 if (inflight == tctx_inflight(tctx))
9182 schedule();
9183 finish_wait(&tctx->wait, &wait);
9184 } while (1);
9185 atomic_dec(&tctx->in_idle);
9186 }
9187
9188 /*
9189 * Find any io_uring fd that this task has registered or done IO on, and cancel
9190 * requests.
9191 */
9192 void __io_uring_task_cancel(void)
9193 {
9194 struct io_uring_task *tctx = current->io_uring;
9195 DEFINE_WAIT(wait);
9196 s64 inflight;
9197
9198 /* make sure overflow events are dropped */
9199 atomic_inc(&tctx->in_idle);
9200
9201 /* trigger io_disable_sqo_submit() */
9202 if (tctx->sqpoll) {
9203 struct file *file;
9204 unsigned long index;
9205
9206 xa_for_each(&tctx->xa, index, file)
9207 io_uring_cancel_sqpoll(file->private_data);
9208 }
9209
9210 do {
9211 /* read completions before cancelations */
9212 inflight = tctx_inflight(tctx);
9213 if (!inflight)
9214 break;
9215 __io_uring_files_cancel(NULL);
9216
9217 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9218
9219 /*
9220 * If we've seen completions, retry without waiting. This
9221 * avoids a race where a completion comes in before we did
9222 * prepare_to_wait().
9223 */
9224 if (inflight == tctx_inflight(tctx))
9225 schedule();
9226 finish_wait(&tctx->wait, &wait);
9227 } while (1);
9228
9229 atomic_dec(&tctx->in_idle);
9230
9231 io_uring_remove_task_files(tctx);
9232 }
9233
9234 static int io_uring_flush(struct file *file, void *data)
9235 {
9236 struct io_uring_task *tctx = current->io_uring;
9237 struct io_ring_ctx *ctx = file->private_data;
9238
9239 if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
9240 io_uring_cancel_task_requests(ctx, NULL);
9241 io_req_caches_free(ctx, current);
9242 }
9243
9244 if (!tctx)
9245 return 0;
9246
9247 /* we should have cancelled and erased it before PF_EXITING */
9248 WARN_ON_ONCE((current->flags & PF_EXITING) &&
9249 xa_load(&tctx->xa, (unsigned long)file));
9250
9251 /*
9252 * An fput() is pending; the count will be 2 if the only other ref is our
9253 * potential task file note. If the task is exiting, drop regardless of count.
9254 */
9255 if (atomic_long_read(&file->f_count) != 2)
9256 return 0;
9257
9258 if (ctx->flags & IORING_SETUP_SQPOLL) {
9259 /* there is only one file note, which is owned by sqo_task */
9260 WARN_ON_ONCE(ctx->sqo_task != current &&
9261 xa_load(&tctx->xa, (unsigned long)file));
9262 /* sqo_dead check is for when this happens after cancellation */
9263 WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
9264 !xa_load(&tctx->xa, (unsigned long)file));
9265
9266 io_disable_sqo_submit(ctx);
9267 }
9268
9269 if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
9270 io_uring_del_task_file(file);
9271 return 0;
9272 }
9273
9274 static void *io_uring_validate_mmap_request(struct file *file,
9275 loff_t pgoff, size_t sz)
9276 {
9277 struct io_ring_ctx *ctx = file->private_data;
9278 loff_t offset = pgoff << PAGE_SHIFT;
9279 struct page *page;
9280 void *ptr;
9281
9282 switch (offset) {
9283 case IORING_OFF_SQ_RING:
9284 case IORING_OFF_CQ_RING:
9285 ptr = ctx->rings;
9286 break;
9287 case IORING_OFF_SQES:
9288 ptr = ctx->sq_sqes;
9289 break;
9290 default:
9291 return ERR_PTR(-EINVAL);
9292 }
9293
9294 page = virt_to_head_page(ptr);
9295 if (sz > page_size(page))
9296 return ERR_PTR(-EINVAL);
9297
9298 return ptr;
9299 }
9300
9301 #ifdef CONFIG_MMU
9302
9303 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9304 {
9305 size_t sz = vma->vm_end - vma->vm_start;
9306 unsigned long pfn;
9307 void *ptr;
9308
9309 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9310 if (IS_ERR(ptr))
9311 return PTR_ERR(ptr);
9312
9313 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9314 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9315 }
9316
9317 #else /* !CONFIG_MMU */
9318
9319 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9320 {
9321 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9322 }
9323
9324 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9325 {
9326 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9327 }
9328
9329 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9330 unsigned long addr, unsigned long len,
9331 unsigned long pgoff, unsigned long flags)
9332 {
9333 void *ptr;
9334
9335 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9336 if (IS_ERR(ptr))
9337 return PTR_ERR(ptr);
9338
9339 return (unsigned long) ptr;
9340 }
9341
9342 #endif /* !CONFIG_MMU */
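
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * mappings validated by io_uring_validate_mmap_request() above, using the
 * offsets and sizes reported by io_uring_setup(). With IORING_FEAT_SINGLE_MMAP
 * the SQ and CQ rings share one mapping; this sketch maps the SQ ring and the
 * sqe array separately for clarity.
 */
#include <stddef.h>
#include <sys/mman.h>
#include <linux/io_uring.h>

static void *example_map_sq_ring(int ring_fd, const struct io_uring_params *p)
{
	/* SQ ring: header plus the array of sqe indices. */
	size_t sz = p->sq_off.array + p->sq_entries * sizeof(__u32);

	return mmap(NULL, sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
}

static void *example_map_sqes(int ring_fd, const struct io_uring_params *p)
{
	size_t sz = p->sq_entries * sizeof(struct io_uring_sqe);

	return mmap(NULL, sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
}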
9343
9344 static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
9345 {
9346 int ret = 0;
9347 DEFINE_WAIT(wait);
9348
9349 do {
9350 if (!io_sqring_full(ctx))
9351 break;
9352
9353 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9354
9355 if (unlikely(ctx->sqo_dead)) {
9356 ret = -EOWNERDEAD;
9357 goto out;
9358 }
9359
9360 if (!io_sqring_full(ctx))
9361 break;
9362
9363 schedule();
9364 } while (!signal_pending(current));
9365
9366 finish_wait(&ctx->sqo_sq_wait, &wait);
9367 out:
9368 return ret;
9369 }
9370
9371 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9372 struct __kernel_timespec __user **ts,
9373 const sigset_t __user **sig)
9374 {
9375 struct io_uring_getevents_arg arg;
9376
9377 /*
9378 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9379 * is just a pointer to the sigset_t.
9380 */
9381 if (!(flags & IORING_ENTER_EXT_ARG)) {
9382 *sig = (const sigset_t __user *) argp;
9383 *ts = NULL;
9384 return 0;
9385 }
9386
9387 /*
9388 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9389 * timespec and sigset_t pointers if good.
9390 */
9391 if (*argsz != sizeof(arg))
9392 return -EINVAL;
9393 if (copy_from_user(&arg, argp, sizeof(arg)))
9394 return -EFAULT;
9395 *sig = u64_to_user_ptr(arg.sigmask);
9396 *argsz = arg.sigmask_sz;
9397 *ts = u64_to_user_ptr(arg.ts);
9398 return 0;
9399 }
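
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * IORING_ENTER_EXT_ARG convention parsed above, passing a timeout (and no
 * sigmask) to a completion wait. Assumes the raw io_uring_enter syscall and
 * struct io_uring_getevents_arg from <linux/io_uring.h>.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

/* Wait up to one second for at least one completion. */
static int example_wait_with_timeout(int ring_fd)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_getevents_arg arg = {
		.sigmask = 0,
		.sigmask_sz = 0,
		.ts = (unsigned long long)(uintptr_t)&ts,
	};

	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}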
9400
9401 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
9402 u32, min_complete, u32, flags, const void __user *, argp,
9403 size_t, argsz)
9404 {
9405 struct io_ring_ctx *ctx;
9406 long ret = -EBADF;
9407 int submitted = 0;
9408 struct fd f;
9409
9410 io_run_task_work();
9411
9412 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9413 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
9414 return -EINVAL;
9415
9416 f = fdget(fd);
9417 if (!f.file)
9418 return -EBADF;
9419
9420 ret = -EOPNOTSUPP;
9421 if (f.file->f_op != &io_uring_fops)
9422 goto out_fput;
9423
9424 ret = -ENXIO;
9425 ctx = f.file->private_data;
9426 if (!percpu_ref_tryget(&ctx->refs))
9427 goto out_fput;
9428
9429 ret = -EBADFD;
9430 if (ctx->flags & IORING_SETUP_R_DISABLED)
9431 goto out;
9432
9433 /*
9434 * For SQ polling, the thread will do all submissions and completions.
9435 * Just return the requested submit count, and wake the thread if
9436 * we were asked to.
9437 */
9438 ret = 0;
9439 if (ctx->flags & IORING_SETUP_SQPOLL) {
9440 io_cqring_overflow_flush(ctx, false, NULL, NULL);
9441
9442 ret = -EOWNERDEAD;
9443 if (unlikely(ctx->sqo_dead))
9444 goto out;
9445 if (flags & IORING_ENTER_SQ_WAKEUP)
9446 wake_up(&ctx->sq_data->wait);
9447 if (flags & IORING_ENTER_SQ_WAIT) {
9448 ret = io_sqpoll_wait_sq(ctx);
9449 if (ret)
9450 goto out;
9451 }
9452 submitted = to_submit;
9453 } else if (to_submit) {
9454 ret = io_uring_add_task_file(ctx, f.file);
9455 if (unlikely(ret))
9456 goto out;
9457 mutex_lock(&ctx->uring_lock);
9458 submitted = io_submit_sqes(ctx, to_submit);
9459 mutex_unlock(&ctx->uring_lock);
9460
9461 if (submitted != to_submit)
9462 goto out;
9463 }
9464 if (flags & IORING_ENTER_GETEVENTS) {
9465 const sigset_t __user *sig;
9466 struct __kernel_timespec __user *ts;
9467
9468 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9469 if (unlikely(ret))
9470 goto out;
9471
9472 min_complete = min(min_complete, ctx->cq_entries);
9473
9474 /*
9475 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, userspace
9476 * applications don't need to poll for completion events themselves;
9477 * they can rely on io_sq_thread to do the polling work, which
9478 * reduces cpu usage and uring_lock contention.
9479 */
9480 if (ctx->flags & IORING_SETUP_IOPOLL &&
9481 !(ctx->flags & IORING_SETUP_SQPOLL)) {
9482 ret = io_iopoll_check(ctx, min_complete);
9483 } else {
9484 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
9485 }
9486 }
9487
9488 out:
9489 percpu_ref_put(&ctx->refs);
9490 out_fput:
9491 fdput(f);
9492 return submitted ? submitted : ret;
9493 }
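
/*
 * Editor's illustration, not part of this file: two common userspace call
 * patterns for the syscall above, shown as a hedged sketch with raw syscalls
 * (real applications normally use liburing).
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Submit @n prepared sqes and wait until at least one CQE is available. */
static int example_submit_and_wait(int ring_fd, unsigned int n)
{
	return syscall(__NR_io_uring_enter, ring_fd, n, 1,
		       IORING_ENTER_GETEVENTS, NULL, 0);
}

/*
 * With IORING_SETUP_SQPOLL the SQ thread consumes sqes on its own; only poke
 * it if it has gone idle and set IORING_SQ_NEED_WAKEUP in the SQ ring flags
 * (@sq_flags points into the mapped SQ ring).
 */
static int example_sqpoll_wakeup(int ring_fd, const unsigned int *sq_flags)
{
	if (*(volatile const unsigned int *)sq_flags & IORING_SQ_NEED_WAKEUP)
		return syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			       IORING_ENTER_SQ_WAKEUP, NULL, 0);
	return 0;
}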
9494
9495 #ifdef CONFIG_PROC_FS
9496 static int io_uring_show_cred(int id, void *p, void *data)
9497 {
9498 struct io_identity *iod = p;
9499 const struct cred *cred = iod->creds;
9500 struct seq_file *m = data;
9501 struct user_namespace *uns = seq_user_ns(m);
9502 struct group_info *gi;
9503 kernel_cap_t cap;
9504 unsigned __capi;
9505 int g;
9506
9507 seq_printf(m, "%5d\n", id);
9508 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9509 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9510 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9511 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9512 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9513 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9514 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9515 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9516 seq_puts(m, "\n\tGroups:\t");
9517 gi = cred->group_info;
9518 for (g = 0; g < gi->ngroups; g++) {
9519 seq_put_decimal_ull(m, g ? " " : "",
9520 from_kgid_munged(uns, gi->gid[g]));
9521 }
9522 seq_puts(m, "\n\tCapEff:\t");
9523 cap = cred->cap_effective;
9524 CAP_FOR_EACH_U32(__capi)
9525 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9526 seq_putc(m, '\n');
9527 return 0;
9528 }
9529
9530 static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9531 {
9532 struct io_sq_data *sq = NULL;
9533 bool has_lock;
9534 int i;
9535
9536 /*
9537 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9538 * since the fdinfo case grabs them in the opposite order from normal use
9539 * cases. If we fail to get the lock, we just don't iterate any
9540 * structures that could be going away outside the io_uring mutex.
9541 */
9542 has_lock = mutex_trylock(&ctx->uring_lock);
9543
9544 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
9545 sq = ctx->sq_data;
9546
9547 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9548 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
9549 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
9550 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
9551 struct file *f = *io_fixed_file_slot(ctx->file_data, i);
9552
9553 if (f)
9554 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9555 else
9556 seq_printf(m, "%5u: <none>\n", i);
9557 }
9558 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
9559 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
9560 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9561
9562 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9563 (unsigned int) buf->len);
9564 }
9565 if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
9566 seq_printf(m, "Personalities:\n");
9567 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
9568 }
9569 seq_printf(m, "PollList:\n");
9570 spin_lock_irq(&ctx->completion_lock);
9571 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9572 struct hlist_head *list = &ctx->cancel_hash[i];
9573 struct io_kiocb *req;
9574
9575 hlist_for_each_entry(req, list, hash_node)
9576 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9577 req->task->task_works != NULL);
9578 }
9579 spin_unlock_irq(&ctx->completion_lock);
9580 if (has_lock)
9581 mutex_unlock(&ctx->uring_lock);
9582 }
9583
9584 static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9585 {
9586 struct io_ring_ctx *ctx = f->private_data;
9587
9588 if (percpu_ref_tryget(&ctx->refs)) {
9589 __io_uring_show_fdinfo(ctx, m);
9590 percpu_ref_put(&ctx->refs);
9591 }
9592 }
9593 #endif
9594
9595 static const struct file_operations io_uring_fops = {
9596 .release = io_uring_release,
9597 .flush = io_uring_flush,
9598 .mmap = io_uring_mmap,
9599 #ifndef CONFIG_MMU
9600 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9601 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9602 #endif
9603 .poll = io_uring_poll,
9604 .fasync = io_uring_fasync,
9605 #ifdef CONFIG_PROC_FS
9606 .show_fdinfo = io_uring_show_fdinfo,
9607 #endif
9608 };
9609
9610 static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9611 struct io_uring_params *p)
9612 {
9613 struct io_rings *rings;
9614 size_t size, sq_array_offset;
9615
9616 /* make sure these are sane, as we already accounted them */
9617 ctx->sq_entries = p->sq_entries;
9618 ctx->cq_entries = p->cq_entries;
9619
9620 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9621 if (size == SIZE_MAX)
9622 return -EOVERFLOW;
9623
9624 rings = io_mem_alloc(size);
9625 if (!rings)
9626 return -ENOMEM;
9627
9628 ctx->rings = rings;
9629 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9630 rings->sq_ring_mask = p->sq_entries - 1;
9631 rings->cq_ring_mask = p->cq_entries - 1;
9632 rings->sq_ring_entries = p->sq_entries;
9633 rings->cq_ring_entries = p->cq_entries;
9634 ctx->sq_mask = rings->sq_ring_mask;
9635 ctx->cq_mask = rings->cq_ring_mask;
9636
9637 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
9638 if (size == SIZE_MAX) {
9639 io_mem_free(ctx->rings);
9640 ctx->rings = NULL;
9641 return -EOVERFLOW;
9642 }
9643
9644 ctx->sq_sqes = io_mem_alloc(size);
9645 if (!ctx->sq_sqes) {
9646 io_mem_free(ctx->rings);
9647 ctx->rings = NULL;
9648 return -ENOMEM;
9649 }
9650
9651 return 0;
9652 }
9653
9654 static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9655 {
9656 int ret, fd;
9657
9658 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9659 if (fd < 0)
9660 return fd;
9661
9662 ret = io_uring_add_task_file(ctx, file);
9663 if (ret) {
9664 put_unused_fd(fd);
9665 return ret;
9666 }
9667 fd_install(fd, file);
9668 return fd;
9669 }
9670
9671 /*
9672 * Allocate an anonymous fd; this is what constitutes the application-
9673 * visible backing of an io_uring instance. The application mmaps this
9674 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9675 * we have to tie this fd to a socket for file garbage collection purposes.
9676 */
9677 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
9678 {
9679 struct file *file;
9680 #if defined(CONFIG_UNIX)
9681 int ret;
9682
9683 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9684 &ctx->ring_sock);
9685 if (ret)
9686 return ERR_PTR(ret);
9687 #endif
9688
9689 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9690 O_RDWR | O_CLOEXEC);
9691 #if defined(CONFIG_UNIX)
9692 if (IS_ERR(file)) {
9693 sock_release(ctx->ring_sock);
9694 ctx->ring_sock = NULL;
9695 } else {
9696 ctx->ring_sock->file = file;
9697 }
9698 #endif
9699 return file;
9700 }
9701
9702 static int io_uring_create(unsigned entries, struct io_uring_params *p,
9703 struct io_uring_params __user *params)
9704 {
9705 struct user_struct *user = NULL;
9706 struct io_ring_ctx *ctx;
9707 struct file *file;
9708 int ret;
9709
9710 if (!entries)
9711 return -EINVAL;
9712 if (entries > IORING_MAX_ENTRIES) {
9713 if (!(p->flags & IORING_SETUP_CLAMP))
9714 return -EINVAL;
9715 entries = IORING_MAX_ENTRIES;
9716 }
9717
9718 /*
9719 * Use twice as many entries for the CQ ring. It's possible for the
9720 * application to drive a higher depth than the size of the SQ ring,
9721 * since the sqes are only used at submission time. This allows for
9722 * some flexibility in overcommitting a bit. If the application has
9723 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9724 * of CQ ring entries manually.
9725 */
9726 p->sq_entries = roundup_pow_of_two(entries);
9727 if (p->flags & IORING_SETUP_CQSIZE) {
9728 /*
9729 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9730 * to a power-of-two, if it isn't already. Beyond requiring at least
9731 * as many CQ entries as SQ entries, we do NOT impose any sizing.
9732 */
9733 if (!p->cq_entries)
9734 return -EINVAL;
9735 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9736 if (!(p->flags & IORING_SETUP_CLAMP))
9737 return -EINVAL;
9738 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9739 }
9740 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9741 if (p->cq_entries < p->sq_entries)
9742 return -EINVAL;
9743 } else {
9744 p->cq_entries = 2 * p->sq_entries;
9745 }
9746
9747 user = get_uid(current_user());
9748
9749 ctx = io_ring_ctx_alloc(p);
9750 if (!ctx) {
9751 free_uid(user);
9752 return -ENOMEM;
9753 }
9754 ctx->compat = in_compat_syscall();
9755 ctx->limit_mem = !capable(CAP_IPC_LOCK);
9756 ctx->user = user;
9757 ctx->creds = get_current_cred();
9758 #ifdef CONFIG_AUDIT
9759 ctx->loginuid = current->loginuid;
9760 ctx->sessionid = current->sessionid;
9761 #endif
9762 ctx->sqo_task = get_task_struct(current);
9763
9764 /*
9765 * This is just grabbed for accounting purposes. When a process exits,
9766 * the mm is exited and dropped before the files, hence we need to hang
9767 * on to this mm purely for the purposes of being able to unaccount
9768 * memory (locked/pinned vm). It's not used for anything else.
9769 */
9770 mmgrab(current->mm);
9771 ctx->mm_account = current->mm;
9772
9773 #ifdef CONFIG_BLK_CGROUP
9774 /*
9775 * The sq thread will belong to the original cgroup it was inited in.
9776 * If the cgroup goes offline (e.g. disabling the io controller), then
9777 * issued bios will be associated with the closest cgroup later in the
9778 * block layer.
9779 */
9780 rcu_read_lock();
9781 ctx->sqo_blkcg_css = blkcg_css();
9782 ret = css_tryget_online(ctx->sqo_blkcg_css);
9783 rcu_read_unlock();
9784 if (!ret) {
9785 /* don't init against a dying cgroup, have the user try again */
9786 ctx->sqo_blkcg_css = NULL;
9787 ret = -ENODEV;
9788 goto err;
9789 }
9790 #endif
9791 ret = io_allocate_scq_urings(ctx, p);
9792 if (ret)
9793 goto err;
9794
9795 ret = io_sq_offload_create(ctx, p);
9796 if (ret)
9797 goto err;
9798
9799 if (!(p->flags & IORING_SETUP_R_DISABLED))
9800 io_sq_offload_start(ctx);
9801
9802 memset(&p->sq_off, 0, sizeof(p->sq_off));
9803 p->sq_off.head = offsetof(struct io_rings, sq.head);
9804 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9805 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9806 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9807 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9808 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9809 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
9810
9811 memset(&p->cq_off, 0, sizeof(p->cq_off));
9812 p->cq_off.head = offsetof(struct io_rings, cq.head);
9813 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9814 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9815 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9816 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9817 p->cq_off.cqes = offsetof(struct io_rings, cqes);
9818 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
9819
9820 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9821 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
9822 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
9823 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9824 IORING_FEAT_EXT_ARG;
9825
9826 if (copy_to_user(params, p, sizeof(*p))) {
9827 ret = -EFAULT;
9828 goto err;
9829 }
9830
9831 file = io_uring_get_file(ctx);
9832 if (IS_ERR(file)) {
9833 ret = PTR_ERR(file);
9834 goto err;
9835 }
9836
9837 /*
9838 * Install ring fd as the very last thing, so we don't risk someone
9839 * having closed it before we finish setup
9840 */
9841 ret = io_uring_install_fd(ctx, file);
9842 if (ret < 0) {
9843 io_disable_sqo_submit(ctx);
9844 /* fput will clean it up */
9845 fput(file);
9846 return ret;
9847 }
9848
9849 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
9850 return ret;
9851 err:
9852 io_disable_sqo_submit(ctx);
9853 io_ring_ctx_wait_and_kill(ctx);
9854 return ret;
9855 }
9856
9857 /*
9858 * Sets up an io_uring context, and returns the fd. The application asks for a
9859 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9860 * params structure passed in.
9861 */
9862 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9863 {
9864 struct io_uring_params p;
9865 int i;
9866
9867 if (copy_from_user(&p, params, sizeof(p)))
9868 return -EFAULT;
9869 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9870 if (p.resv[i])
9871 return -EINVAL;
9872 }
9873
9874 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
9875 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
9876 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9877 IORING_SETUP_R_DISABLED))
9878 return -EINVAL;
9879
9880 return io_uring_create(entries, &p, params);
9881 }
9882
9883 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9884 struct io_uring_params __user *, params)
9885 {
9886 return io_uring_setup(entries, params);
9887 }
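
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * setup path above, asking for a larger CQ ring via IORING_SETUP_CQSIZE. The
 * kernel rounds both ring sizes up to powers of two and reports the final
 * values (and feature flags) back in the params struct.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int example_setup_big_cq(struct io_uring_params *p)
{
	memset(p, 0, sizeof(*p));
	p->flags = IORING_SETUP_CQSIZE;
	p->cq_entries = 4096;	/* must be at least the (rounded) SQ size */

	/* 256 SQ entries requested; check p->sq_entries/p->cq_entries after. */
	return syscall(__NR_io_uring_setup, 256, p);
}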
9888
9889 static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9890 {
9891 struct io_uring_probe *p;
9892 size_t size;
9893 int i, ret;
9894
9895 size = struct_size(p, ops, nr_args);
9896 if (size == SIZE_MAX)
9897 return -EOVERFLOW;
9898 p = kzalloc(size, GFP_KERNEL);
9899 if (!p)
9900 return -ENOMEM;
9901
9902 ret = -EFAULT;
9903 if (copy_from_user(p, arg, size))
9904 goto out;
9905 ret = -EINVAL;
9906 if (memchr_inv(p, 0, size))
9907 goto out;
9908
9909 p->last_op = IORING_OP_LAST - 1;
9910 if (nr_args > IORING_OP_LAST)
9911 nr_args = IORING_OP_LAST;
9912
9913 for (i = 0; i < nr_args; i++) {
9914 p->ops[i].op = i;
9915 if (!io_op_defs[i].not_supported)
9916 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9917 }
9918 p->ops_len = i;
9919
9920 ret = 0;
9921 if (copy_to_user(arg, p, size))
9922 ret = -EFAULT;
9923 out:
9924 kfree(p);
9925 return ret;
9926 }
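
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * probe interface served above, checking whether one opcode is supported on
 * the running kernel. Assumes the UAPI io_uring_probe/io_uring_probe_op
 * layout from <linux/io_uring.h>.
 */
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Return 1 if @op is supported, 0 if not, -1 on error. */
static int example_probe_op(int ring_fd, int op)
{
	size_t len = sizeof(struct io_uring_probe) +
		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *probe = calloc(1, len);
	int ret = -1;

	if (!probe)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		    probe, IORING_OP_LAST) >= 0)
		ret = (op <= probe->last_op &&
		       (probe->ops[op].flags & IO_URING_OP_SUPPORTED)) ? 1 : 0;
	free(probe);
	return ret;
}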
9927
9928 static int io_register_personality(struct io_ring_ctx *ctx)
9929 {
9930 struct io_identity *id;
9931 int ret;
9932
9933 id = kmalloc(sizeof(*id), GFP_KERNEL);
9934 if (unlikely(!id))
9935 return -ENOMEM;
9936
9937 io_init_identity(id);
9938 id->creds = get_current_cred();
9939
9940 ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
9941 if (ret < 0) {
9942 put_cred(id->creds);
9943 kfree(id);
9944 }
9945 return ret;
9946 }
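
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * personality registration above. The returned id snapshots the caller's
 * credentials; a later sqe can select them via its personality field.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int example_register_personality(int ring_fd)
{
	/* No argument; the return value (> 0) is the personality id. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}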
9947
9948 static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9949 unsigned int nr_args)
9950 {
9951 struct io_uring_restriction *res;
9952 size_t size;
9953 int i, ret;
9954
9955 /* Restrictions allowed only if rings started disabled */
9956 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9957 return -EBADFD;
9958
9959 /* We allow only a single restrictions registration */
9960 if (ctx->restrictions.registered)
9961 return -EBUSY;
9962
9963 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9964 return -EINVAL;
9965
9966 size = array_size(nr_args, sizeof(*res));
9967 if (size == SIZE_MAX)
9968 return -EOVERFLOW;
9969
9970 res = memdup_user(arg, size);
9971 if (IS_ERR(res))
9972 return PTR_ERR(res);
9973
9974 ret = 0;
9975
9976 for (i = 0; i < nr_args; i++) {
9977 switch (res[i].opcode) {
9978 case IORING_RESTRICTION_REGISTER_OP:
9979 if (res[i].register_op >= IORING_REGISTER_LAST) {
9980 ret = -EINVAL;
9981 goto out;
9982 }
9983
9984 __set_bit(res[i].register_op,
9985 ctx->restrictions.register_op);
9986 break;
9987 case IORING_RESTRICTION_SQE_OP:
9988 if (res[i].sqe_op >= IORING_OP_LAST) {
9989 ret = -EINVAL;
9990 goto out;
9991 }
9992
9993 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9994 break;
9995 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9996 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9997 break;
9998 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9999 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
10000 break;
10001 default:
10002 ret = -EINVAL;
10003 goto out;
10004 }
10005 }
10006
10007 out:
10008 /* Reset all restrictions if an error happened */
10009 if (ret != 0)
10010 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
10011 else
10012 ctx->restrictions.registered = true;
10013
10014 kfree(res);
10015 return ret;
10016 }
10017
10018 static int io_register_enable_rings(struct io_ring_ctx *ctx)
10019 {
10020 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
10021 return -EBADFD;
10022
10023 if (ctx->restrictions.registered)
10024 ctx->restricted = 1;
10025
10026 ctx->flags &= ~IORING_SETUP_R_DISABLED;
10027
10028 io_sq_offload_start(ctx);
10029
10030 return 0;
10031 }
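
/*
 * Editor's illustration, not part of this file: a userspace sketch of the
 * restriction flow implemented above. The ring must be created with
 * IORING_SETUP_R_DISABLED; restrictions are registered once, then the ring is
 * enabled with IORING_REGISTER_ENABLE_RINGS. Assumes the UAPI
 * io_uring_restriction layout from <linux/io_uring.h>.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Only allow IORING_OP_READV sqes and further FILES_UPDATE registrations. */
static int example_restrict_ring(int ring_fd)
{
	struct io_uring_restriction res[2] = {
		{ .opcode = IORING_RESTRICTION_SQE_OP,
		  .sqe_op = IORING_OP_READV },
		{ .opcode = IORING_RESTRICTION_REGISTER_OP,
		  .register_op = IORING_REGISTER_FILES_UPDATE },
	};

	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_RESTRICTIONS, res, 2) < 0)
		return -1;
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}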
10032
10033 static bool io_register_op_must_quiesce(int op)
10034 {
10035 switch (op) {
10036 case IORING_UNREGISTER_FILES:
10037 case IORING_REGISTER_FILES_UPDATE:
10038 case IORING_REGISTER_PROBE:
10039 case IORING_REGISTER_PERSONALITY:
10040 case IORING_UNREGISTER_PERSONALITY:
10041 return false;
10042 default:
10043 return true;
10044 }
10045 }
10046
10047 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10048 void __user *arg, unsigned nr_args)
10049 __releases(ctx->uring_lock)
10050 __acquires(ctx->uring_lock)
10051 {
10052 int ret;
10053
10054 /*
10055 * We're inside the ring mutex; if the ref is already dying, then
10056 * someone else killed the ctx or is already going through
10057 * io_uring_register().
10058 */
10059 if (percpu_ref_is_dying(&ctx->refs))
10060 return -ENXIO;
10061
	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		do {
			ret = wait_for_completion_interruptible(&ctx->ref_comp);
			if (!ret)
				break;
			ret = io_run_task_work_sig();
			if (ret < 0)
				break;
		} while (1);

		mutex_lock(&ctx->uring_lock);

		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			goto out_quiesce;
		}
	}

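	/*
	 * With restrictions active, only register opcodes that were
	 * explicitly allowed via IORING_REGISTER_RESTRICTIONS may proceed.
	 */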
	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST) {
			ret = -EINVAL;
			goto out;
		}

		if (!test_bit(opcode, ctx->restrictions.register_op)) {
			ret = -EACCES;
			goto out;
		}
	}

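	/* Each opcode handler validates its own arg/nr_args combination */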
	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out_quiesce:
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

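/*
 * io_uring_register(2) syscall entry: resolve the ring fd, verify it
 * really is an io_uring instance, and dispatch to __io_uring_register()
 * under ctx->uring_lock.
 */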
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
				ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}
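
/*
 * Illustrative userspace use of this syscall (not kernel code, a minimal
 * sketch only): registering and later unregistering an eventfd for
 * completion notifications. ring_fd is the descriptor returned by
 * io_uring_setup(), io_uring_register() stands in for
 * syscall(__NR_io_uring_register, ...) or the liburing wrapper, and
 * error handling is omitted:
 *
 *	int evfd = eventfd(0, 0);
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_EVENTFD, &evfd, 1);
 *	...
 *	io_uring_register(ring_fd, IORING_UNREGISTER_EVENTFD, NULL, 0);
 */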
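
/*
 * Boot-time init: the BUILD_BUG_* checks pin down the SQE layout shared
 * with userspace (any offset or size change breaks the ABI), and the
 * io_kiocb slab cache used for requests is created here.
 */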
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64, user_data);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16, personality);
	BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
};
__initcall(io_uring_init);