1 // SPDX-License-Identifier: GPL-2.0
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
30 * Also see the examples in the liburing library:
32 * git://git.kernel.dk/liburing
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <linux/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
49 #include <linux/bits.h>
51 #include <linux/sched/signal.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
56 #include <linux/mman.h>
57 #include <linux/mmu_context.h>
58 #include <linux/percpu.h>
59 #include <linux/slab.h>
60 #include <linux/kthread.h>
61 #include <linux/blkdev.h>
62 #include <linux/bvec.h>
63 #include <linux/net.h>
65 #include <net/af_unix.h>
67 #include <linux/anon_inodes.h>
68 #include <linux/sched/mm.h>
69 #include <linux/uaccess.h>
70 #include <linux/nospec.h>
71 #include <linux/sizes.h>
72 #include <linux/hugetlb.h>
73 #include <linux/highmem.h>
74 #include <linux/namei.h>
75 #include <linux/fsnotify.h>
76 #include <linux/fadvise.h>
77 #include <linux/eventpoll.h>
78 #include <linux/fs_struct.h>
80 #define CREATE_TRACE_POINTS
81 #include <trace/events/io_uring.h>
83 #include <uapi/linux/io_uring.h>
88 #define IORING_MAX_ENTRIES 32768
89 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
92 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
94 #define IORING_FILE_TABLE_SHIFT 9
95 #define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
96 #define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
97 #define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
100 u32 head ____cacheline_aligned_in_smp
;
101 u32 tail ____cacheline_aligned_in_smp
;
105 * This data is shared with the application through the mmap at offsets
106 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
108 * The offsets to the member fields are published through struct
109 * io_sqring_offsets when calling io_uring_setup.
113 * Head and tail offsets into the ring; the offsets need to be
114 * masked to get valid indices.
116 * The kernel controls head of the sq ring and the tail of the cq ring,
117 * and the application controls tail of the sq ring and the head of the
120 struct io_uring sq
, cq
;
122 * Bitmasks to apply to head and tail offsets (constant, equals
125 u32 sq_ring_mask
, cq_ring_mask
;
126 /* Ring sizes (constant, power of 2) */
127 u32 sq_ring_entries
, cq_ring_entries
;
129 * Number of invalid entries dropped by the kernel due to
130 * invalid index stored in array
132 * Written by the kernel, shouldn't be modified by the
133 * application (i.e. get number of "new events" by comparing to
136 * After a new SQ head value was read by the application this
137 * counter includes all submissions that were dropped reaching
138 * the new SQ head (and possibly more).
144 * Written by the kernel, shouldn't be modified by the
147 * The application needs a full memory barrier before checking
148 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
152 * Number of completion events lost because the queue was full;
153 * this should be avoided by the application by making sure
154 * there are not more requests pending than there is space in
155 * the completion queue.
157 * Written by the kernel, shouldn't be modified by the
158 * application (i.e. get number of "new events" by comparing to
161 * As completion events come in out of order this counter is not
162 * ordered with any other data.
166 * Ring buffer of completion events.
168 * The kernel writes completion events fresh every time they are
169 * produced, so the application is allowed to modify pending
172 struct io_uring_cqe cqes
[] ____cacheline_aligned_in_smp
;
175 struct io_mapped_ubuf
{
178 struct bio_vec
*bvec
;
179 unsigned int nr_bvecs
;
182 struct fixed_file_table
{
186 struct fixed_file_data
{
187 struct fixed_file_table
*table
;
188 struct io_ring_ctx
*ctx
;
190 struct percpu_ref refs
;
191 struct llist_head put_llist
;
192 struct work_struct ref_work
;
193 struct completion done
;
198 struct percpu_ref refs
;
199 } ____cacheline_aligned_in_smp
;
203 unsigned int compat
: 1;
204 unsigned int account_mem
: 1;
205 unsigned int cq_overflow_flushed
: 1;
206 unsigned int drain_next
: 1;
207 unsigned int eventfd_async
: 1;
210 * Ring buffer of indices into array of io_uring_sqe, which is
211 * mmapped by the application using the IORING_OFF_SQES offset.
213 * This indirection could e.g. be used to assign fixed
214 * io_uring_sqe entries to operations and only submit them to
215 * the queue when needed.
217 * The kernel modifies neither the indices array nor the entries
221 unsigned cached_sq_head
;
224 unsigned sq_thread_idle
;
225 unsigned cached_sq_dropped
;
226 atomic_t cached_cq_overflow
;
227 unsigned long sq_check_overflow
;
229 struct list_head defer_list
;
230 struct list_head timeout_list
;
231 struct list_head cq_overflow_list
;
233 wait_queue_head_t inflight_wait
;
234 struct io_uring_sqe
*sq_sqes
;
235 } ____cacheline_aligned_in_smp
;
237 struct io_rings
*rings
;
241 struct task_struct
*sqo_thread
; /* if using sq thread polling */
242 struct mm_struct
*sqo_mm
;
243 wait_queue_head_t sqo_wait
;
246 * If used, fixed file set. Writers must ensure that ->refs is dead,
247 * readers must ensure that ->refs is alive as long as the file* is
248 * used. Only updated through io_uring_register(2).
250 struct fixed_file_data
*file_data
;
251 unsigned nr_user_files
;
253 struct file
*ring_file
;
255 /* if used, fixed mapped user buffers */
256 unsigned nr_user_bufs
;
257 struct io_mapped_ubuf
*user_bufs
;
259 struct user_struct
*user
;
261 const struct cred
*creds
;
263 /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
264 struct completion
*completions
;
266 /* if all else fails... */
267 struct io_kiocb
*fallback_req
;
269 #if defined(CONFIG_UNIX)
270 struct socket
*ring_sock
;
273 struct idr personality_idr
;
276 unsigned cached_cq_tail
;
279 atomic_t cq_timeouts
;
280 unsigned long cq_check_overflow
;
281 struct wait_queue_head cq_wait
;
282 struct fasync_struct
*cq_fasync
;
283 struct eventfd_ctx
*cq_ev_fd
;
284 } ____cacheline_aligned_in_smp
;
287 struct mutex uring_lock
;
288 wait_queue_head_t wait
;
289 } ____cacheline_aligned_in_smp
;
292 spinlock_t completion_lock
;
293 struct llist_head poll_llist
;
296 * ->poll_list is protected by the ctx->uring_lock for
297 * io_uring instances that don't use IORING_SETUP_SQPOLL.
298 * For SQPOLL, only the single threaded io_sq_thread() will
299 * manipulate the list, hence no extra locking is needed there.
301 struct list_head poll_list
;
302 struct hlist_head
*cancel_hash
;
303 unsigned cancel_hash_bits
;
304 bool poll_multi_file
;
306 spinlock_t inflight_lock
;
307 struct list_head inflight_list
;
308 } ____cacheline_aligned_in_smp
;
312 * First field must be the file pointer in all the
313 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
315 struct io_poll_iocb
{
318 struct wait_queue_head
*head
;
324 struct wait_queue_entry wait
;
329 struct file
*put_file
;
333 struct io_timeout_data
{
334 struct io_kiocb
*req
;
335 struct hrtimer timer
;
336 struct timespec64 ts
;
337 enum hrtimer_mode mode
;
343 struct sockaddr __user
*addr
;
344 int __user
*addr_len
;
369 /* NOTE: kiocb has the file as the first member, so don't do it here */
377 struct sockaddr __user
*addr
;
384 struct user_msghdr __user
*msg
;
397 struct filename
*filename
;
398 struct statx __user
*buffer
;
402 struct io_files_update
{
428 struct epoll_event event
;
431 struct io_async_connect
{
432 struct sockaddr_storage address
;
435 struct io_async_msghdr
{
436 struct iovec fast_iov
[UIO_FASTIOV
];
438 struct sockaddr __user
*uaddr
;
440 struct sockaddr_storage addr
;
444 struct iovec fast_iov
[UIO_FASTIOV
];
450 struct io_async_ctx
{
452 struct io_async_rw rw
;
453 struct io_async_msghdr msg
;
454 struct io_async_connect connect
;
455 struct io_timeout_data timeout
;
460 REQ_F_FIXED_FILE_BIT
= IOSQE_FIXED_FILE_BIT
,
461 REQ_F_IO_DRAIN_BIT
= IOSQE_IO_DRAIN_BIT
,
462 REQ_F_LINK_BIT
= IOSQE_IO_LINK_BIT
,
463 REQ_F_HARDLINK_BIT
= IOSQE_IO_HARDLINK_BIT
,
464 REQ_F_FORCE_ASYNC_BIT
= IOSQE_ASYNC_BIT
,
471 REQ_F_IOPOLL_COMPLETED_BIT
,
472 REQ_F_LINK_TIMEOUT_BIT
,
476 REQ_F_TIMEOUT_NOSEQ_BIT
,
477 REQ_F_COMP_LOCKED_BIT
,
478 REQ_F_NEED_CLEANUP_BIT
,
484 REQ_F_FIXED_FILE
= BIT(REQ_F_FIXED_FILE_BIT
),
485 /* drain existing IO first */
486 REQ_F_IO_DRAIN
= BIT(REQ_F_IO_DRAIN_BIT
),
488 REQ_F_LINK
= BIT(REQ_F_LINK_BIT
),
489 /* doesn't sever on completion < 0 */
490 REQ_F_HARDLINK
= BIT(REQ_F_HARDLINK_BIT
),
492 REQ_F_FORCE_ASYNC
= BIT(REQ_F_FORCE_ASYNC_BIT
),
494 /* already grabbed next link */
495 REQ_F_LINK_NEXT
= BIT(REQ_F_LINK_NEXT_BIT
),
496 /* fail rest of links */
497 REQ_F_FAIL_LINK
= BIT(REQ_F_FAIL_LINK_BIT
),
498 /* on inflight list */
499 REQ_F_INFLIGHT
= BIT(REQ_F_INFLIGHT_BIT
),
500 /* read/write uses file position */
501 REQ_F_CUR_POS
= BIT(REQ_F_CUR_POS_BIT
),
502 /* must not punt to workers */
503 REQ_F_NOWAIT
= BIT(REQ_F_NOWAIT_BIT
),
504 /* polled IO has completed */
505 REQ_F_IOPOLL_COMPLETED
= BIT(REQ_F_IOPOLL_COMPLETED_BIT
),
506 /* has linked timeout */
507 REQ_F_LINK_TIMEOUT
= BIT(REQ_F_LINK_TIMEOUT_BIT
),
508 /* timeout request */
509 REQ_F_TIMEOUT
= BIT(REQ_F_TIMEOUT_BIT
),
511 REQ_F_ISREG
= BIT(REQ_F_ISREG_BIT
),
512 /* must be punted even for NONBLOCK */
513 REQ_F_MUST_PUNT
= BIT(REQ_F_MUST_PUNT_BIT
),
514 /* no timeout sequence */
515 REQ_F_TIMEOUT_NOSEQ
= BIT(REQ_F_TIMEOUT_NOSEQ_BIT
),
516 /* completion under lock */
517 REQ_F_COMP_LOCKED
= BIT(REQ_F_COMP_LOCKED_BIT
),
519 REQ_F_NEED_CLEANUP
= BIT(REQ_F_NEED_CLEANUP_BIT
),
520 /* in overflow list */
521 REQ_F_OVERFLOW
= BIT(REQ_F_OVERFLOW_BIT
),
525 * NOTE! Each of the iocb union members has the file pointer
526 * as the first entry in their struct definition. So you can
527 * access the file pointer through any of the sub-structs,
528 * or directly as just 'ki_filp' in this struct.
534 struct io_poll_iocb poll
;
535 struct io_accept accept
;
537 struct io_cancel cancel
;
538 struct io_timeout timeout
;
539 struct io_connect connect
;
540 struct io_sr_msg sr_msg
;
542 struct io_close close
;
543 struct io_files_update files_update
;
544 struct io_fadvise fadvise
;
545 struct io_madvise madvise
;
546 struct io_epoll epoll
;
549 struct io_async_ctx
*io
;
551 * llist_node is only used for poll deferred completions
553 struct llist_node llist_node
;
555 bool needs_fixed_file
;
558 struct io_ring_ctx
*ctx
;
560 struct list_head list
;
561 struct hlist_node hash_node
;
563 struct list_head link_list
;
570 struct list_head inflight_entry
;
572 struct io_wq_work work
;
575 #define IO_PLUG_THRESHOLD 2
576 #define IO_IOPOLL_BATCH 8
578 struct io_submit_state
{
579 struct blk_plug plug
;
582 * io_kiocb alloc cache
584 void *reqs
[IO_IOPOLL_BATCH
];
585 unsigned int free_reqs
;
588 * File reference cache
592 unsigned int has_refs
;
593 unsigned int used_refs
;
594 unsigned int ios_left
;
598 /* needs req->io allocated for deferral/async */
599 unsigned async_ctx
: 1;
600 /* needs current->mm setup, does mm access */
601 unsigned needs_mm
: 1;
602 /* needs req->file assigned */
603 unsigned needs_file
: 1;
604 /* needs req->file assigned IFF fd is >= 0 */
605 unsigned fd_non_neg
: 1;
606 /* hash wq insertion if file is a regular file */
607 unsigned hash_reg_file
: 1;
608 /* unbound wq insertion if file is a non-regular file */
609 unsigned unbound_nonreg_file
: 1;
610 /* opcode is not supported by this kernel */
611 unsigned not_supported
: 1;
612 /* needs file table */
613 unsigned file_table
: 1;
615 unsigned needs_fs
: 1;
618 static const struct io_op_def io_op_defs
[] = {
619 [IORING_OP_NOP
] = {},
620 [IORING_OP_READV
] = {
624 .unbound_nonreg_file
= 1,
626 [IORING_OP_WRITEV
] = {
631 .unbound_nonreg_file
= 1,
633 [IORING_OP_FSYNC
] = {
636 [IORING_OP_READ_FIXED
] = {
638 .unbound_nonreg_file
= 1,
640 [IORING_OP_WRITE_FIXED
] = {
643 .unbound_nonreg_file
= 1,
645 [IORING_OP_POLL_ADD
] = {
647 .unbound_nonreg_file
= 1,
649 [IORING_OP_POLL_REMOVE
] = {},
650 [IORING_OP_SYNC_FILE_RANGE
] = {
653 [IORING_OP_SENDMSG
] = {
657 .unbound_nonreg_file
= 1,
660 [IORING_OP_RECVMSG
] = {
664 .unbound_nonreg_file
= 1,
667 [IORING_OP_TIMEOUT
] = {
671 [IORING_OP_TIMEOUT_REMOVE
] = {},
672 [IORING_OP_ACCEPT
] = {
675 .unbound_nonreg_file
= 1,
678 [IORING_OP_ASYNC_CANCEL
] = {},
679 [IORING_OP_LINK_TIMEOUT
] = {
683 [IORING_OP_CONNECT
] = {
687 .unbound_nonreg_file
= 1,
689 [IORING_OP_FALLOCATE
] = {
692 [IORING_OP_OPENAT
] = {
698 [IORING_OP_CLOSE
] = {
702 [IORING_OP_FILES_UPDATE
] = {
706 [IORING_OP_STATX
] = {
715 .unbound_nonreg_file
= 1,
717 [IORING_OP_WRITE
] = {
720 .unbound_nonreg_file
= 1,
722 [IORING_OP_FADVISE
] = {
725 [IORING_OP_MADVISE
] = {
731 .unbound_nonreg_file
= 1,
736 .unbound_nonreg_file
= 1,
738 [IORING_OP_OPENAT2
] = {
744 [IORING_OP_EPOLL_CTL
] = {
745 .unbound_nonreg_file
= 1,
750 static void io_wq_submit_work(struct io_wq_work
**workptr
);
751 static void io_cqring_fill_event(struct io_kiocb
*req
, long res
);
752 static void io_put_req(struct io_kiocb
*req
);
753 static void __io_double_put_req(struct io_kiocb
*req
);
754 static struct io_kiocb
*io_prep_linked_timeout(struct io_kiocb
*req
);
755 static void io_queue_linked_timeout(struct io_kiocb
*req
);
756 static int __io_sqe_files_update(struct io_ring_ctx
*ctx
,
757 struct io_uring_files_update
*ip
,
759 static int io_grab_files(struct io_kiocb
*req
);
760 static void io_ring_file_ref_flush(struct fixed_file_data
*data
);
761 static void io_cleanup_req(struct io_kiocb
*req
);
763 static struct kmem_cache
*req_cachep
;
765 static const struct file_operations io_uring_fops
;
767 struct sock
*io_uring_get_socket(struct file
*file
)
769 #if defined(CONFIG_UNIX)
770 if (file
->f_op
== &io_uring_fops
) {
771 struct io_ring_ctx
*ctx
= file
->private_data
;
773 return ctx
->ring_sock
->sk
;
778 EXPORT_SYMBOL(io_uring_get_socket
);
780 static void io_ring_ctx_ref_free(struct percpu_ref
*ref
)
782 struct io_ring_ctx
*ctx
= container_of(ref
, struct io_ring_ctx
, refs
);
784 complete(&ctx
->completions
[0]);
787 static struct io_ring_ctx
*io_ring_ctx_alloc(struct io_uring_params
*p
)
789 struct io_ring_ctx
*ctx
;
792 ctx
= kzalloc(sizeof(*ctx
), GFP_KERNEL
);
796 ctx
->fallback_req
= kmem_cache_alloc(req_cachep
, GFP_KERNEL
);
797 if (!ctx
->fallback_req
)
800 ctx
->completions
= kmalloc(2 * sizeof(struct completion
), GFP_KERNEL
);
801 if (!ctx
->completions
)
805 * Use 5 bits less than the max cq entries, that should give us around
806 * 32 entries per hash list if totally full and uniformly spread.
808 hash_bits
= ilog2(p
->cq_entries
);
812 ctx
->cancel_hash_bits
= hash_bits
;
813 ctx
->cancel_hash
= kmalloc((1U << hash_bits
) * sizeof(struct hlist_head
),
815 if (!ctx
->cancel_hash
)
817 __hash_init(ctx
->cancel_hash
, 1U << hash_bits
);
819 if (percpu_ref_init(&ctx
->refs
, io_ring_ctx_ref_free
,
820 PERCPU_REF_ALLOW_REINIT
, GFP_KERNEL
))
823 ctx
->flags
= p
->flags
;
824 init_waitqueue_head(&ctx
->cq_wait
);
825 INIT_LIST_HEAD(&ctx
->cq_overflow_list
);
826 init_completion(&ctx
->completions
[0]);
827 init_completion(&ctx
->completions
[1]);
828 idr_init(&ctx
->personality_idr
);
829 mutex_init(&ctx
->uring_lock
);
830 init_waitqueue_head(&ctx
->wait
);
831 spin_lock_init(&ctx
->completion_lock
);
832 init_llist_head(&ctx
->poll_llist
);
833 INIT_LIST_HEAD(&ctx
->poll_list
);
834 INIT_LIST_HEAD(&ctx
->defer_list
);
835 INIT_LIST_HEAD(&ctx
->timeout_list
);
836 init_waitqueue_head(&ctx
->inflight_wait
);
837 spin_lock_init(&ctx
->inflight_lock
);
838 INIT_LIST_HEAD(&ctx
->inflight_list
);
841 if (ctx
->fallback_req
)
842 kmem_cache_free(req_cachep
, ctx
->fallback_req
);
843 kfree(ctx
->completions
);
844 kfree(ctx
->cancel_hash
);
849 static inline bool __req_need_defer(struct io_kiocb
*req
)
851 struct io_ring_ctx
*ctx
= req
->ctx
;
853 return req
->sequence
!= ctx
->cached_cq_tail
+ ctx
->cached_sq_dropped
854 + atomic_read(&ctx
->cached_cq_overflow
);
857 static inline bool req_need_defer(struct io_kiocb
*req
)
859 if (unlikely(req
->flags
& REQ_F_IO_DRAIN
))
860 return __req_need_defer(req
);
865 static struct io_kiocb
*io_get_deferred_req(struct io_ring_ctx
*ctx
)
867 struct io_kiocb
*req
;
869 req
= list_first_entry_or_null(&ctx
->defer_list
, struct io_kiocb
, list
);
870 if (req
&& !req_need_defer(req
)) {
871 list_del_init(&req
->list
);
878 static struct io_kiocb
*io_get_timeout_req(struct io_ring_ctx
*ctx
)
880 struct io_kiocb
*req
;
882 req
= list_first_entry_or_null(&ctx
->timeout_list
, struct io_kiocb
, list
);
884 if (req
->flags
& REQ_F_TIMEOUT_NOSEQ
)
886 if (!__req_need_defer(req
)) {
887 list_del_init(&req
->list
);
895 static void __io_commit_cqring(struct io_ring_ctx
*ctx
)
897 struct io_rings
*rings
= ctx
->rings
;
899 /* order cqe stores with ring update */
900 smp_store_release(&rings
->cq
.tail
, ctx
->cached_cq_tail
);
902 if (wq_has_sleeper(&ctx
->cq_wait
)) {
903 wake_up_interruptible(&ctx
->cq_wait
);
904 kill_fasync(&ctx
->cq_fasync
, SIGIO
, POLL_IN
);
908 static inline void io_req_work_grab_env(struct io_kiocb
*req
,
909 const struct io_op_def
*def
)
911 if (!req
->work
.mm
&& def
->needs_mm
) {
913 req
->work
.mm
= current
->mm
;
915 if (!req
->work
.creds
)
916 req
->work
.creds
= get_current_cred();
917 if (!req
->work
.fs
&& def
->needs_fs
) {
918 spin_lock(¤t
->fs
->lock
);
919 if (!current
->fs
->in_exec
) {
920 req
->work
.fs
= current
->fs
;
921 req
->work
.fs
->users
++;
923 req
->work
.flags
|= IO_WQ_WORK_CANCEL
;
925 spin_unlock(¤t
->fs
->lock
);
927 if (!req
->work
.task_pid
)
928 req
->work
.task_pid
= task_pid_vnr(current
);
931 static inline void io_req_work_drop_env(struct io_kiocb
*req
)
934 mmdrop(req
->work
.mm
);
937 if (req
->work
.creds
) {
938 put_cred(req
->work
.creds
);
939 req
->work
.creds
= NULL
;
942 struct fs_struct
*fs
= req
->work
.fs
;
944 spin_lock(&req
->work
.fs
->lock
);
947 spin_unlock(&req
->work
.fs
->lock
);
953 static inline bool io_prep_async_work(struct io_kiocb
*req
,
954 struct io_kiocb
**link
)
956 const struct io_op_def
*def
= &io_op_defs
[req
->opcode
];
957 bool do_hashed
= false;
959 if (req
->flags
& REQ_F_ISREG
) {
960 if (def
->hash_reg_file
)
963 if (def
->unbound_nonreg_file
)
964 req
->work
.flags
|= IO_WQ_WORK_UNBOUND
;
967 io_req_work_grab_env(req
, def
);
969 *link
= io_prep_linked_timeout(req
);
973 static inline void io_queue_async_work(struct io_kiocb
*req
)
975 struct io_ring_ctx
*ctx
= req
->ctx
;
976 struct io_kiocb
*link
;
979 do_hashed
= io_prep_async_work(req
, &link
);
981 trace_io_uring_queue_async_work(ctx
, do_hashed
, req
, &req
->work
,
984 io_wq_enqueue(ctx
->io_wq
, &req
->work
);
986 io_wq_enqueue_hashed(ctx
->io_wq
, &req
->work
,
987 file_inode(req
->file
));
991 io_queue_linked_timeout(link
);
994 static void io_kill_timeout(struct io_kiocb
*req
)
998 ret
= hrtimer_try_to_cancel(&req
->io
->timeout
.timer
);
1000 atomic_inc(&req
->ctx
->cq_timeouts
);
1001 list_del_init(&req
->list
);
1002 req
->flags
|= REQ_F_COMP_LOCKED
;
1003 io_cqring_fill_event(req
, 0);
1008 static void io_kill_timeouts(struct io_ring_ctx
*ctx
)
1010 struct io_kiocb
*req
, *tmp
;
1012 spin_lock_irq(&ctx
->completion_lock
);
1013 list_for_each_entry_safe(req
, tmp
, &ctx
->timeout_list
, list
)
1014 io_kill_timeout(req
);
1015 spin_unlock_irq(&ctx
->completion_lock
);
1018 static void io_commit_cqring(struct io_ring_ctx
*ctx
)
1020 struct io_kiocb
*req
;
1022 while ((req
= io_get_timeout_req(ctx
)) != NULL
)
1023 io_kill_timeout(req
);
1025 __io_commit_cqring(ctx
);
1027 while ((req
= io_get_deferred_req(ctx
)) != NULL
)
1028 io_queue_async_work(req
);
1031 static struct io_uring_cqe
*io_get_cqring(struct io_ring_ctx
*ctx
)
1033 struct io_rings
*rings
= ctx
->rings
;
1036 tail
= ctx
->cached_cq_tail
;
1038 * writes to the cq entry need to come after reading head; the
1039 * control dependency is enough as we're using WRITE_ONCE to
1042 if (tail
- READ_ONCE(rings
->cq
.head
) == rings
->cq_ring_entries
)
1045 ctx
->cached_cq_tail
++;
1046 return &rings
->cqes
[tail
& ctx
->cq_mask
];
1049 static inline bool io_should_trigger_evfd(struct io_ring_ctx
*ctx
)
1053 if (!ctx
->eventfd_async
)
1055 return io_wq_current_is_worker() || in_interrupt();
1058 static void __io_cqring_ev_posted(struct io_ring_ctx
*ctx
, bool trigger_ev
)
1060 if (waitqueue_active(&ctx
->wait
))
1061 wake_up(&ctx
->wait
);
1062 if (waitqueue_active(&ctx
->sqo_wait
))
1063 wake_up(&ctx
->sqo_wait
);
1065 eventfd_signal(ctx
->cq_ev_fd
, 1);
1068 static void io_cqring_ev_posted(struct io_ring_ctx
*ctx
)
1070 __io_cqring_ev_posted(ctx
, io_should_trigger_evfd(ctx
));
1073 /* Returns true if there are no backlogged entries after the flush */
1074 static bool io_cqring_overflow_flush(struct io_ring_ctx
*ctx
, bool force
)
1076 struct io_rings
*rings
= ctx
->rings
;
1077 struct io_uring_cqe
*cqe
;
1078 struct io_kiocb
*req
;
1079 unsigned long flags
;
1083 if (list_empty_careful(&ctx
->cq_overflow_list
))
1085 if ((ctx
->cached_cq_tail
- READ_ONCE(rings
->cq
.head
) ==
1086 rings
->cq_ring_entries
))
1090 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1092 /* if force is set, the ring is going away. always drop after that */
1094 ctx
->cq_overflow_flushed
= 1;
1097 while (!list_empty(&ctx
->cq_overflow_list
)) {
1098 cqe
= io_get_cqring(ctx
);
1102 req
= list_first_entry(&ctx
->cq_overflow_list
, struct io_kiocb
,
1104 list_move(&req
->list
, &list
);
1105 req
->flags
&= ~REQ_F_OVERFLOW
;
1107 WRITE_ONCE(cqe
->user_data
, req
->user_data
);
1108 WRITE_ONCE(cqe
->res
, req
->result
);
1109 WRITE_ONCE(cqe
->flags
, 0);
1111 WRITE_ONCE(ctx
->rings
->cq_overflow
,
1112 atomic_inc_return(&ctx
->cached_cq_overflow
));
1116 io_commit_cqring(ctx
);
1118 clear_bit(0, &ctx
->sq_check_overflow
);
1119 clear_bit(0, &ctx
->cq_check_overflow
);
1121 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1122 io_cqring_ev_posted(ctx
);
1124 while (!list_empty(&list
)) {
1125 req
= list_first_entry(&list
, struct io_kiocb
, list
);
1126 list_del(&req
->list
);
1133 static void io_cqring_fill_event(struct io_kiocb
*req
, long res
)
1135 struct io_ring_ctx
*ctx
= req
->ctx
;
1136 struct io_uring_cqe
*cqe
;
1138 trace_io_uring_complete(ctx
, req
->user_data
, res
);
1141 * If we can't get a cq entry, userspace overflowed the
1142 * submission (by quite a lot). Increment the overflow count in
1145 cqe
= io_get_cqring(ctx
);
1147 WRITE_ONCE(cqe
->user_data
, req
->user_data
);
1148 WRITE_ONCE(cqe
->res
, res
);
1149 WRITE_ONCE(cqe
->flags
, 0);
1150 } else if (ctx
->cq_overflow_flushed
) {
1151 WRITE_ONCE(ctx
->rings
->cq_overflow
,
1152 atomic_inc_return(&ctx
->cached_cq_overflow
));
1154 if (list_empty(&ctx
->cq_overflow_list
)) {
1155 set_bit(0, &ctx
->sq_check_overflow
);
1156 set_bit(0, &ctx
->cq_check_overflow
);
1158 req
->flags
|= REQ_F_OVERFLOW
;
1159 refcount_inc(&req
->refs
);
1161 list_add_tail(&req
->list
, &ctx
->cq_overflow_list
);
1165 static void io_cqring_add_event(struct io_kiocb
*req
, long res
)
1167 struct io_ring_ctx
*ctx
= req
->ctx
;
1168 unsigned long flags
;
1170 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1171 io_cqring_fill_event(req
, res
);
1172 io_commit_cqring(ctx
);
1173 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1175 io_cqring_ev_posted(ctx
);
1178 static inline bool io_is_fallback_req(struct io_kiocb
*req
)
1180 return req
== (struct io_kiocb
*)
1181 ((unsigned long) req
->ctx
->fallback_req
& ~1UL);
1184 static struct io_kiocb
*io_get_fallback_req(struct io_ring_ctx
*ctx
)
1186 struct io_kiocb
*req
;
1188 req
= ctx
->fallback_req
;
1189 if (!test_and_set_bit_lock(0, (unsigned long *) ctx
->fallback_req
))
1195 static struct io_kiocb
*io_get_req(struct io_ring_ctx
*ctx
,
1196 struct io_submit_state
*state
)
1198 gfp_t gfp
= GFP_KERNEL
| __GFP_NOWARN
;
1199 struct io_kiocb
*req
;
1202 req
= kmem_cache_alloc(req_cachep
, gfp
);
1205 } else if (!state
->free_reqs
) {
1209 sz
= min_t(size_t, state
->ios_left
, ARRAY_SIZE(state
->reqs
));
1210 ret
= kmem_cache_alloc_bulk(req_cachep
, gfp
, sz
, state
->reqs
);
1213 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1214 * retry single alloc to be on the safe side.
1216 if (unlikely(ret
<= 0)) {
1217 state
->reqs
[0] = kmem_cache_alloc(req_cachep
, gfp
);
1218 if (!state
->reqs
[0])
1222 state
->free_reqs
= ret
- 1;
1223 req
= state
->reqs
[ret
- 1];
1226 req
= state
->reqs
[state
->free_reqs
];
1234 /* one is dropped after submission, the other at completion */
1235 refcount_set(&req
->refs
, 2);
1237 INIT_IO_WORK(&req
->work
, io_wq_submit_work
);
1240 req
= io_get_fallback_req(ctx
);
1243 percpu_ref_put(&ctx
->refs
);
1247 static void __io_req_do_free(struct io_kiocb
*req
)
1249 if (likely(!io_is_fallback_req(req
)))
1250 kmem_cache_free(req_cachep
, req
);
1252 clear_bit_unlock(0, (unsigned long *) req
->ctx
->fallback_req
);
1255 static void __io_req_aux_free(struct io_kiocb
*req
)
1257 struct io_ring_ctx
*ctx
= req
->ctx
;
1259 if (req
->flags
& REQ_F_NEED_CLEANUP
)
1260 io_cleanup_req(req
);
1264 if (req
->flags
& REQ_F_FIXED_FILE
)
1265 percpu_ref_put(&ctx
->file_data
->refs
);
1270 io_req_work_drop_env(req
);
1273 static void __io_free_req(struct io_kiocb
*req
)
1275 __io_req_aux_free(req
);
1277 if (req
->flags
& REQ_F_INFLIGHT
) {
1278 struct io_ring_ctx
*ctx
= req
->ctx
;
1279 unsigned long flags
;
1281 spin_lock_irqsave(&ctx
->inflight_lock
, flags
);
1282 list_del(&req
->inflight_entry
);
1283 if (waitqueue_active(&ctx
->inflight_wait
))
1284 wake_up(&ctx
->inflight_wait
);
1285 spin_unlock_irqrestore(&ctx
->inflight_lock
, flags
);
1288 percpu_ref_put(&req
->ctx
->refs
);
1289 __io_req_do_free(req
);
1293 void *reqs
[IO_IOPOLL_BATCH
];
1298 static void io_free_req_many(struct io_ring_ctx
*ctx
, struct req_batch
*rb
)
1300 int fixed_refs
= rb
->to_free
;
1304 if (rb
->need_iter
) {
1305 int i
, inflight
= 0;
1306 unsigned long flags
;
1309 for (i
= 0; i
< rb
->to_free
; i
++) {
1310 struct io_kiocb
*req
= rb
->reqs
[i
];
1312 if (req
->flags
& REQ_F_FIXED_FILE
) {
1316 if (req
->flags
& REQ_F_INFLIGHT
)
1318 __io_req_aux_free(req
);
1323 spin_lock_irqsave(&ctx
->inflight_lock
, flags
);
1324 for (i
= 0; i
< rb
->to_free
; i
++) {
1325 struct io_kiocb
*req
= rb
->reqs
[i
];
1327 if (req
->flags
& REQ_F_INFLIGHT
) {
1328 list_del(&req
->inflight_entry
);
1333 spin_unlock_irqrestore(&ctx
->inflight_lock
, flags
);
1335 if (waitqueue_active(&ctx
->inflight_wait
))
1336 wake_up(&ctx
->inflight_wait
);
1339 kmem_cache_free_bulk(req_cachep
, rb
->to_free
, rb
->reqs
);
1341 percpu_ref_put_many(&ctx
->file_data
->refs
, fixed_refs
);
1342 percpu_ref_put_many(&ctx
->refs
, rb
->to_free
);
1343 rb
->to_free
= rb
->need_iter
= 0;
1346 static bool io_link_cancel_timeout(struct io_kiocb
*req
)
1348 struct io_ring_ctx
*ctx
= req
->ctx
;
1351 ret
= hrtimer_try_to_cancel(&req
->io
->timeout
.timer
);
1353 io_cqring_fill_event(req
, -ECANCELED
);
1354 io_commit_cqring(ctx
);
1355 req
->flags
&= ~REQ_F_LINK
;
1363 static void io_req_link_next(struct io_kiocb
*req
, struct io_kiocb
**nxtptr
)
1365 struct io_ring_ctx
*ctx
= req
->ctx
;
1366 bool wake_ev
= false;
1368 /* Already got next link */
1369 if (req
->flags
& REQ_F_LINK_NEXT
)
1373 * The list should never be empty when we are called here. But could
1374 * potentially happen if the chain is messed up, check to be on the
1377 while (!list_empty(&req
->link_list
)) {
1378 struct io_kiocb
*nxt
= list_first_entry(&req
->link_list
,
1379 struct io_kiocb
, link_list
);
1381 if (unlikely((req
->flags
& REQ_F_LINK_TIMEOUT
) &&
1382 (nxt
->flags
& REQ_F_TIMEOUT
))) {
1383 list_del_init(&nxt
->link_list
);
1384 wake_ev
|= io_link_cancel_timeout(nxt
);
1385 req
->flags
&= ~REQ_F_LINK_TIMEOUT
;
1389 list_del_init(&req
->link_list
);
1390 if (!list_empty(&nxt
->link_list
))
1391 nxt
->flags
|= REQ_F_LINK
;
1396 req
->flags
|= REQ_F_LINK_NEXT
;
1398 io_cqring_ev_posted(ctx
);
1402 * Called if REQ_F_LINK is set, and we fail the head request
1404 static void io_fail_links(struct io_kiocb
*req
)
1406 struct io_ring_ctx
*ctx
= req
->ctx
;
1407 unsigned long flags
;
1409 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1411 while (!list_empty(&req
->link_list
)) {
1412 struct io_kiocb
*link
= list_first_entry(&req
->link_list
,
1413 struct io_kiocb
, link_list
);
1415 list_del_init(&link
->link_list
);
1416 trace_io_uring_fail_link(req
, link
);
1418 if ((req
->flags
& REQ_F_LINK_TIMEOUT
) &&
1419 link
->opcode
== IORING_OP_LINK_TIMEOUT
) {
1420 io_link_cancel_timeout(link
);
1422 io_cqring_fill_event(link
, -ECANCELED
);
1423 __io_double_put_req(link
);
1425 req
->flags
&= ~REQ_F_LINK_TIMEOUT
;
1428 io_commit_cqring(ctx
);
1429 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1430 io_cqring_ev_posted(ctx
);
1433 static void io_req_find_next(struct io_kiocb
*req
, struct io_kiocb
**nxt
)
1435 if (likely(!(req
->flags
& REQ_F_LINK
)))
1439 * If LINK is set, we have dependent requests in this chain. If we
1440 * didn't fail this request, queue the first one up, moving any other
1441 * dependencies to the next request. In case of failure, fail the rest
1444 if (req
->flags
& REQ_F_FAIL_LINK
) {
1446 } else if ((req
->flags
& (REQ_F_LINK_TIMEOUT
| REQ_F_COMP_LOCKED
)) ==
1447 REQ_F_LINK_TIMEOUT
) {
1448 struct io_ring_ctx
*ctx
= req
->ctx
;
1449 unsigned long flags
;
1452 * If this is a timeout link, we could be racing with the
1453 * timeout timer. Grab the completion lock for this case to
1454 * protect against that.
1456 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1457 io_req_link_next(req
, nxt
);
1458 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1460 io_req_link_next(req
, nxt
);
1464 static void io_free_req(struct io_kiocb
*req
)
1466 struct io_kiocb
*nxt
= NULL
;
1468 io_req_find_next(req
, &nxt
);
1472 io_queue_async_work(nxt
);
1476 * Drop reference to request, return next in chain (if there is one) if this
1477 * was the last reference to this request.
1479 __attribute__((nonnull
))
1480 static void io_put_req_find_next(struct io_kiocb
*req
, struct io_kiocb
**nxtptr
)
1482 if (refcount_dec_and_test(&req
->refs
)) {
1483 io_req_find_next(req
, nxtptr
);
1488 static void io_put_req(struct io_kiocb
*req
)
1490 if (refcount_dec_and_test(&req
->refs
))
1495 * Must only be used if we don't need to care about links, usually from
1496 * within the completion handling itself.
1498 static void __io_double_put_req(struct io_kiocb
*req
)
1500 /* drop both submit and complete references */
1501 if (refcount_sub_and_test(2, &req
->refs
))
1505 static void io_double_put_req(struct io_kiocb
*req
)
1507 /* drop both submit and complete references */
1508 if (refcount_sub_and_test(2, &req
->refs
))
1512 static unsigned io_cqring_events(struct io_ring_ctx
*ctx
, bool noflush
)
1514 struct io_rings
*rings
= ctx
->rings
;
1516 if (test_bit(0, &ctx
->cq_check_overflow
)) {
1518 * noflush == true is from the waitqueue handler, just ensure
1519 * we wake up the task, and the next invocation will flush the
1520 * entries. We cannot safely to it from here.
1522 if (noflush
&& !list_empty(&ctx
->cq_overflow_list
))
1525 io_cqring_overflow_flush(ctx
, false);
1528 /* See comment at the top of this file */
1530 return ctx
->cached_cq_tail
- READ_ONCE(rings
->cq
.head
);
1533 static inline unsigned int io_sqring_entries(struct io_ring_ctx
*ctx
)
1535 struct io_rings
*rings
= ctx
->rings
;
1537 /* make sure SQ entry isn't read before tail */
1538 return smp_load_acquire(&rings
->sq
.tail
) - ctx
->cached_sq_head
;
1541 static inline bool io_req_multi_free(struct req_batch
*rb
, struct io_kiocb
*req
)
1543 if ((req
->flags
& REQ_F_LINK
) || io_is_fallback_req(req
))
1546 if (!(req
->flags
& REQ_F_FIXED_FILE
) || req
->io
)
1549 rb
->reqs
[rb
->to_free
++] = req
;
1550 if (unlikely(rb
->to_free
== ARRAY_SIZE(rb
->reqs
)))
1551 io_free_req_many(req
->ctx
, rb
);
1556 * Find and free completed poll iocbs
1558 static void io_iopoll_complete(struct io_ring_ctx
*ctx
, unsigned int *nr_events
,
1559 struct list_head
*done
)
1561 struct req_batch rb
;
1562 struct io_kiocb
*req
;
1564 rb
.to_free
= rb
.need_iter
= 0;
1565 while (!list_empty(done
)) {
1566 req
= list_first_entry(done
, struct io_kiocb
, list
);
1567 list_del(&req
->list
);
1569 io_cqring_fill_event(req
, req
->result
);
1572 if (refcount_dec_and_test(&req
->refs
) &&
1573 !io_req_multi_free(&rb
, req
))
1577 io_commit_cqring(ctx
);
1578 io_free_req_many(ctx
, &rb
);
1581 static int io_do_iopoll(struct io_ring_ctx
*ctx
, unsigned int *nr_events
,
1584 struct io_kiocb
*req
, *tmp
;
1590 * Only spin for completions if we don't have multiple devices hanging
1591 * off our complete list, and we're under the requested amount.
1593 spin
= !ctx
->poll_multi_file
&& *nr_events
< min
;
1596 list_for_each_entry_safe(req
, tmp
, &ctx
->poll_list
, list
) {
1597 struct kiocb
*kiocb
= &req
->rw
.kiocb
;
1600 * Move completed entries to our local list. If we find a
1601 * request that requires polling, break out and complete
1602 * the done list first, if we have entries there.
1604 if (req
->flags
& REQ_F_IOPOLL_COMPLETED
) {
1605 list_move_tail(&req
->list
, &done
);
1608 if (!list_empty(&done
))
1611 ret
= kiocb
->ki_filp
->f_op
->iopoll(kiocb
, spin
);
1620 if (!list_empty(&done
))
1621 io_iopoll_complete(ctx
, nr_events
, &done
);
1627 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
1628 * non-spinning poll check - we'll still enter the driver poll loop, but only
1629 * as a non-spinning completion check.
1631 static int io_iopoll_getevents(struct io_ring_ctx
*ctx
, unsigned int *nr_events
,
1634 while (!list_empty(&ctx
->poll_list
) && !need_resched()) {
1637 ret
= io_do_iopoll(ctx
, nr_events
, min
);
1640 if (!min
|| *nr_events
>= min
)
1648 * We can't just wait for polled events to come to us, we have to actively
1649 * find and complete them.
1651 static void io_iopoll_reap_events(struct io_ring_ctx
*ctx
)
1653 if (!(ctx
->flags
& IORING_SETUP_IOPOLL
))
1656 mutex_lock(&ctx
->uring_lock
);
1657 while (!list_empty(&ctx
->poll_list
)) {
1658 unsigned int nr_events
= 0;
1660 io_iopoll_getevents(ctx
, &nr_events
, 1);
1663 * Ensure we allow local-to-the-cpu processing to take place,
1664 * in this case we need to ensure that we reap all events.
1668 mutex_unlock(&ctx
->uring_lock
);
1671 static int io_iopoll_check(struct io_ring_ctx
*ctx
, unsigned *nr_events
,
1674 int iters
= 0, ret
= 0;
1677 * We disallow the app entering submit/complete with polling, but we
1678 * still need to lock the ring to prevent racing with polled issue
1679 * that got punted to a workqueue.
1681 mutex_lock(&ctx
->uring_lock
);
1686 * Don't enter poll loop if we already have events pending.
1687 * If we do, we can potentially be spinning for commands that
1688 * already triggered a CQE (eg in error).
1690 if (io_cqring_events(ctx
, false))
1694 * If a submit got punted to a workqueue, we can have the
1695 * application entering polling for a command before it gets
1696 * issued. That app will hold the uring_lock for the duration
1697 * of the poll right here, so we need to take a breather every
1698 * now and then to ensure that the issue has a chance to add
1699 * the poll to the issued list. Otherwise we can spin here
1700 * forever, while the workqueue is stuck trying to acquire the
1703 if (!(++iters
& 7)) {
1704 mutex_unlock(&ctx
->uring_lock
);
1705 mutex_lock(&ctx
->uring_lock
);
1708 if (*nr_events
< min
)
1709 tmin
= min
- *nr_events
;
1711 ret
= io_iopoll_getevents(ctx
, nr_events
, tmin
);
1715 } while (min
&& !*nr_events
&& !need_resched());
1717 mutex_unlock(&ctx
->uring_lock
);
1721 static void kiocb_end_write(struct io_kiocb
*req
)
1724 * Tell lockdep we inherited freeze protection from submission
1727 if (req
->flags
& REQ_F_ISREG
) {
1728 struct inode
*inode
= file_inode(req
->file
);
1730 __sb_writers_acquired(inode
->i_sb
, SB_FREEZE_WRITE
);
1732 file_end_write(req
->file
);
1735 static inline void req_set_fail_links(struct io_kiocb
*req
)
1737 if ((req
->flags
& (REQ_F_LINK
| REQ_F_HARDLINK
)) == REQ_F_LINK
)
1738 req
->flags
|= REQ_F_FAIL_LINK
;
1741 static void io_complete_rw_common(struct kiocb
*kiocb
, long res
)
1743 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1745 if (kiocb
->ki_flags
& IOCB_WRITE
)
1746 kiocb_end_write(req
);
1748 if (res
!= req
->result
)
1749 req_set_fail_links(req
);
1750 io_cqring_add_event(req
, res
);
1753 static void io_complete_rw(struct kiocb
*kiocb
, long res
, long res2
)
1755 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1757 io_complete_rw_common(kiocb
, res
);
1761 static struct io_kiocb
*__io_complete_rw(struct kiocb
*kiocb
, long res
)
1763 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1764 struct io_kiocb
*nxt
= NULL
;
1766 io_complete_rw_common(kiocb
, res
);
1767 io_put_req_find_next(req
, &nxt
);
1772 static void io_complete_rw_iopoll(struct kiocb
*kiocb
, long res
, long res2
)
1774 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1776 if (kiocb
->ki_flags
& IOCB_WRITE
)
1777 kiocb_end_write(req
);
1779 if (res
!= req
->result
)
1780 req_set_fail_links(req
);
1783 req
->flags
|= REQ_F_IOPOLL_COMPLETED
;
1787 * After the iocb has been issued, it's safe to be found on the poll list.
1788 * Adding the kiocb to the list AFTER submission ensures that we don't
1789 * find it from a io_iopoll_getevents() thread before the issuer is done
1790 * accessing the kiocb cookie.
1792 static void io_iopoll_req_issued(struct io_kiocb
*req
)
1794 struct io_ring_ctx
*ctx
= req
->ctx
;
1797 * Track whether we have multiple files in our lists. This will impact
1798 * how we do polling eventually, not spinning if we're on potentially
1799 * different devices.
1801 if (list_empty(&ctx
->poll_list
)) {
1802 ctx
->poll_multi_file
= false;
1803 } else if (!ctx
->poll_multi_file
) {
1804 struct io_kiocb
*list_req
;
1806 list_req
= list_first_entry(&ctx
->poll_list
, struct io_kiocb
,
1808 if (list_req
->file
!= req
->file
)
1809 ctx
->poll_multi_file
= true;
1813 * For fast devices, IO may have already completed. If it has, add
1814 * it to the front so we find it first.
1816 if (req
->flags
& REQ_F_IOPOLL_COMPLETED
)
1817 list_add(&req
->list
, &ctx
->poll_list
);
1819 list_add_tail(&req
->list
, &ctx
->poll_list
);
1821 if ((ctx
->flags
& IORING_SETUP_SQPOLL
) &&
1822 wq_has_sleeper(&ctx
->sqo_wait
))
1823 wake_up(&ctx
->sqo_wait
);
1826 static void io_file_put(struct io_submit_state
*state
)
1829 int diff
= state
->has_refs
- state
->used_refs
;
1832 fput_many(state
->file
, diff
);
1838 * Get as many references to a file as we have IOs left in this submission,
1839 * assuming most submissions are for one file, or at least that each file
1840 * has more than one submission.
1842 static struct file
*io_file_get(struct io_submit_state
*state
, int fd
)
1848 if (state
->fd
== fd
) {
1855 state
->file
= fget_many(fd
, state
->ios_left
);
1860 state
->has_refs
= state
->ios_left
;
1861 state
->used_refs
= 1;
1867 * If we tracked the file through the SCM inflight mechanism, we could support
1868 * any file. For now, just ensure that anything potentially problematic is done
1871 static bool io_file_supports_async(struct file
*file
)
1873 umode_t mode
= file_inode(file
)->i_mode
;
1875 if (S_ISBLK(mode
) || S_ISCHR(mode
) || S_ISSOCK(mode
))
1877 if (S_ISREG(mode
) && file
->f_op
!= &io_uring_fops
)
1883 static int io_prep_rw(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
1884 bool force_nonblock
)
1886 struct io_ring_ctx
*ctx
= req
->ctx
;
1887 struct kiocb
*kiocb
= &req
->rw
.kiocb
;
1891 if (S_ISREG(file_inode(req
->file
)->i_mode
))
1892 req
->flags
|= REQ_F_ISREG
;
1894 kiocb
->ki_pos
= READ_ONCE(sqe
->off
);
1895 if (kiocb
->ki_pos
== -1 && !(req
->file
->f_mode
& FMODE_STREAM
)) {
1896 req
->flags
|= REQ_F_CUR_POS
;
1897 kiocb
->ki_pos
= req
->file
->f_pos
;
1899 kiocb
->ki_hint
= ki_hint_validate(file_write_hint(kiocb
->ki_filp
));
1900 kiocb
->ki_flags
= iocb_flags(kiocb
->ki_filp
);
1901 ret
= kiocb_set_rw_flags(kiocb
, READ_ONCE(sqe
->rw_flags
));
1905 ioprio
= READ_ONCE(sqe
->ioprio
);
1907 ret
= ioprio_check_cap(ioprio
);
1911 kiocb
->ki_ioprio
= ioprio
;
1913 kiocb
->ki_ioprio
= get_current_ioprio();
1915 /* don't allow async punt if RWF_NOWAIT was requested */
1916 if ((kiocb
->ki_flags
& IOCB_NOWAIT
) ||
1917 (req
->file
->f_flags
& O_NONBLOCK
))
1918 req
->flags
|= REQ_F_NOWAIT
;
1921 kiocb
->ki_flags
|= IOCB_NOWAIT
;
1923 if (ctx
->flags
& IORING_SETUP_IOPOLL
) {
1924 if (!(kiocb
->ki_flags
& IOCB_DIRECT
) ||
1925 !kiocb
->ki_filp
->f_op
->iopoll
)
1928 kiocb
->ki_flags
|= IOCB_HIPRI
;
1929 kiocb
->ki_complete
= io_complete_rw_iopoll
;
1932 if (kiocb
->ki_flags
& IOCB_HIPRI
)
1934 kiocb
->ki_complete
= io_complete_rw
;
1937 req
->rw
.addr
= READ_ONCE(sqe
->addr
);
1938 req
->rw
.len
= READ_ONCE(sqe
->len
);
1939 /* we own ->private, reuse it for the buffer index */
1940 req
->rw
.kiocb
.private = (void *) (unsigned long)
1941 READ_ONCE(sqe
->buf_index
);
1945 static inline void io_rw_done(struct kiocb
*kiocb
, ssize_t ret
)
1951 case -ERESTARTNOINTR
:
1952 case -ERESTARTNOHAND
:
1953 case -ERESTART_RESTARTBLOCK
:
1955 * We can't just restart the syscall, since previously
1956 * submitted sqes may already be in progress. Just fail this
1962 kiocb
->ki_complete(kiocb
, ret
, 0);
1966 static void kiocb_done(struct kiocb
*kiocb
, ssize_t ret
, struct io_kiocb
**nxt
,
1969 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1971 if (req
->flags
& REQ_F_CUR_POS
)
1972 req
->file
->f_pos
= kiocb
->ki_pos
;
1973 if (in_async
&& ret
>= 0 && kiocb
->ki_complete
== io_complete_rw
)
1974 *nxt
= __io_complete_rw(kiocb
, ret
);
1976 io_rw_done(kiocb
, ret
);
1979 static ssize_t
io_import_fixed(struct io_kiocb
*req
, int rw
,
1980 struct iov_iter
*iter
)
1982 struct io_ring_ctx
*ctx
= req
->ctx
;
1983 size_t len
= req
->rw
.len
;
1984 struct io_mapped_ubuf
*imu
;
1985 unsigned index
, buf_index
;
1989 /* attempt to use fixed buffers without having provided iovecs */
1990 if (unlikely(!ctx
->user_bufs
))
1993 buf_index
= (unsigned long) req
->rw
.kiocb
.private;
1994 if (unlikely(buf_index
>= ctx
->nr_user_bufs
))
1997 index
= array_index_nospec(buf_index
, ctx
->nr_user_bufs
);
1998 imu
= &ctx
->user_bufs
[index
];
1999 buf_addr
= req
->rw
.addr
;
2002 if (buf_addr
+ len
< buf_addr
)
2004 /* not inside the mapped region */
2005 if (buf_addr
< imu
->ubuf
|| buf_addr
+ len
> imu
->ubuf
+ imu
->len
)
2009 * May not be a start of buffer, set size appropriately
2010 * and advance us to the beginning.
2012 offset
= buf_addr
- imu
->ubuf
;
2013 iov_iter_bvec(iter
, rw
, imu
->bvec
, imu
->nr_bvecs
, offset
+ len
);
2017 * Don't use iov_iter_advance() here, as it's really slow for
2018 * using the latter parts of a big fixed buffer - it iterates
2019 * over each segment manually. We can cheat a bit here, because
2022 * 1) it's a BVEC iter, we set it up
2023 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2024 * first and last bvec
2026 * So just find our index, and adjust the iterator afterwards.
2027 * If the offset is within the first bvec (or the whole first
2028 * bvec, just use iov_iter_advance(). This makes it easier
2029 * since we can just skip the first segment, which may not
2030 * be PAGE_SIZE aligned.
2032 const struct bio_vec
*bvec
= imu
->bvec
;
2034 if (offset
<= bvec
->bv_len
) {
2035 iov_iter_advance(iter
, offset
);
2037 unsigned long seg_skip
;
2039 /* skip first vec */
2040 offset
-= bvec
->bv_len
;
2041 seg_skip
= 1 + (offset
>> PAGE_SHIFT
);
2043 iter
->bvec
= bvec
+ seg_skip
;
2044 iter
->nr_segs
-= seg_skip
;
2045 iter
->count
-= bvec
->bv_len
+ offset
;
2046 iter
->iov_offset
= offset
& ~PAGE_MASK
;
2053 static ssize_t
io_import_iovec(int rw
, struct io_kiocb
*req
,
2054 struct iovec
**iovec
, struct iov_iter
*iter
)
2056 void __user
*buf
= u64_to_user_ptr(req
->rw
.addr
);
2057 size_t sqe_len
= req
->rw
.len
;
2060 opcode
= req
->opcode
;
2061 if (opcode
== IORING_OP_READ_FIXED
|| opcode
== IORING_OP_WRITE_FIXED
) {
2063 return io_import_fixed(req
, rw
, iter
);
2066 /* buffer index only valid with fixed read/write */
2067 if (req
->rw
.kiocb
.private)
2070 if (opcode
== IORING_OP_READ
|| opcode
== IORING_OP_WRITE
) {
2072 ret
= import_single_range(rw
, buf
, sqe_len
, *iovec
, iter
);
2074 return ret
< 0 ? ret
: sqe_len
;
2078 struct io_async_rw
*iorw
= &req
->io
->rw
;
2081 iov_iter_init(iter
, rw
, *iovec
, iorw
->nr_segs
, iorw
->size
);
2082 if (iorw
->iov
== iorw
->fast_iov
)
2087 #ifdef CONFIG_COMPAT
2088 if (req
->ctx
->compat
)
2089 return compat_import_iovec(rw
, buf
, sqe_len
, UIO_FASTIOV
,
2093 return import_iovec(rw
, buf
, sqe_len
, UIO_FASTIOV
, iovec
, iter
);
2097 * For files that don't have ->read_iter() and ->write_iter(), handle them
2098 * by looping over ->read() or ->write() manually.
2100 static ssize_t
loop_rw_iter(int rw
, struct file
*file
, struct kiocb
*kiocb
,
2101 struct iov_iter
*iter
)
2106 * Don't support polled IO through this interface, and we can't
2107 * support non-blocking either. For the latter, this just causes
2108 * the kiocb to be handled from an async context.
2110 if (kiocb
->ki_flags
& IOCB_HIPRI
)
2112 if (kiocb
->ki_flags
& IOCB_NOWAIT
)
2115 while (iov_iter_count(iter
)) {
2119 if (!iov_iter_is_bvec(iter
)) {
2120 iovec
= iov_iter_iovec(iter
);
2122 /* fixed buffers import bvec */
2123 iovec
.iov_base
= kmap(iter
->bvec
->bv_page
)
2125 iovec
.iov_len
= min(iter
->count
,
2126 iter
->bvec
->bv_len
- iter
->iov_offset
);
2130 nr
= file
->f_op
->read(file
, iovec
.iov_base
,
2131 iovec
.iov_len
, &kiocb
->ki_pos
);
2133 nr
= file
->f_op
->write(file
, iovec
.iov_base
,
2134 iovec
.iov_len
, &kiocb
->ki_pos
);
2137 if (iov_iter_is_bvec(iter
))
2138 kunmap(iter
->bvec
->bv_page
);
2146 if (nr
!= iovec
.iov_len
)
2148 iov_iter_advance(iter
, nr
);
2154 static void io_req_map_rw(struct io_kiocb
*req
, ssize_t io_size
,
2155 struct iovec
*iovec
, struct iovec
*fast_iov
,
2156 struct iov_iter
*iter
)
2158 req
->io
->rw
.nr_segs
= iter
->nr_segs
;
2159 req
->io
->rw
.size
= io_size
;
2160 req
->io
->rw
.iov
= iovec
;
2161 if (!req
->io
->rw
.iov
) {
2162 req
->io
->rw
.iov
= req
->io
->rw
.fast_iov
;
2163 memcpy(req
->io
->rw
.iov
, fast_iov
,
2164 sizeof(struct iovec
) * iter
->nr_segs
);
2166 req
->flags
|= REQ_F_NEED_CLEANUP
;
2170 static int io_alloc_async_ctx(struct io_kiocb
*req
)
2172 if (!io_op_defs
[req
->opcode
].async_ctx
)
2174 req
->io
= kmalloc(sizeof(*req
->io
), GFP_KERNEL
);
2175 return req
->io
== NULL
;
2178 static int io_setup_async_rw(struct io_kiocb
*req
, ssize_t io_size
,
2179 struct iovec
*iovec
, struct iovec
*fast_iov
,
2180 struct iov_iter
*iter
)
2182 if (!io_op_defs
[req
->opcode
].async_ctx
)
2185 if (io_alloc_async_ctx(req
))
2188 io_req_map_rw(req
, io_size
, iovec
, fast_iov
, iter
);
2193 static int io_read_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
2194 bool force_nonblock
)
2196 struct io_async_ctx
*io
;
2197 struct iov_iter iter
;
2200 ret
= io_prep_rw(req
, sqe
, force_nonblock
);
2204 if (unlikely(!(req
->file
->f_mode
& FMODE_READ
)))
2207 /* either don't need iovec imported or already have it */
2208 if (!req
->io
|| req
->flags
& REQ_F_NEED_CLEANUP
)
2212 io
->rw
.iov
= io
->rw
.fast_iov
;
2214 ret
= io_import_iovec(READ
, req
, &io
->rw
.iov
, &iter
);
2219 io_req_map_rw(req
, ret
, io
->rw
.iov
, io
->rw
.fast_iov
, &iter
);
2223 static int io_read(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
2224 bool force_nonblock
)
2226 struct iovec inline_vecs
[UIO_FASTIOV
], *iovec
= inline_vecs
;
2227 struct kiocb
*kiocb
= &req
->rw
.kiocb
;
2228 struct iov_iter iter
;
2230 ssize_t io_size
, ret
;
2232 ret
= io_import_iovec(READ
, req
, &iovec
, &iter
);
2236 /* Ensure we clear previously set non-block flag */
2237 if (!force_nonblock
)
2238 req
->rw
.kiocb
.ki_flags
&= ~IOCB_NOWAIT
;
2242 if (req
->flags
& REQ_F_LINK
)
2243 req
->result
= io_size
;
2246 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2247 * we know to async punt it even if it was opened O_NONBLOCK
2249 if (force_nonblock
&& !io_file_supports_async(req
->file
)) {
2250 req
->flags
|= REQ_F_MUST_PUNT
;
2254 iov_count
= iov_iter_count(&iter
);
2255 ret
= rw_verify_area(READ
, req
->file
, &kiocb
->ki_pos
, iov_count
);
2259 if (req
->file
->f_op
->read_iter
)
2260 ret2
= call_read_iter(req
->file
, kiocb
, &iter
);
2262 ret2
= loop_rw_iter(READ
, req
->file
, kiocb
, &iter
);
2264 /* Catch -EAGAIN return for forced non-blocking submission */
2265 if (!force_nonblock
|| ret2
!= -EAGAIN
) {
2266 kiocb_done(kiocb
, ret2
, nxt
, req
->in_async
);
2269 ret
= io_setup_async_rw(req
, io_size
, iovec
,
2270 inline_vecs
, &iter
);
2278 req
->flags
&= ~REQ_F_NEED_CLEANUP
;
2282 static int io_write_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
2283 bool force_nonblock
)
2285 struct io_async_ctx
*io
;
2286 struct iov_iter iter
;
2289 ret
= io_prep_rw(req
, sqe
, force_nonblock
);
2293 if (unlikely(!(req
->file
->f_mode
& FMODE_WRITE
)))
2296 /* either don't need iovec imported or already have it */
2297 if (!req
->io
|| req
->flags
& REQ_F_NEED_CLEANUP
)
2301 io
->rw
.iov
= io
->rw
.fast_iov
;
2303 ret
= io_import_iovec(WRITE
, req
, &io
->rw
.iov
, &iter
);
2308 io_req_map_rw(req
, ret
, io
->rw
.iov
, io
->rw
.fast_iov
, &iter
);
2312 static int io_write(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
2313 bool force_nonblock
)
2315 struct iovec inline_vecs
[UIO_FASTIOV
], *iovec
= inline_vecs
;
2316 struct kiocb
*kiocb
= &req
->rw
.kiocb
;
2317 struct iov_iter iter
;
2319 ssize_t ret
, io_size
;
2321 ret
= io_import_iovec(WRITE
, req
, &iovec
, &iter
);
2325 /* Ensure we clear previously set non-block flag */
2326 if (!force_nonblock
)
2327 req
->rw
.kiocb
.ki_flags
&= ~IOCB_NOWAIT
;
2331 if (req
->flags
& REQ_F_LINK
)
2332 req
->result
= io_size
;
2335 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2336 * we know to async punt it even if it was opened O_NONBLOCK
2338 if (force_nonblock
&& !io_file_supports_async(req
->file
)) {
2339 req
->flags
|= REQ_F_MUST_PUNT
;
2343 /* file path doesn't support NOWAIT for non-direct_IO */
2344 if (force_nonblock
&& !(kiocb
->ki_flags
& IOCB_DIRECT
) &&
2345 (req
->flags
& REQ_F_ISREG
))
2348 iov_count
= iov_iter_count(&iter
);
2349 ret
= rw_verify_area(WRITE
, req
->file
, &kiocb
->ki_pos
, iov_count
);
2354 * Open-code file_start_write here to grab freeze protection,
2355 * which will be released by another thread in
2356 * io_complete_rw(). Fool lockdep by telling it the lock got
2357 * released so that it doesn't complain about the held lock when
2358 * we return to userspace.
2360 if (req
->flags
& REQ_F_ISREG
) {
2361 __sb_start_write(file_inode(req
->file
)->i_sb
,
2362 SB_FREEZE_WRITE
, true);
2363 __sb_writers_release(file_inode(req
->file
)->i_sb
,
2366 kiocb
->ki_flags
|= IOCB_WRITE
;
2368 if (req
->file
->f_op
->write_iter
)
2369 ret2
= call_write_iter(req
->file
, kiocb
, &iter
);
2371 ret2
= loop_rw_iter(WRITE
, req
->file
, kiocb
, &iter
);
2373 * Raw bdev writes will -EOPNOTSUPP for IOCB_NOWAIT. Just
2374 * retry them without IOCB_NOWAIT.
2376 if (ret2
== -EOPNOTSUPP
&& (kiocb
->ki_flags
& IOCB_NOWAIT
))
2378 if (!force_nonblock
|| ret2
!= -EAGAIN
) {
2379 kiocb_done(kiocb
, ret2
, nxt
, req
->in_async
);
2382 ret
= io_setup_async_rw(req
, io_size
, iovec
,
2383 inline_vecs
, &iter
);
2390 req
->flags
&= ~REQ_F_NEED_CLEANUP
;
2396 * IORING_OP_NOP just posts a completion event, nothing else.
2398 static int io_nop(struct io_kiocb
*req
)
2400 struct io_ring_ctx
*ctx
= req
->ctx
;
2402 if (unlikely(ctx
->flags
& IORING_SETUP_IOPOLL
))
2405 io_cqring_add_event(req
, 0);
2410 static int io_prep_fsync(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
2412 struct io_ring_ctx
*ctx
= req
->ctx
;
2417 if (unlikely(ctx
->flags
& IORING_SETUP_IOPOLL
))
2419 if (unlikely(sqe
->addr
|| sqe
->ioprio
|| sqe
->buf_index
))
2422 req
->sync
.flags
= READ_ONCE(sqe
->fsync_flags
);
2423 if (unlikely(req
->sync
.flags
& ~IORING_FSYNC_DATASYNC
))
2426 req
->sync
.off
= READ_ONCE(sqe
->off
);
2427 req
->sync
.len
= READ_ONCE(sqe
->len
);
2431 static bool io_req_cancelled(struct io_kiocb
*req
)
2433 if (req
->work
.flags
& IO_WQ_WORK_CANCEL
) {
2434 req_set_fail_links(req
);
2435 io_cqring_add_event(req
, -ECANCELED
);
2443 static void io_link_work_cb(struct io_wq_work
**workptr
)
2445 struct io_wq_work
*work
= *workptr
;
2446 struct io_kiocb
*link
= work
->data
;
2448 io_queue_linked_timeout(link
);
2449 work
->func
= io_wq_submit_work
;
2452 static void io_wq_assign_next(struct io_wq_work
**workptr
, struct io_kiocb
*nxt
)
2454 struct io_kiocb
*link
;
2456 io_prep_async_work(nxt
, &link
);
2457 *workptr
= &nxt
->work
;
2459 nxt
->work
.flags
|= IO_WQ_WORK_CB
;
2460 nxt
->work
.func
= io_link_work_cb
;
2461 nxt
->work
.data
= link
;
2465 static void io_fsync_finish(struct io_wq_work
**workptr
)
2467 struct io_kiocb
*req
= container_of(*workptr
, struct io_kiocb
, work
);
2468 loff_t end
= req
->sync
.off
+ req
->sync
.len
;
2469 struct io_kiocb
*nxt
= NULL
;
2472 if (io_req_cancelled(req
))
2475 ret
= vfs_fsync_range(req
->file
, req
->sync
.off
,
2476 end
> 0 ? end
: LLONG_MAX
,
2477 req
->sync
.flags
& IORING_FSYNC_DATASYNC
);
2479 req_set_fail_links(req
);
2480 io_cqring_add_event(req
, ret
);
2481 io_put_req_find_next(req
, &nxt
);
2483 io_wq_assign_next(workptr
, nxt
);
2486 static int io_fsync(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
2487 bool force_nonblock
)
2489 struct io_wq_work
*work
, *old_work
;
2491 /* fsync always requires a blocking context */
2492 if (force_nonblock
) {
2494 req
->work
.func
= io_fsync_finish
;
2498 work
= old_work
= &req
->work
;
2499 io_fsync_finish(&work
);
2500 if (work
&& work
!= old_work
)
2501 *nxt
= container_of(work
, struct io_kiocb
, work
);
2505 static void io_fallocate_finish(struct io_wq_work
**workptr
)
2507 struct io_kiocb
*req
= container_of(*workptr
, struct io_kiocb
, work
);
2508 struct io_kiocb
*nxt
= NULL
;
2511 if (io_req_cancelled(req
))
2514 ret
= vfs_fallocate(req
->file
, req
->sync
.mode
, req
->sync
.off
,
2517 req_set_fail_links(req
);
2518 io_cqring_add_event(req
, ret
);
2519 io_put_req_find_next(req
, &nxt
);
2521 io_wq_assign_next(workptr
, nxt
);
2524 static int io_fallocate_prep(struct io_kiocb
*req
,
2525 const struct io_uring_sqe
*sqe
)
2527 if (sqe
->ioprio
|| sqe
->buf_index
|| sqe
->rw_flags
)
2530 req
->sync
.off
= READ_ONCE(sqe
->off
);
2531 req
->sync
.len
= READ_ONCE(sqe
->addr
);
2532 req
->sync
.mode
= READ_ONCE(sqe
->len
);
2536 static int io_fallocate(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
2537 bool force_nonblock
)
2539 struct io_wq_work
*work
, *old_work
;
2541 /* fallocate always requiring blocking context */
2542 if (force_nonblock
) {
2544 req
->work
.func
= io_fallocate_finish
;
2548 work
= old_work
= &req
->work
;
2549 io_fallocate_finish(&work
);
2550 if (work
&& work
!= old_work
)
2551 *nxt
= container_of(work
, struct io_kiocb
, work
);
static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	req->open.how.mode = READ_ONCE(sqe->len);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.how.flags = READ_ONCE(sqe->open_flags);

	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}
static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	const char __user *fname;
	size_t len;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);

	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}
static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
	struct open_flags op;
	struct file *file;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;

	ret = get_unused_fd_flags(req->open.how.flags);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
	} else {
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}
static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
		     bool force_nonblock)
{
	req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
	return io_openat2(req, nxt, force_nonblock);
}
static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
			bool force_nonblock)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
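/*
 * Usage sketch for the epoll bridge above (illustrative, not part of this
 * file): the epoll instance travels in sqe->fd, the controlled fd in
 * sqe->off and the op in sqe->len, mirroring epoll_ctl(epfd, op, fd, event).
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sockfd };
 *
 *	sqe->opcode = IORING_OP_EPOLL_CTL;
 *	sqe->fd     = epfd;
 *	sqe->len    = EPOLL_CTL_ADD;
 *	sqe->off    = sockfd;
 *	sqe->addr   = (unsigned long) &ev;
 */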
static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = do_madvise(ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}
static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	/* only the non-blocking hints can run inline */
	if (force_nonblock) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	unsigned lookup_flags;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	req->open.mask = READ_ONCE(sqe->len);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->open.how.flags = READ_ONCE(sqe->statx_flags);

	if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
		return -EINVAL;

	req->open.filename = getname_flags(fname, lookup_flags, NULL);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}
static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	struct io_open *ctx = &req->open;
	unsigned lookup_flags;
	struct path path;
	struct kstat stat;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
		return -EINVAL;

retry:
	/* filename_lookup() drops it, keep a reference */
	ctx->filename->refcnt++;

	ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
				NULL);
	if (ret)
		goto err;

	ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
	path_put(&path);
	if (retry_estale(ret, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	if (!ret)
		ret = cp_statx(&stat, ctx->buffer);
err:
	putname(ctx->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}
static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/*
	 * If we queue this for async, it must not be cancellable. That would
	 * leave the 'file' in an indeterminate state.
	 */
	req->work.flags |= IO_WQ_WORK_NO_CANCEL;

	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;

	req->close.fd = READ_ONCE(sqe->fd);
	if (req->file->f_op == &io_uring_fops ||
	    req->close.fd == req->ctx->ring_fd)
		return -EBADF;

	return 0;
}
/* only called when __close_fd_get_file() is done */
static void __io_close_finish(struct io_kiocb *req, struct io_kiocb **nxt)
{
	int ret;

	ret = filp_close(req->close.put_file, req->work.files);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	fput(req->close.put_file);
	io_put_req_find_next(req, nxt);
}
static void io_close_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	/* not cancellable, don't do io_req_cancelled() */
	__io_close_finish(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}
static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	int ret;

	req->close.put_file = NULL;
	ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
	if (ret < 0)
		return ret;

	/* if the file has a flush method, be safe and punt to async */
	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker())
		goto eagain;

	/*
	 * No ->flush(), safely close from here and just punt the
	 * fput() to async context.
	 */
	__io_close_finish(req, nxt);
	return 0;
eagain:
	req->work.func = io_close_finish;
	/*
	 * Do manual async queue here to avoid grabbing files - we don't
	 * need the files, and it'll cause io_close_finish() to close
	 * the file again and cause a double CQE entry for this request
	 */
	io_queue_async_work(req);
	return 0;
}
static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}
static void io_sync_file_range_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;
	int ret;

	if (io_req_cancelled(req))
		return;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}
static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
			      bool force_nonblock)
{
	struct io_wq_work *work, *old_work;

	/* sync_file_range always requires a blocking context */
	if (force_nonblock) {
		io_put_req(req);
		req->work.func = io_sync_file_range_finish;
		return -EAGAIN;
	}

	work = old_work = &req->work;
	io_sync_file_range_finish(&work);
	if (work && work != old_work)
		*nxt = container_of(work, struct io_kiocb, work);
	return 0;
}
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_async_ctx *io = req->io;
	int ret;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!io || req->opcode == IORING_OP_SEND)
		return 0;
	/* iovec is already imported */
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io->msg.iov = io->msg.fast_iov;
	ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
					&io->msg.iov);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct io_async_msghdr *kmsg = NULL;
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_async_ctx io;
		unsigned flags;

		if (req->io) {
			kmsg = &req->io->msg;
			kmsg->msg.msg_name = &req->io->msg.addr;
			/* if iov is set, it's allocated already */
			if (!kmsg->iov)
				kmsg->iov = kmsg->fast_iov;
			kmsg->msg.msg_iter.iov = kmsg->iov;
		} else {
			struct io_sr_msg *sr = &req->sr_msg;

			kmsg = &io.msg;
			kmsg->msg.msg_name = &io.msg.addr;

			io.msg.iov = io.msg.fast_iov;
			ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
					sr->msg_flags, &io.msg.iov);
			if (ret)
				return ret;
		}

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
		if (force_nonblock && ret == -EAGAIN) {
			if (req->io)
				return -EAGAIN;
			if (io_alloc_async_ctx(req)) {
				if (kmsg->iov != kmsg->fast_iov)
					kfree(kmsg->iov);
				return -ENOMEM;
			}
			req->flags |= REQ_F_NEED_CLEANUP;
			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
		   bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_sr_msg *sr = &req->sr_msg;
		struct msghdr msg;
		struct iovec iov;
		unsigned flags;

		ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
						&msg.msg_iter);
		if (ret)
			return ret;

		msg.msg_name = NULL;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_namelen = 0;

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		msg.msg_flags = flags;
		ret = sock_sendmsg(sock, &msg);
		if (force_nonblock && ret == -EAGAIN)
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
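/*
 * The receive side below mirrors the send side above: on a forced
 * non-blocking attempt that would block, the parsed msghdr (including any
 * separately allocated iovec) is copied into a freshly allocated req->io so
 * the io-wq retry does not have to re-import user memory, and
 * REQ_F_NEED_CLEANUP guarantees the iovec is freed exactly once.
 */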
static int io_recvmsg_prep(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_async_ctx *io = req->io;
	int ret;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!io || req->opcode == IORING_OP_RECV)
		return 0;
	/* iovec is already imported */
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io->msg.iov = io->msg.fast_iov;
	ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
					&io->msg.uaddr, &io->msg.iov);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct io_async_msghdr *kmsg = NULL;
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_async_ctx io;
		unsigned flags;

		if (req->io) {
			kmsg = &req->io->msg;
			kmsg->msg.msg_name = &req->io->msg.addr;
			/* if iov is set, it's allocated already */
			if (!kmsg->iov)
				kmsg->iov = kmsg->fast_iov;
			kmsg->msg.msg_iter.iov = kmsg->iov;
		} else {
			struct io_sr_msg *sr = &req->sr_msg;

			kmsg = &io.msg;
			kmsg->msg.msg_name = &io.msg.addr;

			io.msg.iov = io.msg.fast_iov;
			ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
					sr->msg_flags, &io.msg.uaddr,
					&io.msg.iov);
			if (ret)
				return ret;
		}

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
						kmsg->uaddr, flags);
		if (force_nonblock && ret == -EAGAIN) {
			if (req->io)
				return -EAGAIN;
			if (io_alloc_async_ctx(req)) {
				if (kmsg->iov != kmsg->fast_iov)
					kfree(kmsg->iov);
				return -ENOMEM;
			}
			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
			req->flags |= REQ_F_NEED_CLEANUP;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
		   bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_sr_msg *sr = &req->sr_msg;
		struct msghdr msg;
		struct iovec iov;
		unsigned flags;

		ret = import_single_range(READ, sr->buf, sr->len, &iov,
						&msg.msg_iter);
		if (ret)
			return ret;

		msg.msg_name = NULL;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_namelen = 0;
		msg.msg_iocb = NULL;
		msg.msg_flags = 0;

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = sock_recvmsg(sock, &msg, flags);
		if (force_nonblock && ret == -EAGAIN)
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
#if defined(CONFIG_NET)
static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
		       bool force_nonblock)
{
	struct io_accept *accept = &req->accept;
	unsigned file_flags;
	int ret;

	file_flags = force_nonblock ? O_NONBLOCK : 0;
	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
					accept->addr_len, accept->flags);
	if (ret == -EAGAIN && force_nonblock)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}
static void io_accept_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	if (io_req_cancelled(req))
		return;
	__io_accept(req, &nxt, false);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}
#endif
static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
		     bool force_nonblock)
{
#if defined(CONFIG_NET)
	int ret;

	ret = __io_accept(req, nxt, force_nonblock);
	if (ret == -EAGAIN && force_nonblock) {
		req->work.func = io_accept_finish;
		io_put_req(req);
		return -EAGAIN;
	}
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
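/*
 * Illustrative accept sketch (not part of this file): the peer address and
 * its length pointer travel through sqe->addr and sqe->addr2, and
 * sqe->accept_flags maps to the accept4() flags argument.
 *
 *	struct sockaddr_storage ss;
 *	socklen_t sslen = sizeof(ss);
 *
 *	sqe->opcode       = IORING_OP_ACCEPT;
 *	sqe->fd           = listen_fd;
 *	sqe->addr         = (unsigned long) &ss;
 *	sqe->addr2        = (unsigned long) &sslen;
 *	sqe->accept_flags = SOCK_CLOEXEC;
 */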
static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_connect *conn = &req->connect;
	struct io_async_ctx *io = req->io;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);

	if (!io)
		return 0;

	return move_addr_to_kernel(conn->addr, conn->addr_len,
					&io->connect.address);
#else
	return -EOPNOTSUPP;
#endif
}
static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
#if defined(CONFIG_NET)
	struct io_async_ctx __io, *io;
	unsigned file_flags;
	int ret;

	if (req->io) {
		io = req->io;
	} else {
		ret = move_addr_to_kernel(req->connect.addr,
						req->connect.addr_len,
						&__io.connect.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->connect.address,
					req->connect.addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req->io)
			return -EAGAIN;
		if (io_alloc_async_ctx(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static void io_poll_remove_one(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = &req->poll;

	spin_lock(&poll->head->lock);
	WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
		list_del_init(&poll->wait.entry);
		io_queue_async_work(req);
	}
	spin_unlock(&poll->head->lock);
	hash_del(&req->hash_node);
}
static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int i;

	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list;

		list = &ctx->cancel_hash[i];
		hlist_for_each_entry_safe(req, tmp, list, hash_node)
			io_poll_remove_one(req);
	}
	spin_unlock_irq(&ctx->completion_lock);
}
static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
{
	struct hlist_head *list;
	struct io_kiocb *req;

	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
	hlist_for_each_entry(req, list, hash_node) {
		if (sqe_addr == req->user_data) {
			io_poll_remove_one(req);
			return 0;
		}
	}

	return -ENOENT;
}
static int io_poll_remove_prep(struct io_kiocb *req,
			       const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->poll_events)
		return -EINVAL;

	req->poll.addr = READ_ONCE(sqe->addr);
	return 0;
}
/*
 * Find a running poll command that matches one specified in sqe->addr,
 * and remove it if found.
 */
static int io_poll_remove(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 addr;
	int ret;

	addr = req->poll.addr;
	spin_lock_irq(&ctx->completion_lock);
	ret = io_poll_cancel(ctx, addr);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}
static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	req->poll.done = true;
	if (error)
		io_cqring_fill_event(req, error);
	else
		io_cqring_fill_event(req, mangle_poll(mask));
	io_commit_cqring(ctx);
}
static void io_poll_complete_work(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_poll_iocb *poll = &req->poll;
	struct poll_table_struct pt = { ._key = poll->events };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *nxt = NULL;
	__poll_t mask = 0;
	int ret = 0;

	if (work->flags & IO_WQ_WORK_CANCEL) {
		WRITE_ONCE(poll->canceled, true);
		ret = -ECANCELED;
	} else if (READ_ONCE(poll->canceled)) {
		ret = -ECANCELED;
	}

	if (ret != -ECANCELED)
		mask = vfs_poll(poll->file, &pt) & poll->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
	 * synchronize with them. In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->completion_lock);
	if (!mask && ret != -ECANCELED) {
		add_wait_queue(poll->head, &poll->wait);
		spin_unlock_irq(&ctx->completion_lock);
		return;
	}
	hash_del(&req->hash_node);
	io_poll_complete(req, mask, ret);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);

	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}
static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
{
	struct io_kiocb *req, *tmp;
	struct req_batch rb;

	rb.to_free = rb.need_iter = 0;
	spin_lock_irq(&ctx->completion_lock);
	llist_for_each_entry_safe(req, tmp, nodes, llist_node) {
		hash_del(&req->hash_node);
		io_poll_complete(req, req->result, 0);

		if (refcount_dec_and_test(&req->refs) &&
		    !io_req_multi_free(&rb, req)) {
			req->flags |= REQ_F_COMP_LOCKED;
			io_free_req(req);
		}
	}
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	io_free_req_many(ctx, &rb);
}
static void io_poll_flush(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct llist_node *nodes;

	nodes = llist_del_all(&req->ctx->poll_llist);
	if (nodes)
		__io_poll_flush(req->ctx, nodes);
}
static void io_poll_trigger_evfd(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);

	eventfd_signal(req->ctx->cq_ev_fd, 1);
	io_put_req(req);
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_poll_iocb *poll = wait->private;
	struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
	struct io_ring_ctx *ctx = req->ctx;
	__poll_t mask = key_to_poll(key);

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & poll->events))
		return 0;

	list_del_init(&poll->wait.entry);

	/*
	 * Run completion inline if we can. We're using trylock here because
	 * we are violating the completion_lock -> poll wq lock ordering.
	 * If we have a link timeout we're going to need the completion_lock
	 * for finalizing the request, mark us as having grabbed that already.
	 */
	if (mask) {
		unsigned long flags;

		if (llist_empty(&ctx->poll_llist) &&
		    spin_trylock_irqsave(&ctx->completion_lock, flags)) {
			bool trigger_ev;

			hash_del(&req->hash_node);
			io_poll_complete(req, mask, 0);

			trigger_ev = io_should_trigger_evfd(ctx);
			if (trigger_ev && eventfd_signal_count()) {
				trigger_ev = false;
				req->work.func = io_poll_trigger_evfd;
			} else {
				req->flags |= REQ_F_COMP_LOCKED;
				io_put_req(req);
				req = NULL;
			}
			spin_unlock_irqrestore(&ctx->completion_lock, flags);
			__io_cqring_ev_posted(ctx, trigger_ev);
		} else {
			req->result = mask;
			req->llist_node.next = NULL;
			/* if the list wasn't empty, we're done */
			if (!llist_add(&req->llist_node, &ctx->poll_llist))
				req = NULL;
			else
				req->work.func = io_poll_flush;
		}
	}
	if (req)
		io_queue_async_work(req);

	return 1;
}
struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int error;
};
static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);

	/* poll on more than one wait queue head isn't supported */
	if (unlikely(pt->req->poll.head)) {
		pt->error = -EINVAL;
		return;
	}

	pt->error = 0;
	pt->req->poll.head = head;
	add_wait_queue(head, &pt->req->poll.wait);
}
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct hlist_head *list;

	list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
	hlist_add_head(&req->hash_node, list);
}
static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_iocb *poll = &req->poll;
	u16 events;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
		return -EINVAL;
	if (!poll->file)
		return -EBADF;

	events = READ_ONCE(sqe->poll_events);
	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
	return 0;
}
static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
{
	struct io_poll_iocb *poll = &req->poll;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_poll_table ipt;
	bool cancel = false;
	__poll_t mask;

	INIT_IO_WORK(&req->work, io_poll_complete_work);
	INIT_HLIST_NODE(&req->hash_node);

	poll->head = NULL;
	poll->done = false;
	poll->canceled = false;

	ipt.pt._qproc = io_poll_queue_proc;
	ipt.pt._key = poll->events;
	ipt.req = req;
	ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */

	/* initialized the list so that we can do list_empty checks */
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
	poll->wait.private = poll;

	INIT_LIST_HEAD(&req->list);

	mask = vfs_poll(poll->file, &ipt.pt) & poll->events;

	spin_lock_irq(&ctx->completion_lock);
	if (likely(poll->head)) {
		spin_lock(&poll->head->lock);
		if (unlikely(list_empty(&poll->wait.entry))) {
			if (ipt.error)
				cancel = true;
			ipt.error = 0;
			mask = 0;
		}
		if (mask || ipt.error)
			list_del_init(&poll->wait.entry);
		else if (cancel)
			WRITE_ONCE(poll->canceled, true);
		else if (!poll->done) /* actually waiting for an event */
			io_poll_req_insert(req);
		spin_unlock(&poll->head->lock);
	}
	if (mask) { /* no async, we'd stolen it */
		ipt.error = 0;
		io_poll_complete(req, mask, 0);
	}
	spin_unlock_irq(&ctx->completion_lock);

	if (mask) {
		io_cqring_ev_posted(ctx);
		io_put_req_find_next(req, nxt);
	}
	return ipt.error;
}
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	atomic_inc(&ctx->cq_timeouts);

	spin_lock_irqsave(&ctx->completion_lock, flags);
	/*
	 * We could be racing with timeout deletion. If the list is empty,
	 * then timeout lookup already found it and will be handling it.
	 */
	if (!list_empty(&req->list)) {
		struct io_kiocb *prev;

		/*
		 * Adjust the reqs sequence before the current one because it
		 * will consume a slot in the cq_ring and the cq_tail
		 * pointer will be increased, otherwise other timeout reqs may
		 * return in advance without waiting for enough wait_nr.
		 */
		prev = req;
		list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
			prev->sequence++;
		list_del_init(&req->list);
	}

	io_cqring_fill_event(req, -ETIME);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
	req_set_fail_links(req);
	io_put_req(req);
	return HRTIMER_NORESTART;
}
static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
{
	struct io_kiocb *req;
	int ret = -ENOENT;

	list_for_each_entry(req, &ctx->timeout_list, list) {
		if (user_data == req->user_data) {
			list_del_init(&req->list);
			ret = 0;
			break;
		}
	}

	if (ret == -ENOENT)
		return ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret == -1)
		return -EALREADY;

	req_set_fail_links(req);
	io_cqring_fill_event(req, -ECANCELED);
	io_put_req(req);
	return 0;
}
static int io_timeout_remove_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
		return -EINVAL;

	req->timeout.addr = READ_ONCE(sqe->addr);
	req->timeout.flags = READ_ONCE(sqe->timeout_flags);
	if (req->timeout.flags)
		return -EINVAL;

	return 0;
}
/*
 * Remove or update an existing timeout command
 */
static int io_timeout_remove(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	spin_lock_irq(&ctx->completion_lock);
	ret = io_timeout_cancel(ctx, req->timeout.addr);

	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}
static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			   bool is_timeout_link)
{
	struct io_timeout_data *data;
	unsigned flags;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
		return -EINVAL;
	if (sqe->off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~IORING_TIMEOUT_ABS)
		return -EINVAL;

	req->timeout.count = READ_ONCE(sqe->off);

	if (!req->io && io_alloc_async_ctx(req))
		return -ENOMEM;

	data = &req->io->timeout;
	data->req = req;
	req->flags |= REQ_F_TIMEOUT;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (flags & IORING_TIMEOUT_ABS)
		data->mode = HRTIMER_MODE_ABS;
	else
		data->mode = HRTIMER_MODE_REL;

	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
	return 0;
}
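/*
 * Illustrative timeout sketch (not part of this file): sqe->addr points at
 * a struct __kernel_timespec, sqe->off is the completion count to wait for
 * (0 means a pure timer), and sqe->len must be 1 per the prep above.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sqe->opcode = IORING_OP_TIMEOUT;
 *	sqe->addr   = (unsigned long) &ts;
 *	sqe->len    = 1;
 *	sqe->off    = 8;	/- fire after 8 CQEs or 1s, whichever is first
 */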
static int io_timeout(struct io_kiocb *req)
{
	unsigned count;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data;
	struct list_head *entry;
	unsigned span = 0;

	data = &req->io->timeout;

	/*
	 * sqe->off holds how many events that need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	count = req->timeout.count;
	if (!count) {
		req->flags |= REQ_F_TIMEOUT_NOSEQ;
		spin_lock_irq(&ctx->completion_lock);
		entry = ctx->timeout_list.prev;
		goto add;
	}

	req->sequence = ctx->cached_sq_head + count - 1;
	data->seq_offset = count;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	spin_lock_irq(&ctx->completion_lock);
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
		unsigned nxt_sq_head;
		long long tmp, tmp_nxt;
		u32 nxt_offset = nxt->io->timeout.seq_offset;

		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
			continue;

		/*
		 * Since cached_sq_head + count - 1 can overflow, use type
		 * long long to store it.
		 */
		tmp = (long long)ctx->cached_sq_head + count - 1;
		nxt_sq_head = nxt->sequence - nxt_offset + 1;
		tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1;

		/*
		 * cached_sq_head may overflow, and it will never overflow
		 * twice as long as some timeout request is still pending.
		 */
		if (ctx->cached_sq_head < nxt_sq_head)
			tmp += UINT_MAX;

		if (tmp > tmp_nxt)
			break;

		/*
		 * The sequence of requests after the inserted one, and the
		 * inserted request itself, must be adjusted because each
		 * timeout consumes a CQ slot.
		 */
		span++;
		nxt->sequence++;
	}
	req->sequence -= span;
add:
	list_add(&req->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->completion_lock);
	return 0;
}
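/*
 * Worked example of the sequence bookkeeping above: with cached_sq_head at
 * 100, a timeout with count 8 gets sequence 100 + 8 - 1 = 107. An existing
 * entry with adjusted firing point 105 stays ahead of us; one at 110 stops
 * the scan so we insert before it. Every entry we walked past has its
 * sequence bumped, and ours is reduced by that span, because each timeout
 * that fires consumes a CQ slot of its own. The (long long) arithmetic only
 * exists so head + count - 1 can be compared sensibly across a u32
 * wraparound.
 */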
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->user_data == (unsigned long) data;
}
static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;

	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}
static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
				     struct io_kiocb *req, __u64 sqe_addr,
				     struct io_kiocb **nxt, int success_ret)
{
	unsigned long flags;
	int ret;

	ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
	if (ret != -ENOENT) {
		spin_lock_irqsave(&ctx->completion_lock, flags);
		goto done;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);
	ret = io_timeout_cancel(ctx, sqe_addr);
	if (ret != -ENOENT)
		goto done;
	ret = io_poll_cancel(ctx, sqe_addr);
done:
	if (!ret)
		ret = success_ret;
	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	if (ret < 0)
		req_set_fail_links(req);
	io_put_req_find_next(req, nxt);
}
static int io_async_cancel_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
	    sqe->cancel_flags)
		return -EINVAL;

	req->cancel.addr = READ_ONCE(sqe->addr);
	return 0;
}
static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
	return 0;
}
static int io_files_update_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (sqe->flags || sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	req->files_update.offset = READ_ONCE(sqe->off);
	req->files_update.nr_args = READ_ONCE(sqe->len);
	if (!req->files_update.nr_args)
		return -EINVAL;
	req->files_update.arg = READ_ONCE(sqe->addr);
	return 0;
}
static int io_files_update(struct io_kiocb *req, bool force_nonblock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_files_update up;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	up.offset = req->files_update.offset;
	up.fds = req->files_update.arg;

	mutex_lock(&ctx->uring_lock);
	ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
	mutex_unlock(&ctx->uring_lock);

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}
static int io_req_defer_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	ssize_t ret = 0;

	if (io_op_defs[req->opcode].file_table) {
		ret = io_grab_files(req);
		if (unlikely(ret))
			return ret;
	}

	io_req_work_grab_env(req, &io_op_defs[req->opcode]);

	switch (req->opcode) {
	case IORING_OP_NOP:
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		ret = io_read_prep(req, sqe, true);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		ret = io_write_prep(req, sqe, true);
		break;
	case IORING_OP_POLL_ADD:
		ret = io_poll_add_prep(req, sqe);
		break;
	case IORING_OP_POLL_REMOVE:
		ret = io_poll_remove_prep(req, sqe);
		break;
	case IORING_OP_FSYNC:
		ret = io_prep_fsync(req, sqe);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		ret = io_prep_sfr(req, sqe);
		break;
	case IORING_OP_SENDMSG:
	case IORING_OP_SEND:
		ret = io_sendmsg_prep(req, sqe);
		break;
	case IORING_OP_RECVMSG:
	case IORING_OP_RECV:
		ret = io_recvmsg_prep(req, sqe);
		break;
	case IORING_OP_CONNECT:
		ret = io_connect_prep(req, sqe);
		break;
	case IORING_OP_TIMEOUT:
		ret = io_timeout_prep(req, sqe, false);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		ret = io_timeout_remove_prep(req, sqe);
		break;
	case IORING_OP_ASYNC_CANCEL:
		ret = io_async_cancel_prep(req, sqe);
		break;
	case IORING_OP_LINK_TIMEOUT:
		ret = io_timeout_prep(req, sqe, true);
		break;
	case IORING_OP_ACCEPT:
		ret = io_accept_prep(req, sqe);
		break;
	case IORING_OP_FALLOCATE:
		ret = io_fallocate_prep(req, sqe);
		break;
	case IORING_OP_OPENAT:
		ret = io_openat_prep(req, sqe);
		break;
	case IORING_OP_CLOSE:
		ret = io_close_prep(req, sqe);
		break;
	case IORING_OP_FILES_UPDATE:
		ret = io_files_update_prep(req, sqe);
		break;
	case IORING_OP_STATX:
		ret = io_statx_prep(req, sqe);
		break;
	case IORING_OP_FADVISE:
		ret = io_fadvise_prep(req, sqe);
		break;
	case IORING_OP_MADVISE:
		ret = io_madvise_prep(req, sqe);
		break;
	case IORING_OP_OPENAT2:
		ret = io_openat2_prep(req, sqe);
		break;
	case IORING_OP_EPOLL_CTL:
		ret = io_epoll_ctl_prep(req, sqe);
		break;
	default:
		printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
				req->opcode);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	/* Still need defer if there is pending req in defer list. */
	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
		return 0;

	if (!req->io && io_alloc_async_ctx(req))
		return -EAGAIN;

	ret = io_req_defer_prep(req, sqe);
	if (ret < 0)
		return ret;

	spin_lock_irq(&ctx->completion_lock);
	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
		spin_unlock_irq(&ctx->completion_lock);
		return 0;
	}

	trace_io_uring_defer(ctx, req, req->user_data);
	list_add_tail(&req->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->completion_lock);
	return -EIOCBQUEUED;
}
static void io_cleanup_req(struct io_kiocb *req)
{
	struct io_async_ctx *io = req->io;

	switch (req->opcode) {
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		if (io->rw.iov != io->rw.fast_iov)
			kfree(io->rw.iov);
		break;
	case IORING_OP_SENDMSG:
	case IORING_OP_RECVMSG:
		if (io->msg.iov != io->msg.fast_iov)
			kfree(io->msg.iov);
		break;
	case IORING_OP_OPENAT:
	case IORING_OP_OPENAT2:
	case IORING_OP_STATX:
		putname(req->open.filename);
		break;
	}

	req->flags &= ~REQ_F_NEED_CLEANUP;
}
static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			struct io_kiocb **nxt, bool force_nonblock)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	switch (req->opcode) {
	case IORING_OP_NOP:
		ret = io_nop(req);
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		if (sqe) {
			ret = io_read_prep(req, sqe, force_nonblock);
			if (ret < 0)
				break;
		}
		ret = io_read(req, nxt, force_nonblock);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		if (sqe) {
			ret = io_write_prep(req, sqe, force_nonblock);
			if (ret < 0)
				break;
		}
		ret = io_write(req, nxt, force_nonblock);
		break;
	case IORING_OP_FSYNC:
		if (sqe) {
			ret = io_prep_fsync(req, sqe);
			if (ret < 0)
				break;
		}
		ret = io_fsync(req, nxt, force_nonblock);
		break;
	case IORING_OP_POLL_ADD:
		if (sqe) {
			ret = io_poll_add_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_poll_add(req, nxt);
		break;
	case IORING_OP_POLL_REMOVE:
		if (sqe) {
			ret = io_poll_remove_prep(req, sqe);
			if (ret < 0)
				break;
		}
		ret = io_poll_remove(req);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		if (sqe) {
			ret = io_prep_sfr(req, sqe);
			if (ret < 0)
				break;
		}
		ret = io_sync_file_range(req, nxt, force_nonblock);
		break;
	case IORING_OP_SENDMSG:
	case IORING_OP_SEND:
		if (sqe) {
			ret = io_sendmsg_prep(req, sqe);
			if (ret < 0)
				break;
		}
		if (req->opcode == IORING_OP_SENDMSG)
			ret = io_sendmsg(req, nxt, force_nonblock);
		else
			ret = io_send(req, nxt, force_nonblock);
		break;
	case IORING_OP_RECVMSG:
	case IORING_OP_RECV:
		if (sqe) {
			ret = io_recvmsg_prep(req, sqe);
			if (ret)
				break;
		}
		if (req->opcode == IORING_OP_RECVMSG)
			ret = io_recvmsg(req, nxt, force_nonblock);
		else
			ret = io_recv(req, nxt, force_nonblock);
		break;
	case IORING_OP_TIMEOUT:
		if (sqe) {
			ret = io_timeout_prep(req, sqe, false);
			if (ret)
				break;
		}
		ret = io_timeout(req);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		if (sqe) {
			ret = io_timeout_remove_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_timeout_remove(req);
		break;
	case IORING_OP_ACCEPT:
		if (sqe) {
			ret = io_accept_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_accept(req, nxt, force_nonblock);
		break;
	case IORING_OP_CONNECT:
		if (sqe) {
			ret = io_connect_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_connect(req, nxt, force_nonblock);
		break;
	case IORING_OP_ASYNC_CANCEL:
		if (sqe) {
			ret = io_async_cancel_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_async_cancel(req, nxt);
		break;
	case IORING_OP_FALLOCATE:
		if (sqe) {
			ret = io_fallocate_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_fallocate(req, nxt, force_nonblock);
		break;
	case IORING_OP_OPENAT:
		if (sqe) {
			ret = io_openat_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_openat(req, nxt, force_nonblock);
		break;
	case IORING_OP_CLOSE:
		if (sqe) {
			ret = io_close_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_close(req, nxt, force_nonblock);
		break;
	case IORING_OP_FILES_UPDATE:
		if (sqe) {
			ret = io_files_update_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_files_update(req, force_nonblock);
		break;
	case IORING_OP_STATX:
		if (sqe) {
			ret = io_statx_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_statx(req, nxt, force_nonblock);
		break;
	case IORING_OP_FADVISE:
		if (sqe) {
			ret = io_fadvise_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_fadvise(req, nxt, force_nonblock);
		break;
	case IORING_OP_MADVISE:
		if (sqe) {
			ret = io_madvise_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_madvise(req, nxt, force_nonblock);
		break;
	case IORING_OP_OPENAT2:
		if (sqe) {
			ret = io_openat2_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_openat2(req, nxt, force_nonblock);
		break;
	case IORING_OP_EPOLL_CTL:
		if (sqe) {
			ret = io_epoll_ctl_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_epoll_ctl(req, nxt, force_nonblock);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		const bool in_async = io_wq_current_is_worker();

		if (req->result == -EAGAIN)
			return -EAGAIN;

		/* workqueue context doesn't hold uring_lock, grab it now */
		if (in_async)
			mutex_lock(&ctx->uring_lock);

		io_iopoll_req_issued(req);

		if (in_async)
			mutex_unlock(&ctx->uring_lock);
	}

	return 0;
}
static void io_wq_submit_work(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;
	int ret = 0;

	/* if NO_CANCEL is set, we must still run the work */
	if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
				IO_WQ_WORK_CANCEL) {
		ret = -ECANCELED;
	}

	if (!ret) {
		req->in_async = true;
		do {
			ret = io_issue_sqe(req, NULL, &nxt, false);
			/*
			 * We can get EAGAIN for polled IO even though we're
			 * forcing a sync submission from here, since we can't
			 * wait for request slots on the block side.
			 */
			if (ret != -EAGAIN)
				break;
			cond_resched();
		} while (1);
	}

	/* drop submission reference */
	io_put_req(req);

	if (ret) {
		req_set_fail_links(req);
		io_cqring_add_event(req, ret);
		io_put_req(req);
	}

	/* if a dependent link is ready, pass it back */
	if (!ret && nxt)
		io_wq_assign_next(workptr, nxt);
}
static int io_req_needs_file(struct io_kiocb *req, int fd)
{
	if (!io_op_defs[req->opcode].needs_file)
		return 0;
	if ((fd == -1 || fd == AT_FDCWD) && io_op_defs[req->opcode].fd_non_neg)
		return 0;
	return 1;
}
static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
					      int index)
{
	struct fixed_file_table *table;

	table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
	return table->files[index & IORING_FILE_TABLE_MASK];
}
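/*
 * Index arithmetic example for the two-level lookup above: with a table
 * shift of 9 each table holds 512 entries, so fixed file index 1234
 * resolves to table[1234 >> 9] = table[2], slot 1234 & 511 = 210. Two
 * levels keep each table allocation small while still allowing up to
 * IORING_MAX_FIXED_FILES descriptors in total.
 */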
static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned flags;
	int fd;

	flags = READ_ONCE(sqe->flags);
	fd = READ_ONCE(sqe->fd);

	if (!io_req_needs_file(req, fd))
		return 0;

	if (flags & IOSQE_FIXED_FILE) {
		if (unlikely(!ctx->file_data ||
		    (unsigned) fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		req->file = io_file_from_index(ctx, fd);
		if (!req->file)
			return -EBADF;
		req->flags |= REQ_F_FIXED_FILE;
		percpu_ref_get(&ctx->file_data->refs);
	} else {
		if (req->needs_fixed_file)
			return -EBADF;
		trace_io_uring_file_get(ctx, fd);
		req->file = io_file_get(state, fd);
		if (unlikely(!req->file))
			return -EBADF;
	}

	return 0;
}
static int io_grab_files(struct io_kiocb *req)
{
	int ret = -EBADF;
	struct io_ring_ctx *ctx = req->ctx;

	if (req->work.files)
		return 0;
	if (!ctx->ring_file)
		return -EBADF;

	rcu_read_lock();
	spin_lock_irq(&ctx->inflight_lock);
	/*
	 * We use the f_ops->flush() handler to ensure that we can flush
	 * out work accessing these files if the fd is closed. Check if
	 * the fd has changed since we started down this path, and disallow
	 * this operation if it has.
	 */
	if (fcheck(ctx->ring_fd) == ctx->ring_file) {
		list_add(&req->inflight_entry, &ctx->inflight_list);
		req->flags |= REQ_F_INFLIGHT;
		req->work.files = current->files;
		ret = 0;
	}
	spin_unlock_irq(&ctx->inflight_lock);
	rcu_read_unlock();

	return ret;
}
io_link_timeout_fn(struct hrtimer
*timer
)
4639 struct io_timeout_data
*data
= container_of(timer
,
4640 struct io_timeout_data
, timer
);
4641 struct io_kiocb
*req
= data
->req
;
4642 struct io_ring_ctx
*ctx
= req
->ctx
;
4643 struct io_kiocb
*prev
= NULL
;
4644 unsigned long flags
;
4646 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
4649 * We don't expect the list to be empty, that will only happen if we
4650 * race with the completion of the linked work.
4652 if (!list_empty(&req
->link_list
)) {
4653 prev
= list_entry(req
->link_list
.prev
, struct io_kiocb
,
4655 if (refcount_inc_not_zero(&prev
->refs
)) {
4656 list_del_init(&req
->link_list
);
4657 prev
->flags
&= ~REQ_F_LINK_TIMEOUT
;
4662 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
4665 req_set_fail_links(prev
);
4666 io_async_find_and_cancel(ctx
, req
, prev
->user_data
, NULL
,
4670 io_cqring_add_event(req
, -ETIME
);
4673 return HRTIMER_NORESTART
;
static void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * If the list is now empty, then our linked request finished before
	 * we got a chance to setup the timer
	 */
	spin_lock_irq(&ctx->completion_lock);
	if (!list_empty(&req->link_list)) {
		struct io_timeout_data *data = &req->io->timeout;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
	}
	spin_unlock_irq(&ctx->completion_lock);

	/* drop submission reference */
	io_put_req(req);
}

static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	if (!(req->flags & REQ_F_LINK))
		return NULL;

	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
					link_list);
	if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
		return NULL;

	req->flags |= REQ_F_LINK_TIMEOUT;
	return nxt;
}
static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_kiocb *linked_timeout;
	struct io_kiocb *nxt = NULL;
	const struct cred *old_creds = NULL;
	int ret;

again:
	linked_timeout = io_prep_linked_timeout(req);

	if (req->work.creds && req->work.creds != current_cred()) {
		if (old_creds)
			revert_creds(old_creds);
		if (old_creds == req->work.creds)
			old_creds = NULL; /* restored original creds */
		else
			old_creds = override_creds(req->work.creds);
	}

	ret = io_issue_sqe(req, sqe, &nxt, true);

	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 */
	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
	    (req->flags & REQ_F_MUST_PUNT))) {
punt:
		if (io_op_defs[req->opcode].file_table) {
			ret = io_grab_files(req);
			if (ret)
				goto err;
		}

		/*
		 * Queued up for async execution, worker will release
		 * submit reference when the iocb is actually submitted.
		 */
		io_queue_async_work(req);
		goto done_req;
	}

err:
	/* drop submission reference */
	io_put_req_find_next(req, &nxt);

	if (linked_timeout) {
		if (!ret)
			io_queue_linked_timeout(linked_timeout);
		else
			io_put_req(linked_timeout);
	}

	/* and drop final reference, if we failed */
	if (ret) {
		io_cqring_add_event(req, ret);
		req_set_fail_links(req);
		io_put_req(req);
	}
done_req:
	if (nxt) {
		req = nxt;
		nxt = NULL;

		if (req->flags & REQ_F_FORCE_ASYNC)
			goto punt;
		goto again;
	}
	if (old_creds)
		revert_creds(old_creds);
}
static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = io_req_defer(req, sqe);
	if (ret) {
		if (ret != -EIOCBQUEUED) {
fail_req:
			io_cqring_add_event(req, ret);
			req_set_fail_links(req);
			io_double_put_req(req);
		}
	} else if (req->flags & REQ_F_FORCE_ASYNC) {
		ret = io_req_defer_prep(req, sqe);
		if (unlikely(ret < 0))
			goto fail_req;
		/*
		 * Never try inline submit if IOSQE_ASYNC is set, go straight
		 * to async execution.
		 */
		req->work.flags |= IO_WQ_WORK_CONCURRENT;
		io_queue_async_work(req);
	} else {
		__io_queue_sqe(req, sqe);
	}
}
static inline void io_queue_link_head(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
		io_cqring_add_event(req, -ECANCELED);
		io_double_put_req(req);
	} else
		io_queue_sqe(req, NULL);
}
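/*
 * Illustrative link-chain sketch (not part of this file; helper names
 * hypothetical): the head logic below is what a submission like the
 * following exercises, where the write only runs after the read completes
 * and a failure cancels the remainder of the chain with -ECANCELED.
 *
 *	sqe = next_sqe(ring);
 *	prep_read(sqe, fd, buf, len);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = next_sqe(ring);
 *	prep_write(sqe, out_fd, buf, len);	/- runs only if the read succeeded
 */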
#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
				IOSQE_IO_HARDLINK | IOSQE_ASYNC)

static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			  struct io_submit_state *state, struct io_kiocb **link)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned int sqe_flags;
	int ret, id;

	sqe_flags = READ_ONCE(sqe->flags);

	/* enforce forwards compatibility on users */
	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
		ret = -EINVAL;
		goto err_req;
	}

	id = READ_ONCE(sqe->personality);
	if (id) {
		req->work.creds = idr_find(&ctx->personality_idr, id);
		if (unlikely(!req->work.creds)) {
			ret = -EINVAL;
			goto err_req;
		}
		get_cred(req->work.creds);
	}

	/* same numerical values with corresponding REQ_F_*, safe to copy */
	req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
					IOSQE_ASYNC);

	ret = io_req_set_file(state, req, sqe);
	if (unlikely(ret)) {
err_req:
		io_cqring_add_event(req, ret);
		io_double_put_req(req);
		return false;
	}

	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (*link) {
		struct io_kiocb *head = *link;

		/*
		 * Taking sequential execution of a link, draining both sides
		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
		 * requests in the link. So, it drains the head and the
		 * next after the link request. The last one is done via
		 * drain_next flag to persist the effect across calls.
		 */
		if (sqe_flags & IOSQE_IO_DRAIN) {
			head->flags |= REQ_F_IO_DRAIN;
			ctx->drain_next = 1;
		}
		if (io_alloc_async_ctx(req)) {
			ret = -EAGAIN;
			goto err_req;
		}

		ret = io_req_defer_prep(req, sqe);
		if (ret) {
			/* fail even hard links since we don't submit */
			head->flags |= REQ_F_FAIL_LINK;
			goto err_req;
		}
		trace_io_uring_link(ctx, req, head);
		list_add_tail(&req->link_list, &head->link_list);

		/* last request of a link, enqueue the link */
		if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
			io_queue_link_head(head);
			*link = NULL;
		}
	} else {
		if (unlikely(ctx->drain_next)) {
			req->flags |= REQ_F_IO_DRAIN;
			req->ctx->drain_next = 0;
		}
		if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
			req->flags |= REQ_F_LINK;
			INIT_LIST_HEAD(&req->link_list);
			ret = io_req_defer_prep(req, sqe);
			if (ret)
				req->flags |= REQ_F_FAIL_LINK;
			*link = req;
		} else {
			io_queue_sqe(req, sqe);
		}
	}

	return true;
}
/*
 * Batched submission is done, ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_submit_state *state)
{
	blk_finish_plug(&state->plug);
	io_file_put(state);
	if (state->free_reqs)
		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
}
/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  unsigned int max_ios)
{
	blk_start_plug(&state->plug);
	state->free_reqs = 0;
	state->file = NULL;
	state->ios_left = max_ios;
}
static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/*
	 * Ensure any loads from the SQEs are done at this point,
	 * since once we write the new head, the application could
	 * write new data to them.
	 */
	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}
/*
 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
 * that is mapped by userspace. This means that care needs to be taken to
 * ensure that reads are stable, as we cannot rely on userspace always
 * being a good citizen. If members of the sqe are validated and then later
 * used, it's important that those reads are done through READ_ONCE() to
 * prevent a re-load down the line.
 */
static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
			  const struct io_uring_sqe **sqe_ptr)
{
	u32 *sq_array = ctx->sq_array;
	unsigned head;

	/*
	 * The cached sq head (or cq tail) serves two purposes:
	 *
	 * 1) allows us to batch the cost of updating the user visible
	 *    head updates.
	 * 2) allows the kernel side to track the head on its own, even
	 *    though the application is the one updating it.
	 */
	head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
	if (likely(head < ctx->sq_entries)) {
		/*
		 * All IO needs to record the previous position. For LINK vs
		 * DRAIN handling it marks the position of the first IO in
		 * the chain.
		 */
		req->sequence = ctx->cached_sq_head;
		*sqe_ptr = &ctx->sq_sqes[head];
		req->opcode = READ_ONCE((*sqe_ptr)->opcode);
		req->user_data = READ_ONCE((*sqe_ptr)->user_data);
		ctx->cached_sq_head++;
		return true;
	}

	/* drop invalid entries */
	ctx->cached_sq_head++;
	ctx->cached_sq_dropped++;
	WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
	return false;
}
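/*
 * Userspace counterpart sketch (illustrative only; struct names are
 * hypothetical): the head read above is the consumer side of the sq_array
 * indirection. The application may fill SQE slots in any order and publishes
 * one by writing its index into the array before advancing the tail with an
 * ordered store.
 *
 *	unsigned tail = *sq.ktail;
 *	unsigned idx = tail & *sq.kring_mask;
 *
 *	fill_sqe(&sq.sqes[idx]);
 *	sq.array[idx] = idx;
 *	publish_tail(sq.ktail, tail + 1);	/- release store, see liburing
 */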
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
			  struct file *ring_file, int ring_fd,
			  struct mm_struct **mm, bool async)
{
	struct io_submit_state state, *statep = NULL;
	struct io_kiocb *link = NULL;
	int i, submitted = 0;
	bool mm_fault = false;

	/* if we have a backlog and couldn't flush it all, return BUSY */
	if (test_bit(0, &ctx->sq_check_overflow)) {
		if (!list_empty(&ctx->cq_overflow_list) &&
		    !io_cqring_overflow_flush(ctx, false))
			return -EBUSY;
	}

	/* make sure SQ entry isn't read before tail */
	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));

	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;

	if (nr > IO_PLUG_THRESHOLD) {
		io_submit_state_start(&state, nr);
		statep = &state;
	}

	ctx->ring_fd = ring_fd;
	ctx->ring_file = ring_file;

	for (i = 0; i < nr; i++) {
		const struct io_uring_sqe *sqe;
		struct io_kiocb *req;
		int err;

		req = io_get_req(ctx, statep);
		if (unlikely(!req)) {
			if (!submitted)
				submitted = -EAGAIN;
			break;
		}
		if (!io_get_sqring(ctx, req, &sqe)) {
			__io_req_do_free(req);
			break;
		}

		/* will complete beyond this point, count as submitted */
		submitted++;

		if (unlikely(req->opcode >= IORING_OP_LAST)) {
			err = -EINVAL;
			io_cqring_add_event(req, err);
			io_double_put_req(req);
			break;
		}

		if (io_op_defs[req->opcode].needs_mm && !*mm) {
			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
			if (unlikely(mm_fault)) {
				err = -EFAULT;
				io_cqring_add_event(req, err);
				io_double_put_req(req);
				break;
			}
			use_mm(ctx->sqo_mm);
			*mm = ctx->sqo_mm;
		}

		req->in_async = async;
		req->needs_fixed_file = async;
		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
						true, async);
		if (!io_submit_sqe(req, sqe, statep, &link))
			break;
	}

	if (unlikely(submitted != nr)) {
		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;

		percpu_ref_put_many(&ctx->refs, nr - ref_used);
	}
	if (link)
		io_queue_link_head(link);
	if (statep)
		io_submit_state_end(&state);

	/* Commit SQ ring head once we've consumed and submitted all SQEs */
	io_commit_sqring(ctx);

	return submitted;
}
static int io_sq_thread(void *data)
{
	struct io_ring_ctx *ctx = data;
	struct mm_struct *cur_mm = NULL;
	const struct cred *old_cred;
	mm_segment_t old_fs;
	DEFINE_WAIT(wait);
	unsigned long timeout;
	int ret = 0;

	complete(&ctx->completions[1]);

	old_fs = get_fs();
	set_fs(USER_DS);
	old_cred = override_creds(ctx->creds);

	timeout = jiffies + ctx->sq_thread_idle;
	while (!kthread_should_park()) {
		unsigned int to_submit;

		if (!list_empty(&ctx->poll_list)) {
			unsigned nr_events = 0;

			mutex_lock(&ctx->uring_lock);
			if (!list_empty(&ctx->poll_list))
				io_iopoll_getevents(ctx, &nr_events, 0);
			else
				timeout = jiffies + ctx->sq_thread_idle;
			mutex_unlock(&ctx->uring_lock);
		}

		to_submit = io_sqring_entries(ctx);

		/*
		 * If submit got -EBUSY, flag us as needing the application
		 * to enter the kernel to reap and flush events.
		 */
		if (!to_submit || ret == -EBUSY) {
			/*
			 * Drop cur_mm before scheduling, we can't hold it for
			 * long periods (or over schedule()). Do this before
			 * adding ourselves to the waitqueue, as the unuse/drop
			 * may sleep.
			 */
			if (cur_mm) {
				unuse_mm(cur_mm);
				mmput(cur_mm);
				cur_mm = NULL;
			}

			/*
			 * We're polling. If we're within the defined idle
			 * period, then let us spin without work before going
			 * to sleep. The exception is if we got EBUSY doing
			 * more IO, we should wait for the application to
			 * reap events and wake us up.
			 */
			if (!list_empty(&ctx->poll_list) ||
			    (!time_after(jiffies, timeout) && ret != -EBUSY &&
			    !percpu_ref_is_dying(&ctx->refs))) {
				cond_resched();
				continue;
			}

			prepare_to_wait(&ctx->sqo_wait, &wait,
						TASK_INTERRUPTIBLE);

			/*
			 * While doing polled IO, before going to sleep, we need
			 * to check if there are new reqs added to poll_list, it
			 * is because reqs may have been punted to io worker and
			 * will be added to poll_list later, hence check the
			 * poll_list again.
			 */
			if ((ctx->flags & IORING_SETUP_IOPOLL) &&
			    !list_empty_careful(&ctx->poll_list)) {
				finish_wait(&ctx->sqo_wait, &wait);
				continue;
			}

			/* Tell userspace we may need a wakeup call */
			ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
			/* make sure to read SQ tail after writing flags */
			smp_mb();

			to_submit = io_sqring_entries(ctx);
			if (!to_submit || ret == -EBUSY) {
				if (kthread_should_park()) {
					finish_wait(&ctx->sqo_wait, &wait);
					break;
				}
				if (signal_pending(current))
					flush_signals(current);
				schedule();
				finish_wait(&ctx->sqo_wait, &wait);

				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
				continue;
			}
			finish_wait(&ctx->sqo_wait, &wait);

			ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
		}

		mutex_lock(&ctx->uring_lock);
		ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
		mutex_unlock(&ctx->uring_lock);
		timeout = jiffies + ctx->sq_thread_idle;
	}

	set_fs(old_fs);
	if (cur_mm) {
		unuse_mm(cur_mm);
		mmput(cur_mm);
	}
	revert_creds(old_cred);

	kthread_parkme();

	return 0;
}
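
/*
 * Private wait-queue state for io_cqring_wait(). A custom wake function
 * lets the waker check io_should_wake() first, so waiting tasks aren't
 * bounced awake before enough completions (or a timeout) have arrived.
 */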
struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned to_wait;
	unsigned nr_timeouts;
};

static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
{
	struct io_ring_ctx *ctx = iowq->ctx;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			    int wake_flags, void *key)
{
	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
							wq);

	/* use noflush == true, as we can't safely rely on locking context */
	if (!io_should_wake(iowq, true))
		return -1;

	return autoremove_wake_function(curr, mode, wake_flags, key);
}
/*
 * Wait until events become available, if we don't already have some. The
 * application must reap them itself, as they reside on the shared cq ring.
 */
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
			  const sigset_t __user *sig, size_t sigsz)
{
	struct io_wait_queue iowq = {
		.wq = {
			.private	= current,
			.func		= io_wake_function,
			.entry		= LIST_HEAD_INIT(iowq.wq.entry),
		},
		.ctx		= ctx,
		.to_wait	= min_events,
	};
	struct io_rings *rings = ctx->rings;
	int ret = 0;

	if (io_cqring_events(ctx, false) >= min_events)
		return 0;

	if (sig) {
#ifdef CONFIG_COMPAT
		if (in_compat_syscall())
			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
						      sigsz);
		else
#endif
			ret = set_user_sigmask(sig, sigsz);

		if (ret)
			return ret;
	}

	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
	trace_io_uring_cqring_wait(ctx, min_events);
	do {
		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
		if (io_should_wake(&iowq, false))
			break;
		schedule();
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	} while (1);
	finish_wait(&ctx->wait, &iowq.wq);

	restore_saved_sigmask_unless(ret == -EINTR);

	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
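
/*
 * Fixed file set teardown. Under CONFIG_UNIX the registered files are
 * owned by SCM_RIGHTS skbs on the ring socket and the UNIX gc drops
 * them; otherwise we fput() each table entry directly.
 */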
static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#else
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file;

		file = io_file_from_index(ctx, i);
		if (file)
			fput(file);
	}
#endif
}
static void io_file_ref_kill(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	data = container_of(ref, struct fixed_file_data, refs);
	complete(&data->done);
}
static void io_file_ref_exit_and_free(struct work_struct *work)
{
	struct fixed_file_data *data;

	data = container_of(work, struct fixed_file_data, ref_work);

	/*
	 * Ensure any percpu-ref atomic switch callback has run, it could have
	 * been in progress when the files were being unregistered. Once
	 * that's done, we can safely exit and free the ref and containing
	 * data structure.
	 */
	rcu_barrier();
	percpu_ref_exit(&data->refs);
	kfree(data);
}
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	struct fixed_file_data *data = ctx->file_data;
	unsigned nr_tables, i;

	if (!data)
		return -ENXIO;

	percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
	flush_work(&data->ref_work);
	wait_for_completion(&data->done);
	io_ring_file_ref_flush(data);

	__io_sqe_files_unregister(ctx);
	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
	for (i = 0; i < nr_tables; i++)
		kfree(data->table[i].files);
	kfree(data->table);

	INIT_WORK(&data->ref_work, io_file_ref_exit_and_free);
	queue_work(system_wq, &data->ref_work);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
	return 0;
}
static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
	if (ctx->sqo_thread) {
		wait_for_completion(&ctx->completions[1]);
		/*
		 * The park is a bit of a work-around, without it we get
		 * warning spews on shutdown with SQPOLL set and affinity
		 * set to a single CPU.
		 */
		kthread_park(ctx->sqo_thread);
		kthread_stop(ctx->sqo_thread);
		ctx->sqo_thread = NULL;
	}
}

static void io_finish_async(struct io_ring_ctx *ctx)
{
	io_sq_thread_stop(ctx);

	if (ctx->io_wq) {
		io_wq_destroy(ctx->io_wq);
		ctx->io_wq = NULL;
	}
}
#if defined(CONFIG_UNIX)
/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing.
 */
static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
{
	struct sock *sk = ctx->ring_sock->sk;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;
	int i, nr_files;

	if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
		unsigned long inflight = ctx->user->unix_inflight + nr;

		if (inflight > task_rlimit(current, RLIMIT_NOFILE))
			return -EMFILE;
	}

	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
	if (!fpl)
		return -ENOMEM;

	skb = alloc_skb(0, GFP_KERNEL);
	if (!skb) {
		kfree(fpl);
		return -ENOMEM;
	}

	skb->sk = sk;

	nr_files = 0;
	fpl->user = get_uid(ctx->user);
	for (i = 0; i < nr; i++) {
		struct file *file = io_file_from_index(ctx, i + offset);

		if (!file)
			continue;
		fpl->fp[nr_files] = get_file(file);
		unix_inflight(fpl->user, fpl->fp[nr_files]);
		nr_files++;
	}

	if (nr_files) {
		fpl->max = SCM_MAX_FD;
		fpl->count = nr_files;
		UNIXCB(skb).fp = fpl;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
		skb_queue_head(&sk->sk_receive_queue, skb);

		for (i = 0; i < nr_files; i++)
			fput(fpl->fp[i]);
	} else {
		kfree_skb(skb);
		kfree(fpl);
	}

	return 0;
}
/*
 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
 * causes regular reference counting to break down. We rely on the UNIX
 * garbage collection to take care of this problem for us.
 */
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	unsigned left, total;
	int ret = 0;

	total = 0;
	left = ctx->nr_user_files;
	while (left) {
		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);

		ret = __io_sqe_files_scm(ctx, this_files, total);
		if (ret)
			break;
		left -= this_files;
		total += this_files;
	}

	if (!ret)
		return 0;

	while (total < ctx->nr_user_files) {
		struct file *file = io_file_from_index(ctx, total);

		if (file)
			fput(file);
		total++;
	}

	return ret;
}
#else
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	return 0;
}
#endif
static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
				    unsigned nr_files)
{
	int i;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_file_table *table = &ctx->file_data->table[i];
		unsigned this_files;

		this_files = min(nr_files, IORING_MAX_FILES_TABLE);
		table->files = kcalloc(this_files, sizeof(struct file *),
					GFP_KERNEL);
		if (!table->files)
			break;
		nr_files -= this_files;
	}

	if (i == nr_tables)
		return 0;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_file_table *table = &ctx->file_data->table[i];
		kfree(table->files);
	}
	return 1;
}
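
/*
 * Drop one registered file. With CONFIG_UNIX the file sits in an
 * SCM_RIGHTS skb on the ring socket, so find that skb, pull the file
 * out of its fp array, and requeue whatever skbs we had to skip.
 */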
static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}
struct io_file_put {
	struct llist_node llist;
	struct file *file;
	struct completion *done;
};

static void io_ring_file_ref_flush(struct fixed_file_data *data)
{
	struct io_file_put *pfile, *tmp;
	struct llist_node *node;

	while ((node = llist_del_all(&data->put_llist)) != NULL) {
		llist_for_each_entry_safe(pfile, tmp, node, llist) {
			io_ring_file_put(data->ctx, pfile->file);
			if (pfile->done)
				complete(pfile->done);
			else
				kfree(pfile);
		}
	}
}

static void io_ring_file_ref_switch(struct work_struct *work)
{
	struct fixed_file_data *data;

	data = container_of(work, struct fixed_file_data, ref_work);
	io_ring_file_ref_flush(data);
	percpu_ref_switch_to_percpu(&data->refs);
}

static void io_file_data_ref_zero(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	data = container_of(ref, struct fixed_file_data, refs);

	/*
	 * We can't safely switch from inside this context, punt to wq. If
	 * the table ref is going away, the table is being unregistered.
	 * Don't queue up the async work for that case, the caller will
	 * handle it.
	 */
	if (!percpu_ref_is_dying(&data->refs))
		queue_work(system_wq, &data->ref_work);
}
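
/*
 * IORING_REGISTER_FILES: build the two-level file table, set up the
 * percpu ref that guards lookups, install each fd (fd == -1 leaves a
 * sparse hole), and account the whole set with the UNIX gc.
 */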
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args)
{
	__s32 __user *fds = (__s32 __user *) arg;
	unsigned nr_tables;
	struct file *file;
	int fd, ret = 0;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;

	ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
	if (!ctx->file_data)
		return -ENOMEM;
	ctx->file_data->ctx = ctx;
	init_completion(&ctx->file_data->done);

	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
	ctx->file_data->table = kcalloc(nr_tables,
					sizeof(struct fixed_file_table),
					GFP_KERNEL);
	if (!ctx->file_data->table) {
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero,
				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}
	ctx->file_data->put_llist.first = NULL;
	INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch);

	if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
		percpu_ref_exit(&ctx->file_data->refs);
		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct fixed_file_table *table;
		unsigned index;

		ret = -EFAULT;
		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
			break;
		/* allow sparse sets */
		if (fd == -1) {
			ret = 0;
			continue;
		}

		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		file = fget(fd);

		ret = -EBADF;
		if (!file)
			break;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (file->f_op == &io_uring_fops) {
			fput(file);
			break;
		}
		ret = 0;
		table->files[index] = file;
	}

	if (ret) {
		for (i = 0; i < ctx->nr_user_files; i++) {
			file = io_file_from_index(ctx, i);
			if (file)
				fput(file);
		}
		for (i = 0; i < nr_tables; i++)
			kfree(ctx->file_data->table[i].files);

		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		ctx->nr_user_files = 0;
		return ret;
	}

	ret = io_sqe_files_scm(ctx);
	if (ret)
		io_sqe_files_unregister(ctx);

	return ret;
}
static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
				int index)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sock->sk_receive_queue;
	struct sk_buff *skb;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb) {
		struct scm_fp_list *fpl = UNIXCB(skb).fp;

		if (fpl->count < SCM_MAX_FD) {
			__skb_unlink(skb, head);
			spin_unlock_irq(&head->lock);
			fpl->fp[fpl->count] = get_file(file);
			unix_inflight(fpl->user, fpl->fp[fpl->count]);
			fpl->count++;
			spin_lock_irq(&head->lock);
			__skb_queue_head(head, skb);
		} else {
			skb = NULL;
		}
	}
	spin_unlock_irq(&head->lock);

	if (skb) {
		fput(file);
		return 0;
	}

	return __io_sqe_files_scm(ctx, 1, index);
#else
	return 0;
#endif
}
static void io_atomic_switch(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	/*
	 * Juggle reference to ensure we hit zero, if needed, so we can
	 * switch back to percpu mode
	 */
	data = container_of(ref, struct fixed_file_data, refs);
	percpu_ref_put(&data->refs);
	percpu_ref_get(&data->refs);
}
static bool io_queue_file_removal(struct fixed_file_data *data,
				  struct file *file)
{
	struct io_file_put *pfile, pfile_stack;
	DECLARE_COMPLETION_ONSTACK(done);

	/*
	 * If we fail allocating the struct we need for doing async removal
	 * of this file, just punt to sync and wait for it.
	 */
	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
	if (!pfile) {
		pfile = &pfile_stack;
		pfile->done = &done;
	}

	pfile->file = file;
	llist_add(&pfile->llist, &data->put_llist);

	if (pfile == &pfile_stack) {
		percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
		wait_for_completion(&done);
		flush_work(&data->ref_work);
		return false;
	}

	return true;
}
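
/*
 * IORING_REGISTER_FILES_UPDATE: for each slot in the given range, any
 * existing file is queued for removal and the incoming fd (if not -1)
 * is installed in its place. Returns the number of slots updated, or an
 * error if no progress was made.
 */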
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *up,
				 unsigned nr_args)
{
	struct fixed_file_data *data = ctx->file_data;
	bool ref_switch = false;
	struct file *file;
	__s32 __user *fds;
	int fd, i, err;
	__u32 done;

	if (check_add_overflow(up->offset, nr_args, &done))
		return -EOVERFLOW;
	if (done > ctx->nr_user_files)
		return -EINVAL;

	done = 0;
	fds = u64_to_user_ptr(up->fds);
	while (nr_args) {
		struct fixed_file_table *table;
		unsigned index;

		err = 0;
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		i = array_index_nospec(up->offset, ctx->nr_user_files);
		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		if (table->files[index]) {
			file = io_file_from_index(ctx, index);
			table->files[index] = NULL;
			if (io_queue_file_removal(data, file))
				ref_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (file->f_op == &io_uring_fops) {
				fput(file);
				err = -EBADF;
				break;
			}
			table->files[index] = file;
			err = io_sqe_file_register(ctx, file, i);
			if (err)
				break;
		}
		nr_args--;
		done++;
		up->offset++;
	}

	if (ref_switch)
		percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);

	return done ? done : err;
}
static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
			       unsigned nr_args)
{
	struct io_uring_files_update up;

	if (!ctx->file_data)
		return -ENXIO;
	if (!nr_args)
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (up.resv)
		return -EINVAL;

	return __io_sqe_files_update(ctx, &up, nr_args);
}
static void io_put_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	io_put_req(req);
}

static void io_get_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	refcount_inc(&req->refs);
}
static int io_init_wq_offload(struct io_ring_ctx *ctx,
			      struct io_uring_params *p)
{
	struct io_wq_data data;
	struct fd f;
	struct io_ring_ctx *ctx_attach;
	unsigned int concurrency;
	int ret = 0;

	data.user = ctx->user;
	data.get_work = io_get_work;
	data.put_work = io_put_work;

	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
		/* Do QD, or 4 * CPUS, whatever is smallest */
		concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
		ctx->io_wq = io_wq_create(concurrency, &data);
		if (IS_ERR(ctx->io_wq)) {
			ret = PTR_ERR(ctx->io_wq);
			ctx->io_wq = NULL;
		}
		return ret;
	}

	f = fdget(p->wq_fd);
	if (!f.file)
		return -EBADF;

	if (f.file->f_op != &io_uring_fops) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx_attach = f.file->private_data;
	/* @io_wq is protected by holding the fd */
	if (!io_wq_get(ctx_attach->io_wq, &data)) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx->io_wq = ctx_attach->io_wq;
out_fput:
	fdput(f);
	return ret;
}
static int io_sq_offload_start(struct io_ring_ctx *ctx,
			       struct io_uring_params *p)
{
	int ret;

	init_waitqueue_head(&ctx->sqo_wait);
	mmgrab(current->mm);
	ctx->sqo_mm = current->mm;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto err;

		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids)
				goto err;
			if (!cpu_online(cpu))
				goto err;

			ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
							ctx, cpu,
							"io_uring-sq");
		} else {
			ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
							"io_uring-sq");
		}
		if (IS_ERR(ctx->sqo_thread)) {
			ret = PTR_ERR(ctx->sqo_thread);
			ctx->sqo_thread = NULL;
			goto err;
		}
		wake_up_process(ctx->sqo_thread);
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	ret = io_init_wq_offload(ctx, p);
	if (ret)
		goto err;

	return 0;
err:
	io_finish_async(ctx);
	mmdrop(ctx->sqo_mm);
	ctx->sqo_mm = NULL;
	return ret;
}
static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
					new_pages) != cur_pages);

	return 0;
}
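
/*
 * Ring memory is allocated as compound pages and released by reference,
 * since the same pages are mapped into userspace via mmap.
 */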
static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
				size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	if (sq_offset)
		*sq_offset = off;

	return off;
}
static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
{
	size_t pages;

	pages = (size_t)1 << get_order(
		rings_size(sq_entries, cq_entries, NULL));
	pages += (size_t)1 << get_order(
		array_size(sizeof(struct io_uring_sqe), sq_entries));

	return pages;
}
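
/*
 * Fixed buffers: registered buffers are pinned long-term and accounted
 * against RLIMIT_MEMLOCK (unless the user has CAP_IPC_LOCK), then kept
 * as bio_vec arrays for the fixed read/write opcodes.
 */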
static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
{
	int i, j;

	if (!ctx->user_bufs)
		return -ENXIO;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++)
			unpin_user_page(imu->bvec[j].bv_page);

		if (ctx->account_mem)
			io_unaccount_mem(ctx->user, imu->nr_bvecs);
		kvfree(imu->bvec);
		imu->nr_bvecs = 0;
	}

	kfree(ctx->user_bufs);
	ctx->user_bufs = NULL;
	ctx->nr_user_bufs = 0;
	return 0;
}
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
				  unsigned nr_args)
{
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, j, got_pages = 0;
	int ret = -EINVAL;

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
		return -EINVAL;

	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
					GFP_KERNEL);
	if (!ctx->user_bufs)
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
		unsigned long off, start, end, ubuf;
		int pret, nr_pages;
		struct iovec iov;
		size_t size;

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			goto err;

		/*
		 * Don't impose further limits on the size and buffer
		 * constraints here, we'll -EINVAL later when IO is
		 * submitted if they are wrong.
		 */
		ret = -EFAULT;
		if (!iov.iov_base || !iov.iov_len)
			goto err;

		/* arbitrary limit, but we need something */
		if (iov.iov_len > SZ_1G)
			goto err;

		ubuf = (unsigned long) iov.iov_base;
		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = ubuf >> PAGE_SHIFT;
		nr_pages = end - start;

		if (ctx->account_mem) {
			ret = io_account_mem(ctx->user, nr_pages);
			if (ret)
				goto err;
		}

		ret = 0;
		if (!pages || nr_pages > got_pages) {
			kvfree(vmas);
			kvfree(pages);
			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
						GFP_KERNEL);
			vmas = kvmalloc_array(nr_pages,
					sizeof(struct vm_area_struct *),
					GFP_KERNEL);
			if (!pages || !vmas) {
				ret = -ENOMEM;
				if (ctx->account_mem)
					io_unaccount_mem(ctx->user, nr_pages);
				goto err;
			}
			got_pages = nr_pages;
		}

		imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
						GFP_KERNEL);
		ret = -ENOMEM;
		if (!imu->bvec) {
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			goto err;
		}

		ret = 0;
		down_read(&current->mm->mmap_sem);
		pret = pin_user_pages(ubuf, nr_pages,
				      FOLL_WRITE | FOLL_LONGTERM,
				      pages, vmas);
		if (pret == nr_pages) {
			/* don't support file backed memory */
			for (j = 0; j < nr_pages; j++) {
				struct vm_area_struct *vma = vmas[j];

				if (vma->vm_file &&
				    !is_file_hugepages(vma->vm_file)) {
					ret = -EOPNOTSUPP;
					break;
				}
			}
		} else {
			ret = pret < 0 ? pret : -EFAULT;
		}
		up_read(&current->mm->mmap_sem);
		if (ret) {
			/*
			 * if we did partial map, or found file backed vmas,
			 * release any pages we did get
			 */
			if (pret > 0)
				unpin_user_pages(pages, pret);
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			kvfree(imu->bvec);
			goto err;
		}

		off = ubuf & ~PAGE_MASK;
		size = iov.iov_len;
		for (j = 0; j < nr_pages; j++) {
			size_t vec_len;

			vec_len = min_t(size_t, size, PAGE_SIZE - off);
			imu->bvec[j].bv_page = pages[j];
			imu->bvec[j].bv_len = vec_len;
			imu->bvec[j].bv_offset = off;
			off = 0;
			size -= vec_len;
		}
		/* store original address for later verification */
		imu->ubuf = ubuf;
		imu->len = iov.iov_len;
		imu->nr_bvecs = nr_pages;

		ctx->nr_user_bufs++;
	}
	kvfree(pages);
	kvfree(vmas);
	return 0;
err:
	kvfree(pages);
	kvfree(vmas);
	io_sqe_buffer_unregister(ctx);
	return ret;
}
static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}
static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	io_finish_async(ctx);
	if (ctx->sqo_mm)
		mmdrop(ctx->sqo_mm);

	io_iopoll_reap_events(ctx);
	io_sqe_buffer_unregister(ctx);
	io_sqe_files_unregister(ctx);
	io_eventfd_unregister(ctx);
	idr_destroy(&ctx->personality_idr);

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		ctx->ring_sock->file = NULL; /* so that iput() is called */
		sock_release(ctx->ring_sock);
	}
#endif

	io_mem_free(ctx->rings);
	io_mem_free(ctx->sq_sqes);

	percpu_ref_exit(&ctx->refs);
	if (ctx->account_mem)
		io_unaccount_mem(ctx->user,
				ring_pages(ctx->sq_entries, ctx->cq_entries));
	free_uid(ctx->user);
	put_cred(ctx->creds);
	kfree(ctx->completions);
	kfree(ctx->cancel_hash);
	kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx);
}
static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->cq_wait, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
	    ctx->rings->sq_ring_entries)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (io_cqring_events(ctx, false))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
static int io_uring_fasync(int fd, struct file *file, int on)
{
	struct io_ring_ctx *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->cq_fasync);
}
static int io_remove_personalities(int id, void *p, void *data)
{
	struct io_ring_ctx *ctx = data;
	const struct cred *cred;

	cred = idr_remove(&ctx->personality_idr, id);
	if (cred)
		put_cred(cred);
	return 0;
}
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	mutex_unlock(&ctx->uring_lock);

	/*
	 * Wait for sq thread to idle, if we have one. It won't spin on new
	 * work after we've killed the ctx ref above. This is important to do
	 * before we cancel existing commands, as the thread could otherwise
	 * be queueing new work post that. If that's work we need to cancel,
	 * it could cause shutdown to hang.
	 */
	while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
		cond_resched();

	io_kill_timeouts(ctx);
	io_poll_remove_all(ctx);

	if (ctx->io_wq)
		io_wq_cancel_all(ctx->io_wq);

	io_iopoll_reap_events(ctx);
	/* if we failed setting up the ctx, we might not have any rings */
	if (ctx->rings)
		io_cqring_overflow_flush(ctx, true);
	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
	wait_for_completion(&ctx->completions[0]);
	io_ring_ctx_free(ctx);
}
static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct files_struct *files)
{
	struct io_kiocb *req;
	DEFINE_WAIT(wait);

	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_kiocb *cancel_req = NULL;

		spin_lock_irq(&ctx->inflight_lock);
		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
			if (req->work.files != files)
				continue;
			/* req is being completed, ignore */
			if (!refcount_inc_not_zero(&req->refs))
				continue;
			cancel_req = req;
			break;
		}
		if (cancel_req)
			prepare_to_wait(&ctx->inflight_wait, &wait,
						TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ctx->inflight_lock);

		/* We need to keep going until we don't find a matching req */
		if (!cancel_req)
			break;

		if (cancel_req->flags & REQ_F_OVERFLOW) {
			spin_lock_irq(&ctx->completion_lock);
			list_del(&cancel_req->list);
			cancel_req->flags &= ~REQ_F_OVERFLOW;
			if (list_empty(&ctx->cq_overflow_list)) {
				clear_bit(0, &ctx->sq_check_overflow);
				clear_bit(0, &ctx->cq_check_overflow);
			}
			spin_unlock_irq(&ctx->completion_lock);

			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));

			/*
			 * Put inflight ref and overflow ref. If that's
			 * all we had, then we're done with this request.
			 */
			if (refcount_sub_and_test(2, &cancel_req->refs)) {
				io_put_req(cancel_req);
				continue;
			}
		}

		io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
		io_put_req(cancel_req);
		schedule();
	}
	finish_wait(&ctx->inflight_wait, &wait);
}
static int io_uring_flush(struct file *file, void *data)
{
	struct io_ring_ctx *ctx = file->private_data;

	io_uring_cancel_files(ctx, data);

	/*
	 * If the task is going away, cancel work it may have pending
	 */
	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
		io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));

	return 0;
}
static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);

	return ptr;
}
#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len,
	unsigned long pgoff, unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */
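
/*
 * io_uring_enter() - submit and/or wait for completions. With SQPOLL
 * the kernel thread owns submission, so in that case we only flush
 * overflow, optionally wake the thread, and report to_submit consumed.
 */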
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const sigset_t __user *, sig,
		size_t, sigsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (!list_empty_careful(&ctx->cq_overflow_list))
			io_cqring_overflow_flush(ctx, false);
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sqo_wait);
		submitted = to_submit;
	} else if (to_submit) {
		struct mm_struct *cur_mm;

		mutex_lock(&ctx->uring_lock);
		/* already have mm, so io_submit_sqes() won't try to grab it */
		cur_mm = ctx->sqo_mm;
		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
					   &cur_mm, false);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		unsigned nr_events = 0;

		min_complete = min(min_complete, ctx->cq_entries);

		if (ctx->flags & IORING_SETUP_IOPOLL) {
			ret = io_iopoll_check(ctx, &nr_events, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}
#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(int id, void *p, void *data)
{
	const struct cred *cred = p;
	struct seq_file *m = data;
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
					from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}
*ctx
, struct seq_file
*m
)
6710 mutex_lock(&ctx
->uring_lock
);
6711 seq_printf(m
, "UserFiles:\t%u\n", ctx
->nr_user_files
);
6712 for (i
= 0; i
< ctx
->nr_user_files
; i
++) {
6713 struct fixed_file_table
*table
;
6716 table
= &ctx
->file_data
->table
[i
>> IORING_FILE_TABLE_SHIFT
];
6717 f
= table
->files
[i
& IORING_FILE_TABLE_MASK
];
6719 seq_printf(m
, "%5u: %s\n", i
, file_dentry(f
)->d_iname
);
6721 seq_printf(m
, "%5u: <none>\n", i
);
6723 seq_printf(m
, "UserBufs:\t%u\n", ctx
->nr_user_bufs
);
6724 for (i
= 0; i
< ctx
->nr_user_bufs
; i
++) {
6725 struct io_mapped_ubuf
*buf
= &ctx
->user_bufs
[i
];
6727 seq_printf(m
, "%5u: 0x%llx/%u\n", i
, buf
->ubuf
,
6728 (unsigned int) buf
->len
);
6730 if (!idr_is_empty(&ctx
->personality_idr
)) {
6731 seq_printf(m
, "Personalities:\n");
6732 idr_for_each(&ctx
->personality_idr
, io_uring_show_cred
, m
);
6734 mutex_unlock(&ctx
->uring_lock
);
static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif
static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.flush		= io_uring_flush,
	.mmap		= io_uring_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};
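
/*
 * Allocate the rings. The SQ/CQ headers, CQE array, and SQ index array
 * share one allocation (the basis of IORING_FEAT_SINGLE_MMAP); the SQE
 * array is allocated separately and mapped at IORING_OFF_SQES.
 */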
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;
	ctx->sq_mask = rings->sq_ring_mask;
	ctx->cq_mask = rings->cq_ring_mask;
	ctx->sq_entries = rings->sq_ring_entries;
	ctx->cq_entries = rings->cq_ring_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -EOVERFLOW;
	}

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -ENOMEM;
	}

	return 0;
}
/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
#endif
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}
static int io_uring_create(unsigned entries, struct io_uring_params *p)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool account_mem;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	user = get_uid(current_user());
	account_mem = !capable(CAP_IPC_LOCK);

	if (account_mem) {
		ret = io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (account_mem)
			io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->account_mem = account_mem;
	ctx->user = user;
	ctx->creds = get_current_cred();

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY;
	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}
/*
 * Sets up an aio uring context, and returns the fd. The application asks
 * for a ring size; we return the actual sq/cq ring sizes (among other
 * things) in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	long ret;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
		return -EINVAL;

	ret = io_uring_create(entries, &p);
	if (ret < 0)
		return ret;

	if (copy_to_user(params, &p, sizeof(p)))
		return -EFAULT;

	return ret;
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}
static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = -EFAULT;
	if (copy_to_user(arg, p, size))
		goto out;
	ret = 0;
out:
	kfree(p);
	return ret;
}
*ctx
)
7038 const struct cred
*creds
= get_current_cred();
7041 id
= idr_alloc_cyclic(&ctx
->personality_idr
, (void *) creds
, 1,
7042 USHRT_MAX
, GFP_KERNEL
);
7048 static int io_unregister_personality(struct io_ring_ctx
*ctx
, unsigned id
)
7050 const struct cred
*old_creds
;
7052 old_creds
= idr_remove(&ctx
->personality_idr
, id
);
7054 put_cred(old_creds
);
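
/*
 * Opcodes that only touch state which is never read locklessly by the
 * submission path don't need a full ring quiesce; everything else must
 * drain ctx refs before it can safely rewrite shared state.
 */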
static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex, if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		ret = wait_for_completion_interruptible(&ctx->completions[0]);
		mutex_lock(&ctx->uring_lock);
		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			ret = -EINTR;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out:
		reinit_completion(&ctx->completions[0]);
	}
	return ret;
}
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
							ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);