/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/anon_inodes.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0

struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */
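/*
 * Illustrative sketch (not part of this file): because io_setup() hands the
 * mmap base of this ring back to userspace as the aio_context_t (see
 * aio_setup_ring() and sys_io_setup() below), a caller can peek at the ring
 * header directly.  The cast and printf() are assumptions about the caller;
 * only the field layout comes from struct aio_ring above.
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx;   // ctx from io_setup()
 *
 *	if (ring->magic == 0xa10a10a1 && ring->incompat_features == 0)
 *		printf("%u events buffered\n",
 *		       (ring->tail + ring->nr - ring->head) % ring->nr);
 */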
#define AIO_RING_PAGES	8

struct kioctx {
	atomic_t		users;
	atomic_t		dead;

	/* This needs improving */
	unsigned long		user_id;
	struct hlist_node	list;

	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_head		rcu_head;
	struct work_struct	rcu_work;

	struct {
		atomic_t	reqs_active;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;
/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);
static void aio_free_ring(struct kioctx *ctx)
{
	int i;
	struct file *aio_ring_file = ctx->aio_ring_file;

	for (i = 0; i < ctx->nr_pages; i++) {
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		put_page(ctx->ring_pages[i]);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
		kfree(ctx->ring_pages);

	if (aio_ring_file) {
		truncate_setsize(aio_ring_file->f_inode, 0);
		pr_debug("pid(%d) i_nlink=%u d_count=%d d_unhashed=%d i_count=%d\n",
			 current->pid, aio_ring_file->f_inode->i_nlink,
			 aio_ring_file->f_path.dentry->d_count,
			 d_unhashed(aio_ring_file->f_path.dentry),
			 atomic_read(&aio_ring_file->f_inode->i_count));
		fput(aio_ring_file);
		ctx->aio_ring_file = NULL;
	}
}
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

static int aio_set_page_dirty(struct page *page)
{
	return 0;
}

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			   struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx = mapping->private_data;
	unsigned long flags;
	unsigned idx = old->index;
	int rc;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	put_page(old);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
	if (rc != MIGRATEPAGE_SUCCESS) {
		/* putback our page and fix up the index */
		get_page(old);
		return rc;
	}

	get_page(new);

	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty	= aio_set_page_dirty,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};
static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, populate;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -EAGAIN;
	}

	file->f_inode->i_mapping->a_ops = &aio_ctx_aops;
	file->f_inode->i_mapping->private_data = ctx;
	file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = find_or_create_page(file->f_inode->i_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);
	}
	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages)
			return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	down_write(&mm->mmap_sem);
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED | MAP_POPULATE, 0, &populate);
	if (IS_ERR((void *)ctx->mmap_base)) {
		up_write(&mm->mmap_sem);
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}
	up_write(&mm->mmap_sem);

	mm_populate(ctx->mmap_base, populate);

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
				       1, 0, ctx->ring_pages, NULL);
	for (i = 0; i < ctx->nr_pages; i++)
		put_page(ctx->ring_pages[i]);

	if (unlikely(ctx->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
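/*
 * Worked example (illustrative, assuming 4096-byte pages and the 32-byte
 * struct io_event from <linux/aio_abi.h>): AIO_EVENTS_PER_PAGE is then 128,
 * AIO_EVENTS_FIRST_PAGE is slightly smaller because the aio_ring header
 * occupies the start of page 0, and AIO_EVENTS_OFFSET is the number of
 * event slots lost to that header.  Event number i therefore lives at:
 *
 *	pos  = i + AIO_EVENTS_OFFSET;
 *	page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
 *	slot = pos % AIO_EVENTS_PER_PAGE;
 *
 * which is exactly the calculation aio_complete() and
 * aio_read_events_ring() perform below.
 */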
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
{
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
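/*
 * Illustrative sketch (not part of this file): a driver whose requests can
 * be aborted registers a cancel callback before returning -EIOCBQUEUED from
 * its aio_read()/aio_write() method.  The my_dev_*() names are hypothetical;
 * only the callback signature and kiocb_set_cancel_fn() come from this file.
 *
 *	static int my_dev_cancel(struct kiocb *iocb, struct io_event *event)
 *	{
 *		// Ask the hardware to abort; the normal completion path
 *		// still runs and calls aio_complete() for this iocb.
 *		return my_dev_abort(iocb->private);
 *	}
 *
 *	static ssize_t my_dev_aio_read(struct kiocb *iocb, const struct iovec *iov,
 *				       unsigned long nr_segs, loff_t pos)
 *	{
 *		kiocb_set_cancel_fn(iocb, my_dev_cancel);
 *		my_dev_queue(iocb, iov, nr_segs, pos);
 *		return -EIOCBQUEUED;
 *	}
 */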
static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
			struct io_event *res)
{
	kiocb_cancel_fn *old, *cancel;
	int ret = -EINVAL;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */
	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return ret;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	atomic_inc(&kiocb->ki_users);
	spin_unlock_irq(&ctx->ctx_lock);

	memset(res, 0, sizeof(*res));
	res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
	res->data = kiocb->ki_user_data;
	ret = cancel(kiocb, res);

	spin_lock_irq(&ctx->ctx_lock);

	return ret;
}
static void free_ioctx_rcu(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	kmem_cache_free(kioctx_cachep, ctx);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct io_event res;
	struct kiocb *req;
	unsigned head, avail;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(ctx, req, &res);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	kunmap_atomic(ring);

	while (atomic_read(&ctx->reqs_active) > 0) {
		wait_event(ctx->wait,
			   head != ctx->tail ||
			   atomic_read(&ctx->reqs_active) <= 0);

		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;

		atomic_sub(avail, &ctx->reqs_active);
		head += avail;
		head %= ctx->nr_events;
	}

	WARN_ON(atomic_read(&ctx->reqs_active) < 0);

	aio_free_ring(ctx);

	pr_debug("freeing %p\n", ctx);

	/*
	 * Here the call_rcu() is between the wait_event() for reqs_active to
	 * hit 0, and freeing the ioctx.
	 *
	 * aio_complete() decrements reqs_active, but it has to touch the ioctx
	 * after to issue a wakeup so we use rcu.
	 */
	call_rcu(&ctx->rcu_head, free_ioctx_rcu);
}
static void put_ioctx(struct kioctx *ctx)
{
	if (unlikely(atomic_dec_and_test(&ctx->users)))
		free_ioctx(ctx);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	atomic_set(&ctx->users, 2);
	atomic_set(&ctx->dead, 0);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > aio_max_nr ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		goto out_cleanup;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

out_cleanup:
	err = -EAGAIN;
	aio_free_ring(ctx);
out_freectx:
	if (ctx->aio_ring_file)
		fput(ctx->aio_ring_file);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}
static void kill_ioctx_work(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, rcu_work);

	wake_up_all(&ctx->wait);
	put_ioctx(ctx);
}

static void kill_ioctx_rcu(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);

	INIT_WORK(&ctx->rcu_work, kill_ioctx_work);
	schedule_work(&ctx->rcu_work);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void kill_ioctx(struct kioctx *ctx)
{
	if (!atomic_xchg(&ctx->dead, 1)) {
		hlist_del_rcu(&ctx->list);

		/*
		 * It'd be more correct to do this in free_ioctx(), after all
		 * the outstanding kiocbs have finished - but by then io_destroy
		 * has already returned, so io_setup() could potentially return
		 * -EAGAIN with no ioctxs actually in use (as far as userspace
		 * could tell).
		 */
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
		aio_nr -= ctx->max_reqs;
		spin_unlock(&aio_nr_lock);

		if (ctx->mmap_size)
			vm_munmap(ctx->mmap_base, ctx->mmap_size);

		/* Between hlist_del_rcu() and dropping the initial ref */
		call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
	}
}
/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (atomic_read(&iocb->ki_users)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&iocb->ki_users))
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);
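/*
 * Illustrative sketch (not part of this file): this is how a synchronous
 * read path drives an ->aio_read() implementation and then parks in
 * wait_on_sync_kiocb() if the operation was queued.  It is modelled on
 * do_sync_read() in fs/read_write.c rather than taken from this file.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct kiocb kiocb;
 *	ssize_t ret;
 *
 *	init_sync_kiocb(&kiocb, filp);
 *	kiocb.ki_pos = *ppos;
 *	kiocb.ki_left = len;
 *	kiocb.ki_nbytes = len;
 *
 *	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 *	if (ret == -EIOCBQUEUED)
 *		ret = wait_on_sync_kiocb(&kiocb);
 *	*ppos = kiocb.ki_pos;
 */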
/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;
	struct hlist_node *n;

	hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users),
				atomic_read(&ctx->dead),
				atomic_read(&ctx->reqs_active));
		/*
		 * We don't need to bother with munmap() here -
		 * exit_mmap(mm) is coming and it'll unmap everything.
		 * Since aio_free_ring() uses non-zero ->mmap_size
		 * as indicator that it needs to unmap the area,
		 * just set it to 0; aio_free_ring() is the only
		 * place that uses ->mmap_size, so it's safe.
		 */
		ctx->mmap_size = 0;

		kill_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the ki_users count
 *	of the kioctx so that the kioctx stays around until all requests are
 *	complete.  Returns NULL if no requests are free.
 *
 *	Returns with kiocb->ki_users set to 2.  The io submit code path holds
 *	an extra reference while submitting the i/o.
 *	This prevents races between the aio code path referencing the
 *	req (after submitting it) and aio_complete() freeing the req.
 */
static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;

	if (atomic_read(&ctx->reqs_active) >= ctx->nr_events)
		return NULL;

	if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1)
		goto out_put;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	atomic_set(&req->ki_users, 2);
	req->ki_ctx = ctx;

	return req;
out_put:
	atomic_dec(&ctx->reqs_active);
	return NULL;
}
static void kiocb_free(struct kiocb *req)
{
	if (req->ki_filp)
		fput(req->ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
}

void aio_put_req(struct kiocb *req)
{
	if (atomic_dec_and_test(&req->ki_users))
		kiocb_free(req);
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id) {
			atomic_inc(&ctx->users);
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
void aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned long	flags;
	unsigned	tail, pos;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(atomic_read(&iocb->ki_users) != 1);
		iocb->ki_user_data = res;
		atomic_set(&iocb->ki_users, 0);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	/*
	 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
	 * need to issue a wakeup after decrementing reqs_active.
	 */
	rcu_read_lock();

	if (iocb->ki_list.next) {
		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (unlikely(xchg(&iocb->ki_cancel,
			  KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
		atomic_dec(&ctx->reqs_active);
		/* Still need the wake_up in case free_ioctx is waiting */
		goto put_rq;
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * here.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	aio_put_req(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	rcu_read_unlock();
}
EXPORT_SYMBOL(aio_complete);
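/*
 * Illustrative sketch (not part of this file): a driver that returned
 * -EIOCBQUEUED from its aio_read()/aio_write() method reports the final
 * byte count (or negative errno) from its completion path, typically
 * interrupt or workqueue context.  my_dev_irq_done() and the req fields
 * are hypothetical names.
 *
 *	static void my_dev_irq_done(struct my_dev_request *req)
 *	{
 *		// res is the byte count on success or a negative errno;
 *		// this wakes any io_getevents() waiters on the context.
 *		aio_complete(req->iocb, req->error ?: req->bytes_done, 0);
 *	}
 */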
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched.
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, pos;
	long ret = 0;
	int copy_ret;

	mutex_lock(&ctx->ring_lock);

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	kunmap_atomic(ring);

	pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events);

	if (head == ctx->tail)
		goto out;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
		if (head == ctx->tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			      ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, ctx->tail);

	atomic_sub(ret, &ctx->reqs_active);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = { .tv64 = KTIME_MAX };
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	wait_event_interruptible_hrtimeout(ctx->wait,
			aio_read_events(ctx, min_nr, nr, event, &ret), until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized, or
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(ioctx);
		put_ioctx(ioctx);
	}

out:
	return ret;
}
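/*
 * Illustrative sketch (not part of this file): a minimal userspace caller
 * creating and destroying a context with the raw syscalls.  The syscall(2)
 * wrapper and headers are assumptions about the caller, not defined here.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	aio_context_t ctx = 0;			// must be zero before the call
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *	// ... submit and reap I/O ...
 *	syscall(__NR_io_destroy, ctx);
 */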
/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		kill_ioctx(ioctx);
		put_ioctx(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}
static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
			    unsigned long, loff_t);

static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret = 0;

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	if (rw == WRITE)
		file_start_write(file);
	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as its a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (rw == WRITE ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
	if (rw == WRITE)
		file_end_write(file);

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (rw == WRITE
	    && ret < 0 && ret != -EIOCBQUEUED
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}
static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(rw,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	else
#endif
		ret = rw_copy_check_uvector(rw,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	if (ret < 0)
		return ret;

	/* ki_nbytes now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	return 0;
}

static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb)
{
	if (unlikely(!access_ok(!rw, kiocb->ki_buf, kiocb->ki_nbytes)))
		return -EFAULT;

	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_nbytes;
	kiocb->ki_nr_segs = 1;
	return 0;
}
/*
 * aio_run_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_run_iocb(struct kiocb *req, bool compat)
{
	struct file *file = req->ki_filp;
	ssize_t ret;
	int rw;
	fmode_t mode;
	aio_rw_op *rw_op;

	switch (req->ki_opcode) {
	case IOCB_CMD_PREAD:
	case IOCB_CMD_PREADV:
		mode	= FMODE_READ;
		rw	= READ;
		rw_op	= file->f_op->aio_read;
		goto rw_common;

	case IOCB_CMD_PWRITE:
	case IOCB_CMD_PWRITEV:
		mode	= FMODE_WRITE;
		rw	= WRITE;
		rw_op	= file->f_op->aio_write;
		goto rw_common;
rw_common:
		if (unlikely(!(file->f_mode & mode)))
			return -EBADF;

		if (!rw_op)
			return -EINVAL;

		ret = (req->ki_opcode == IOCB_CMD_PREADV ||
		       req->ki_opcode == IOCB_CMD_PWRITEV)
			? aio_setup_vectored_rw(rw, req, compat)
			: aio_setup_single_vector(rw, req);
		if (ret)
			return ret;

		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
		if (ret < 0)
			return ret;

		req->ki_nbytes = ret;
		req->ki_left = ret;

		ret = aio_rw_vect_retry(req, rw, rw_op);
		break;

	case IOCB_CMD_FDSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 1);
		break;

	case IOCB_CMD_FSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 0);
		break;

	default:
		pr_debug("EINVAL: no operation provided\n");
		return -EINVAL;
	}

	if (ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND ||
			     ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(req, ret, 0);
	}

	return 0;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_run_iocb(req, compat);
	if (ret)
		goto out_put_req;

	aio_put_req(req);	/* drop extra ref to req */
	return 0;
out_put_req:
	atomic_dec(&ctx->reqs_active);
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}
long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	put_ioctx(ctx);
	return i ? i : ret;
}
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, or if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
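/*
 * Illustrative sketch (not part of this file): queueing one read with the
 * raw syscall.  The fd, buffer and syscall(2) wrapper are assumptions about
 * the caller; the struct iocb fields are those consumed by io_submit_one()
 * above.
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = 4096;
 *	cb.aio_offset     = 0;
 *	cb.aio_data       = 42;		// copied verbatim into io_event.data
 *
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */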
/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);

		if (kiocb->ki_obj.user == iocb)
			return kiocb;
	}
	return NULL;
}
/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct io_event res;
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(ctx, kiocb, &res);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/* Cancellation succeeded -- copy the result
		 * into the user's buffer.
		 */
		if (copy_to_user(result, &res, sizeof(res)))
			ret = -EFAULT;
	}

	put_ioctx(ctx);

	return ret;
}
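/*
 * Illustrative sketch (not part of this file): attempting to cancel the
 * iocb submitted in the earlier sketch.  The syscall(2) and perror()
 * calls are assumptions about the caller; note that most in-flight file
 * I/O has no cancel callback registered, so the call typically fails and
 * the request still completes via io_getevents().
 *
 *	struct io_event result;
 *
 *	if (syscall(__NR_io_cancel, ctx, &cb, &result) == 0)
 *		printf("cancelled, res=%lld\n", (long long)result.res);
 *	else
 *		perror("io_cancel");
 */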
/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, or if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout.  Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}
	return ret;
}
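/*
 * Illustrative sketch (not part of this file): reaping completions with a
 * one second timeout.  The syscall(2) wrapper and printf() are assumptions
 * about the caller; the io_event fields are filled in by aio_complete()
 * above.
 *
 *	struct io_event ev[8];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int i, n;
 *
 *	n = syscall(__NR_io_getevents, ctx, 1, 8, ev, &ts);
 *	for (i = 0; i < n; i++)
 *		printf("iocb %llx: res=%lld res2=%lld\n",
 *		       (unsigned long long)ev[i].obj,
 *		       (long long)ev[i].res, (long long)ev[i].res2);
 */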