/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise	<bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0

struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */
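/*
 * Usage note (hedged sketch, not something this file implements): because the
 * ring is mapped into the address space of the process that called io_setup()
 * (the aio_context_t handle is in fact the mmap address, see ctx->user_id
 * below), a userspace reaper can in principle consume completed events
 * straight from this structure instead of calling io_getevents(), along
 * these lines (memory-ordering and incompat_features checks omitted):
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id;
 *	struct io_event *evs =
 *		(struct io_event *)((char *)ring + ring->header_length);
 *	while (ring->head != ring->tail) {
 *		struct io_event ev = evs[ring->head];
 *		ring->head = (ring->head + 1) % ring->nr;
 *		consume(&ev);			(hypothetical handler)
 *	}
 *
 * The "head" member is documented above as written to by userland for
 * exactly this reason.
 */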
#define AIO_RING_PAGES	8

struct kioctx_table {
	struct rcu_head	rcu;
	unsigned	nr;
	struct kioctx	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct work_struct	free_work;

	/*
	 * signals when all in-flight requests are done
	 */
	struct completion	*requests_done;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
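/*
 * aio_nr and aio_max_nr are the values behind the fs.aio-nr and fs.aio-max-nr
 * sysctls (/proc/sys/fs/aio-nr and /proc/sys/fs/aio-max-nr); the sysctl table
 * entries themselves are registered outside this file.
 */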
static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct qstr this = QSTR_INIT("[aio]", 5);
	struct file *file;
	struct path path;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(aio_mnt);

	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDWR;
	return file;
}
static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname	= simple_dname,
	};
	return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
}
/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);
static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	if (aio_ring_file) {
		truncate_setsize(aio_ring_file->f_inode, 0);

		/* Prevent further access to the kioctx from migratepages */
		spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
		aio_ring_file->f_inode->i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);

		fput(aio_ring_file);
	}
}
static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = table->table[i];
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
	.mremap = aio_ring_remap,
};
#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			   struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc = 0;

	/* mapping->private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = old->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
		if (ctx->ring_pages[idx] != old)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	get_page(new);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {
		put_page(new);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	BUG_ON(ctx->ring_pages[idx] != old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */
	put_page(old);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->private_lock);
	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty = __set_page_dirty_no_writeback,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};
static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_inode->i_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	down_write(&mm->mmap_sem);
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
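/*
 * Worked example (assuming 4 KiB pages and the 32-byte struct io_event from
 * <linux/aio_abi.h>): AIO_EVENTS_PER_PAGE is 128, and because the struct
 * aio_ring header occupies the start of the first ring page, that page only
 * holds AIO_EVENTS_FIRST_PAGE events; AIO_EVENTS_OFFSET makes up the
 * difference when a ring index is turned into a (page, slot) pair:
 *
 *	pos  = tail + AIO_EVENTS_OFFSET;
 *	page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
 *	slot = pos % AIO_EVENTS_PER_PAGE;
 *
 * which is exactly the lookup done by aio_complete() and
 * aio_read_events_ring() below.
 */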
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
{
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
static int kiocb_cancel(struct kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */

	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(kiocb);
}
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);

	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}
static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->requests_done)
		complete(ctx->requests_done);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}
/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!table->table[i]) {
					ctx->id = i;
					table->table[i] = ctx;
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changing ring_pages by ->ring_lock.
					 */
					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}
static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}
/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}
/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct completion *requests_done)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != table->table[ctx->id]);
	table->table[ctx->id] = NULL;
	spin_unlock(&mm->ioctx_lock);

	/* percpu_ref_kill() will do the necessary call_rcu() */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->requests_done = requests_done;
	percpu_ref_kill(&ctx->users);
	return 0;
}
/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *req)
{
	while (!req->ki_ctx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (req->ki_ctx)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return req->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);
/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	int i;

	if (!table)
		return;

	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx = table->table[i];
		struct completion requests_done =
			COMPLETION_INITIALIZER_ONSTACK(requests_done);

		if (!ctx)
			continue;
		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &requests_done);

		/* Wait until all IO for the context are done. */
		wait_for_completion(&requests_done);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}
static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
				  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}
/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() encounters an
 *	out-of-space condition in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay since whether we read the old version
		 * or the new version, either will be valid.  The important
		 * part is that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = kmap_atomic(ctx->ring_pages[0]);
		head = ring->head;
		kunmap_atomic(ring);

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}
/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 */
static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))
			return NULL;
	}

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	percpu_ref_get(&ctx->reqs);

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}
static void kiocb_free(struct kiocb *req)
{
	if (req->ki_filp)
		fput(req->ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
}
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	ctx = table->table[id];
	if (ctx && ctx->user_id == ctx_id) {
		percpu_ref_get(&ctx->users);
		ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
void aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		iocb->ki_user_data = res;
		smp_wmb();
		iocb->ki_ctx = ERR_PTR(-EXDEV);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from any context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	percpu_ref_put(&ctx->reqs);
}
EXPORT_SYMBOL(aio_complete);
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched.
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ?  tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			      ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}
static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = { .tv64 = KTIME_MAX };
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	if (until.tv64 == 0)
		aio_read_events(ctx, min_nr, nr, event, &ret);
	else
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),
				until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized, or
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
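/*
 * For illustration, a minimal userspace sketch of the call documented above,
 * driving the raw syscall directly (glibc does not wrap it; the libaio
 * wrappers behave the same way).  ctx must start out zeroed, as required
 * above:
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;
 *	if (syscall(SYS_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 */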
/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct completion requests_done =
			COMPLETION_INITIALIZER_ONSTACK(requests_done);
		int ret;

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way. If we tried to set it here we would
		 * have a race condition if two io_destroy() calls were made
		 * simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &requests_done);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done. Otherwise the
		 * kernel keeps using user-space buffers even though the user
		 * thinks the context is destroyed.
		 */
		if (!ret)
			wait_for_completion(&requests_done);

		return ret;
	}
	pr_debug("EINVAL: invalid context id\n");
	return -EINVAL;
}
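/*
 * Usage note (illustrative, continuing the sketch after sys_io_setup above):
 * because of the wait_for_completion() above,
 *
 *	syscall(SYS_io_destroy, ctx);
 *
 * only returns once all in-flight requests on the context have finished, so
 * buffers handed to io_submit() may safely be reused or freed afterwards.
 */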
typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
			    unsigned long, loff_t);
typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *);
static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
				     int rw, char __user *buf,
				     unsigned long *nr_segs,
				     struct iovec **iovec,
				     bool compat)
{
	ssize_t ret;

	*nr_segs = kiocb->ki_nbytes;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(rw,
				(struct compat_iovec __user *)buf,
				*nr_segs, UIO_FASTIOV, *iovec, iovec);
	else
#endif
		ret = rw_copy_check_uvector(rw,
				(struct iovec __user *)buf,
				*nr_segs, UIO_FASTIOV, *iovec, iovec);
	if (ret < 0)
		return ret;

	/* ki_nbytes now reflects bytes instead of segs */
	kiocb->ki_nbytes = ret;
	return 0;
}
static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
				       int rw, char __user *buf,
				       unsigned long *nr_segs,
				       struct iovec *iovec)
{
	if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
		return -EFAULT;

	iovec->iov_base = buf;
	iovec->iov_len = kiocb->ki_nbytes;
	*nr_segs = 1;
	return 0;
}
/*
 * aio_run_iocb:
 *	Performs the initial checks and io submission.
 */
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
			    char __user *buf, bool compat)
{
	struct file *file = req->ki_filp;
	ssize_t ret;
	unsigned long nr_segs;
	int rw;
	fmode_t mode;
	aio_rw_op *rw_op;
	rw_iter_op *iter_op;
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;

	switch (opcode) {
	case IOCB_CMD_PREAD:
	case IOCB_CMD_PREADV:
		mode	= FMODE_READ;
		rw	= READ;
		rw_op	= file->f_op->aio_read;
		iter_op	= file->f_op->read_iter;
		goto rw_common;

	case IOCB_CMD_PWRITE:
	case IOCB_CMD_PWRITEV:
		mode	= FMODE_WRITE;
		rw	= WRITE;
		rw_op	= file->f_op->aio_write;
		iter_op	= file->f_op->write_iter;
		goto rw_common;
rw_common:
		if (unlikely(!(file->f_mode & mode)))
			return -EBADF;

		if (!rw_op && !iter_op)
			return -EINVAL;

		ret = (opcode == IOCB_CMD_PREADV ||
		       opcode == IOCB_CMD_PWRITEV)
			? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
						&iovec, compat)
			: aio_setup_single_vector(req, rw, buf, &nr_segs,
						  iovec);
		if (!ret)
			ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
		if (ret < 0) {
			if (iovec != inline_vecs)
				kfree(iovec);
			return ret;
		}

		req->ki_nbytes = ret;

		/* XXX: move/kill - rw_verify_area()? */
		/* This matches the pread()/pwrite() logic */
		if (req->ki_pos < 0) {
			ret = -EINVAL;
			break;
		}

		if (rw == WRITE)
			file_start_write(file);

		if (iter_op) {
			iov_iter_init(&iter, rw, iovec, nr_segs, req->ki_nbytes);
			ret = iter_op(req, &iter);
		} else {
			ret = rw_op(req, iovec, nr_segs, req->ki_pos);
		}

		if (rw == WRITE)
			file_end_write(file);
		break;

	case IOCB_CMD_FDSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 1);
		break;

	case IOCB_CMD_FSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 0);
		break;

	default:
		pr_debug("EINVAL: no operation provided\n");
		return -EINVAL;
	}

	if (iovec != inline_vecs)
		kfree(iovec);

	if (ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND ||
			     ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(req, ret, 0);
	}

	return 0;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;
	req->ki_nbytes = iocb->aio_nbytes;

	ret = aio_run_iocb(req, iocb->aio_lio_opcode,
			   (char __user *)(unsigned long)iocb->aio_buf,
			   compat);
	if (ret)
		goto out_put_req;

	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	percpu_ref_put(&ctx->reqs);
	kiocb_free(req);
	return ret;
}
long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
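/*
 * For illustration, a minimal userspace sketch of queueing a single read via
 * the call documented above (error handling trimmed; fd is an open file
 * descriptor and ctx came from io_setup()):
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	char buf[4096];
 *
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = sizeof(buf);
 *	cb.aio_offset     = 0;
 *
 *	if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */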
/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb)
			return kiocb;
	}
	return NULL;
}
/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if the aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(kiocb);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer. -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
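/*
 * Illustrative call (sketch only): a request submitted as in the io_submit
 * example above can be cancelled with
 *
 *	struct io_event res;
 *	syscall(SYS_io_cancel, ctx, &cb, &res);
 *
 * keeping in mind that, per the comment above, a successful cancellation is
 * reported as -EINPROGRESS and the event is still delivered via the ring.
 */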
/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, or if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		percpu_ref_put(&ioctx->users);
	}
	return ret;
}
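/*
 * For illustration, a minimal userspace sketch of reaping the event queued
 * by the io_submit() sketch earlier (a NULL timeout would wait indefinitely):
 *
 *	struct io_event ev;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, &ts) == 1)
 *		printf("res=%lld data=%llu\n",
 *		       (long long)ev.res, (unsigned long long)ev.data);
 */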