Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * An async IO implementation for Linux | |
3 | * Written by Benjamin LaHaise <bcrl@kvack.org> | |
4 | * | |
5 | * Implements an efficient asynchronous io interface. | |
6 | * | |
7 | * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. | |
8 | * | |
9 | * See ../COPYING for licensing terms. | |
10 | */ | |
caf4167a KO |
11 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
12 | ||
1da177e4 LT |
13 | #include <linux/kernel.h> |
14 | #include <linux/init.h> | |
15 | #include <linux/errno.h> | |
16 | #include <linux/time.h> | |
17 | #include <linux/aio_abi.h> | |
630d9c47 | 18 | #include <linux/export.h> |
1da177e4 | 19 | #include <linux/syscalls.h> |
b9d128f1 | 20 | #include <linux/backing-dev.h> |
027445c3 | 21 | #include <linux/uio.h> |
1da177e4 | 22 | |
1da177e4 LT |
23 | #include <linux/sched.h> |
24 | #include <linux/fs.h> | |
25 | #include <linux/file.h> | |
26 | #include <linux/mm.h> | |
27 | #include <linux/mman.h> | |
3d2d827f | 28 | #include <linux/mmu_context.h> |
e1bdd5f2 | 29 | #include <linux/percpu.h> |
1da177e4 LT |
30 | #include <linux/slab.h> |
31 | #include <linux/timer.h> | |
32 | #include <linux/aio.h> | |
33 | #include <linux/highmem.h> | |
34 | #include <linux/workqueue.h> | |
35 | #include <linux/security.h> | |
9c3060be | 36 | #include <linux/eventfd.h> |
cfb1e33e | 37 | #include <linux/blkdev.h> |
9d85cba7 | 38 | #include <linux/compat.h> |
36bc08cc GZ |
39 | #include <linux/migrate.h> |
40 | #include <linux/ramfs.h> | |
723be6e3 | 41 | #include <linux/percpu-refcount.h> |
71ad7490 | 42 | #include <linux/mount.h> |
1da177e4 LT |
43 | |
44 | #include <asm/kmap_types.h> | |
45 | #include <asm/uaccess.h> | |
1da177e4 | 46 | |
68d70d03 AV |
47 | #include "internal.h" |
48 | ||
4e179bca KO |
49 | #define AIO_RING_MAGIC 0xa10a10a1 |
50 | #define AIO_RING_COMPAT_FEATURES 1 | |
51 | #define AIO_RING_INCOMPAT_FEATURES 0 | |
52 | struct aio_ring { | |
53 | unsigned id; /* kernel internal index number */ | |
54 | unsigned nr; /* number of io_events */ | |
55 | unsigned head; | |
56 | unsigned tail; | |
57 | ||
58 | unsigned magic; | |
59 | unsigned compat_features; | |
60 | unsigned incompat_features; | |
61 | unsigned header_length; /* size of aio_ring */ | |
62 | ||
63 | ||
64 | struct io_event io_events[0]; | |
65 | }; /* 128 bytes + ring size */ | |
66 | ||
67 | #define AIO_RING_PAGES 8 | |
4e179bca | 68 | |
db446a08 BL |
69 | struct kioctx_table { |
70 | struct rcu_head rcu; | |
71 | unsigned nr; | |
72 | struct kioctx *table[]; | |
73 | }; | |
74 | ||
e1bdd5f2 KO |
75 | struct kioctx_cpu { |
76 | unsigned reqs_available; | |
77 | }; | |
78 | ||
4e179bca | 79 | struct kioctx { |
723be6e3 | 80 | struct percpu_ref users; |
36f55889 | 81 | atomic_t dead; |
4e179bca | 82 | |
4e179bca | 83 | unsigned long user_id; |
4e179bca | 84 | |
e1bdd5f2 KO |
85 | struct __percpu kioctx_cpu *cpu; |
86 | ||
87 | /* | |
88 | * For percpu reqs_available, number of slots we move to/from global | |
89 | * counter at a time: | |
90 | */ | |
91 | unsigned req_batch; | |
3e845ce0 KO |
92 | /* |
93 | * This is what userspace passed to io_setup(), it's not used for | |
94 | * anything but counting against the global max_reqs quota. | |
95 | * | |
58c85dc2 | 96 | * The real limit is nr_events - 1, which will be larger (see |
3e845ce0 KO |
97 | * aio_setup_ring()) |
98 | */ | |
4e179bca KO |
99 | unsigned max_reqs; |
100 | ||
58c85dc2 KO |
101 | /* Size of ringbuffer, in units of struct io_event */ |
102 | unsigned nr_events; | |
4e179bca | 103 | |
58c85dc2 KO |
104 | unsigned long mmap_base; |
105 | unsigned long mmap_size; | |
106 | ||
107 | struct page **ring_pages; | |
108 | long nr_pages; | |
109 | ||
4e23bcae | 110 | struct rcu_head rcu_head; |
723be6e3 | 111 | struct work_struct free_work; |
4e23bcae KO |
112 | |
113 | struct { | |
34e83fc6 KO |
114 | /* |
115 | * This counts the number of available slots in the ringbuffer, | |
116 | * so we avoid overflowing it: it's decremented (if positive) | |
117 | * when allocating a kiocb and incremented when the resulting | |
118 | * io_event is pulled off the ringbuffer. | |
e1bdd5f2 KO |
119 | * |
120 | * We batch accesses to it with a percpu version. | |
34e83fc6 KO |
121 | */ |
122 | atomic_t reqs_available; | |
4e23bcae KO |
123 | } ____cacheline_aligned_in_smp; |
124 | ||
125 | struct { | |
126 | spinlock_t ctx_lock; | |
127 | struct list_head active_reqs; /* used for cancellation */ | |
128 | } ____cacheline_aligned_in_smp; | |
129 | ||
58c85dc2 KO |
130 | struct { |
131 | struct mutex ring_lock; | |
4e23bcae KO |
132 | wait_queue_head_t wait; |
133 | } ____cacheline_aligned_in_smp; | |
58c85dc2 KO |
134 | |
135 | struct { | |
136 | unsigned tail; | |
137 | spinlock_t completion_lock; | |
4e23bcae | 138 | } ____cacheline_aligned_in_smp; |
58c85dc2 KO |
139 | |
140 | struct page *internal_pages[AIO_RING_PAGES]; | |
36bc08cc | 141 | struct file *aio_ring_file; |
db446a08 BL |
142 | |
143 | unsigned id; | |
4e179bca KO |
144 | }; |
145 | ||
1da177e4 | 146 | /*------ sysctl variables----*/ |
d55b5fda ZB |
147 | static DEFINE_SPINLOCK(aio_nr_lock); |
148 | unsigned long aio_nr; /* current system wide number of aio requests */ | |
149 | unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ | |
1da177e4 LT |
150 | /*----end sysctl variables---*/ |
151 | ||
e18b890b CL |
152 | static struct kmem_cache *kiocb_cachep; |
153 | static struct kmem_cache *kioctx_cachep; | |
1da177e4 | 154 | |
71ad7490 BL |
155 | static struct vfsmount *aio_mnt; |
156 | ||
157 | static const struct file_operations aio_ring_fops; | |
158 | static const struct address_space_operations aio_ctx_aops; | |
159 | ||
160 | static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) | |
161 | { | |
162 | struct qstr this = QSTR_INIT("[aio]", 5); | |
163 | struct file *file; | |
164 | struct path path; | |
165 | struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb); | |
7f62656b DC |
166 | if (IS_ERR(inode)) |
167 | return ERR_CAST(inode); | |
71ad7490 BL |
168 | |
169 | inode->i_mapping->a_ops = &aio_ctx_aops; | |
170 | inode->i_mapping->private_data = ctx; | |
171 | inode->i_size = PAGE_SIZE * nr_pages; | |
172 | ||
173 | path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); | |
174 | if (!path.dentry) { | |
175 | iput(inode); | |
176 | return ERR_PTR(-ENOMEM); | |
177 | } | |
178 | path.mnt = mntget(aio_mnt); | |
179 | ||
180 | d_instantiate(path.dentry, inode); | |
181 | file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops); | |
182 | if (IS_ERR(file)) { | |
183 | path_put(&path); | |
184 | return file; | |
185 | } | |
186 | ||
187 | file->f_flags = O_RDWR; | |
188 | file->private_data = ctx; | |
189 | return file; | |
190 | } | |
191 | ||
192 | static struct dentry *aio_mount(struct file_system_type *fs_type, | |
193 | int flags, const char *dev_name, void *data) | |
194 | { | |
195 | static const struct dentry_operations ops = { | |
196 | .d_dname = simple_dname, | |
197 | }; | |
198 | return mount_pseudo(fs_type, "aio:", NULL, &ops, 0xa10a10a1); | |
199 | } | |
200 | ||
1da177e4 LT |
201 | /* aio_setup |
202 | * Creates the slab caches used by the aio routines, panicking on | |
203 | * failure, as this is done early during the boot sequence. | |
204 | */ | |
205 | static int __init aio_setup(void) | |
206 | { | |
71ad7490 BL |
207 | static struct file_system_type aio_fs = { |
208 | .name = "aio", | |
209 | .mount = aio_mount, | |
210 | .kill_sb = kill_anon_super, | |
211 | }; | |
212 | aio_mnt = kern_mount(&aio_fs); | |
213 | if (IS_ERR(aio_mnt)) | |
214 | panic("Failed to create aio fs mount."); | |
215 | ||
0a31bd5f CL |
216 | kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
217 | kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); | |
1da177e4 | 218 | |
caf4167a | 219 | pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page)); |
1da177e4 LT |
220 | |
221 | return 0; | |
222 | } | |
385773e0 | 223 | __initcall(aio_setup); |
1da177e4 | 224 | |
5e9ae2e5 BL |
225 | static void put_aio_ring_file(struct kioctx *ctx) |
226 | { | |
227 | struct file *aio_ring_file = ctx->aio_ring_file; | |
228 | if (aio_ring_file) { | |
229 | truncate_setsize(aio_ring_file->f_inode, 0); | |
230 | ||
231 | /* Prevent further access to the kioctx from migratepages */ | |
232 | spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock); | |
233 | aio_ring_file->f_inode->i_mapping->private_data = NULL; | |
234 | ctx->aio_ring_file = NULL; | |
235 | spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock); | |
236 | ||
237 | fput(aio_ring_file); | |
238 | } | |
239 | } | |
240 | ||
1da177e4 LT |
241 | static void aio_free_ring(struct kioctx *ctx) |
242 | { | |
36bc08cc | 243 | int i; |
1da177e4 | 244 | |
36bc08cc GZ |
245 | for (i = 0; i < ctx->nr_pages; i++) { |
246 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, | |
247 | page_count(ctx->ring_pages[i])); | |
58c85dc2 | 248 | put_page(ctx->ring_pages[i]); |
36bc08cc | 249 | } |
1da177e4 | 250 | |
5e9ae2e5 BL |
251 | put_aio_ring_file(ctx); |
252 | ||
58c85dc2 KO |
253 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) |
254 | kfree(ctx->ring_pages); | |
36bc08cc GZ |
255 | } |
256 | ||
257 | static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) | |
258 | { | |
259 | vma->vm_ops = &generic_file_vm_ops; | |
260 | return 0; | |
261 | } | |
262 | ||
263 | static const struct file_operations aio_ring_fops = { | |
264 | .mmap = aio_ring_mmap, | |
265 | }; | |
266 | ||
267 | static int aio_set_page_dirty(struct page *page) | |
268 | { | |
269 | return 0; | |
270 | } | |
271 | ||
0c45355f | 272 | #if IS_ENABLED(CONFIG_MIGRATION) |
36bc08cc GZ |
273 | static int aio_migratepage(struct address_space *mapping, struct page *new, |
274 | struct page *old, enum migrate_mode mode) | |
275 | { | |
5e9ae2e5 | 276 | struct kioctx *ctx; |
36bc08cc | 277 | unsigned long flags; |
36bc08cc GZ |
278 | int rc; |
279 | ||
280 | /* Writeback must be complete */ | |
281 | BUG_ON(PageWriteback(old)); | |
282 | put_page(old); | |
283 | ||
284 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); | |
285 | if (rc != MIGRATEPAGE_SUCCESS) { | |
286 | get_page(old); | |
287 | return rc; | |
288 | } | |
289 | ||
290 | get_page(new); | |
291 | ||
5e9ae2e5 BL |
292 | /* We can potentially race against kioctx teardown here. Use the |
293 | * address_space's private data lock to protect the mapping's | |
294 | * private_data. | |
295 | */ | |
296 | spin_lock(&mapping->private_lock); | |
297 | ctx = mapping->private_data; | |
298 | if (ctx) { | |
299 | pgoff_t idx; | |
300 | spin_lock_irqsave(&ctx->completion_lock, flags); | |
301 | migrate_page_copy(new, old); | |
302 | idx = old->index; | |
303 | if (idx < (pgoff_t)ctx->nr_pages) | |
304 | ctx->ring_pages[idx] = new; | |
305 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | |
306 | } else | |
307 | rc = -EBUSY; | |
308 | spin_unlock(&mapping->private_lock); | |
36bc08cc GZ |
309 | |
310 | return rc; | |
1da177e4 | 311 | } |
0c45355f | 312 | #endif |
1da177e4 | 313 | |
36bc08cc GZ |
314 | static const struct address_space_operations aio_ctx_aops = { |
315 | .set_page_dirty = aio_set_page_dirty, | |
0c45355f | 316 | #if IS_ENABLED(CONFIG_MIGRATION) |
36bc08cc | 317 | .migratepage = aio_migratepage, |
0c45355f | 318 | #endif |
36bc08cc GZ |
319 | }; |
320 | ||
1da177e4 LT |
321 | static int aio_setup_ring(struct kioctx *ctx) |
322 | { | |
323 | struct aio_ring *ring; | |
1da177e4 | 324 | unsigned nr_events = ctx->max_reqs; |
41003a7b | 325 | struct mm_struct *mm = current->mm; |
41badc15 | 326 | unsigned long size, populate; |
1da177e4 | 327 | int nr_pages; |
36bc08cc GZ |
328 | int i; |
329 | struct file *file; | |
1da177e4 LT |
330 | |
331 | /* Compensate for the ring buffer's head/tail overlap entry */ | |
332 | nr_events += 2; /* 1 is required, 2 for good luck */ | |
333 | ||
334 | size = sizeof(struct aio_ring); | |
335 | size += sizeof(struct io_event) * nr_events; | |
1da177e4 | 336 | |
36bc08cc | 337 | nr_pages = PFN_UP(size); |
1da177e4 LT |
338 | if (nr_pages < 0) |
339 | return -EINVAL; | |
340 | ||
71ad7490 | 341 | file = aio_private_file(ctx, nr_pages); |
36bc08cc GZ |
342 | if (IS_ERR(file)) { |
343 | ctx->aio_ring_file = NULL; | |
344 | return -EAGAIN; | |
345 | } | |
346 | ||
36bc08cc GZ |
347 | for (i = 0; i < nr_pages; i++) { |
348 | struct page *page; | |
349 | page = find_or_create_page(file->f_inode->i_mapping, | |
350 | i, GFP_HIGHUSER | __GFP_ZERO); | |
351 | if (!page) | |
352 | break; | |
353 | pr_debug("pid(%d) page[%d]->count=%d\n", | |
354 | current->pid, i, page_count(page)); | |
355 | SetPageUptodate(page); | |
356 | SetPageDirty(page); | |
357 | unlock_page(page); | |
358 | } | |
359 | ctx->aio_ring_file = file; | |
360 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) | |
361 | / sizeof(struct io_event); | |
1da177e4 | 362 | |
58c85dc2 | 363 | ctx->ring_pages = ctx->internal_pages; |
1da177e4 | 364 | if (nr_pages > AIO_RING_PAGES) { |
58c85dc2 KO |
365 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), |
366 | GFP_KERNEL); | |
367 | if (!ctx->ring_pages) | |
1da177e4 | 368 | return -ENOMEM; |
1da177e4 LT |
369 | } |
370 | ||
58c85dc2 KO |
371 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
372 | pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); | |
36bc08cc | 373 | |
41003a7b | 374 | down_write(&mm->mmap_sem); |
36bc08cc GZ |
375 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, |
376 | PROT_READ | PROT_WRITE, | |
377 | MAP_SHARED | MAP_POPULATE, 0, &populate); | |
58c85dc2 | 378 | if (IS_ERR((void *)ctx->mmap_base)) { |
41003a7b | 379 | up_write(&mm->mmap_sem); |
58c85dc2 | 380 | ctx->mmap_size = 0; |
1da177e4 LT |
381 | aio_free_ring(ctx); |
382 | return -EAGAIN; | |
383 | } | |
384 | ||
58c85dc2 | 385 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
d6c355c7 BL |
386 | |
387 | /* We must do this while still holding mmap_sem for write, as we | |
388 | * need to be protected against userspace attempting to mremap() | |
389 | * or munmap() the ring buffer. | |
390 | */ | |
58c85dc2 KO |
391 | ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, |
392 | 1, 0, ctx->ring_pages, NULL); | |
d6c355c7 BL |
393 | |
394 | /* Dropping the reference here is safe as the page cache will hold | |
395 | * onto the pages for us. It is also required so that page migration | |
396 | * can unmap the pages and get the right reference count. | |
397 | */ | |
36bc08cc GZ |
398 | for (i = 0; i < ctx->nr_pages; i++) |
399 | put_page(ctx->ring_pages[i]); | |
1da177e4 | 400 | |
d6c355c7 BL |
401 | up_write(&mm->mmap_sem); |
402 | ||
58c85dc2 | 403 | if (unlikely(ctx->nr_pages != nr_pages)) { |
1da177e4 LT |
404 | aio_free_ring(ctx); |
405 | return -EAGAIN; | |
406 | } | |
407 | ||
58c85dc2 KO |
408 | ctx->user_id = ctx->mmap_base; |
409 | ctx->nr_events = nr_events; /* trusted copy */ | |
1da177e4 | 410 | |
58c85dc2 | 411 | ring = kmap_atomic(ctx->ring_pages[0]); |
1da177e4 | 412 | ring->nr = nr_events; /* user copy */ |
db446a08 | 413 | ring->id = ~0U; |
1da177e4 LT |
414 | ring->head = ring->tail = 0; |
415 | ring->magic = AIO_RING_MAGIC; | |
416 | ring->compat_features = AIO_RING_COMPAT_FEATURES; | |
417 | ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; | |
418 | ring->header_length = sizeof(struct aio_ring); | |
e8e3c3d6 | 419 | kunmap_atomic(ring); |
58c85dc2 | 420 | flush_dcache_page(ctx->ring_pages[0]); |
1da177e4 LT |
421 | |
422 | return 0; | |
423 | } | |
424 | ||
1da177e4 LT |
425 | #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) |
426 | #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) | |
427 | #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) | |
428 | ||
0460fef2 KO |
429 | void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) |
430 | { | |
431 | struct kioctx *ctx = req->ki_ctx; | |
432 | unsigned long flags; | |
433 | ||
434 | spin_lock_irqsave(&ctx->ctx_lock, flags); | |
435 | ||
436 | if (!req->ki_list.next) | |
437 | list_add(&req->ki_list, &ctx->active_reqs); | |
438 | ||
439 | req->ki_cancel = cancel; | |
440 | ||
441 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | |
442 | } | |
443 | EXPORT_SYMBOL(kiocb_set_cancel_fn); | |
444 | ||
bec68faa | 445 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) |
906b973c | 446 | { |
0460fef2 | 447 | kiocb_cancel_fn *old, *cancel; |
906b973c | 448 | |
0460fef2 KO |
449 | /* |
450 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it | |
451 | * actually has a cancel function, hence the cmpxchg() | |
452 | */ | |
453 | ||
454 | cancel = ACCESS_ONCE(kiocb->ki_cancel); | |
455 | do { | |
456 | if (!cancel || cancel == KIOCB_CANCELLED) | |
57282d8f | 457 | return -EINVAL; |
906b973c | 458 | |
0460fef2 KO |
459 | old = cancel; |
460 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); | |
461 | } while (cancel != old); | |
906b973c | 462 | |
57282d8f | 463 | return cancel(kiocb); |
906b973c KO |
464 | } |
465 | ||
36f55889 KO |
466 | static void free_ioctx_rcu(struct rcu_head *head) |
467 | { | |
468 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | |
e1bdd5f2 KO |
469 | |
470 | free_percpu(ctx->cpu); | |
36f55889 KO |
471 | kmem_cache_free(kioctx_cachep, ctx); |
472 | } | |
473 | ||
474 | /* | |
475 | * When this function runs, the kioctx has been removed from the "hash table" | |
476 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - | |
477 | * now it's safe to cancel any that need to be. | |
478 | */ | |
723be6e3 | 479 | static void free_ioctx(struct work_struct *work) |
36f55889 | 480 | { |
723be6e3 | 481 | struct kioctx *ctx = container_of(work, struct kioctx, free_work); |
3e845ce0 | 482 | struct aio_ring *ring; |
36f55889 | 483 | struct kiocb *req; |
5ffac122 KO |
484 | unsigned cpu, avail; |
485 | DEFINE_WAIT(wait); | |
36f55889 KO |
486 | |
487 | spin_lock_irq(&ctx->ctx_lock); | |
488 | ||
489 | while (!list_empty(&ctx->active_reqs)) { | |
490 | req = list_first_entry(&ctx->active_reqs, | |
491 | struct kiocb, ki_list); | |
492 | ||
493 | list_del_init(&req->ki_list); | |
bec68faa | 494 | kiocb_cancel(ctx, req); |
36f55889 KO |
495 | } |
496 | ||
497 | spin_unlock_irq(&ctx->ctx_lock); | |
498 | ||
e1bdd5f2 KO |
499 | for_each_possible_cpu(cpu) { |
500 | struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu); | |
501 | ||
502 | atomic_add(kcpu->reqs_available, &ctx->reqs_available); | |
503 | kcpu->reqs_available = 0; | |
504 | } | |
505 | ||
5ffac122 KO |
506 | while (1) { |
507 | prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE); | |
3e845ce0 | 508 | |
5ffac122 KO |
509 | ring = kmap_atomic(ctx->ring_pages[0]); |
510 | avail = (ring->head <= ring->tail) | |
511 | ? ring->tail - ring->head | |
512 | : ctx->nr_events - ring->head + ring->tail; | |
3e845ce0 | 513 | |
34e83fc6 | 514 | atomic_add(avail, &ctx->reqs_available); |
5ffac122 KO |
515 | ring->head = ring->tail; |
516 | kunmap_atomic(ring); | |
517 | ||
518 | if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1) | |
519 | break; | |
520 | ||
521 | schedule(); | |
3e845ce0 | 522 | } |
5ffac122 | 523 | finish_wait(&ctx->wait, &wait); |
3e845ce0 | 524 | |
34e83fc6 | 525 | WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1); |
36f55889 KO |
526 | |
527 | aio_free_ring(ctx); | |
528 | ||
36f55889 KO |
529 | pr_debug("freeing %p\n", ctx); |
530 | ||
531 | /* | |
532 | * Here the call_rcu() is between the wait_event() for reqs_active to | |
533 | * hit 0, and freeing the ioctx. | |
534 | * | |
535 | * aio_complete() decrements reqs_active, but it has to touch the ioctx | |
536 | * afterwards to issue a wakeup, so we use rcu. | |
537 | */ | |
538 | call_rcu(&ctx->rcu_head, free_ioctx_rcu); | |
539 | } | |
540 | ||
723be6e3 | 541 | static void free_ioctx_ref(struct percpu_ref *ref) |
36f55889 | 542 | { |
723be6e3 KO |
543 | struct kioctx *ctx = container_of(ref, struct kioctx, users); |
544 | ||
545 | INIT_WORK(&ctx->free_work, free_ioctx); | |
546 | schedule_work(&ctx->free_work); | |
36f55889 KO |
547 | } |
548 | ||
db446a08 BL |
549 | static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) |
550 | { | |
551 | unsigned i, new_nr; | |
552 | struct kioctx_table *table, *old; | |
553 | struct aio_ring *ring; | |
554 | ||
555 | spin_lock(&mm->ioctx_lock); | |
d9b2c871 | 556 | rcu_read_lock(); |
77d30b14 | 557 | table = rcu_dereference(mm->ioctx_table); |
db446a08 BL |
558 | |
559 | while (1) { | |
560 | if (table) | |
561 | for (i = 0; i < table->nr; i++) | |
562 | if (!table->table[i]) { | |
563 | ctx->id = i; | |
564 | table->table[i] = ctx; | |
d9b2c871 | 565 | rcu_read_unlock(); |
db446a08 BL |
566 | spin_unlock(&mm->ioctx_lock); |
567 | ||
568 | ring = kmap_atomic(ctx->ring_pages[0]); | |
569 | ring->id = ctx->id; | |
570 | kunmap_atomic(ring); | |
571 | return 0; | |
572 | } | |
573 | ||
574 | new_nr = (table ? table->nr : 1) * 4; | |
575 | ||
d9b2c871 | 576 | rcu_read_unlock(); |
db446a08 BL |
577 | spin_unlock(&mm->ioctx_lock); |
578 | ||
579 | table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * | |
580 | new_nr, GFP_KERNEL); | |
581 | if (!table) | |
582 | return -ENOMEM; | |
583 | ||
584 | table->nr = new_nr; | |
585 | ||
586 | spin_lock(&mm->ioctx_lock); | |
d9b2c871 | 587 | rcu_read_lock(); |
77d30b14 | 588 | old = rcu_dereference(mm->ioctx_table); |
db446a08 BL |
589 | |
590 | if (!old) { | |
591 | rcu_assign_pointer(mm->ioctx_table, table); | |
592 | } else if (table->nr > old->nr) { | |
593 | memcpy(table->table, old->table, | |
594 | old->nr * sizeof(struct kioctx *)); | |
595 | ||
596 | rcu_assign_pointer(mm->ioctx_table, table); | |
597 | kfree_rcu(old, rcu); | |
598 | } else { | |
599 | kfree(table); | |
600 | table = old; | |
601 | } | |
602 | } | |
603 | } | |
604 | ||
1da177e4 LT |
605 | /* ioctx_alloc |
606 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. | |
607 | */ | |
608 | static struct kioctx *ioctx_alloc(unsigned nr_events) | |
609 | { | |
41003a7b | 610 | struct mm_struct *mm = current->mm; |
1da177e4 | 611 | struct kioctx *ctx; |
e23754f8 | 612 | int err = -ENOMEM; |
1da177e4 | 613 | |
e1bdd5f2 KO |
614 | /* |
615 | * We keep track of the number of available ringbuffer slots, to prevent | |
616 | * overflow (reqs_available), and we also use percpu counters for this. | |
617 | * | |
618 | * So since up to half the slots might be on other cpu's percpu counters | |
619 | * and unavailable, double nr_events so userspace sees what they | |
620 | * expected: additionally, we move req_batch slots to/from percpu | |
621 | * counters at a time, so make sure that isn't 0: | |
622 | */ | |
623 | nr_events = max(nr_events, num_possible_cpus() * 4); | |
624 | nr_events *= 2; | |
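	/*
	 * For example (illustrative numbers, not in the original source):
	 * with num_possible_cpus() == 4, a request for 100 events becomes
	 * max(100, 4 * 4) == 100 and is then doubled to 200 ring slots, so
	 * the slots parked on other CPUs' percpu counters can't make the
	 * ring look smaller than what userspace asked for.
	 */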
625 | ||
1da177e4 LT |
626 | /* Prevent overflows */ |
627 | if ((nr_events > (0x10000000U / sizeof(struct io_event))) || | |
628 | (nr_events > (0x10000000U / sizeof(struct kiocb)))) { | |
629 | pr_debug("ENOMEM: nr_events too high\n"); | |
630 | return ERR_PTR(-EINVAL); | |
631 | } | |
632 | ||
4cd81c3d | 633 | if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL)) |
1da177e4 LT |
634 | return ERR_PTR(-EAGAIN); |
635 | ||
c3762229 | 636 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); |
1da177e4 LT |
637 | if (!ctx) |
638 | return ERR_PTR(-ENOMEM); | |
639 | ||
1da177e4 | 640 | ctx->max_reqs = nr_events; |
1da177e4 | 641 | |
723be6e3 KO |
642 | if (percpu_ref_init(&ctx->users, free_ioctx_ref)) |
643 | goto out_freectx; | |
644 | ||
1da177e4 | 645 | spin_lock_init(&ctx->ctx_lock); |
0460fef2 | 646 | spin_lock_init(&ctx->completion_lock); |
58c85dc2 | 647 | mutex_init(&ctx->ring_lock); |
1da177e4 LT |
648 | init_waitqueue_head(&ctx->wait); |
649 | ||
650 | INIT_LIST_HEAD(&ctx->active_reqs); | |
1da177e4 | 651 | |
e1bdd5f2 KO |
652 | ctx->cpu = alloc_percpu(struct kioctx_cpu); |
653 | if (!ctx->cpu) | |
723be6e3 | 654 | goto out_freeref; |
1da177e4 | 655 | |
e1bdd5f2 KO |
656 | if (aio_setup_ring(ctx) < 0) |
657 | goto out_freepcpu; | |
658 | ||
34e83fc6 | 659 | atomic_set(&ctx->reqs_available, ctx->nr_events - 1); |
e1bdd5f2 | 660 | ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); |
6878ea72 BL |
661 | if (ctx->req_batch < 1) |
662 | ctx->req_batch = 1; | |
34e83fc6 | 663 | |
1da177e4 | 664 | /* limit the number of system wide aios */ |
9fa1cb39 | 665 | spin_lock(&aio_nr_lock); |
4cd81c3d | 666 | if (aio_nr + nr_events > (aio_max_nr * 2UL) || |
2dd542b7 | 667 | aio_nr + nr_events < aio_nr) { |
9fa1cb39 | 668 | spin_unlock(&aio_nr_lock); |
1da177e4 | 669 | goto out_cleanup; |
2dd542b7 AV |
670 | } |
671 | aio_nr += ctx->max_reqs; | |
9fa1cb39 | 672 | spin_unlock(&aio_nr_lock); |
1da177e4 | 673 | |
723be6e3 KO |
674 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ |
675 | ||
da90382c BL |
676 | err = ioctx_add_table(ctx, mm); |
677 | if (err) | |
678 | goto out_cleanup_put; | |
679 | ||
caf4167a | 680 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", |
58c85dc2 | 681 | ctx, ctx->user_id, mm, ctx->nr_events); |
1da177e4 LT |
682 | return ctx; |
683 | ||
da90382c BL |
684 | out_cleanup_put: |
685 | percpu_ref_put(&ctx->users); | |
1da177e4 | 686 | out_cleanup: |
e23754f8 AV |
687 | err = -EAGAIN; |
688 | aio_free_ring(ctx); | |
e1bdd5f2 KO |
689 | out_freepcpu: |
690 | free_percpu(ctx->cpu); | |
723be6e3 KO |
691 | out_freeref: |
692 | free_percpu(ctx->users.pcpu_count); | |
1da177e4 | 693 | out_freectx: |
5e9ae2e5 | 694 | put_aio_ring_file(ctx); |
1da177e4 | 695 | kmem_cache_free(kioctx_cachep, ctx); |
caf4167a | 696 | pr_debug("error allocating ioctx %d\n", err); |
e23754f8 | 697 | return ERR_PTR(err); |
1da177e4 LT |
698 | } |
699 | ||
36f55889 KO |
700 | /* kill_ioctx |
701 | * Cancels all outstanding aio requests on an aio context. Used | |
702 | * when the processes owning a context have all exited to encourage | |
703 | * the rapid destruction of the kioctx. | |
704 | */ | |
db446a08 | 705 | static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) |
36f55889 KO |
706 | { |
707 | if (!atomic_xchg(&ctx->dead, 1)) { | |
db446a08 BL |
708 | struct kioctx_table *table; |
709 | ||
710 | spin_lock(&mm->ioctx_lock); | |
d9b2c871 | 711 | rcu_read_lock(); |
77d30b14 | 712 | table = rcu_dereference(mm->ioctx_table); |
db446a08 BL |
713 | |
714 | WARN_ON(ctx != table->table[ctx->id]); | |
715 | table->table[ctx->id] = NULL; | |
d9b2c871 | 716 | rcu_read_unlock(); |
db446a08 BL |
717 | spin_unlock(&mm->ioctx_lock); |
718 | ||
723be6e3 KO |
719 | /* percpu_ref_kill() will do the necessary call_rcu() */ |
720 | wake_up_all(&ctx->wait); | |
dee11c23 | 721 | |
36f55889 | 722 | /* |
4fcc712f KO |
723 | * It'd be more correct to do this in free_ioctx(), after all |
724 | * the outstanding kiocbs have finished - but by then io_destroy | |
725 | * has already returned, so io_setup() could potentially return | |
726 | * -EAGAIN with no ioctxs actually in use (as far as userspace | |
727 | * could tell). | |
36f55889 | 728 | */ |
4fcc712f KO |
729 | spin_lock(&aio_nr_lock); |
730 | BUG_ON(aio_nr - ctx->max_reqs > aio_nr); | |
731 | aio_nr -= ctx->max_reqs; | |
732 | spin_unlock(&aio_nr_lock); | |
733 | ||
734 | if (ctx->mmap_size) | |
735 | vm_munmap(ctx->mmap_base, ctx->mmap_size); | |
736 | ||
723be6e3 | 737 | percpu_ref_kill(&ctx->users); |
36f55889 | 738 | } |
1da177e4 LT |
739 | } |
740 | ||
741 | /* wait_on_sync_kiocb: | |
742 | * Waits on the given sync kiocb to complete. | |
743 | */ | |
57282d8f | 744 | ssize_t wait_on_sync_kiocb(struct kiocb *req) |
1da177e4 | 745 | { |
57282d8f | 746 | while (!req->ki_ctx) { |
1da177e4 | 747 | set_current_state(TASK_UNINTERRUPTIBLE); |
57282d8f | 748 | if (req->ki_ctx) |
1da177e4 | 749 | break; |
41d10da3 | 750 | io_schedule(); |
1da177e4 LT |
751 | } |
752 | __set_current_state(TASK_RUNNING); | |
57282d8f | 753 | return req->ki_user_data; |
1da177e4 | 754 | } |
385773e0 | 755 | EXPORT_SYMBOL(wait_on_sync_kiocb); |
1da177e4 | 756 | |
36f55889 KO |
757 | /* |
758 | * exit_aio: called when the last user of mm goes away. At this point, there is | |
759 | * no way for any new requests to be submitted or any of the io_* syscalls to be | |
760 | * called on the context. | |
761 | * | |
762 | * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on | |
763 | * them. | |
1da177e4 | 764 | */ |
fc9b52cd | 765 | void exit_aio(struct mm_struct *mm) |
1da177e4 | 766 | { |
db446a08 | 767 | struct kioctx_table *table; |
abf137dd | 768 | struct kioctx *ctx; |
db446a08 BL |
769 | unsigned i = 0; |
770 | ||
771 | while (1) { | |
772 | rcu_read_lock(); | |
773 | table = rcu_dereference(mm->ioctx_table); | |
774 | ||
775 | do { | |
776 | if (!table || i >= table->nr) { | |
777 | rcu_read_unlock(); | |
778 | rcu_assign_pointer(mm->ioctx_table, NULL); | |
779 | if (table) | |
780 | kfree(table); | |
781 | return; | |
782 | } | |
783 | ||
784 | ctx = table->table[i++]; | |
785 | } while (!ctx); | |
786 | ||
787 | rcu_read_unlock(); | |
abf137dd | 788 | |
936af157 AV |
789 | /* |
790 | * We don't need to bother with munmap() here - | |
791 | * exit_mmap(mm) is coming and it'll unmap everything. | |
792 | * Since aio_free_ring() uses non-zero ->mmap_size | |
793 | * as indicator that it needs to unmap the area, | |
794 | * just set it to 0; aio_free_ring() is the only | |
795 | * place that uses ->mmap_size, so it's safe. | |
936af157 | 796 | */ |
58c85dc2 | 797 | ctx->mmap_size = 0; |
36f55889 | 798 | |
db446a08 | 799 | kill_ioctx(mm, ctx); |
1da177e4 LT |
800 | } |
801 | } | |
802 | ||
e1bdd5f2 KO |
803 | static void put_reqs_available(struct kioctx *ctx, unsigned nr) |
804 | { | |
805 | struct kioctx_cpu *kcpu; | |
806 | ||
807 | preempt_disable(); | |
808 | kcpu = this_cpu_ptr(ctx->cpu); | |
809 | ||
810 | kcpu->reqs_available += nr; | |
811 | while (kcpu->reqs_available >= ctx->req_batch * 2) { | |
812 | kcpu->reqs_available -= ctx->req_batch; | |
813 | atomic_add(ctx->req_batch, &ctx->reqs_available); | |
814 | } | |
815 | ||
816 | preempt_enable(); | |
817 | } | |
818 | ||
819 | static bool get_reqs_available(struct kioctx *ctx) | |
820 | { | |
821 | struct kioctx_cpu *kcpu; | |
822 | bool ret = false; | |
823 | ||
824 | preempt_disable(); | |
825 | kcpu = this_cpu_ptr(ctx->cpu); | |
826 | ||
827 | if (!kcpu->reqs_available) { | |
828 | int old, avail = atomic_read(&ctx->reqs_available); | |
829 | ||
830 | do { | |
831 | if (avail < ctx->req_batch) | |
832 | goto out; | |
833 | ||
834 | old = avail; | |
835 | avail = atomic_cmpxchg(&ctx->reqs_available, | |
836 | avail, avail - ctx->req_batch); | |
837 | } while (avail != old); | |
838 | ||
839 | kcpu->reqs_available += ctx->req_batch; | |
840 | } | |
841 | ||
842 | ret = true; | |
843 | kcpu->reqs_available--; | |
844 | out: | |
845 | preempt_enable(); | |
846 | return ret; | |
847 | } | |
848 | ||
1da177e4 | 849 | /* aio_get_req |
57282d8f KO |
850 | * Allocate a slot for an aio request. |
851 | * Returns NULL if no requests are free. | |
1da177e4 | 852 | */ |
a1c8eae7 | 853 | static inline struct kiocb *aio_get_req(struct kioctx *ctx) |
1da177e4 | 854 | { |
a1c8eae7 KO |
855 | struct kiocb *req; |
856 | ||
e1bdd5f2 | 857 | if (!get_reqs_available(ctx)) |
a1c8eae7 KO |
858 | return NULL; |
859 | ||
0460fef2 | 860 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); |
1da177e4 | 861 | if (unlikely(!req)) |
a1c8eae7 | 862 | goto out_put; |
1da177e4 | 863 | |
1da177e4 | 864 | req->ki_ctx = ctx; |
080d676d | 865 | return req; |
a1c8eae7 | 866 | out_put: |
e1bdd5f2 | 867 | put_reqs_available(ctx, 1); |
a1c8eae7 | 868 | return NULL; |
1da177e4 LT |
869 | } |
870 | ||
11599eba | 871 | static void kiocb_free(struct kiocb *req) |
1da177e4 | 872 | { |
1d98ebfc KO |
873 | if (req->ki_filp) |
874 | fput(req->ki_filp); | |
13389010 DL |
875 | if (req->ki_eventfd != NULL) |
876 | eventfd_ctx_put(req->ki_eventfd); | |
1da177e4 | 877 | kmem_cache_free(kiocb_cachep, req); |
1da177e4 LT |
878 | } |
879 | ||
d5470b59 | 880 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
1da177e4 | 881 | { |
db446a08 | 882 | struct aio_ring __user *ring = (void __user *)ctx_id; |
abf137dd | 883 | struct mm_struct *mm = current->mm; |
65c24491 | 884 | struct kioctx *ctx, *ret = NULL; |
db446a08 BL |
885 | struct kioctx_table *table; |
886 | unsigned id; | |
887 | ||
888 | if (get_user(id, &ring->id)) | |
889 | return NULL; | |
1da177e4 | 890 | |
abf137dd | 891 | rcu_read_lock(); |
db446a08 | 892 | table = rcu_dereference(mm->ioctx_table); |
abf137dd | 893 | |
db446a08 BL |
894 | if (!table || id >= table->nr) |
895 | goto out; | |
1da177e4 | 896 | |
db446a08 | 897 | ctx = table->table[id]; |
f30d704f | 898 | if (ctx && ctx->user_id == ctx_id) { |
db446a08 BL |
899 | percpu_ref_get(&ctx->users); |
900 | ret = ctx; | |
901 | } | |
902 | out: | |
abf137dd | 903 | rcu_read_unlock(); |
65c24491 | 904 | return ret; |
1da177e4 LT |
905 | } |
906 | ||
1da177e4 LT |
907 | /* aio_complete |
908 | * Called when the io request on the given iocb is complete. | |
1da177e4 | 909 | */ |
2d68449e | 910 | void aio_complete(struct kiocb *iocb, long res, long res2) |
1da177e4 LT |
911 | { |
912 | struct kioctx *ctx = iocb->ki_ctx; | |
1da177e4 | 913 | struct aio_ring *ring; |
21b40200 | 914 | struct io_event *ev_page, *event; |
1da177e4 | 915 | unsigned long flags; |
21b40200 | 916 | unsigned tail, pos; |
1da177e4 | 917 | |
20dcae32 ZB |
918 | /* |
919 | * Special case handling for sync iocbs: | |
920 | * - events go directly into the iocb for fast handling | |
921 | * - the sync task with the iocb in its stack holds the single iocb | |
922 | * ref, no other paths have a way to get another ref | |
923 | * - the sync task helpfully left a reference to itself in the iocb | |
1da177e4 LT |
924 | */ |
925 | if (is_sync_kiocb(iocb)) { | |
1da177e4 | 926 | iocb->ki_user_data = res; |
57282d8f KO |
927 | smp_wmb(); |
928 | iocb->ki_ctx = ERR_PTR(-EXDEV); | |
1da177e4 | 929 | wake_up_process(iocb->ki_obj.tsk); |
2d68449e | 930 | return; |
1da177e4 LT |
931 | } |
932 | ||
36f55889 | 933 | /* |
36f55889 | 934 | * Take rcu_read_lock() in case the kioctx is being destroyed, as we |
34e83fc6 | 935 | * need to issue a wakeup after incrementing reqs_available. |
1da177e4 | 936 | */ |
36f55889 | 937 | rcu_read_lock(); |
1da177e4 | 938 | |
0460fef2 KO |
939 | if (iocb->ki_list.next) { |
940 | unsigned long flags; | |
941 | ||
942 | spin_lock_irqsave(&ctx->ctx_lock, flags); | |
943 | list_del(&iocb->ki_list); | |
944 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | |
945 | } | |
11599eba | 946 | |
0460fef2 KO |
947 | /* |
948 | * Add a completion event to the ring buffer. Must be done holding | |
4b30f07e | 949 | * ctx->completion_lock to prevent other code from messing with the tail |
0460fef2 KO |
950 | * pointer since we might be called from irq context. |
951 | */ | |
952 | spin_lock_irqsave(&ctx->completion_lock, flags); | |
953 | ||
58c85dc2 | 954 | tail = ctx->tail; |
21b40200 KO |
955 | pos = tail + AIO_EVENTS_OFFSET; |
956 | ||
58c85dc2 | 957 | if (++tail >= ctx->nr_events) |
4bf69b2a | 958 | tail = 0; |
1da177e4 | 959 | |
58c85dc2 | 960 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
21b40200 KO |
961 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; |
962 | ||
1da177e4 LT |
963 | event->obj = (u64)(unsigned long)iocb->ki_obj.user; |
964 | event->data = iocb->ki_user_data; | |
965 | event->res = res; | |
966 | event->res2 = res2; | |
967 | ||
21b40200 | 968 | kunmap_atomic(ev_page); |
58c85dc2 | 969 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
21b40200 KO |
970 | |
971 | pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", | |
caf4167a KO |
972 | ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, |
973 | res, res2); | |
1da177e4 LT |
974 | |
975 | /* after flagging the request as done, we | |
976 | * must never even look at it again | |
977 | */ | |
978 | smp_wmb(); /* make event visible before updating tail */ | |
979 | ||
58c85dc2 | 980 | ctx->tail = tail; |
1da177e4 | 981 | |
58c85dc2 | 982 | ring = kmap_atomic(ctx->ring_pages[0]); |
21b40200 | 983 | ring->tail = tail; |
e8e3c3d6 | 984 | kunmap_atomic(ring); |
58c85dc2 | 985 | flush_dcache_page(ctx->ring_pages[0]); |
1da177e4 | 986 | |
0460fef2 KO |
987 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
988 | ||
21b40200 | 989 | pr_debug("added to ring %p at [%u]\n", iocb, tail); |
8d1c98b0 DL |
990 | |
991 | /* | |
992 | * Check if the user asked us to deliver the result through an | |
993 | * eventfd. The eventfd_signal() function is safe to be called | |
994 | * from IRQ context. | |
995 | */ | |
87c3a86e | 996 | if (iocb->ki_eventfd != NULL) |
8d1c98b0 DL |
997 | eventfd_signal(iocb->ki_eventfd, 1); |
998 | ||
1da177e4 | 999 | /* everything turned out well, dispose of the aiocb. */ |
57282d8f | 1000 | kiocb_free(iocb); |
1da177e4 | 1001 | |
6cb2a210 QB |
1002 | /* |
1003 | * We have to order our ring_info tail store above and test | |
1004 | * of the wait list below outside the wait lock. This is | |
1005 | * like in wake_up_bit() where clearing a bit has to be | |
1006 | * ordered with the unlocked test. | |
1007 | */ | |
1008 | smp_mb(); | |
1009 | ||
1da177e4 LT |
1010 | if (waitqueue_active(&ctx->wait)) |
1011 | wake_up(&ctx->wait); | |
1012 | ||
36f55889 | 1013 | rcu_read_unlock(); |
1da177e4 | 1014 | } |
385773e0 | 1015 | EXPORT_SYMBOL(aio_complete); |
1da177e4 | 1016 | |
a31ad380 KO |
1017 | /* aio_read_events |
1018 | * Pull an event off of the ioctx's event ring. Returns the number of | |
1019 | * events fetched | |
1da177e4 | 1020 | */ |
a31ad380 KO |
1021 | static long aio_read_events_ring(struct kioctx *ctx, |
1022 | struct io_event __user *event, long nr) | |
1da177e4 | 1023 | { |
1da177e4 | 1024 | struct aio_ring *ring; |
5ffac122 | 1025 | unsigned head, tail, pos; |
a31ad380 KO |
1026 | long ret = 0; |
1027 | int copy_ret; | |
1028 | ||
58c85dc2 | 1029 | mutex_lock(&ctx->ring_lock); |
1da177e4 | 1030 | |
58c85dc2 | 1031 | ring = kmap_atomic(ctx->ring_pages[0]); |
a31ad380 | 1032 | head = ring->head; |
5ffac122 | 1033 | tail = ring->tail; |
a31ad380 KO |
1034 | kunmap_atomic(ring); |
1035 | ||
5ffac122 | 1036 | pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); |
1da177e4 | 1037 | |
5ffac122 | 1038 | if (head == tail) |
1da177e4 LT |
1039 | goto out; |
1040 | ||
a31ad380 KO |
1041 | while (ret < nr) { |
1042 | long avail; | |
1043 | struct io_event *ev; | |
1044 | struct page *page; | |
1045 | ||
5ffac122 KO |
1046 | avail = (head <= tail ? tail : ctx->nr_events) - head; |
1047 | if (head == tail) | |
a31ad380 KO |
1048 | break; |
1049 | ||
1050 | avail = min(avail, nr - ret); | |
1051 | avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - | |
1052 | ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE)); | |
1053 | ||
1054 | pos = head + AIO_EVENTS_OFFSET; | |
58c85dc2 | 1055 | page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; |
a31ad380 KO |
1056 | pos %= AIO_EVENTS_PER_PAGE; |
1057 | ||
1058 | ev = kmap(page); | |
1059 | copy_ret = copy_to_user(event + ret, ev + pos, | |
1060 | sizeof(*ev) * avail); | |
1061 | kunmap(page); | |
1062 | ||
1063 | if (unlikely(copy_ret)) { | |
1064 | ret = -EFAULT; | |
1065 | goto out; | |
1066 | } | |
1067 | ||
1068 | ret += avail; | |
1069 | head += avail; | |
58c85dc2 | 1070 | head %= ctx->nr_events; |
1da177e4 | 1071 | } |
1da177e4 | 1072 | |
58c85dc2 | 1073 | ring = kmap_atomic(ctx->ring_pages[0]); |
a31ad380 | 1074 | ring->head = head; |
91d80a84 | 1075 | kunmap_atomic(ring); |
58c85dc2 | 1076 | flush_dcache_page(ctx->ring_pages[0]); |
a31ad380 | 1077 | |
5ffac122 | 1078 | pr_debug("%li h%u t%u\n", ret, head, tail); |
3e845ce0 | 1079 | |
e1bdd5f2 | 1080 | put_reqs_available(ctx, ret); |
a31ad380 | 1081 | out: |
58c85dc2 | 1082 | mutex_unlock(&ctx->ring_lock); |
a31ad380 | 1083 | |
1da177e4 LT |
1084 | return ret; |
1085 | } | |
1086 | ||
a31ad380 KO |
1087 | static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, |
1088 | struct io_event __user *event, long *i) | |
1da177e4 | 1089 | { |
a31ad380 | 1090 | long ret = aio_read_events_ring(ctx, event + *i, nr - *i); |
1da177e4 | 1091 | |
a31ad380 KO |
1092 | if (ret > 0) |
1093 | *i += ret; | |
1da177e4 | 1094 | |
a31ad380 KO |
1095 | if (unlikely(atomic_read(&ctx->dead))) |
1096 | ret = -EINVAL; | |
1da177e4 | 1097 | |
a31ad380 KO |
1098 | if (!*i) |
1099 | *i = ret; | |
1da177e4 | 1100 | |
a31ad380 | 1101 | return ret < 0 || *i >= min_nr; |
1da177e4 LT |
1102 | } |
1103 | ||
a31ad380 | 1104 | static long read_events(struct kioctx *ctx, long min_nr, long nr, |
1da177e4 LT |
1105 | struct io_event __user *event, |
1106 | struct timespec __user *timeout) | |
1107 | { | |
a31ad380 KO |
1108 | ktime_t until = { .tv64 = KTIME_MAX }; |
1109 | long ret = 0; | |
1da177e4 | 1110 | |
1da177e4 LT |
1111 | if (timeout) { |
1112 | struct timespec ts; | |
a31ad380 | 1113 | |
1da177e4 | 1114 | if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) |
a31ad380 | 1115 | return -EFAULT; |
1da177e4 | 1116 | |
a31ad380 | 1117 | until = timespec_to_ktime(ts); |
1da177e4 LT |
1118 | } |
1119 | ||
a31ad380 KO |
1120 | /* |
1121 | * Note that aio_read_events() is being called as the conditional - i.e. | |
1122 | * we're calling it after prepare_to_wait() has set task state to | |
1123 | * TASK_INTERRUPTIBLE. | |
1124 | * | |
1125 | * But aio_read_events() can block, and if it blocks it's going to flip | |
1126 | * the task state back to TASK_RUNNING. | |
1127 | * | |
1128 | * This should be ok, provided it doesn't flip the state back to | |
1129 | * TASK_RUNNING and return 0 too much - that causes us to spin. That | |
1130 | * will only happen if the mutex_lock() call blocks, and we then find | |
1131 | * the ringbuffer empty. So in practice we should be ok, but it's | |
1132 | * something to be aware of when touching this code. | |
1133 | */ | |
1134 | wait_event_interruptible_hrtimeout(ctx->wait, | |
1135 | aio_read_events(ctx, min_nr, nr, event, &ret), until); | |
1da177e4 | 1136 | |
a31ad380 KO |
1137 | if (!ret && signal_pending(current)) |
1138 | ret = -EINTR; | |
1da177e4 | 1139 | |
a31ad380 | 1140 | return ret; |
1da177e4 LT |
1141 | } |
1142 | ||
1da177e4 LT |
1143 | /* sys_io_setup: |
1144 | * Create an aio_context capable of receiving at least nr_events. | |
1145 | * ctxp must not point to an aio_context that already exists, and | |
1146 | * must be initialized to 0 prior to the call. On successful | |
1147 | * creation of the aio_context, *ctxp is filled in with the resulting | |
1148 | * handle. May fail with -EINVAL if *ctxp is not initialized, | |
1149 | * if the specified nr_events exceeds internal limits. May fail | |
1150 | * with -EAGAIN if the specified nr_events exceeds the user's limit | |
1151 | * of available events. May fail with -ENOMEM if insufficient kernel | |
1152 | * resources are available. May fail with -EFAULT if an invalid | |
1153 | * pointer is passed for ctxp. Will fail with -ENOSYS if not | |
1154 | * implemented. | |
1155 | */ | |
002c8976 | 1156 | SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) |
1da177e4 LT |
1157 | { |
1158 | struct kioctx *ioctx = NULL; | |
1159 | unsigned long ctx; | |
1160 | long ret; | |
1161 | ||
1162 | ret = get_user(ctx, ctxp); | |
1163 | if (unlikely(ret)) | |
1164 | goto out; | |
1165 | ||
1166 | ret = -EINVAL; | |
d55b5fda ZB |
1167 | if (unlikely(ctx || nr_events == 0)) { |
1168 | pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", | |
1169 | ctx, nr_events); | |
1da177e4 LT |
1170 | goto out; |
1171 | } | |
1172 | ||
1173 | ioctx = ioctx_alloc(nr_events); | |
1174 | ret = PTR_ERR(ioctx); | |
1175 | if (!IS_ERR(ioctx)) { | |
1176 | ret = put_user(ioctx->user_id, ctxp); | |
a2e1859a | 1177 | if (ret) |
db446a08 | 1178 | kill_ioctx(current->mm, ioctx); |
723be6e3 | 1179 | percpu_ref_put(&ioctx->users); |
1da177e4 LT |
1180 | } |
1181 | ||
1182 | out: | |
1183 | return ret; | |
1184 | } | |
1185 | ||
1186 | /* sys_io_destroy: | |
1187 | * Destroy the aio_context specified. May cancel any outstanding | |
1188 | * AIOs and block on completion. Will fail with -ENOSYS if not | |
642b5123 | 1189 | * implemented. May fail with -EINVAL if the context pointed to |
1da177e4 LT |
1190 | * is invalid. |
1191 | */ | |
002c8976 | 1192 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) |
1da177e4 LT |
1193 | { |
1194 | struct kioctx *ioctx = lookup_ioctx(ctx); | |
1195 | if (likely(NULL != ioctx)) { | |
db446a08 | 1196 | kill_ioctx(current->mm, ioctx); |
723be6e3 | 1197 | percpu_ref_put(&ioctx->users); |
1da177e4 LT |
1198 | return 0; |
1199 | } | |
1200 | pr_debug("EINVAL: io_destroy: invalid context id\n"); | |
1201 | return -EINVAL; | |
1202 | } | |
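
Viewed from userspace, the two syscalls above bracket a context's lifetime. The sketch below is illustrative only and not part of fs/aio.c; it assumes raw syscall(2) invocation rather than the libaio wrappers, and the event count of 128 is an arbitrary choice for the example.

/* Illustrative userspace sketch (not part of fs/aio.c): create and destroy
 * an AIO context with raw syscalls; no libaio dependency is assumed. */
#include <linux/aio_abi.h>      /* aio_context_t */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;                  /* must be zero before io_setup() */

	if (syscall(__NR_io_setup, 128, &ctx) < 0) {    /* room for 128 events */
		perror("io_setup");
		return 1;
	}

	/* ... io_submit() / io_getevents() would go here ... */

	if (syscall(__NR_io_destroy, ctx) < 0) {        /* cancels, waits, frees ring */
		perror("io_destroy");
		return 1;
	}
	return 0;
}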
1203 | ||
41ef4eb8 KO |
1204 | typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *, |
1205 | unsigned long, loff_t); | |
1206 | ||
8bc92afc KO |
1207 | static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb, |
1208 | int rw, char __user *buf, | |
1209 | unsigned long *nr_segs, | |
1210 | struct iovec **iovec, | |
1211 | bool compat) | |
eed4e51f BP |
1212 | { |
1213 | ssize_t ret; | |
1214 | ||
8bc92afc | 1215 | *nr_segs = kiocb->ki_nbytes; |
41ef4eb8 | 1216 | |
9d85cba7 JM |
1217 | #ifdef CONFIG_COMPAT |
1218 | if (compat) | |
41ef4eb8 | 1219 | ret = compat_rw_copy_check_uvector(rw, |
8bc92afc KO |
1220 | (struct compat_iovec __user *)buf, |
1221 | *nr_segs, 1, *iovec, iovec); | |
9d85cba7 JM |
1222 | else |
1223 | #endif | |
41ef4eb8 | 1224 | ret = rw_copy_check_uvector(rw, |
8bc92afc KO |
1225 | (struct iovec __user *)buf, |
1226 | *nr_segs, 1, *iovec, iovec); | |
eed4e51f | 1227 | if (ret < 0) |
41ef4eb8 | 1228 | return ret; |
a70b52ec | 1229 | |
41ef4eb8 | 1230 | /* ki_nbytes now reflect bytes instead of segs */ |
eed4e51f | 1231 | kiocb->ki_nbytes = ret; |
41ef4eb8 | 1232 | return 0; |
eed4e51f BP |
1233 | } |
1234 | ||
8bc92afc KO |
1235 | static ssize_t aio_setup_single_vector(struct kiocb *kiocb, |
1236 | int rw, char __user *buf, | |
1237 | unsigned long *nr_segs, | |
1238 | struct iovec *iovec) | |
eed4e51f | 1239 | { |
8bc92afc | 1240 | if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes))) |
41ef4eb8 | 1241 | return -EFAULT; |
a70b52ec | 1242 | |
8bc92afc KO |
1243 | iovec->iov_base = buf; |
1244 | iovec->iov_len = kiocb->ki_nbytes; | |
1245 | *nr_segs = 1; | |
eed4e51f BP |
1246 | return 0; |
1247 | } | |
1248 | ||
1da177e4 LT |
1249 | /* |
1250 | * aio_setup_iocb: | |
1251 | * Performs the initial checks and aio retry method | |
1252 | * setup for the kiocb at the time of io submission. | |
1253 | */ | |
8bc92afc KO |
1254 | static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, |
1255 | char __user *buf, bool compat) | |
1da177e4 | 1256 | { |
41ef4eb8 KO |
1257 | struct file *file = req->ki_filp; |
1258 | ssize_t ret; | |
8bc92afc | 1259 | unsigned long nr_segs; |
41ef4eb8 KO |
1260 | int rw; |
1261 | fmode_t mode; | |
1262 | aio_rw_op *rw_op; | |
8bc92afc | 1263 | struct iovec inline_vec, *iovec = &inline_vec; |
1da177e4 | 1264 | |
8bc92afc | 1265 | switch (opcode) { |
1da177e4 | 1266 | case IOCB_CMD_PREAD: |
eed4e51f | 1267 | case IOCB_CMD_PREADV: |
41ef4eb8 KO |
1268 | mode = FMODE_READ; |
1269 | rw = READ; | |
1270 | rw_op = file->f_op->aio_read; | |
1271 | goto rw_common; | |
1272 | ||
1273 | case IOCB_CMD_PWRITE: | |
eed4e51f | 1274 | case IOCB_CMD_PWRITEV: |
41ef4eb8 KO |
1275 | mode = FMODE_WRITE; |
1276 | rw = WRITE; | |
1277 | rw_op = file->f_op->aio_write; | |
1278 | goto rw_common; | |
1279 | rw_common: | |
1280 | if (unlikely(!(file->f_mode & mode))) | |
1281 | return -EBADF; | |
1282 | ||
1283 | if (!rw_op) | |
1284 | return -EINVAL; | |
1285 | ||
8bc92afc KO |
1286 | ret = (opcode == IOCB_CMD_PREADV || |
1287 | opcode == IOCB_CMD_PWRITEV) | |
1288 | ? aio_setup_vectored_rw(req, rw, buf, &nr_segs, | |
1289 | &iovec, compat) | |
1290 | : aio_setup_single_vector(req, rw, buf, &nr_segs, | |
1291 | iovec); | |
eed4e51f | 1292 | if (ret) |
41ef4eb8 KO |
1293 | return ret; |
1294 | ||
1295 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); | |
8bc92afc KO |
1296 | if (ret < 0) { |
1297 | if (iovec != &inline_vec) | |
1298 | kfree(iovec); | |
41ef4eb8 | 1299 | return ret; |
8bc92afc | 1300 | } |
41ef4eb8 KO |
1301 | |
1302 | req->ki_nbytes = ret; | |
41ef4eb8 | 1303 | |
73a7075e KO |
1304 | /* XXX: move/kill - rw_verify_area()? */ |
1305 | /* This matches the pread()/pwrite() logic */ | |
1306 | if (req->ki_pos < 0) { | |
1307 | ret = -EINVAL; | |
1308 | break; | |
1309 | } | |
1310 | ||
1311 | if (rw == WRITE) | |
1312 | file_start_write(file); | |
1313 | ||
8bc92afc | 1314 | ret = rw_op(req, iovec, nr_segs, req->ki_pos); |
73a7075e KO |
1315 | |
1316 | if (rw == WRITE) | |
1317 | file_end_write(file); | |
1da177e4 | 1318 | break; |
41ef4eb8 | 1319 | |
1da177e4 | 1320 | case IOCB_CMD_FDSYNC: |
41ef4eb8 KO |
1321 | if (!file->f_op->aio_fsync) |
1322 | return -EINVAL; | |
1323 | ||
1324 | ret = file->f_op->aio_fsync(req, 1); | |
1da177e4 | 1325 | break; |
41ef4eb8 | 1326 | |
1da177e4 | 1327 | case IOCB_CMD_FSYNC: |
41ef4eb8 KO |
1328 | if (!file->f_op->aio_fsync) |
1329 | return -EINVAL; | |
1330 | ||
1331 | ret = file->f_op->aio_fsync(req, 0); | |
1da177e4 | 1332 | break; |
41ef4eb8 | 1333 | |
1da177e4 | 1334 | default: |
caf4167a | 1335 | pr_debug("EINVAL: no operation provided\n"); |
41ef4eb8 | 1336 | return -EINVAL; |
1da177e4 LT |
1337 | } |
1338 | ||
8bc92afc KO |
1339 | if (iovec != &inline_vec) |
1340 | kfree(iovec); | |
1341 | ||
41ef4eb8 KO |
1342 | if (ret != -EIOCBQUEUED) { |
1343 | /* | |
1344 | * There's no easy way to restart the syscall since other AIO's | |
1345 | * may be already running. Just fail this IO with EINTR. | |
1346 | */ | |
1347 | if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || | |
1348 | ret == -ERESTARTNOHAND || | |
1349 | ret == -ERESTART_RESTARTBLOCK)) | |
1350 | ret = -EINTR; | |
1351 | aio_complete(req, ret, 0); | |
1352 | } | |
1da177e4 LT |
1353 | |
1354 | return 0; | |
1355 | } | |
1356 | ||
d5470b59 | 1357 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
a1c8eae7 | 1358 | struct iocb *iocb, bool compat) |
1da177e4 LT |
1359 | { |
1360 | struct kiocb *req; | |
1da177e4 LT |
1361 | ssize_t ret; |
1362 | ||
1363 | /* enforce forwards compatibility on users */ | |
9c3060be | 1364 | if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { |
caf4167a | 1365 | pr_debug("EINVAL: reserve field set\n"); |
1da177e4 LT |
1366 | return -EINVAL; |
1367 | } | |
1368 | ||
1369 | /* prevent overflows */ | |
1370 | if (unlikely( | |
1371 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | |
1372 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | |
1373 | ((ssize_t)iocb->aio_nbytes < 0) | |
1374 | )) { | |
1375 | pr_debug("EINVAL: io_submit: overflow check\n"); | |
1376 | return -EINVAL; | |
1377 | } | |
1378 | ||
41ef4eb8 | 1379 | req = aio_get_req(ctx); |
1d98ebfc | 1380 | if (unlikely(!req)) |
1da177e4 | 1381 | return -EAGAIN; |
1d98ebfc KO |
1382 | |
1383 | req->ki_filp = fget(iocb->aio_fildes); | |
1384 | if (unlikely(!req->ki_filp)) { | |
1385 | ret = -EBADF; | |
1386 | goto out_put_req; | |
1da177e4 | 1387 | } |
1d98ebfc | 1388 | |
9c3060be DL |
1389 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
1390 | /* | |
1391 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | |
1392 | * instance of the file* now. The file descriptor must be | |
1393 | * an eventfd() fd, and will be signaled for each completed | |
1394 | * event using the eventfd_signal() function. | |
1395 | */ | |
13389010 | 1396 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
801678c5 | 1397 | if (IS_ERR(req->ki_eventfd)) { |
9c3060be | 1398 | ret = PTR_ERR(req->ki_eventfd); |
87c3a86e | 1399 | req->ki_eventfd = NULL; |
9c3060be DL |
1400 | goto out_put_req; |
1401 | } | |
1402 | } | |
1da177e4 | 1403 | |
8a660890 | 1404 | ret = put_user(KIOCB_KEY, &user_iocb->aio_key); |
1da177e4 | 1405 | if (unlikely(ret)) { |
caf4167a | 1406 | pr_debug("EFAULT: aio_key\n"); |
1da177e4 LT |
1407 | goto out_put_req; |
1408 | } | |
1409 | ||
1410 | req->ki_obj.user = user_iocb; | |
1411 | req->ki_user_data = iocb->aio_data; | |
1412 | req->ki_pos = iocb->aio_offset; | |
73a7075e | 1413 | req->ki_nbytes = iocb->aio_nbytes; |
1da177e4 | 1414 | |
8bc92afc KO |
1415 | ret = aio_run_iocb(req, iocb->aio_lio_opcode, |
1416 | (char __user *)(unsigned long)iocb->aio_buf, | |
1417 | compat); | |
41003a7b | 1418 | if (ret) |
7137c6bd | 1419 | goto out_put_req; |
41003a7b | 1420 | |
1da177e4 | 1421 | return 0; |
1da177e4 | 1422 | out_put_req: |
e1bdd5f2 | 1423 | put_reqs_available(ctx, 1); |
57282d8f | 1424 | kiocb_free(req); |
1da177e4 LT |
1425 | return ret; |
1426 | } | |
1427 | ||
9d85cba7 JM |
1428 | long do_io_submit(aio_context_t ctx_id, long nr, |
1429 | struct iocb __user *__user *iocbpp, bool compat) | |
1da177e4 LT |
1430 | { |
1431 | struct kioctx *ctx; | |
1432 | long ret = 0; | |
080d676d | 1433 | int i = 0; |
9f5b9425 | 1434 | struct blk_plug plug; |
1da177e4 LT |
1435 | |
1436 | if (unlikely(nr < 0)) | |
1437 | return -EINVAL; | |
1438 | ||
75e1c70f JM |
1439 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) |
1440 | nr = LONG_MAX/sizeof(*iocbpp); | |
1441 | ||
1da177e4 LT |
1442 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) |
1443 | return -EFAULT; | |
1444 | ||
1445 | ctx = lookup_ioctx(ctx_id); | |
1446 | if (unlikely(!ctx)) { | |
caf4167a | 1447 | pr_debug("EINVAL: invalid context id\n"); |
1da177e4 LT |
1448 | return -EINVAL; |
1449 | } | |
1450 | ||
9f5b9425 SL |
1451 | blk_start_plug(&plug); |
1452 | ||
1da177e4 LT |
1453 | /* |
1454 | * AKPM: should this return a partial result if some of the IOs were | |
1455 | * successfully submitted? | |
1456 | */ | |
1457 | for (i=0; i<nr; i++) { | |
1458 | struct iocb __user *user_iocb; | |
1459 | struct iocb tmp; | |
1460 | ||
1461 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { | |
1462 | ret = -EFAULT; | |
1463 | break; | |
1464 | } | |
1465 | ||
1466 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { | |
1467 | ret = -EFAULT; | |
1468 | break; | |
1469 | } | |
1470 | ||
a1c8eae7 | 1471 | ret = io_submit_one(ctx, user_iocb, &tmp, compat); |
1da177e4 LT |
1472 | if (ret) |
1473 | break; | |
1474 | } | |
9f5b9425 | 1475 | blk_finish_plug(&plug); |
1da177e4 | 1476 | |
723be6e3 | 1477 | percpu_ref_put(&ctx->users); |
1da177e4 LT |
1478 | return i ? i : ret; |
1479 | } | |
1480 | ||
9d85cba7 JM |
1481 | /* sys_io_submit: |
1482 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns | |
1483 | * the number of iocbs queued. May return -EINVAL if the aio_context | |
1484 | * specified by ctx_id is invalid, if nr is < 0, if the iocb at | |
1485 | * *iocbpp[0] is not properly initialized, if the operation specified | |
1486 | * is invalid for the file descriptor in the iocb. May fail with | |
1487 | * -EFAULT if any of the data structures point to invalid data. May | |
1488 | * fail with -EBADF if the file descriptor specified in the first | |
1489 | * iocb is invalid. May fail with -EAGAIN if insufficient resources | |
1490 | * are available to queue any iocbs. Will return 0 if nr is 0. Will | |
1491 | * fail with -ENOSYS if not implemented. | |
1492 | */ | |
1493 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, | |
1494 | struct iocb __user * __user *, iocbpp) | |
1495 | { | |
1496 | return do_io_submit(ctx_id, nr, iocbpp, 0); | |
1497 | } | |
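
For completeness, the userspace half of a submission might look like the sketch below. It is illustrative only (not part of fs/aio.c) and assumes a context obtained from io_setup() as in the earlier sketch, an open file descriptor, and a buffer; the helper name submit_pread is made up for the example.

/* Illustrative userspace sketch (not part of fs/aio.c): queue a single
 * IOCB_CMD_PREAD via io_submit(). */
#include <linux/aio_abi.h>      /* struct iocb, IOCB_CMD_PREAD */
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static long submit_pread(aio_context_t ctx, int fd, void *buf,
			 size_t len, long long off)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	memset(&cb, 0, sizeof(cb));             /* reserved fields must be zero */
	cb.aio_fildes     = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf        = (unsigned long)buf; /* 64-bit field in the ABI */
	cb.aio_nbytes     = len;
	cb.aio_offset     = off;

	/* Returns the number of iocbs queued (1 here), or -1 with errno set. */
	return syscall(__NR_io_submit, ctx, 1, cbs);
}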
1498 | ||
1da177e4 LT |
1499 | /* lookup_kiocb |
1500 | * Finds a given iocb for cancellation. | |
1da177e4 | 1501 | */ |
25ee7e38 AB |
1502 | static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, |
1503 | u32 key) | |
1da177e4 LT |
1504 | { |
1505 | struct list_head *pos; | |
d00689af ZB |
1506 | |
1507 | assert_spin_locked(&ctx->ctx_lock); | |
1508 | ||
8a660890 KO |
1509 | if (key != KIOCB_KEY) |
1510 | return NULL; | |
1511 | ||
1da177e4 LT |
1512 | /* TODO: use a hash or array, this sucks. */ |
1513 | list_for_each(pos, &ctx->active_reqs) { | |
1514 | struct kiocb *kiocb = list_kiocb(pos); | |
8a660890 | 1515 | if (kiocb->ki_obj.user == iocb) |
1da177e4 LT |
1516 | return kiocb; |
1517 | } | |
1518 | return NULL; | |
1519 | } | |
1520 | ||
1521 | /* sys_io_cancel: | |
1522 | * Attempts to cancel an iocb previously passed to io_submit. If | |
1523 | * the operation is successfully cancelled, the resulting event is | |
1524 | * copied into the memory pointed to by result without being placed | |
1525 | * into the completion queue and 0 is returned. May fail with | |
1526 | * -EFAULT if any of the data structures pointed to are invalid. | |
1527 | * May fail with -EINVAL if aio_context specified by ctx_id is | |
1528 | * invalid. May fail with -EAGAIN if the iocb specified was not | |
1529 | * cancelled. Will fail with -ENOSYS if not implemented. | |
1530 | */ | |
002c8976 HC |
1531 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
1532 | struct io_event __user *, result) | |
1da177e4 | 1533 | { |
1da177e4 LT |
1534 | struct kioctx *ctx; |
1535 | struct kiocb *kiocb; | |
1536 | u32 key; | |
1537 | int ret; | |
1538 | ||
1539 | ret = get_user(key, &iocb->aio_key); | |
1540 | if (unlikely(ret)) | |
1541 | return -EFAULT; | |
1542 | ||
1543 | ctx = lookup_ioctx(ctx_id); | |
1544 | if (unlikely(!ctx)) | |
1545 | return -EINVAL; | |
1546 | ||
1547 | spin_lock_irq(&ctx->ctx_lock); | |
906b973c | 1548 | |
1da177e4 | 1549 | kiocb = lookup_kiocb(ctx, iocb, key); |
906b973c | 1550 | if (kiocb) |
bec68faa | 1551 | ret = kiocb_cancel(ctx, kiocb); |
906b973c KO |
1552 | else |
1553 | ret = -EINVAL; | |
1554 | ||
1da177e4 LT |
1555 | spin_unlock_irq(&ctx->ctx_lock); |
1556 | ||
906b973c | 1557 | if (!ret) { |
bec68faa KO |
1558 | /* |
1559 | * The result argument is no longer used - the io_event is | |
1560 | * always delivered via the ring buffer. -EINPROGRESS indicates | |
1561 | * cancellation is in progress: | |
906b973c | 1562 | */ |
bec68faa | 1563 | ret = -EINPROGRESS; |
906b973c | 1564 | } |
1da177e4 | 1565 | |
723be6e3 | 1566 | percpu_ref_put(&ctx->users); |
1da177e4 LT |
1567 | |
1568 | return ret; | |
1569 | } | |
1570 | ||
1571 | /* io_getevents: | |
1572 | * Attempts to read at least min_nr events and up to nr events from | |
642b5123 ST |
1573 | * the completion queue for the aio_context specified by ctx_id. If |
1574 | * it succeeds, the number of read events is returned. May fail with | |
1575 | * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is | |
1576 | * out of range, if timeout is out of range. May fail with -EFAULT | |
1577 | * if any of the memory specified is invalid. May return 0 or | |
1578 | * < min_nr if the timeout specified by timeout has elapsed | |
1579 | * before sufficient events are available, where timeout == NULL | |
1580 | * specifies an infinite timeout. Note that the timeout pointed to by | |
6900807c | 1581 | * timeout is relative. Will fail with -ENOSYS if not implemented. |
1da177e4 | 1582 | */ |
002c8976 HC |
1583 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
1584 | long, min_nr, | |
1585 | long, nr, | |
1586 | struct io_event __user *, events, | |
1587 | struct timespec __user *, timeout) | |
1da177e4 LT |
1588 | { |
1589 | struct kioctx *ioctx = lookup_ioctx(ctx_id); | |
1590 | long ret = -EINVAL; | |
1591 | ||
1592 | if (likely(ioctx)) { | |
2e410255 | 1593 | if (likely(min_nr <= nr && min_nr >= 0)) |
1da177e4 | 1594 | ret = read_events(ioctx, min_nr, nr, events, timeout); |
723be6e3 | 1595 | percpu_ref_put(&ioctx->users); |
1da177e4 | 1596 | } |
1da177e4 LT |
1597 | return ret; |
1598 | } |
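
To close the loop on the userspace side described in the comment above: completions are harvested with io_getevents(), usually in a loop. The sketch below is illustrative only (not part of fs/aio.c) and continues the raw-syscall style of the earlier examples; the helper name reap_events is made up.

/* Illustrative userspace sketch (not part of fs/aio.c): wait up to one
 * second for at least one completion and print the result codes. */
#include <linux/aio_abi.h>      /* struct io_event */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <time.h>               /* struct timespec */

static void reap_events(aio_context_t ctx)
{
	struct io_event events[16];
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
	long i, n;

	/* min_nr = 1, nr = 16: returns once at least one event is ready,
	 * or 0 if the timeout elapses first. */
	n = syscall(__NR_io_getevents, ctx, 1, 16, events, &timeout);
	for (i = 0; i < n; i++)
		printf("iocb %#llx: res=%lld res2=%lld\n",
		       (unsigned long long)events[i].obj,
		       (long long)events[i].res, (long long)events[i].res2);
}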