/*
 * An async IO implementation for Linux
 * Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 * Implements an efficient asynchronous io interface.
 *
 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
 *
 * See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
        unsigned id;	/* kernel internal index number */
        unsigned nr;	/* number of io_events */
        unsigned head;
        unsigned tail;

        unsigned magic;
        unsigned compat_features;
        unsigned incompat_features;
        unsigned header_length;	/* size of aio_ring */


        struct io_event io_events[0];
}; /* 128 bytes + ring size */
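
/*
 * Illustrative sketch, not part of this file's build: since aio_setup_ring()
 * below maps the ring into the creating process and then uses the mapping
 * address as the context handle, userspace can in principle read this header
 * straight through the aio_context_t returned by io_setup().  Everything here
 * other than the syscall numbers and the <linux/aio_abi.h> types is a made-up
 * example (user_aio_ring, main); the layout simply mirrors struct aio_ring.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	struct user_aio_ring {		// same layout as struct aio_ring
 *		unsigned id, nr, head, tail;
 *		unsigned magic, compat_features, incompat_features;
 *		unsigned header_length;
 *	};
 *
 *	int main(void)
 *	{
 *		aio_context_t ctx = 0;
 *		struct user_aio_ring *ring;
 *
 *		if (syscall(__NR_io_setup, 32, &ctx) < 0)
 *			return 1;
 *		ring = (struct user_aio_ring *)ctx;
 *		if (ring->magic == 0xa10a10a1 && ring->incompat_features == 0)
 *			printf("ring: %u slots, head %u, tail %u\n",
 *			       ring->nr, ring->head, ring->tail);
 *		return (int)syscall(__NR_io_destroy, ctx);
 *	}
 */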

#define AIO_RING_PAGES	8
struct aio_ring_info {
        unsigned long mmap_base;
        unsigned long mmap_size;

        struct page **ring_pages;
        struct mutex ring_lock;
        long nr_pages;

        unsigned nr, tail;

        struct page *internal_pages[AIO_RING_PAGES];
};

struct kioctx {
        atomic_t users;
        atomic_t dead;

        /* This needs improving */
        unsigned long user_id;
        struct hlist_node list;

        wait_queue_head_t wait;

        spinlock_t ctx_lock;

        atomic_t reqs_active;
        struct list_head active_reqs;	/* used for cancellation */

        /*
         * This is what userspace passed to io_setup(), it's not used for
         * anything but counting against the global max_reqs quota.
         *
         * The real limit is ring->nr - 1, which will be larger (see
         * aio_setup_ring())
         */
        unsigned max_reqs;

        struct aio_ring_info ring_info;

        spinlock_t completion_lock;

        struct rcu_head rcu_head;
        struct work_struct rcu_work;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);

        pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

        return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
        struct aio_ring_info *info = &ctx->ring_info;
        long i;

        for (i=0; i<info->nr_pages; i++)
                put_page(info->ring_pages[i]);

        if (info->mmap_size) {
                vm_munmap(info->mmap_base, info->mmap_size);
        }

        if (info->ring_pages && info->ring_pages != info->internal_pages)
                kfree(info->ring_pages);
        info->ring_pages = NULL;
        info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
        struct aio_ring *ring;
        struct aio_ring_info *info = &ctx->ring_info;
        unsigned nr_events = ctx->max_reqs;
        struct mm_struct *mm = current->mm;
        unsigned long size, populate;
        int nr_pages;

        /* Compensate for the ring buffer's head/tail overlap entry */
        nr_events += 2;	/* 1 is required, 2 for good luck */

        size = sizeof(struct aio_ring);
        size += sizeof(struct io_event) * nr_events;
        nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

        if (nr_pages < 0)
                return -EINVAL;

        nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

        info->nr = 0;
        info->ring_pages = info->internal_pages;
        if (nr_pages > AIO_RING_PAGES) {
                info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
                if (!info->ring_pages)
                        return -ENOMEM;
        }

        info->mmap_size = nr_pages * PAGE_SIZE;
        pr_debug("attempting mmap of %lu bytes\n", info->mmap_size);
        down_write(&mm->mmap_sem);
        info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANONYMOUS|MAP_PRIVATE, 0,
                                        &populate);
        if (IS_ERR((void *)info->mmap_base)) {
                up_write(&mm->mmap_sem);
                info->mmap_size = 0;
                aio_free_ring(ctx);
                return -EAGAIN;
        }

        pr_debug("mmap address: 0x%08lx\n", info->mmap_base);
        info->nr_pages = get_user_pages(current, mm, info->mmap_base, nr_pages,
                                        1, 0, info->ring_pages, NULL);
        up_write(&mm->mmap_sem);

        if (unlikely(info->nr_pages != nr_pages)) {
                aio_free_ring(ctx);
                return -EAGAIN;
        }
        if (populate)
                mm_populate(info->mmap_base, populate);

        ctx->user_id = info->mmap_base;

        info->nr = nr_events;	/* trusted copy */

        ring = kmap_atomic(info->ring_pages[0]);
        ring->nr = nr_events;	/* user copy */
        ring->id = ctx->user_id;
        ring->head = ring->tail = 0;
        ring->magic = AIO_RING_MAGIC;
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
        kunmap_atomic(ring);
        flush_dcache_page(info->ring_pages[0]);

        return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
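
/*
 * A worked example of the sizing and indexing above (illustrative numbers
 * only: they assume 4 KiB pages, the 32-byte struct io_event from the aio
 * ABI, and a 32-byte struct aio_ring header):
 *
 *	io_setup(128, &ctx) in aio_setup_ring():
 *		nr_events = 128 + 2                     =  130
 *		size      = 32 + 130 * 32               = 4192 bytes
 *		nr_pages  = (4192 + 4095) >> PAGE_SHIFT =    2
 *		nr_events = (2 * 4096 - 32) / 32        =  255   (the "trusted copy")
 *
 * so the ring ends up larger than the 128 slots asked for, and the usable
 * limit is ring->nr - 1 since one empty slot separates full from empty.
 * Indexing then works out as:
 *
 *	AIO_EVENTS_PER_PAGE   = 4096 / 32        = 128
 *	AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127
 *	AIO_EVENTS_OFFSET     = 128 - 127        =   1
 *
 *	pos     = tail + AIO_EVENTS_OFFSET;
 *	ev_page = kmap_atomic(ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 *	event   = ev_page + pos % AIO_EVENTS_PER_PAGE;
 *
 * e.g. tail 126 lands on the last event of page 0 and tail 127 on the first
 * event of page 1, because the header displaces exactly one event on page 0.
 */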
224 | ||
225 | void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) | |
226 | { | |
227 | struct kioctx *ctx = req->ki_ctx; | |
228 | unsigned long flags; | |
229 | ||
230 | spin_lock_irqsave(&ctx->ctx_lock, flags); | |
231 | ||
232 | if (!req->ki_list.next) | |
233 | list_add(&req->ki_list, &ctx->active_reqs); | |
234 | ||
235 | req->ki_cancel = cancel; | |
236 | ||
237 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | |
238 | } | |
239 | EXPORT_SYMBOL(kiocb_set_cancel_fn); | |
240 | ||
241 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb, | |
242 | struct io_event *res) | |
243 | { | |
244 | kiocb_cancel_fn *old, *cancel; | |
245 | int ret = -EINVAL; | |
246 | ||
247 | /* | |
248 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it | |
249 | * actually has a cancel function, hence the cmpxchg() | |
250 | */ | |
251 | ||
252 | cancel = ACCESS_ONCE(kiocb->ki_cancel); | |
253 | do { | |
254 | if (!cancel || cancel == KIOCB_CANCELLED) | |
255 | return ret; | |
256 | ||
257 | old = cancel; | |
258 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); | |
259 | } while (cancel != old); | |
260 | ||
261 | atomic_inc(&kiocb->ki_users); | |
262 | spin_unlock_irq(&ctx->ctx_lock); | |
263 | ||
264 | memset(res, 0, sizeof(*res)); | |
265 | res->obj = (u64)(unsigned long)kiocb->ki_obj.user; | |
266 | res->data = kiocb->ki_user_data; | |
267 | ret = cancel(kiocb, res); | |
268 | ||
269 | spin_lock_irq(&ctx->ctx_lock); | |
270 | ||
271 | return ret; | |
272 | } | |
273 | ||
274 | static void free_ioctx_rcu(struct rcu_head *head) | |
275 | { | |
276 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | |
277 | kmem_cache_free(kioctx_cachep, ctx); | |
278 | } | |
279 | ||
280 | /* | |
281 | * When this function runs, the kioctx has been removed from the "hash table" | |
282 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - | |
283 | * now it's safe to cancel any that need to be. | |
284 | */ | |
285 | static void free_ioctx(struct kioctx *ctx) | |
286 | { | |
287 | struct aio_ring_info *info = &ctx->ring_info; | |
288 | struct aio_ring *ring; | |
289 | struct io_event res; | |
290 | struct kiocb *req; | |
291 | unsigned head, avail; | |
292 | ||
293 | spin_lock_irq(&ctx->ctx_lock); | |
294 | ||
295 | while (!list_empty(&ctx->active_reqs)) { | |
296 | req = list_first_entry(&ctx->active_reqs, | |
297 | struct kiocb, ki_list); | |
298 | ||
299 | list_del_init(&req->ki_list); | |
300 | kiocb_cancel(ctx, req, &res); | |
301 | } | |
302 | ||
303 | spin_unlock_irq(&ctx->ctx_lock); | |
304 | ||
305 | ring = kmap_atomic(info->ring_pages[0]); | |
306 | head = ring->head; | |
307 | kunmap_atomic(ring); | |
308 | ||
309 | while (atomic_read(&ctx->reqs_active) > 0) { | |
310 | wait_event(ctx->wait, head != info->tail); | |
311 | ||
312 | avail = (head <= info->tail ? info->tail : info->nr) - head; | |
313 | ||
314 | atomic_sub(avail, &ctx->reqs_active); | |
315 | head += avail; | |
316 | head %= info->nr; | |
317 | } | |
318 | ||
319 | WARN_ON(atomic_read(&ctx->reqs_active) < 0); | |
320 | ||
321 | aio_free_ring(ctx); | |
322 | ||
323 | spin_lock(&aio_nr_lock); | |
324 | BUG_ON(aio_nr - ctx->max_reqs > aio_nr); | |
325 | aio_nr -= ctx->max_reqs; | |
326 | spin_unlock(&aio_nr_lock); | |
327 | ||
328 | pr_debug("freeing %p\n", ctx); | |
329 | ||
330 | /* | |
331 | * Here the call_rcu() is between the wait_event() for reqs_active to | |
332 | * hit 0, and freeing the ioctx. | |
333 | * | |
334 | * aio_complete() decrements reqs_active, but it has to touch the ioctx | |
335 | * after to issue a wakeup so we use rcu. | |
336 | */ | |
337 | call_rcu(&ctx->rcu_head, free_ioctx_rcu); | |
338 | } | |
339 | ||
340 | static void put_ioctx(struct kioctx *ctx) | |
341 | { | |
342 | if (unlikely(atomic_dec_and_test(&ctx->users))) | |
343 | free_ioctx(ctx); | |
344 | } | |
345 | ||
346 | /* ioctx_alloc | |
347 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. | |
348 | */ | |
349 | static struct kioctx *ioctx_alloc(unsigned nr_events) | |
350 | { | |
351 | struct mm_struct *mm = current->mm; | |
352 | struct kioctx *ctx; | |
353 | int err = -ENOMEM; | |
354 | ||
355 | /* Prevent overflows */ | |
356 | if ((nr_events > (0x10000000U / sizeof(struct io_event))) || | |
357 | (nr_events > (0x10000000U / sizeof(struct kiocb)))) { | |
358 | pr_debug("ENOMEM: nr_events too high\n"); | |
359 | return ERR_PTR(-EINVAL); | |
360 | } | |
361 | ||
362 | if (!nr_events || (unsigned long)nr_events > aio_max_nr) | |
363 | return ERR_PTR(-EAGAIN); | |
364 | ||
365 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); | |
366 | if (!ctx) | |
367 | return ERR_PTR(-ENOMEM); | |
368 | ||
369 | ctx->max_reqs = nr_events; | |
370 | ||
371 | atomic_set(&ctx->users, 2); | |
372 | atomic_set(&ctx->dead, 0); | |
373 | spin_lock_init(&ctx->ctx_lock); | |
374 | spin_lock_init(&ctx->completion_lock); | |
375 | mutex_init(&ctx->ring_info.ring_lock); | |
376 | init_waitqueue_head(&ctx->wait); | |
377 | ||
378 | INIT_LIST_HEAD(&ctx->active_reqs); | |
379 | ||
380 | if (aio_setup_ring(ctx) < 0) | |
381 | goto out_freectx; | |
382 | ||
383 | /* limit the number of system wide aios */ | |
384 | spin_lock(&aio_nr_lock); | |
385 | if (aio_nr + nr_events > aio_max_nr || | |
386 | aio_nr + nr_events < aio_nr) { | |
387 | spin_unlock(&aio_nr_lock); | |
388 | goto out_cleanup; | |
389 | } | |
390 | aio_nr += ctx->max_reqs; | |
391 | spin_unlock(&aio_nr_lock); | |
392 | ||
393 | /* now link into global list. */ | |
394 | spin_lock(&mm->ioctx_lock); | |
395 | hlist_add_head_rcu(&ctx->list, &mm->ioctx_list); | |
396 | spin_unlock(&mm->ioctx_lock); | |
397 | ||
398 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", | |
399 | ctx, ctx->user_id, mm, ctx->ring_info.nr); | |
400 | return ctx; | |
401 | ||
402 | out_cleanup: | |
403 | err = -EAGAIN; | |
404 | aio_free_ring(ctx); | |
405 | out_freectx: | |
406 | kmem_cache_free(kioctx_cachep, ctx); | |
407 | pr_debug("error allocating ioctx %d\n", err); | |
408 | return ERR_PTR(err); | |
409 | } | |
410 | ||
411 | static void kill_ioctx_work(struct work_struct *work) | |
412 | { | |
413 | struct kioctx *ctx = container_of(work, struct kioctx, rcu_work); | |
414 | ||
415 | wake_up_all(&ctx->wait); | |
416 | put_ioctx(ctx); | |
417 | } | |
418 | ||
419 | static void kill_ioctx_rcu(struct rcu_head *head) | |
420 | { | |
421 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | |
422 | ||
423 | INIT_WORK(&ctx->rcu_work, kill_ioctx_work); | |
424 | schedule_work(&ctx->rcu_work); | |
425 | } | |
426 | ||
427 | /* kill_ioctx | |
428 | * Cancels all outstanding aio requests on an aio context. Used | |
429 | * when the processes owning a context have all exited to encourage | |
430 | * the rapid destruction of the kioctx. | |
431 | */ | |
432 | static void kill_ioctx(struct kioctx *ctx) | |
433 | { | |
434 | if (!atomic_xchg(&ctx->dead, 1)) { | |
435 | hlist_del_rcu(&ctx->list); | |
436 | /* Between hlist_del_rcu() and dropping the initial ref */ | |
437 | synchronize_rcu(); | |
438 | ||
439 | /* | |
440 | * We can't punt to workqueue here because put_ioctx() -> | |
441 | * free_ioctx() will unmap the ringbuffer, and that has to be | |
442 | * done in the original process's context. kill_ioctx_rcu/work() | |
443 | * exist for exit_aio(), as in that path free_ioctx() won't do | |
444 | * the unmap. | |
445 | */ | |
446 | kill_ioctx_work(&ctx->rcu_work); | |
447 | } | |
448 | } | |
449 | ||
450 | /* wait_on_sync_kiocb: | |
451 | * Waits on the given sync kiocb to complete. | |
452 | */ | |
453 | ssize_t wait_on_sync_kiocb(struct kiocb *iocb) | |
454 | { | |
455 | while (atomic_read(&iocb->ki_users)) { | |
456 | set_current_state(TASK_UNINTERRUPTIBLE); | |
457 | if (!atomic_read(&iocb->ki_users)) | |
458 | break; | |
459 | io_schedule(); | |
460 | } | |
461 | __set_current_state(TASK_RUNNING); | |
462 | return iocb->ki_user_data; | |
463 | } | |
464 | EXPORT_SYMBOL(wait_on_sync_kiocb); | |
465 | ||
/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
        struct kioctx *ctx;
        struct hlist_node *n;

        hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
                if (1 != atomic_read(&ctx->users))
                        printk(KERN_DEBUG
                                "exit_aio:ioctx still alive: %d %d %d\n",
                                atomic_read(&ctx->users),
                                atomic_read(&ctx->dead),
                                atomic_read(&ctx->reqs_active));
                /*
                 * We don't need to bother with munmap() here -
                 * exit_mmap(mm) is coming and it'll unmap everything.
                 * Since aio_free_ring() uses non-zero ->mmap_size
                 * as indicator that it needs to unmap the area,
                 * just set it to 0; aio_free_ring() is the only
                 * place that uses ->mmap_size, so it's safe.
                 */
                ctx->ring_info.mmap_size = 0;

                if (!atomic_xchg(&ctx->dead, 1)) {
                        hlist_del_rcu(&ctx->list);
                        call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
                }
        }
}
502 | ||
503 | /* aio_get_req | |
504 | * Allocate a slot for an aio request. Increments the ki_users count | |
505 | * of the kioctx so that the kioctx stays around until all requests are | |
506 | * complete. Returns NULL if no requests are free. | |
507 | * | |
508 | * Returns with kiocb->ki_users set to 2. The io submit code path holds | |
509 | * an extra reference while submitting the i/o. | |
510 | * This prevents races between the aio code path referencing the | |
511 | * req (after submitting it) and aio_complete() freeing the req. | |
512 | */ | |
513 | static struct kiocb *__aio_get_req(struct kioctx *ctx) | |
514 | { | |
515 | struct kiocb *req = NULL; | |
516 | ||
517 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); | |
518 | if (unlikely(!req)) | |
519 | return NULL; | |
520 | ||
521 | atomic_set(&req->ki_users, 2); | |
522 | req->ki_ctx = ctx; | |
523 | ||
524 | return req; | |
525 | } | |
526 | ||
527 | /* | |
528 | * struct kiocb's are allocated in batches to reduce the number of | |
529 | * times the ctx lock is acquired and released. | |
530 | */ | |
531 | #define KIOCB_BATCH_SIZE 32L | |
532 | struct kiocb_batch { | |
533 | struct list_head head; | |
534 | long count; /* number of requests left to allocate */ | |
535 | }; | |
536 | ||
537 | static void kiocb_batch_init(struct kiocb_batch *batch, long total) | |
538 | { | |
539 | INIT_LIST_HEAD(&batch->head); | |
540 | batch->count = total; | |
541 | } | |
542 | ||
543 | static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch) | |
544 | { | |
545 | struct kiocb *req, *n; | |
546 | ||
547 | if (list_empty(&batch->head)) | |
548 | return; | |
549 | ||
550 | spin_lock_irq(&ctx->ctx_lock); | |
551 | list_for_each_entry_safe(req, n, &batch->head, ki_batch) { | |
552 | list_del(&req->ki_batch); | |
553 | kmem_cache_free(kiocb_cachep, req); | |
554 | atomic_dec(&ctx->reqs_active); | |
555 | } | |
556 | spin_unlock_irq(&ctx->ctx_lock); | |
557 | } | |
558 | ||
559 | /* | |
560 | * Allocate a batch of kiocbs. This avoids taking and dropping the | |
561 | * context lock a lot during setup. | |
562 | */ | |
563 | static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch) | |
564 | { | |
565 | unsigned short allocated, to_alloc; | |
566 | long avail; | |
567 | struct kiocb *req, *n; | |
568 | ||
569 | to_alloc = min(batch->count, KIOCB_BATCH_SIZE); | |
570 | for (allocated = 0; allocated < to_alloc; allocated++) { | |
571 | req = __aio_get_req(ctx); | |
572 | if (!req) | |
573 | /* allocation failed, go with what we've got */ | |
574 | break; | |
575 | list_add(&req->ki_batch, &batch->head); | |
576 | } | |
577 | ||
578 | if (allocated == 0) | |
579 | goto out; | |
580 | ||
581 | spin_lock_irq(&ctx->ctx_lock); | |
582 | ||
583 | avail = ctx->ring_info.nr - atomic_read(&ctx->reqs_active) - 1; | |
584 | BUG_ON(avail < 0); | |
585 | if (avail < allocated) { | |
586 | /* Trim back the number of requests. */ | |
587 | list_for_each_entry_safe(req, n, &batch->head, ki_batch) { | |
588 | list_del(&req->ki_batch); | |
589 | kmem_cache_free(kiocb_cachep, req); | |
590 | if (--allocated <= avail) | |
591 | break; | |
592 | } | |
593 | } | |
594 | ||
595 | batch->count -= allocated; | |
596 | atomic_add(allocated, &ctx->reqs_active); | |
597 | ||
598 | spin_unlock_irq(&ctx->ctx_lock); | |
599 | ||
600 | out: | |
601 | return allocated; | |
602 | } | |
603 | ||
604 | static inline struct kiocb *aio_get_req(struct kioctx *ctx, | |
605 | struct kiocb_batch *batch) | |
606 | { | |
607 | struct kiocb *req; | |
608 | ||
609 | if (list_empty(&batch->head)) | |
610 | if (kiocb_batch_refill(ctx, batch) == 0) | |
611 | return NULL; | |
612 | req = list_first_entry(&batch->head, struct kiocb, ki_batch); | |
613 | list_del(&req->ki_batch); | |
614 | return req; | |
615 | } | |
616 | ||
617 | static void kiocb_free(struct kiocb *req) | |
618 | { | |
619 | if (req->ki_filp) | |
620 | fput(req->ki_filp); | |
621 | if (req->ki_eventfd != NULL) | |
622 | eventfd_ctx_put(req->ki_eventfd); | |
623 | if (req->ki_dtor) | |
624 | req->ki_dtor(req); | |
625 | if (req->ki_iovec != &req->ki_inline_vec) | |
626 | kfree(req->ki_iovec); | |
627 | kmem_cache_free(kiocb_cachep, req); | |
628 | } | |
629 | ||
630 | void aio_put_req(struct kiocb *req) | |
631 | { | |
632 | if (atomic_dec_and_test(&req->ki_users)) | |
633 | kiocb_free(req); | |
634 | } | |
635 | EXPORT_SYMBOL(aio_put_req); | |
636 | ||
637 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) | |
638 | { | |
639 | struct mm_struct *mm = current->mm; | |
640 | struct kioctx *ctx, *ret = NULL; | |
641 | ||
642 | rcu_read_lock(); | |
643 | ||
644 | hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) { | |
645 | if (ctx->user_id == ctx_id) { | |
646 | atomic_inc(&ctx->users); | |
647 | ret = ctx; | |
648 | break; | |
649 | } | |
650 | } | |
651 | ||
652 | rcu_read_unlock(); | |
653 | return ret; | |
654 | } | |
655 | ||
656 | /* aio_complete | |
657 | * Called when the io request on the given iocb is complete. | |
658 | */ | |
659 | void aio_complete(struct kiocb *iocb, long res, long res2) | |
660 | { | |
661 | struct kioctx *ctx = iocb->ki_ctx; | |
662 | struct aio_ring_info *info; | |
663 | struct aio_ring *ring; | |
664 | struct io_event *ev_page, *event; | |
665 | unsigned long flags; | |
666 | unsigned tail, pos; | |
667 | ||
668 | /* | |
669 | * Special case handling for sync iocbs: | |
670 | * - events go directly into the iocb for fast handling | |
671 | * - the sync task with the iocb in its stack holds the single iocb | |
672 | * ref, no other paths have a way to get another ref | |
673 | * - the sync task helpfully left a reference to itself in the iocb | |
674 | */ | |
675 | if (is_sync_kiocb(iocb)) { | |
676 | BUG_ON(atomic_read(&iocb->ki_users) != 1); | |
677 | iocb->ki_user_data = res; | |
678 | atomic_set(&iocb->ki_users, 0); | |
679 | wake_up_process(iocb->ki_obj.tsk); | |
680 | return; | |
681 | } | |
682 | ||
683 | info = &ctx->ring_info; | |
684 | ||
685 | /* | |
686 | * Take rcu_read_lock() in case the kioctx is being destroyed, as we | |
687 | * need to issue a wakeup after decrementing reqs_active. | |
688 | */ | |
689 | rcu_read_lock(); | |
690 | ||
691 | if (iocb->ki_list.next) { | |
692 | unsigned long flags; | |
693 | ||
694 | spin_lock_irqsave(&ctx->ctx_lock, flags); | |
695 | list_del(&iocb->ki_list); | |
696 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | |
697 | } | |
698 | ||
699 | /* | |
700 | * cancelled requests don't get events, userland was given one | |
701 | * when the event got cancelled. | |
702 | */ | |
703 | if (unlikely(xchg(&iocb->ki_cancel, | |
704 | KIOCB_CANCELLED) == KIOCB_CANCELLED)) { | |
705 | atomic_dec(&ctx->reqs_active); | |
706 | /* Still need the wake_up in case free_ioctx is waiting */ | |
707 | goto put_rq; | |
708 | } | |
709 | ||
        /*
         * Add a completion event to the ring buffer.  Must be done holding
         * ctx->completion_lock to prevent other code from messing with the
         * tail pointer since we might be called from irq context.
         */
        spin_lock_irqsave(&ctx->completion_lock, flags);
716 | ||
717 | tail = info->tail; | |
718 | pos = tail + AIO_EVENTS_OFFSET; | |
719 | ||
720 | if (++tail >= info->nr) | |
721 | tail = 0; | |
722 | ||
723 | ev_page = kmap_atomic(info->ring_pages[pos / AIO_EVENTS_PER_PAGE]); | |
724 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; | |
725 | ||
726 | event->obj = (u64)(unsigned long)iocb->ki_obj.user; | |
727 | event->data = iocb->ki_user_data; | |
728 | event->res = res; | |
729 | event->res2 = res2; | |
730 | ||
731 | kunmap_atomic(ev_page); | |
732 | flush_dcache_page(info->ring_pages[pos / AIO_EVENTS_PER_PAGE]); | |
733 | ||
734 | pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", | |
735 | ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, | |
736 | res, res2); | |
737 | ||
738 | /* after flagging the request as done, we | |
739 | * must never even look at it again | |
740 | */ | |
741 | smp_wmb(); /* make event visible before updating tail */ | |
742 | ||
743 | info->tail = tail; | |
744 | ||
745 | ring = kmap_atomic(info->ring_pages[0]); | |
746 | ring->tail = tail; | |
747 | kunmap_atomic(ring); | |
748 | flush_dcache_page(info->ring_pages[0]); | |
749 | ||
750 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | |
751 | ||
752 | pr_debug("added to ring %p at [%u]\n", iocb, tail); | |
753 | ||
754 | /* | |
755 | * Check if the user asked us to deliver the result through an | |
756 | * eventfd. The eventfd_signal() function is safe to be called | |
757 | * from IRQ context. | |
758 | */ | |
759 | if (iocb->ki_eventfd != NULL) | |
760 | eventfd_signal(iocb->ki_eventfd, 1); | |
761 | ||
762 | put_rq: | |
763 | /* everything turned out well, dispose of the aiocb. */ | |
764 | aio_put_req(iocb); | |
765 | ||
766 | /* | |
767 | * We have to order our ring_info tail store above and test | |
768 | * of the wait list below outside the wait lock. This is | |
769 | * like in wake_up_bit() where clearing a bit has to be | |
770 | * ordered with the unlocked test. | |
771 | */ | |
772 | smp_mb(); | |
773 | ||
774 | if (waitqueue_active(&ctx->wait)) | |
775 | wake_up(&ctx->wait); | |
776 | ||
777 | rcu_read_unlock(); | |
778 | } | |
779 | EXPORT_SYMBOL(aio_complete); | |
780 | ||
781 | /* aio_read_events | |
782 | * Pull an event off of the ioctx's event ring. Returns the number of | |
783 | * events fetched | |
784 | */ | |
785 | static long aio_read_events_ring(struct kioctx *ctx, | |
786 | struct io_event __user *event, long nr) | |
787 | { | |
788 | struct aio_ring_info *info = &ctx->ring_info; | |
789 | struct aio_ring *ring; | |
790 | unsigned head, pos; | |
791 | long ret = 0; | |
792 | int copy_ret; | |
793 | ||
794 | mutex_lock(&info->ring_lock); | |
795 | ||
796 | ring = kmap_atomic(info->ring_pages[0]); | |
797 | head = ring->head; | |
798 | kunmap_atomic(ring); | |
799 | ||
800 | pr_debug("h%u t%u m%u\n", head, info->tail, info->nr); | |
801 | ||
802 | if (head == info->tail) | |
803 | goto out; | |
804 | ||
805 | while (ret < nr) { | |
806 | long avail; | |
807 | struct io_event *ev; | |
808 | struct page *page; | |
809 | ||
810 | avail = (head <= info->tail ? info->tail : info->nr) - head; | |
811 | if (head == info->tail) | |
812 | break; | |
813 | ||
814 | avail = min(avail, nr - ret); | |
815 | avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - | |
816 | ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE)); | |
817 | ||
818 | pos = head + AIO_EVENTS_OFFSET; | |
819 | page = info->ring_pages[pos / AIO_EVENTS_PER_PAGE]; | |
820 | pos %= AIO_EVENTS_PER_PAGE; | |
821 | ||
822 | ev = kmap(page); | |
823 | copy_ret = copy_to_user(event + ret, ev + pos, | |
824 | sizeof(*ev) * avail); | |
825 | kunmap(page); | |
826 | ||
827 | if (unlikely(copy_ret)) { | |
828 | ret = -EFAULT; | |
829 | goto out; | |
830 | } | |
831 | ||
832 | ret += avail; | |
833 | head += avail; | |
834 | head %= info->nr; | |
835 | } | |
836 | ||
837 | ring = kmap_atomic(info->ring_pages[0]); | |
838 | ring->head = head; | |
839 | kunmap_atomic(ring); | |
840 | flush_dcache_page(info->ring_pages[0]); | |
841 | ||
842 | pr_debug("%li h%u t%u\n", ret, head, info->tail); | |
843 | ||
844 | atomic_sub(ret, &ctx->reqs_active); | |
845 | out: | |
846 | mutex_unlock(&info->ring_lock); | |
847 | ||
848 | return ret; | |
849 | } | |
850 | ||
851 | static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, | |
852 | struct io_event __user *event, long *i) | |
853 | { | |
854 | long ret = aio_read_events_ring(ctx, event + *i, nr - *i); | |
855 | ||
856 | if (ret > 0) | |
857 | *i += ret; | |
858 | ||
859 | if (unlikely(atomic_read(&ctx->dead))) | |
860 | ret = -EINVAL; | |
861 | ||
862 | if (!*i) | |
863 | *i = ret; | |
864 | ||
865 | return ret < 0 || *i >= min_nr; | |
866 | } | |
867 | ||
868 | static long read_events(struct kioctx *ctx, long min_nr, long nr, | |
869 | struct io_event __user *event, | |
870 | struct timespec __user *timeout) | |
871 | { | |
872 | ktime_t until = { .tv64 = KTIME_MAX }; | |
873 | long ret = 0; | |
874 | ||
875 | if (timeout) { | |
876 | struct timespec ts; | |
877 | ||
878 | if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) | |
879 | return -EFAULT; | |
880 | ||
881 | until = timespec_to_ktime(ts); | |
882 | } | |
883 | ||
884 | /* | |
885 | * Note that aio_read_events() is being called as the conditional - i.e. | |
886 | * we're calling it after prepare_to_wait() has set task state to | |
887 | * TASK_INTERRUPTIBLE. | |
888 | * | |
889 | * But aio_read_events() can block, and if it blocks it's going to flip | |
890 | * the task state back to TASK_RUNNING. | |
891 | * | |
892 | * This should be ok, provided it doesn't flip the state back to | |
893 | * TASK_RUNNING and return 0 too much - that causes us to spin. That | |
894 | * will only happen if the mutex_lock() call blocks, and we then find | |
895 | * the ringbuffer empty. So in practice we should be ok, but it's | |
896 | * something to be aware of when touching this code. | |
897 | */ | |
898 | wait_event_interruptible_hrtimeout(ctx->wait, | |
899 | aio_read_events(ctx, min_nr, nr, event, &ret), until); | |
900 | ||
901 | if (!ret && signal_pending(current)) | |
902 | ret = -EINTR; | |
903 | ||
904 | return ret; | |
905 | } | |
906 | ||
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
        struct kioctx *ioctx = NULL;
        unsigned long ctx;
        long ret;

        ret = get_user(ctx, ctxp);
        if (unlikely(ret))
                goto out;

        ret = -EINVAL;
        if (unlikely(ctx || nr_events == 0)) {
                pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
                         ctx, nr_events);
                goto out;
        }

        ioctx = ioctx_alloc(nr_events);
        ret = PTR_ERR(ioctx);
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
                if (ret)
                        kill_ioctx(ioctx);
                put_ioctx(ioctx);
        }

out:
        return ret;
}

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
                kill_ioctx(ioctx);
                put_ioctx(ioctx);
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
        return -EINVAL;
}
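
/*
 * Illustrative sketch, not part of this file's build: a userspace check of the
 * error behaviour documented above.  Raw syscalls are used because glibc has
 * no io_setup() wrapper; the variable names and the nr_events value of 32 are
 * just example choices.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	int main(void)
 *	{
 *		aio_context_t ctx;
 *
 *		ctx = 1;	// *ctxp must be zero-initialized
 *		if (syscall(__NR_io_setup, 32, &ctx) < 0 && errno == EINVAL)
 *			printf("non-zero *ctxp -> EINVAL, as documented\n");
 *
 *		ctx = 0;	// nr_events == 0 is also rejected
 *		if (syscall(__NR_io_setup, 0, &ctx) < 0 && errno == EINVAL)
 *			printf("nr_events == 0 -> EINVAL, as documented\n");
 *
 *		ctx = 0;	// a sane request should succeed...
 *		if (syscall(__NR_io_setup, 32, &ctx) == 0)
 *			printf("io_setup(32)   -> ctx %#lx\n",
 *			       (unsigned long)ctx);
 *
 *		// ...and io_destroy() tears the context down again
 *		return (int)syscall(__NR_io_destroy, ctx);
 *	}
 */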
967 | ||
968 | static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret) | |
969 | { | |
970 | struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg]; | |
971 | ||
972 | BUG_ON(ret <= 0); | |
973 | ||
974 | while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) { | |
975 | ssize_t this = min((ssize_t)iov->iov_len, ret); | |
976 | iov->iov_base += this; | |
977 | iov->iov_len -= this; | |
978 | iocb->ki_left -= this; | |
979 | ret -= this; | |
980 | if (iov->iov_len == 0) { | |
981 | iocb->ki_cur_seg++; | |
982 | iov++; | |
983 | } | |
984 | } | |
985 | ||
986 | /* the caller should not have done more io than what fit in | |
987 | * the remaining iovecs */ | |
988 | BUG_ON(ret > 0 && iocb->ki_left == 0); | |
989 | } | |
990 | ||
991 | static ssize_t aio_rw_vect_retry(struct kiocb *iocb) | |
992 | { | |
993 | struct file *file = iocb->ki_filp; | |
994 | struct address_space *mapping = file->f_mapping; | |
995 | struct inode *inode = mapping->host; | |
996 | ssize_t (*rw_op)(struct kiocb *, const struct iovec *, | |
997 | unsigned long, loff_t); | |
998 | ssize_t ret = 0; | |
999 | unsigned short opcode; | |
1000 | ||
1001 | if ((iocb->ki_opcode == IOCB_CMD_PREADV) || | |
1002 | (iocb->ki_opcode == IOCB_CMD_PREAD)) { | |
1003 | rw_op = file->f_op->aio_read; | |
1004 | opcode = IOCB_CMD_PREADV; | |
1005 | } else { | |
1006 | rw_op = file->f_op->aio_write; | |
1007 | opcode = IOCB_CMD_PWRITEV; | |
1008 | } | |
1009 | ||
1010 | /* This matches the pread()/pwrite() logic */ | |
1011 | if (iocb->ki_pos < 0) | |
1012 | return -EINVAL; | |
1013 | ||
1014 | if (opcode == IOCB_CMD_PWRITEV) | |
1015 | file_start_write(file); | |
1016 | do { | |
1017 | ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg], | |
1018 | iocb->ki_nr_segs - iocb->ki_cur_seg, | |
1019 | iocb->ki_pos); | |
1020 | if (ret > 0) | |
1021 | aio_advance_iovec(iocb, ret); | |
1022 | ||
        /* Retry all partial writes.  Retry partial reads as long as it's a
         * regular file. */
1025 | } while (ret > 0 && iocb->ki_left > 0 && | |
1026 | (opcode == IOCB_CMD_PWRITEV || | |
1027 | (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)))); | |
1028 | if (opcode == IOCB_CMD_PWRITEV) | |
1029 | file_end_write(file); | |
1030 | ||
1031 | /* This means we must have transferred all that we could */ | |
1032 | /* No need to retry anymore */ | |
1033 | if ((ret == 0) || (iocb->ki_left == 0)) | |
1034 | ret = iocb->ki_nbytes - iocb->ki_left; | |
1035 | ||
1036 | /* If we managed to write some out we return that, rather than | |
1037 | * the eventual error. */ | |
1038 | if (opcode == IOCB_CMD_PWRITEV | |
1039 | && ret < 0 && ret != -EIOCBQUEUED | |
1040 | && iocb->ki_nbytes - iocb->ki_left) | |
1041 | ret = iocb->ki_nbytes - iocb->ki_left; | |
1042 | ||
1043 | return ret; | |
1044 | } | |
1045 | ||
1046 | static ssize_t aio_fdsync(struct kiocb *iocb) | |
1047 | { | |
1048 | struct file *file = iocb->ki_filp; | |
1049 | ssize_t ret = -EINVAL; | |
1050 | ||
1051 | if (file->f_op->aio_fsync) | |
1052 | ret = file->f_op->aio_fsync(iocb, 1); | |
1053 | return ret; | |
1054 | } | |
1055 | ||
1056 | static ssize_t aio_fsync(struct kiocb *iocb) | |
1057 | { | |
1058 | struct file *file = iocb->ki_filp; | |
1059 | ssize_t ret = -EINVAL; | |
1060 | ||
1061 | if (file->f_op->aio_fsync) | |
1062 | ret = file->f_op->aio_fsync(iocb, 0); | |
1063 | return ret; | |
1064 | } | |
1065 | ||
1066 | static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat) | |
1067 | { | |
1068 | ssize_t ret; | |
1069 | ||
1070 | #ifdef CONFIG_COMPAT | |
1071 | if (compat) | |
1072 | ret = compat_rw_copy_check_uvector(type, | |
1073 | (struct compat_iovec __user *)kiocb->ki_buf, | |
1074 | kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, | |
1075 | &kiocb->ki_iovec); | |
1076 | else | |
1077 | #endif | |
1078 | ret = rw_copy_check_uvector(type, | |
1079 | (struct iovec __user *)kiocb->ki_buf, | |
1080 | kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, | |
1081 | &kiocb->ki_iovec); | |
1082 | if (ret < 0) | |
1083 | goto out; | |
1084 | ||
1085 | ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret); | |
1086 | if (ret < 0) | |
1087 | goto out; | |
1088 | ||
1089 | kiocb->ki_nr_segs = kiocb->ki_nbytes; | |
1090 | kiocb->ki_cur_seg = 0; | |
1091 | /* ki_nbytes/left now reflect bytes instead of segs */ | |
1092 | kiocb->ki_nbytes = ret; | |
1093 | kiocb->ki_left = ret; | |
1094 | ||
1095 | ret = 0; | |
1096 | out: | |
1097 | return ret; | |
1098 | } | |
1099 | ||
1100 | static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb) | |
1101 | { | |
1102 | int bytes; | |
1103 | ||
1104 | bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left); | |
1105 | if (bytes < 0) | |
1106 | return bytes; | |
1107 | ||
1108 | kiocb->ki_iovec = &kiocb->ki_inline_vec; | |
1109 | kiocb->ki_iovec->iov_base = kiocb->ki_buf; | |
1110 | kiocb->ki_iovec->iov_len = bytes; | |
1111 | kiocb->ki_nr_segs = 1; | |
1112 | kiocb->ki_cur_seg = 0; | |
1113 | return 0; | |
1114 | } | |
1115 | ||
1116 | /* | |
1117 | * aio_setup_iocb: | |
1118 | * Performs the initial checks and aio retry method | |
1119 | * setup for the kiocb at the time of io submission. | |
1120 | */ | |
1121 | static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) | |
1122 | { | |
1123 | struct file *file = kiocb->ki_filp; | |
1124 | ssize_t ret = 0; | |
1125 | ||
1126 | switch (kiocb->ki_opcode) { | |
1127 | case IOCB_CMD_PREAD: | |
1128 | ret = -EBADF; | |
1129 | if (unlikely(!(file->f_mode & FMODE_READ))) | |
1130 | break; | |
1131 | ret = -EFAULT; | |
1132 | if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf, | |
1133 | kiocb->ki_left))) | |
1134 | break; | |
1135 | ret = aio_setup_single_vector(READ, file, kiocb); | |
1136 | if (ret) | |
1137 | break; | |
1138 | ret = -EINVAL; | |
1139 | if (file->f_op->aio_read) | |
1140 | kiocb->ki_retry = aio_rw_vect_retry; | |
1141 | break; | |
1142 | case IOCB_CMD_PWRITE: | |
1143 | ret = -EBADF; | |
1144 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | |
1145 | break; | |
1146 | ret = -EFAULT; | |
1147 | if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf, | |
1148 | kiocb->ki_left))) | |
1149 | break; | |
1150 | ret = aio_setup_single_vector(WRITE, file, kiocb); | |
1151 | if (ret) | |
1152 | break; | |
1153 | ret = -EINVAL; | |
1154 | if (file->f_op->aio_write) | |
1155 | kiocb->ki_retry = aio_rw_vect_retry; | |
1156 | break; | |
1157 | case IOCB_CMD_PREADV: | |
1158 | ret = -EBADF; | |
1159 | if (unlikely(!(file->f_mode & FMODE_READ))) | |
1160 | break; | |
1161 | ret = aio_setup_vectored_rw(READ, kiocb, compat); | |
1162 | if (ret) | |
1163 | break; | |
1164 | ret = -EINVAL; | |
1165 | if (file->f_op->aio_read) | |
1166 | kiocb->ki_retry = aio_rw_vect_retry; | |
1167 | break; | |
1168 | case IOCB_CMD_PWRITEV: | |
1169 | ret = -EBADF; | |
1170 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | |
1171 | break; | |
1172 | ret = aio_setup_vectored_rw(WRITE, kiocb, compat); | |
1173 | if (ret) | |
1174 | break; | |
1175 | ret = -EINVAL; | |
1176 | if (file->f_op->aio_write) | |
1177 | kiocb->ki_retry = aio_rw_vect_retry; | |
1178 | break; | |
1179 | case IOCB_CMD_FDSYNC: | |
1180 | ret = -EINVAL; | |
1181 | if (file->f_op->aio_fsync) | |
1182 | kiocb->ki_retry = aio_fdsync; | |
1183 | break; | |
1184 | case IOCB_CMD_FSYNC: | |
1185 | ret = -EINVAL; | |
1186 | if (file->f_op->aio_fsync) | |
1187 | kiocb->ki_retry = aio_fsync; | |
1188 | break; | |
1189 | default: | |
1190 | pr_debug("EINVAL: no operation provided\n"); | |
1191 | ret = -EINVAL; | |
1192 | } | |
1193 | ||
1194 | if (!kiocb->ki_retry) | |
1195 | return ret; | |
1196 | ||
1197 | return 0; | |
1198 | } | |
1199 | ||
1200 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |
1201 | struct iocb *iocb, struct kiocb_batch *batch, | |
1202 | bool compat) | |
1203 | { | |
1204 | struct kiocb *req; | |
1205 | ssize_t ret; | |
1206 | ||
1207 | /* enforce forwards compatibility on users */ | |
1208 | if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { | |
1209 | pr_debug("EINVAL: reserve field set\n"); | |
1210 | return -EINVAL; | |
1211 | } | |
1212 | ||
1213 | /* prevent overflows */ | |
1214 | if (unlikely( | |
1215 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | |
1216 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | |
1217 | ((ssize_t)iocb->aio_nbytes < 0) | |
1218 | )) { | |
1219 | pr_debug("EINVAL: io_submit: overflow check\n"); | |
1220 | return -EINVAL; | |
1221 | } | |
1222 | ||
1223 | req = aio_get_req(ctx, batch); /* returns with 2 references to req */ | |
1224 | if (unlikely(!req)) | |
1225 | return -EAGAIN; | |
1226 | ||
1227 | req->ki_filp = fget(iocb->aio_fildes); | |
1228 | if (unlikely(!req->ki_filp)) { | |
1229 | ret = -EBADF; | |
1230 | goto out_put_req; | |
1231 | } | |
1232 | ||
1233 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { | |
1234 | /* | |
1235 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | |
1236 | * instance of the file* now. The file descriptor must be | |
1237 | * an eventfd() fd, and will be signaled for each completed | |
1238 | * event using the eventfd_signal() function. | |
1239 | */ | |
1240 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); | |
1241 | if (IS_ERR(req->ki_eventfd)) { | |
1242 | ret = PTR_ERR(req->ki_eventfd); | |
1243 | req->ki_eventfd = NULL; | |
1244 | goto out_put_req; | |
1245 | } | |
1246 | } | |
1247 | ||
1248 | ret = put_user(req->ki_key, &user_iocb->aio_key); | |
1249 | if (unlikely(ret)) { | |
1250 | pr_debug("EFAULT: aio_key\n"); | |
1251 | goto out_put_req; | |
1252 | } | |
1253 | ||
1254 | req->ki_obj.user = user_iocb; | |
1255 | req->ki_user_data = iocb->aio_data; | |
1256 | req->ki_pos = iocb->aio_offset; | |
1257 | ||
1258 | req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; | |
1259 | req->ki_left = req->ki_nbytes = iocb->aio_nbytes; | |
1260 | req->ki_opcode = iocb->aio_lio_opcode; | |
1261 | ||
1262 | ret = aio_setup_iocb(req, compat); | |
1263 | if (ret) | |
1264 | goto out_put_req; | |
1265 | ||
1266 | ret = req->ki_retry(req); | |
1267 | if (ret != -EIOCBQUEUED) { | |
1268 | /* | |
1269 | * There's no easy way to restart the syscall since other AIO's | |
1270 | * may be already running. Just fail this IO with EINTR. | |
1271 | */ | |
1272 | if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || | |
1273 | ret == -ERESTARTNOHAND || | |
1274 | ret == -ERESTART_RESTARTBLOCK)) | |
1275 | ret = -EINTR; | |
1276 | aio_complete(req, ret, 0); | |
1277 | } | |
1278 | ||
1279 | aio_put_req(req); /* drop extra ref to req */ | |
1280 | return 0; | |
1281 | ||
1282 | out_put_req: | |
1283 | atomic_dec(&ctx->reqs_active); | |
1284 | aio_put_req(req); /* drop extra ref to req */ | |
1285 | aio_put_req(req); /* drop i/o ref to req */ | |
1286 | return ret; | |
1287 | } | |
1288 | ||
1289 | long do_io_submit(aio_context_t ctx_id, long nr, | |
1290 | struct iocb __user *__user *iocbpp, bool compat) | |
1291 | { | |
1292 | struct kioctx *ctx; | |
1293 | long ret = 0; | |
1294 | int i = 0; | |
1295 | struct blk_plug plug; | |
1296 | struct kiocb_batch batch; | |
1297 | ||
1298 | if (unlikely(nr < 0)) | |
1299 | return -EINVAL; | |
1300 | ||
1301 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) | |
1302 | nr = LONG_MAX/sizeof(*iocbpp); | |
1303 | ||
1304 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) | |
1305 | return -EFAULT; | |
1306 | ||
1307 | ctx = lookup_ioctx(ctx_id); | |
1308 | if (unlikely(!ctx)) { | |
1309 | pr_debug("EINVAL: invalid context id\n"); | |
1310 | return -EINVAL; | |
1311 | } | |
1312 | ||
1313 | kiocb_batch_init(&batch, nr); | |
1314 | ||
1315 | blk_start_plug(&plug); | |
1316 | ||
1317 | /* | |
1318 | * AKPM: should this return a partial result if some of the IOs were | |
1319 | * successfully submitted? | |
1320 | */ | |
1321 | for (i=0; i<nr; i++) { | |
1322 | struct iocb __user *user_iocb; | |
1323 | struct iocb tmp; | |
1324 | ||
1325 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { | |
1326 | ret = -EFAULT; | |
1327 | break; | |
1328 | } | |
1329 | ||
1330 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { | |
1331 | ret = -EFAULT; | |
1332 | break; | |
1333 | } | |
1334 | ||
1335 | ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat); | |
1336 | if (ret) | |
1337 | break; | |
1338 | } | |
1339 | blk_finish_plug(&plug); | |
1340 | ||
1341 | kiocb_batch_free(ctx, &batch); | |
1342 | put_ioctx(ctx); | |
1343 | return i ? i : ret; | |
1344 | } | |
1345 | ||
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
                struct iocb __user * __user *, iocbpp)
{
        return do_io_submit(ctx_id, nr, iocbpp, 0);
}
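
/*
 * Illustrative sketch, not part of this file's build: submitting one read
 * with the raw syscall.  Everything except the <linux/aio_abi.h> types, the
 * IOCB_CMD_PREAD opcode and the syscall number is an example of our own
 * choosing (function name, aio_data value, parameters).
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	int submit_one_read(aio_context_t ctx, int fd, void *buf, size_t len)
 *	{
 *		struct iocb cb;
 *		struct iocb *cbs[1] = { &cb };
 *
 *		memset(&cb, 0, sizeof(cb));	// keeps aio_reserved* zero
 *		cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *		cb.aio_fildes     = fd;
 *		cb.aio_buf        = (__u64)(unsigned long)buf;
 *		cb.aio_nbytes     = len;
 *		cb.aio_offset     = 0;
 *		cb.aio_data       = 0x1234;	// echoed back in io_event.data
 *
 *		// optional: cb.aio_flags = IOCB_FLAG_RESFD; cb.aio_resfd = efd;
 *		// makes completion also signal an eventfd, as io_submit_one()
 *		// above arranges.
 *
 *		return syscall(__NR_io_submit, ctx, 1, cbs);	// 1 on success
 *	}
 */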
1363 | ||
1364 | /* lookup_kiocb | |
1365 | * Finds a given iocb for cancellation. | |
1366 | */ | |
1367 | static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, | |
1368 | u32 key) | |
1369 | { | |
1370 | struct list_head *pos; | |
1371 | ||
1372 | assert_spin_locked(&ctx->ctx_lock); | |
1373 | ||
1374 | /* TODO: use a hash or array, this sucks. */ | |
1375 | list_for_each(pos, &ctx->active_reqs) { | |
1376 | struct kiocb *kiocb = list_kiocb(pos); | |
1377 | if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key) | |
1378 | return kiocb; | |
1379 | } | |
1380 | return NULL; | |
1381 | } | |
1382 | ||
1383 | /* sys_io_cancel: | |
1384 | * Attempts to cancel an iocb previously passed to io_submit. If | |
1385 | * the operation is successfully cancelled, the resulting event is | |
1386 | * copied into the memory pointed to by result without being placed | |
1387 | * into the completion queue and 0 is returned. May fail with | |
1388 | * -EFAULT if any of the data structures pointed to are invalid. | |
1389 | * May fail with -EINVAL if aio_context specified by ctx_id is | |
1390 | * invalid. May fail with -EAGAIN if the iocb specified was not | |
1391 | * cancelled. Will fail with -ENOSYS if not implemented. | |
1392 | */ | |
1393 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | |
1394 | struct io_event __user *, result) | |
1395 | { | |
1396 | struct io_event res; | |
1397 | struct kioctx *ctx; | |
1398 | struct kiocb *kiocb; | |
1399 | u32 key; | |
1400 | int ret; | |
1401 | ||
1402 | ret = get_user(key, &iocb->aio_key); | |
1403 | if (unlikely(ret)) | |
1404 | return -EFAULT; | |
1405 | ||
1406 | ctx = lookup_ioctx(ctx_id); | |
1407 | if (unlikely(!ctx)) | |
1408 | return -EINVAL; | |
1409 | ||
1410 | spin_lock_irq(&ctx->ctx_lock); | |
1411 | ||
1412 | kiocb = lookup_kiocb(ctx, iocb, key); | |
1413 | if (kiocb) | |
1414 | ret = kiocb_cancel(ctx, kiocb, &res); | |
1415 | else | |
1416 | ret = -EINVAL; | |
1417 | ||
1418 | spin_unlock_irq(&ctx->ctx_lock); | |
1419 | ||
1420 | if (!ret) { | |
1421 | /* Cancellation succeeded -- copy the result | |
1422 | * into the user's buffer. | |
1423 | */ | |
1424 | if (copy_to_user(result, &res, sizeof(res))) | |
1425 | ret = -EFAULT; | |
1426 | } | |
1427 | ||
1428 | put_ioctx(ctx); | |
1429 | ||
1430 | return ret; | |
1431 | } | |
1432 | ||
/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout.  Note that the timeout pointed to by
 *	timeout is relative and will be updated if not NULL and the
 *	operation blocks.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
                long, min_nr,
                long, nr,
                struct io_event __user *, events,
                struct timespec __user *, timeout)
{
        struct kioctx *ioctx = lookup_ioctx(ctx_id);
        long ret = -EINVAL;

        if (likely(ioctx)) {
                if (likely(min_nr <= nr && min_nr >= 0))
                        ret = read_events(ioctx, min_nr, nr, events, timeout);
                put_ioctx(ioctx);
        }
        return ret;
}
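
/*
 * Illustrative sketch, not part of this file's build: reaping completions for
 * the context used in the io_submit() example above.  The five-second timeout
 * and the batch size of 16 are arbitrary example values.
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	long reap(aio_context_t ctx)
 *	{
 *		struct io_event events[16];
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *		long i, n;
 *
 *		// wait for at least 1 and at most 16 events, or the timeout
 *		n = syscall(__NR_io_getevents, ctx, 1, 16, events, &ts);
 *		for (i = 0; i < n; i++)
 *			printf("iocb %#llx: res %lld, data %#llx\n",
 *			       (unsigned long long)events[i].obj,
 *			       (long long)events[i].res,
 *			       (unsigned long long)events[i].data);
 *		return n;	// >= 0: events read (may be 0 after timeout), -1 on error
 *	}
 */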