/*
 * fs/userfaultfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Some part derived from fs/eventfd.c (anon inode setup) and
 * mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

enum userfaultfd_state {
	UFFD_STATE_WAIT_API,
	UFFD_STATE_RUNNING,
};

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	struct seqcount refile_seq;
	/* pseudo fd refcounting */
	atomic_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* state machine */
	enum userfaultfd_state state;
	/* released */
	bool released;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The implicit smp_mb__before_spinlock in try_to_wake_up()
	 * renders uwq->waken visible to other CPUs before the task is
	 * woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret)
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the
		 * other CPUs, the waitqueue may disappear from under
		 * us, see the !list_empty_careful() in
		 * handle_userfault(). try_to_wake_up() has an
		 * implicit smp_mb__before_spinlock, and the
		 * wq->private is read before calling the extern
		 * function "wake_up_state" (which in turn calls
		 * try_to_wake_up). While the spin_lock;spin_unlock;
		 * wouldn't be enough, the smp_mb__before_spinlock is
		 * enough to avoid an explicit smp_mb() here.
		 */
		list_del_init(&wq->task_list);
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 *
 * The context must already have a non-zero refcount, otherwise this BUGs.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	if (!atomic_inc_not_zero(&ctx->refcount))
		BUG();
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the padding or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned int flags,
					    unsigned long reason)
{
	struct uffd_msg msg;
	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;
	msg.arg.pagefault.address = address;
	if (flags & FAULT_FLAG_WRITE)
		/*
		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
		 * was a read fault, otherwise if set it means it's
		 * a write fault.
		 */
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		/*
		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
		 * a missing fault, otherwise if set it means it's a
		 * write protect fault.
		 */
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	return msg;
}
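
/*
 * Editor's note (illustrative sketch, not kernel code): userspace that
 * reads a struct uffd_msg can decode the pagefault exactly as the two
 * comments above describe, e.g.:
 *
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP)
 *			handle_wp_fault(msg.arg.pagefault.address);
 *		else if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
 *			handle_missing_write(msg.arg.pagefault.address);
 *		else
 *			handle_missing_read(msg.arg.pagefault.address);
 *	}
 *
 * (handle_* are hypothetical userspace helpers.)
 */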

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	pte = huge_pte_offset(mm, address);
	if (!pte)
		goto out;

	ret = false;

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (huge_pte_none(*pte))
		ret = true;
	if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (!pmd_present(_pmd))
		goto out;

	ret = false;
	if (pmd_trans_huge(_pmd))
		goto out;

	/*
	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
	 * and use the standard pte_offset_map() instead of parsing _pmd.
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (pte_none(*pte))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_sem must be released before
 * returning it.
 */
int handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	int ret;
	bool must_wait, return_to_userland;
	long blocking_state;

	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	ret = VM_FAULT_SIGBUS;
	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));

	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_sem.
	 */
	if (unlikely(ACCESS_ONCE(ctx->released)))
		goto out;

	/*
	 * We don't do userfault handling for the final child pid update.
	 */
	if (current->flags & PF_EXITING)
		goto out;

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_sem */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
	uwq.ctx = ctx;
	uwq.waken = false;

	return_to_userland =
		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
			 TASK_KILLABLE;

	spin_lock(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock to happen before the list_add in
	 * __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vmf->vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf->address,
						       vmf->flags, reason);
	up_read(&mm->mmap_sem);

	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
		   (return_to_userland ? !signal_pending(current) :
		    !fatal_signal_pending(current)))) {
		wake_up_poll(&ctx->fd_wqh, POLLIN);
		schedule();
		ret |= VM_FAULT_MAJOR;

		/*
		 * False wakeups can originate even from rwsem before
		 * up_read() however userfaults will wait either for a
		 * targeted wakeup on the specific uwq waitqueue from
		 * wake_userfault() or for signals or for uffd
		 * release.
		 */
		while (!READ_ONCE(uwq.waken)) {
			/*
			 * This needs the full smp_store_mb()
			 * guarantee as the state write must be
			 * visible to other CPUs before reading
			 * uwq.waken from other CPUs.
			 */
			set_current_state(blocking_state);
			if (READ_ONCE(uwq.waken) ||
			    READ_ONCE(ctx->released) ||
			    (return_to_userland ? signal_pending(current) :
			     fatal_signal_pending(current)))
				break;
			schedule();
		}
	}

	__set_current_state(TASK_RUNNING);

	if (return_to_userland) {
		if (signal_pending(current) &&
		    !fatal_signal_pending(current)) {
			/*
			 * If we got a SIGSTOP or SIGCONT and this is
			 * a normal userland page fault, just let
			 * userland return so the signal will be
			 * handled and gdb debugging works. The page
			 * fault code immediately after we return from
			 * this function is going to release the
			 * mmap_sem and it's not depending on it
			 * (unlike gup would if we were not to return
			 * VM_FAULT_RETRY).
			 *
			 * If a fatal signal is pending we still take
			 * the streamlined VM_FAULT_RETRY failure path
			 * and there's no need to retake the mmap_sem
			 * in such case.
			 */
			down_read(&mm->mmap_sem);
			ret = 0;
		}
	}

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let either pointer point to self. So
	 * list_empty_careful won't risk to see both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.task_list)) {
		spin_lock(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.task_list);
		spin_unlock(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}

static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					     struct userfaultfd_wait_queue *ewq)
{
	int ret = 0;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);

	spin_lock(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (ACCESS_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			ret = -1;
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			break;
		}

		spin_unlock(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, POLLIN);
		schedule();

		spin_lock(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock(&ctx->event_wqh.lock);

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);
	return ret;
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		atomic_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->state = UFFD_STATE_RUNNING;
		ctx->features = octx->features;
		ctx->released = false;
		ctx->mm = vma->vm_mm;
		atomic_inc(&ctx->mm->mm_count);

		userfaultfd_ctx_get(octx);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	return userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	int ret = 0;
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		if (!ret)
			ret = dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}
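
/*
 * Editor's note (illustrative sketch, not kernel code): with
 * UFFD_FEATURE_EVENT_FORK enabled, the monitor that read()s a
 * UFFD_EVENT_FORK message receives a new userfaultfd tracking the
 * child in msg.arg.fork.ufd, installed by resolve_userfault_fork()
 * below:
 *
 *	if (msg.event == UFFD_EVENT_FORK) {
 *		int child_uffd = (int)msg.arg.fork.ufd;
 *		... poll/read child_uffd exactly like the parent's uffd ...
 *	}
 */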

void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void madvise_userfault_dontneed(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_MADVDONTNEED))
		return;

	userfaultfd_ctx_get(ctx);
	up_read(&mm->mmap_sem);

	*prev = NULL; /* We wait for ACK w/o the mmap semaphore */

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_MADVDONTNEED;
	ewq.msg.arg.madv_dn.start = start;
	ewq.msg.arg.madv_dn.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	down_read(&mm->mmap_sem);
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;

	ACCESS_ONCE(ctx->released) = true;

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_sem. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_sem for writing.
	 */
	down_write(&mm->mmap_sem);
	prev = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX);
		if (prev)
			vma = prev;
		else
			prev = vma;
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	up_write(&mm->mmap_sem);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
	spin_unlock(&ctx->fault_pending_wqh.lock);

	wake_up_poll(&ctx->fd_wqh, POLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_t *wq;
	struct userfaultfd_wait_queue *uwq;

	VM_BUG_ON(!spin_is_locked(&wqh->lock));

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->task_list, typeof(*wq), task_list);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	unsigned int ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	switch (ctx->state) {
	case UFFD_STATE_WAIT_API:
		return POLLERR;
	case UFFD_STATE_RUNNING:
		/*
		 * poll() never guarantees that read won't block.
		 * userfaults can be woken before they're read().
		 */
		if (unlikely(!(file->f_flags & O_NONBLOCK)))
			return POLLERR;
		/*
		 * lockless access to see if there are pending faults.
		 * __pollwait's last action is the add_wait_queue, but
		 * the spin_unlock would allow the waitqueue_active to
		 * pass above the actual list_add inside
		 * add_wait_queue's critical section. So use a full
		 * memory barrier to serialize the list_add write of
		 * add_wait_queue() with the waitqueue_active read
		 * below.
		 */
		ret = 0;
		smp_mb();
		if (waitqueue_active(&ctx->fault_pending_wqh))
			ret = POLLIN;
		else if (waitqueue_active(&ctx->event_wqh))
			ret = POLLIN;

		return ret;
	default:
		WARN_ON_ONCE(1);
		return POLLERR;
	}
}

static const struct file_operations userfaultfd_fops;

static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
				  struct userfaultfd_ctx *new,
				  struct uffd_msg *msg)
{
	int fd;
	struct file *file;
	unsigned int flags = new->flags & UFFD_SHARED_FCNTL_FLAGS;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, new,
				  O_RDWR | flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	fd_install(fd, file);
	msg->arg.reserved.reserved1 = 0;
	msg->arg.fork.ufd = fd;

	return 0;
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
				    struct uffd_msg *msg)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);
	struct userfaultfd_wait_queue *uwq;
	/*
	 * Handling fork events requires sleeping operations, so
	 * we drop the event_wqh lock, then do these ops, then
	 * lock it back and wake up the waiter. While the lock is
	 * dropped the ewq may go away so we keep track of it
	 * carefully.
	 */
	LIST_HEAD(fork_event);
	struct userfaultfd_ctx *fork_nctx = NULL;

	/* always take the fd_wqh lock before the fault_pending_wqh lock */
	spin_lock(&ctx->fd_wqh.lock);
	__add_wait_queue(&ctx->fd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&ctx->fault_pending_wqh.lock);
		uwq = find_userfault(ctx);
		if (uwq) {
			/*
			 * Use a seqcount to repeat the lockless check
			 * in wake_userfault() to avoid missing
			 * wakeups because during the refile both
			 * waitqueues could become empty if this is the
			 * only userfault.
			 */
			write_seqcount_begin(&ctx->refile_seq);

			/*
			 * The fault_pending_wqh.lock prevents the uwq
			 * from disappearing from under us.
			 *
			 * Refile this userfault from
			 * fault_pending_wqh to fault_wqh, it's not
			 * pending anymore after we read it.
			 *
			 * Use list_del() by hand (as
			 * userfaultfd_wake_function also uses
			 * list_del_init() by hand) to be sure nobody
			 * changes __remove_wait_queue() to use
			 * list_del_init() in turn breaking the
			 * !list_empty_careful() check in
			 * handle_userfault(). The uwq->wq.task_list
			 * must never be empty at any time during the
			 * refile, or the waitqueue could disappear
			 * from under us. The "wait_queue_head_t"
			 * parameter of __remove_wait_queue() is unused
			 * anyway.
			 */
			list_del(&uwq->wq.task_list);
			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);

			write_seqcount_end(&ctx->refile_seq);

			/* careful to always initialize msg if ret == 0 */
			*msg = uwq->msg;
			spin_unlock(&ctx->fault_pending_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->fault_pending_wqh.lock);

		spin_lock(&ctx->event_wqh.lock);
		uwq = find_userfault_evt(ctx);
		if (uwq) {
			*msg = uwq->msg;

			if (uwq->msg.event == UFFD_EVENT_FORK) {
				fork_nctx = (struct userfaultfd_ctx *)
					(unsigned long)
					uwq->msg.arg.reserved.reserved1;
				list_move(&uwq->wq.task_list, &fork_event);
				spin_unlock(&ctx->event_wqh.lock);
				ret = 0;
				break;
			}

			userfaultfd_event_complete(ctx, uwq);
			spin_unlock(&ctx->event_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->event_wqh.lock);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (no_wait) {
			ret = -EAGAIN;
			break;
		}
		spin_unlock(&ctx->fd_wqh.lock);
		schedule();
		spin_lock(&ctx->fd_wqh.lock);
	}
	__remove_wait_queue(&ctx->fd_wqh, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock(&ctx->fd_wqh.lock);

	if (!ret && msg->event == UFFD_EVENT_FORK) {
		ret = resolve_userfault_fork(ctx, fork_nctx, msg);

		if (!ret) {
			spin_lock(&ctx->event_wqh.lock);
			if (!list_empty(&fork_event)) {
				uwq = list_first_entry(&fork_event,
						       typeof(*uwq),
						       wq.task_list);
				list_del(&uwq->wq.task_list);
				__add_wait_queue(&ctx->event_wqh, &uwq->wq);
				userfaultfd_event_complete(ctx, uwq);
			}
			spin_unlock(&ctx->event_wqh.lock);
		}
	}

	return ret;
}

static ssize_t userfaultfd_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	ssize_t _ret, ret = 0;
	struct uffd_msg msg;
	int no_wait = file->f_flags & O_NONBLOCK;

	if (ctx->state == UFFD_STATE_WAIT_API)
		return -EINVAL;

	for (;;) {
		if (count < sizeof(msg))
			return ret ? ret : -EINVAL;
		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
		if (_ret < 0)
			return ret ? ret : _ret;
		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
			return ret ? ret : -EFAULT;
		ret += sizeof(msg);
		buf += sizeof(msg);
		count -= sizeof(msg);
		/*
		 * Allow reading more than one fault at a time but only
		 * block if waiting for the very first one.
		 */
		no_wait = O_NONBLOCK;
	}
}
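
/*
 * Editor's note (illustrative sketch, not kernel code): a typical
 * userspace fault-handling thread reads whole 32-byte uffd_msg
 * records from the file descriptor:
 *
 *	struct uffd_msg msg;
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *			continue;
 *		... dispatch on msg.event ...
 *	}
 *
 * Note userfaultfd_poll() above reports POLLERR unless the uffd was
 * created with O_NONBLOCK, so pollers must use a non-blocking uffd
 * and be prepared for read() failing with EAGAIN.
 */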

static void __wake_userfault(struct userfaultfd_ctx *ctx,
			     struct userfaultfd_wake_range *range)
{
	unsigned long start, end;

	start = range->start;
	end = range->start + range->len;

	spin_lock(&ctx->fault_pending_wqh.lock);
	/* wake all in the range and autoremove */
	if (waitqueue_active(&ctx->fault_pending_wqh))
		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
				     range);
	if (waitqueue_active(&ctx->fault_wqh))
		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
	spin_unlock(&ctx->fault_pending_wqh.lock);
}

static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
					   struct userfaultfd_wake_range *range)
{
	unsigned seq;
	bool need_wakeup;

	/*
	 * To be sure waitqueue_active() is not reordered by the CPU
	 * before the pagetable update, use an explicit SMP memory
	 * barrier here. PT lock release or up_read(mmap_sem) still
	 * have release semantics that can allow the
	 * waitqueue_active() to be reordered before the pte update.
	 */
	smp_mb();

	/*
	 * Use waitqueue_active because it's very frequent to
	 * change the address space atomically even if there are no
	 * userfaults yet. So we take the spinlock only when we're
	 * sure we have userfaults to wake.
	 */
	do {
		seq = read_seqcount_begin(&ctx->refile_seq);
		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
			waitqueue_active(&ctx->fault_wqh);
		cond_resched();
	} while (read_seqcount_retry(&ctx->refile_seq, seq));
	if (need_wakeup)
		__wake_userfault(ctx, range);
}

static __always_inline int validate_range(struct mm_struct *mm,
					  __u64 start, __u64 len)
{
	__u64 task_size = mm->task_size;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (len & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return -EINVAL;
	if (start < mmap_min_addr)
		return -EINVAL;
	if (start >= task_size)
		return -EINVAL;
	if (len > task_size - start)
		return -EINVAL;
	return 0;
}
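
/*
 * Editor's note (illustrative examples, assuming 4k pages; not in the
 * original source): validate_range() accepts start=0x1000, len=0x2000
 * but rejects an unaligned start such as 0x1001, a zero len, a start
 * below mmap_min_addr, and any range that extends past mm->task_size.
 */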

static inline bool vma_can_userfault(struct vm_area_struct *vma)
{
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma);
}

static int userfaultfd_register(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_register uffdio_register;
	struct uffdio_register __user *user_uffdio_register;
	unsigned long vm_flags, new_flags;
	bool found;
	bool huge_pages;
	unsigned long start, end, vma_end;

	user_uffdio_register = (struct uffdio_register __user *) arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_register, user_uffdio_register,
			   sizeof(uffdio_register)-sizeof(__u64)))
		goto out;

	ret = -EINVAL;
	if (!uffdio_register.mode)
		goto out;
	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
				     UFFDIO_REGISTER_MODE_WP))
		goto out;
	vm_flags = 0;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
		vm_flags |= VM_UFFD_MISSING;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
		vm_flags |= VM_UFFD_WP;
		/*
		 * FIXME: remove the below error constraint by
		 * implementing the wprotect tracking mode.
		 */
		ret = -EINVAL;
		goto out;
	}

	ret = validate_range(mm, uffdio_register.range.start,
			     uffdio_register.range.len);
	if (ret)
		goto out;

	start = uffdio_register.range.start;
	end = start + uffdio_register.range.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	down_write(&mm->mmap_sem);
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for incompatible vmas.
	 *
	 * FIXME: this shall be relaxed later so that it doesn't fail
	 * on tmpfs backed vmas (in addition to the current allowance
	 * on anonymous vmas).
	 */
	found = false;
	huge_pages = false;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));

		/* check for incompatible vmas */
		ret = -EINVAL;
		if (!vma_can_userfault(cur))
			goto out_unlock;
		/*
		 * If this vma contains the ending address and huge
		 * pages are in use, check the end alignment too.
		 */
		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
		    end > cur->vm_start) {
			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);

			ret = -EINVAL;

			if (end & (vma_hpagesize - 1))
				goto out_unlock;
		}

		/*
		 * Check that this vma isn't already owned by a
		 * different userfaultfd. We can't allow more than one
		 * userfaultfd to own a single vma simultaneously or we
		 * wouldn't know which one to deliver the userfaults to.
		 */
		ret = -EBUSY;
		if (cur->vm_userfaultfd_ctx.ctx &&
		    cur->vm_userfaultfd_ctx.ctx != ctx)
			goto out_unlock;

		/*
		 * Note vmas containing huge pages
		 */
		if (is_vm_hugetlb_page(cur))
			huge_pages = true;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma));
		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
		       vma->vm_userfaultfd_ctx.ctx != ctx);

		/*
		 * Nothing to do: this vma is already registered into this
		 * userfaultfd and with the right tracking mode too.
		 */
		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
		    (vma->vm_flags & vm_flags) == vm_flags)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 ((struct vm_userfaultfd_ctx){ ctx }));
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx.ctx = ctx;

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	up_write(&mm->mmap_sem);
	mmput(mm);
	if (!ret) {
		/*
		 * Now that we scanned all vmas we can already tell
		 * userland which ioctl methods are guaranteed to
		 * succeed on this range.
		 */
		if (put_user(huge_pages ? UFFD_API_RANGE_IOCTLS_HPAGE :
			     UFFD_API_RANGE_IOCTLS,
			     &user_uffdio_register->ioctls))
			ret = -EFAULT;
	}
out:
	return ret;
}

static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
				  unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_range uffdio_unregister;
	unsigned long new_flags;
	bool found;
	unsigned long start, end, vma_end;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
		goto out;

	ret = validate_range(mm, uffdio_unregister.start,
			     uffdio_unregister.len);
	if (ret)
		goto out;

	start = uffdio_unregister.start;
	end = start + uffdio_unregister.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	down_write(&mm->mmap_sem);
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for incompatible vmas.
	 *
	 * FIXME: this shall be relaxed later so that it doesn't fail
	 * on tmpfs backed vmas (in addition to the current allowance
	 * on anonymous vmas).
	 */
	found = false;
	ret = -EINVAL;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));

		/*
		 * Checking for incompatible vmas is not strictly
		 * required here, as incompatible vmas cannot have a
		 * userfaultfd_ctx registered on them, but it provides
		 * for stricter behavior to notice unregistration
		 * errors.
		 */
		if (!vma_can_userfault(cur))
			goto out_unlock;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma));

		/*
		 * Nothing to do: this vma is not registered into any
		 * userfaultfd, so there is nothing to unregister.
		 */
		if (!vma->vm_userfaultfd_ctx.ctx)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		if (userfaultfd_missing(vma)) {
			/*
			 * Wake any concurrent pending userfault while
			 * we unregister, so they will not hang
			 * permanently and it avoids userland having to
			 * call UFFDIO_WAKE explicitly.
			 */
			struct userfaultfd_wake_range range;
			range.start = start;
			range.len = vma_end - start;
			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
		}

		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX);
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	return ret;
}

/*
 * userfaultfd_wake may be used in combination with the
 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
 */
static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
			    unsigned long arg)
{
	int ret;
	struct uffdio_range uffdio_wake;
	struct userfaultfd_wake_range range;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
	if (ret)
		goto out;

	range.start = uffdio_wake.start;
	range.len = uffdio_wake.len;

	/*
	 * len == 0 means wake all and we don't want to wake all here,
	 * so check it again to be sure.
	 */
	VM_BUG_ON(!range.len);

	wake_userfault(ctx, &range);
	ret = 0;

out:
	return ret;
}

static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
			    unsigned long arg)
{
	__s64 ret;
	struct uffdio_copy uffdio_copy;
	struct uffdio_copy __user *user_uffdio_copy;
	struct userfaultfd_wake_range range;

	user_uffdio_copy = (struct uffdio_copy __user *) arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
			   /* don't copy "copy" last field */
			   sizeof(uffdio_copy)-sizeof(__s64)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
	if (ret)
		goto out;
	/*
	 * double check for wraparound just in case. copy_from_user()
	 * will later check uffdio_copy.src + uffdio_copy.len to fit
	 * in the userland range.
	 */
	ret = -EINVAL;
	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
		goto out;
	if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
		goto out;
	if (mmget_not_zero(ctx->mm)) {
		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
				   uffdio_copy.len);
		mmput(ctx->mm);
	}
	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
		return -EFAULT;
	if (ret < 0)
		goto out;
	BUG_ON(!ret);
	/* len == 0 would wake all */
	range.len = ret;
	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
		range.start = uffdio_copy.dst;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
out:
	return ret;
}
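
/*
 * Editor's note (illustrative sketch, not kernel code): userspace
 * resolves a missing-page fault by filling the page with UFFDIO_COPY:
 *
 *	struct uffdio_copy copy = {
 *		.dst = msg.arg.pagefault.address & ~(page_size - 1),
 *		.src = (unsigned long)src_buf,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * copy.copy is written back by the put_user() above with the byte
 * count copied or a negative error. page_size and src_buf are
 * hypothetical names; dst must be page aligned per validate_range().
 */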

static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	__s64 ret;
	struct uffdio_zeropage uffdio_zeropage;
	struct uffdio_zeropage __user *user_uffdio_zeropage;
	struct userfaultfd_wake_range range;

	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
			   /* don't copy "zeropage" last field */
			   sizeof(uffdio_zeropage)-sizeof(__s64)))
		goto out;

	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
			     uffdio_zeropage.range.len);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
		goto out;

	if (mmget_not_zero(ctx->mm)) {
		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
				     uffdio_zeropage.range.len);
		mmput(ctx->mm);
	}
	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
		return -EFAULT;
	if (ret < 0)
		goto out;
	/* len == 0 would wake all */
	BUG_ON(!ret);
	range.len = ret;
	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
		range.start = uffdio_zeropage.range.start;
		wake_userfault(ctx, &range);
	}
	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
out:
	return ret;
}

static inline unsigned int uffd_ctx_features(__u64 user_features)
{
	/*
	 * For the current set of features the bits just coincide
	 */
	return (unsigned int)user_features;
}

/*
 * userland asks for a certain API version and we return which bits
 * and ioctl commands are implemented in this kernel for such API
 * version or -EINVAL if unknown.
 */
static int userfaultfd_api(struct userfaultfd_ctx *ctx,
			   unsigned long arg)
{
	struct uffdio_api uffdio_api;
	void __user *buf = (void __user *)arg;
	int ret;
	__u64 features;

	ret = -EINVAL;
	if (ctx->state != UFFD_STATE_WAIT_API)
		goto out;
	ret = -EFAULT;
	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
		goto out;
	features = uffdio_api.features;
	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
		memset(&uffdio_api, 0, sizeof(uffdio_api));
		if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
			goto out;
		ret = -EINVAL;
		goto out;
	}
	/* report all available features and ioctls to userland */
	uffdio_api.features = UFFD_API_FEATURES;
	uffdio_api.ioctls = UFFD_API_IOCTLS;
	ret = -EFAULT;
	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
		goto out;
	ctx->state = UFFD_STATE_RUNNING;
	/* only enable the requested features for this uffd context */
	ctx->features = uffd_ctx_features(features);
	ret = 0;
out:
	return ret;
}

static long userfaultfd_ioctl(struct file *file, unsigned cmd,
			      unsigned long arg)
{
	int ret = -EINVAL;
	struct userfaultfd_ctx *ctx = file->private_data;

	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
		return -EINVAL;

	switch(cmd) {
	case UFFDIO_API:
		ret = userfaultfd_api(ctx, arg);
		break;
	case UFFDIO_REGISTER:
		ret = userfaultfd_register(ctx, arg);
		break;
	case UFFDIO_UNREGISTER:
		ret = userfaultfd_unregister(ctx, arg);
		break;
	case UFFDIO_WAKE:
		ret = userfaultfd_wake(ctx, arg);
		break;
	case UFFDIO_COPY:
		ret = userfaultfd_copy(ctx, arg);
		break;
	case UFFDIO_ZEROPAGE:
		ret = userfaultfd_zeropage(ctx, arg);
		break;
	}
	return ret;
}

#ifdef CONFIG_PROC_FS
static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct userfaultfd_ctx *ctx = f->private_data;
	wait_queue_t *wq;
	struct userfaultfd_wait_queue *uwq;
	unsigned long pending = 0, total = 0;

	spin_lock(&ctx->fault_pending_wqh.lock);
	list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) {
		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
		pending++;
		total++;
	}
	list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) {
		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
		total++;
	}
	spin_unlock(&ctx->fault_pending_wqh.lock);

	/*
	 * If more protocols are added, they will all be shown
	 * separated by a space. Like this:
	 *	protocols: aa:... bb:...
	 */
	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
		   pending, total, UFFD_API, UFFD_API_FEATURES,
		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif

static const struct file_operations userfaultfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= userfaultfd_show_fdinfo,
#endif
	.release	= userfaultfd_release,
	.poll		= userfaultfd_poll,
	.read		= userfaultfd_read,
	.unlocked_ioctl = userfaultfd_ioctl,
	.compat_ioctl	= userfaultfd_ioctl,
	.llseek		= noop_llseek,
};

static void init_once_userfaultfd_ctx(void *mem)
{
	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;

	init_waitqueue_head(&ctx->fault_pending_wqh);
	init_waitqueue_head(&ctx->fault_wqh);
	init_waitqueue_head(&ctx->event_wqh);
	init_waitqueue_head(&ctx->fd_wqh);
	seqcount_init(&ctx->refile_seq);
}

/**
 * userfaultfd_file_create - Creates a userfaultfd file pointer.
 * @flags: Flags for the userfaultfd file.
 *
 * This function creates a userfaultfd file pointer, without installing
 * it into the fd table. This is useful when the userfaultfd file is
 * used during the initialization of data structures that require
 * extra setup after the userfaultfd creation. So the userfaultfd
 * creation is split into the file pointer creation phase, and the
 * file descriptor installation phase. In this way races with
 * userspace closing the newly installed file descriptor can be
 * avoided. Returns a userfaultfd file pointer, or a proper error
 * pointer.
 */
static struct file *userfaultfd_file_create(int flags)
{
	struct file *file;
	struct userfaultfd_ctx *ctx;

	BUG_ON(!current->mm);

	/* Check the UFFD_* constants for consistency. */
	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);

	file = ERR_PTR(-EINVAL);
	if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
		goto out;

	file = ERR_PTR(-ENOMEM);
	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
	if (!ctx)
		goto out;

	atomic_set(&ctx->refcount, 1);
	ctx->flags = flags;
	ctx->features = 0;
	ctx->state = UFFD_STATE_WAIT_API;
	ctx->released = false;
	ctx->mm = current->mm;
	/* prevent the mm struct from being freed */
	atomic_inc(&ctx->mm->mm_count);

	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
				  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
	if (IS_ERR(file)) {
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
out:
	return file;
}

SYSCALL_DEFINE1(userfaultfd, int, flags)
{
	int fd, error;
	struct file *file;

	error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
	if (error < 0)
		return error;
	fd = error;

	file = userfaultfd_file_create(flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_put_unused_fd;
	}
	fd_install(fd, file);

	return fd;

err_put_unused_fd:
	put_unused_fd(fd);

	return error;
}

static int __init userfaultfd_init(void)
{
	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
						sizeof(struct userfaultfd_ctx),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						init_once_userfaultfd_ctx);
	return 0;
}
__initcall(userfaultfd_init);