// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	/*
	 * Can be called with only the mmap_lock for reading by
	 * prot_numa so we must check the pmd isn't constantly
	 * changing from under us from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_lock is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_mapcount(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				if (target_node == page_to_nid(page))
					continue;
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			if (uffd_wp) {
				ptent = pte_wrprotect(ptent);
				ptent = pte_mkuffd_wp(ptent);
			} else if (uffd_wp_resolve) {
				/*
				 * Leave the write bit to be handled
				 * by PF interrupt handler, then
				 * things like COW could be properly
				 * handled.
				 */
				ptent = pte_clear_uffd_wp(ptent);
			}

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_writable_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				entry = make_readable_migration_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}
/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);

		/*
		 * Automatic NUMA balancing walks the tables with mmap_lock
		 * held for read. It's possible for a parallel update to occur
		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
		 * check leading to a false positive and clearing.
		 * Hence, it's necessary to atomically read the PMD value
		 * for all the checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		    pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
							      newprot, cp_flags);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
					      cp_flags);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}
static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       unsigned long cp_flags)
{
	unsigned long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
						cp_flags);

	return pages;
}
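
/*
 * Illustrative only (not part of this file): internal callers drive
 * change_protection() with one of the MM_CP_* flags. As a rough, hedged
 * sketch, the userfaultfd write-protect path does something along these
 * lines under the mmap_lock (see mm/userfaultfd.c for the real code):
 *
 *	newprot = vm_get_page_prot(dst_vma->vm_flags);
 *	change_protection(dst_vma, start, start + len, newprot,
 *			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
 */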
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable ? MM_CP_DIRTY_ACCT : 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;

	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}

	if (start > vma->vm_start)
		prev = vma;
	else
		prev = vma->vm_prev;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			goto out;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				goto out;
		}

		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;

		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	mmap_write_unlock(current->mm);
	return error;
}
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}
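
/*
 * Illustrative userspace usage (not part of this file): mprotect(2) operates
 * on whole pages, so the start address must be page-aligned. A minimal
 * sketch, assuming an ordinary POSIX environment:
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	size_t len = sysconf(_SC_PAGESIZE);
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// ... fill buf while it is writable ...
 *	if (mprotect(buf, len, PROT_READ) == -1)	// drop write permission
 *		perror("mprotect");
 */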
#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}
SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}
SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */
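
/*
 * Illustrative userspace usage (not part of this file): on architectures with
 * protection keys, a key is allocated, bound to a mapping with
 * pkey_mprotect(2), and released again. A minimal sketch, assuming the glibc
 * wrappers pkey_alloc()/pkey_mprotect()/pkey_free() are available:
 *
 *	#include <sys/mman.h>
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	if (pkey < 0)
 *		perror("pkey_alloc");
 *	else if (pkey_mprotect(buf, len, PROT_READ | PROT_WRITE, pkey) == -1)
 *		perror("pkey_mprotect");
 *	// ... access to buf is now also gated by the key's access rights ...
 *	pkey_free(pkey);
 */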