arch/powerpc/mm/pgtable-book3s64.c
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

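/*
 * Hook used to register the process table with the hypervisor or
 * hardware; it is assigned by the platform MMU setup code during early
 * boot. Arguments are the table base address, page size and table size.
 */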
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the page
 * fault path when we don't hit any of the major fault cases, ie, a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us, we additionally deal with missing execute
 * permission here on some processors
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}

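/*
 * Test and clear the accessed (young) bit of a huge pmd; this simply
 * forwards to the common __pmdp_test_and_clear_young() helper.
 */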
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

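/*
 * Empty IPI callback: serialize_against_pte_lookup() only needs the
 * cross-call itself to wait for remote CPUs to leave their
 * interrupts-off page table walks.
 */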
static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte(), which does a lock-less
 * lookup in the page tables with local interrupts disabled. For huge
 * pages it casts pmd_t to pte_t. Since the format of pte_t differs
 * from pmd_t, we want to prevent a transition from a pmd pointing to
 * a page table to a pmd pointing to a huge page (and back) while
 * interrupts are disabled. We clear the pmd so that it can be replaced
 * with a page table pointer in various code paths, so make sure we
 * wait for any parallel find_current_mm_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

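/*
 * A minimal sketch of the expected calling pattern (pmdp_invalidate()
 * below is the in-tree example): clear the huge pmd first, then call
 * serialize_against_pte_lookup() before the entry is reused as a page
 * table pointer, e.g.:
 *
 *	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, 0);
 *	flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
 *	serialize_against_pte_lookup(mm);
 *	... only now repopulate *pmdp with a page table pointer ...
 */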
/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

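/* OR the protection bits from @pgprot into the pmd value. */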
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

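/*
 * Build a huge-page pmd for @pfn with protection @pgprot: the pfn is
 * shifted into the RPN field and the protection bits are ORed in.
 */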
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

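/*
 * Change the protection of a huge pmd, preserving the bits covered by
 * _HPAGE_CHG_MASK (roughly, the pfn and the dirty/accessed and other
 * status bits).
 */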
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
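/*
 * Memory hotplug: build or tear down the kernel linear mapping for a
 * memory section, dispatching to the radix or hash implementation.
 */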
int __meminit create_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end);

	return hash__create_section_mapping(start, end);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */