arch/x86/include/asm/pgalloc.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif
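
/*
 * Rough sketch of the bit-12 flip described above (kernel_pgd/user_pgd
 * are illustrative names, not part of this header): given the 8k-aligned,
 * order-1 allocation, swapping between the two 4k halves amounts to
 * toggling bit PAGE_SHIFT of the pointer, roughly
 *
 *	user_pgd = (pgd_t *)((unsigned long)kernel_pgd ^ PAGE_SIZE);
 *
 * The real helpers for this live in asm/pgtable.h (kernel_to_user_pgdp()
 * and friends).
 */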

/*
 * Allocate and free page tables.
 */
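/*
 * pgd_alloc()/pgd_free() and the pte_alloc_one*() helpers are not
 * inline; see arch/x86/mm/pgtable.c for their implementations.
 */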
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *);
extern pgtable_t pte_alloc_one(struct mm_struct *);

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

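/*
 * pte_free_kernel() releases a kernel PTE page; the BUG_ON() catches
 * callers that pass a pointer into the middle of the page rather than
 * its base.  pte_free() below additionally undoes the page-table state
 * set up by pgtable_page_ctor() before freeing a user PTE page.
 */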
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

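/*
 * The address argument is unused on x86; freeing goes through the
 * mmu_gather so the page is only released once the relevant TLB
 * entries have been flushed.
 */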
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}

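/*
 * The pmd_populate*() helpers install a freshly allocated PTE page
 * into a PMD entry.  The _safe variants use set_pmd_safe(), which
 * warns if a different, already-present entry would be overwritten.
 */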
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
					    pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

#if CONFIG_PGTABLE_LEVELS > 2
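/*
 * Page-table pages for user mm's are charged to the memcg
 * (GFP_KERNEL_ACCOUNT); allocations for init_mm drop __GFP_ACCOUNT
 * since kernel page tables have no memcg to charge.  The same pattern
 * is used by pud_alloc_one() and p4d_alloc_one() below.
 */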
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}

extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

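/*
 * With PAE, pud_populate() cannot be a plain set_pud(): almost all bits
 * other than _PAGE_PRESENT are reserved in the PDPT entries and updating
 * them also requires a TLB flush, so the implementation lives out of
 * line in arch/x86/mm/pgtable.c.
 */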
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}

static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (pud_t *)get_zeroed_page(gfp);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#if CONFIG_PGTABLE_LEVELS > 4
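/*
 * On x86-64, 5-level paging is a boot-time decision: when
 * pgtable_l5_enabled() is false the p4d level is folded into the pgd,
 * so pgd_populate() and friends become no-ops and no separate p4d
 * page exists.
 */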
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (p4d_t *)get_zeroed_page(gfp);
}

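/*
 * Without 5-level paging the "p4d" is really part of the pgd page, so
 * there is nothing separate to free here or in __p4d_free_tlb() below.
 */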
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif	/* _ASM_X86_PGALLOC_H */