/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(VMEMMAP_START - SZ_256M)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
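
/*
 * The vmemmap base is biased down by the pfn of the first RAM page, so
 * indexing it with an absolute pfn lands on the matching struct page.
 * Illustrative sketch (never compiled; the helper name is made up):
 */
#if 0
static struct page *example_pfn_to_page(unsigned long pfn)
{
	/* vmemmap already subtracts (memstart_addr >> PAGE_SHIFT), so an
	 * absolute pfn indexes the virtual memmap array directly. */
	return vmemmap + pfn;
}
#endif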

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
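
/*
 * The stride passed above makes the invalidation loop step one block (PMD
 * or PUD) at a time, and the trailing tlb_level argument (2 for PMD, 1 for
 * PUD) is the TTL hint naming the translation level that holds the leaf
 * entries being invalidated.
 */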

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
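
/*
 * Worked example (assuming the 64K-granule 52-bit layout, where
 * PTE_ADDR_LOW covers bits [47:16] and PTE_ADDR_HIGH bits [15:12]):
 *
 *	phys			= 0x000a_bcde_f123_0000 (52-bit, 64K-aligned)
 *	phys >> 36		= 0x0000_0000_0000_abcd (PA[51:48] now at [15:12])
 *	__phys_to_pte_val(phys)	= 0x0000_bcde_f123_a000 (after masking)
 *
 * __pte_to_phys() reverses this: bits [47:16] are used in place and the
 * nibble at [15:12] is shifted back up by 36 to rebuild PA[51:48].
 */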

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
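
/*
 * Example (assuming a 4K granule, where CONT_PTE_SIZE is 16 pages, i.e.
 * 64K): pte_cont_addr_end(0x13000, 0x50000) rounds 0x13000 up to the next
 * 64K boundary, 0x20000, and returns it since it lies below 'end'. The
 * "- 1" on both sides of the comparison keeps the test correct even when
 * the rounded-up boundary wraps to 0 at the top of the address space.
 */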

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |     1          0          0
 *   0      1      |     1          1          0
 *   1      0      |     1          0          1
 *   1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
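
/*
 * Tracing the table above: a clean writable pte starts in row two
 * (PTE_RDONLY=1, PTE_WRITE=1, PTE_DIRTY=0). On the first store, hardware
 * DBM clears PTE_RDONLY instead of raising a permission fault, moving the
 * entry to row four; pte_dirty() then reports true via pte_hw_dirty()
 * even though the software PTE_DIRTY bit was never set.
 */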

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	if (system_supports_mte() &&
	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
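
/*
 * For scale: with a 4K granule PMD_SHIFT is 21, so HPAGE_SIZE is 2MiB and
 * HUGETLB_PAGE_ORDER is 9; with a 64K granule PMD_SHIFT is 29, giving
 * 512MiB huge pages of order 13.
 */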

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)	(!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

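/*
 * Illustrative sketch (never compiled; the helper name is made up): each
 * variant above is just __pgprot_modify() swapping the memory-attribute
 * index and stripping execute permission.
 */
#if 0
static pgprot_t example_writecombine(pgprot_t prot)
{
	/* Drop the old memory-type index, install Normal-NC and forbid
	 * kernel and user execution - equivalent to pgprot_writecombine(). */
	return __pgprot_modify(prot, PTE_ATTRINDX_MASK,
			       PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN);
}
#endif
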
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

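/*
 * Worked example: __swp_entry(3, 0x1234) yields (3 << 2) | (0x1234 << 8)
 * = 0x12340c. Decoding reverses it: __swp_type() is (0x12340c >> 2) & 0x3f
 * = 3, and __swp_offset() is (0x12340c >> 8) & __SWP_OFFSET_MASK = 0x1234.
 * Bits 0-1 stay zero, so the entry can never be mistaken for a valid pte.
 */
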
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	if (system_supports_mte() && mte_restore_tags(entry, page))
		set_bit(PG_mte_tagged, &page->flags);
}

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
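
/*
 * Illustration (assuming TTBR_BADDR_MASK_52 covers bits [47:2]): for a
 * 52-bit table address, "(addr) >> 46" moves PA[51:48] down to bits [5:2],
 * where TTBR_ELx expects the high nibble of the address, while the low
 * 48 bits pass through unchanged.
 */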

/*
 * arm64 does not always have a hardware-managed Access Flag. Without it,
 * copying from user will fail because the pte is old and cannot be marked
 * young, so we always end up with a zeroed page after fork() + CoW for
 * pfn mappings.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte arch_faults_on_old_pte

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */