/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END:   extends to the available space below vmemmap, PCI I/O space
 *		  and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

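/*
 * The vmemmap array is biased down by the pfn of the base of RAM
 * (memstart_addr >> PAGE_SHIFT) so that it can be indexed directly by
 * pfn, i.e. &vmemmap[pfn] is the struct page for that pfn.
 */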
#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
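/* empty_zero_page is a kernel image symbol, hence __pa_symbol() rather than __pa(). */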

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

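/*
 * With hardware DBM, a write to a writable (DBM-set) page clears
 * PTE_RDONLY, so "writable and not read-only" is the hardware's notion
 * of a dirty pte (see the table above set_pte_at() below).
 */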
#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_clear_rdonly(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_RDONLY));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        | 1           0          0
 *   0      1        | 1           1          0
 *   1      0        | 1           0          1
 *   1      1        | 0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte)) {
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
		if (pte_user_exec(pte) && !pte_special(pte))
			__sync_icache_dcache(pte, addr);
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}

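/*
 * PTE_RDONLY can be changed under our feet by hardware dirty bit
 * management, so mask it out when comparing present ptes for equality.
 */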
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

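/*
 * A section (block) pmd shares the pte attribute bit layout, so the pte
 * accessors can be reused on pmds via pmd_pte()/pte_pmd().
 */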
#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif
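/*
 * With 64K pages or fewer than three page table levels there are no
 * pud-level section mappings, hence the constant definitions above.
 */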

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
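/*
 * arm64 has no highmem, so page tables are always mapped: the map/unmap
 * variants above resolve to plain accessors and the unmaps are no-ops.
 */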

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
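/*
 * Illustrative sketch of a full walk from a kernel virtual address to
 * its pte, using the helpers defined in this file:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step assumes the level above is a valid table entry; a real walk
 * checks pgd_none()/pud_none()/pmd_none() first, as kern_addr_valid() does.
 */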

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("//	__ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("//	ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif	/* CONFIG_ARM64_HW_AFDBM */
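/*
 * Without CONFIG_ARM64_HW_AFDBM, the generic (non-atomic) implementations
 * from <asm-generic/pgtable.h> are used for the helpers above.
 */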

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
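/*
 * For example, __swp_entry(type, off) yields (type << 2) | (off << 8);
 * bits 0-1 stay zero, so the resulting pte is neither valid nor
 * PROT_NONE and pte_present() correctly reports it as not present.
 */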

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

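/* Helpers used by /proc/kcore to convert between kernel VAs and offsets. */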
#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */