/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H
#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(PAGE_OFFSET - 1)
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)
#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif
/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
#define MODULES_VADDR	(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END	(PFN_ALIGN((unsigned long)&_start))
#endif
/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	(VMALLOC_START - 1)
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)
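/*
 * Illustrative note, not part of the upstream header: with
 * CONFIG_SPARSEMEM_VMEMMAP the generic memory model resolves
 * pfn_to_page()/page_to_pfn() to simple array arithmetic on this
 * virtually contiguous vmemmap, roughly:
 *
 *	struct page *page = vmemmap + pfn;	// pfn_to_page(pfn)
 *	unsigned long pfn = page - vmemmap;	// page_to_pfn(page)
 */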
#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif
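/*
 * Illustrative summary, not part of the upstream header: reading the
 * definitions above from high to low virtual addresses, the MMU layout
 * stacks up as
 *
 *	PAGE_OFFSET		linear map of RAM (on 64-bit, the kernel
 *				link address, modules and the BPF JIT
 *				region sit at the very top of the space)
 *	VMALLOC_END/START	vmalloc area, directly below PAGE_OFFSET
 *	VMEMMAP_END/START	vmemmap, directly below the vmalloc area
 *	PCI_IO_END/START	16M PCI I/O window, below vmemmap
 *	FIXADDR_TOP/START	fixmap, directly below the PCI I/O window
 */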
#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_8M
#else
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__
/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({							\
	uintptr_t __a = (uintptr_t)(addr);					\
	(__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ?	\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
		__a;								\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
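/*
 * Illustrative example, not part of the upstream header: for an XIP
 * kernel, addresses that fall in the 16M flash window but whose data
 * was copied to RAM (everything past the first XIP_OFFSET bytes of the
 * image) are relocated by XIP_FIXUP. E.g. assuming, purely for
 * demonstration, CONFIG_XIP_PHYS_ADDR 0x20000000, CONFIG_PHYS_RAM_BASE
 * 0x80000000 and XIP_OFFSET SZ_8M:
 *
 *	XIP_FIXUP(0x20900000)
 *		== 0x20900000 - 0x20000000 + 0x80000000 - SZ_8M
 *		== 0x80100000
 *
 * Addresses outside the flash window pass through unchanged.
 */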
#ifdef CONFIG_MMU

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC
#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)
#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)
/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions.
 */
#define _PAGE_IOREMAP	_PAGE_KERNEL
extern pgd_t swapper_pg_dir[];
/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
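/*
 * Illustrative note, not part of the upstream header: the three digits
 * index the xwr bits requested by mmap(). For example, a MAP_PRIVATE
 * mapping with PROT_READ | PROT_WRITE (x=0, w=1, r=1) selects __P011,
 * i.e. PAGE_COPY: the PTE starts out read-only and the first write
 * faults into the copy-on-write path. The same bits with MAP_SHARED
 * select __S011, i.e. PAGE_SHARED, which is directly writable.
 */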
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * When splitting a THP, split_huge_page() will temporarily clear
	 * the present bit, in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif
static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
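/*
 * Illustrative example, not part of the upstream header: a PTE is just
 * the PFN shifted into place OR'd with the protection bits, so the
 * constructors and accessors above round-trip:
 *
 *	pte_t pte = mk_pte(page, PAGE_READ);
 *	pte_pfn(pte)  == page_to_pfn(page);
 *	pte_page(pte) == page;
 */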
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}
/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif
/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
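/*
 * Illustrative example, not part of the upstream header: pte_modify()
 * swaps the permission bits while _PAGE_CHG_MASK (defined in
 * pgtable-bits.h) preserves the PFN and state such as accessed/dirty.
 * This is the primitive a protection change like mprotect() builds on:
 *
 *	pte = pte_modify(pte, PAGE_READ);	// same frame, now read-only
 */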
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}
void flush_icache_pte(pte_t pte);
static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}
#define __pmd_to_phys(pmd)  (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
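/*
 * Illustrative example, not part of the upstream header, using the
 * layout documented above (type in bits 2-6, offset from bit 7 up):
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *	// e.val == (3 << 2) | (0x1234 << 7)
 *	__swp_type(e);		// == 3
 *	__swp_offset(e);	// == 0x1234
 *
 * Bits 0 and 1 stay clear, so a swap PTE is never mistaken for a
 * present or PROT_NONE mapping.
 */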
/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE	FIXADDR_START
#endif
#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */
#define kern_addr_valid(addr)	(1) /* FIXME */
extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
void paging_init(void);
void misc_mem_init(void);
/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */