]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - arch/powerpc/include/asm/pgtable.h
mm: introduce include/linux/pgtable.h
[mirror_ubuntu-jammy-kernel.git] / arch / powerpc / include / asm / pgtable.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
047ea784
PM
2#ifndef _ASM_POWERPC_PGTABLE_H
3#define _ASM_POWERPC_PGTABLE_H
4
9c709f3b 5#ifndef __ASSEMBLY__
c34a51ce 6#include <linux/mmdebug.h>
1c98025c 7#include <linux/mmzone.h>
9c709f3b
DG
8#include <asm/processor.h> /* For TASK_SIZE */
9#include <asm/mmu.h>
10#include <asm/page.h>
bd5050e3 11#include <asm/tlbflush.h>
8d30c14c 12
9c709f3b 13struct mm_struct;
8d30c14c 14
9c709f3b
DG
15#endif /* !__ASSEMBLY__ */
16
3dfcb315
AK
17#ifdef CONFIG_PPC_BOOK3S
18#include <asm/book3s/pgtable.h>
19#else
17ed9e31 20#include <asm/nohash/pgtable.h>
3dfcb315 21#endif /* !CONFIG_PPC_BOOK3S */
1da177e4 22
f4805785
CL
23/* Note due to the way vm flags are laid out, the bits are XWR */
24#define __P000 PAGE_NONE
25#define __P001 PAGE_READONLY
26#define __P010 PAGE_COPY
27#define __P011 PAGE_COPY
28#define __P100 PAGE_READONLY_X
29#define __P101 PAGE_READONLY_X
30#define __P110 PAGE_COPY_X
31#define __P111 PAGE_COPY_X
32
33#define __S000 PAGE_NONE
34#define __S001 PAGE_READONLY
35#define __S010 PAGE_SHARED
36#define __S011 PAGE_SHARED
37#define __S100 PAGE_READONLY_X
38#define __S101 PAGE_READONLY_X
39#define __S110 PAGE_SHARED_X
40#define __S111 PAGE_SHARED_X
41
1da177e4 42#ifndef __ASSEMBLY__
64b3d0e8 43
0b1c524c
CL
44#ifdef CONFIG_PPC32
45static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
46{
2fb47060 47 return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
0b1c524c
CL
48}
49
50static inline pmd_t *pmd_ptr_k(unsigned long va)
51{
2fb47060 52 return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
0b1c524c 53}
2efc7c08
CL
54
55static inline pte_t *virt_to_kpte(unsigned long vaddr)
56{
cc6f0e39
CL
57 pmd_t *pmd = pmd_ptr_k(vaddr);
58
59 return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
2efc7c08 60}
0b1c524c
CL
61#endif
62
78f1dbde
AK
63#include <asm/tlbflush.h>
64
71087002
BH
65/* Keep these as a macros to avoid include dependency mess */
66#define pte_page(x) pfn_to_page(pte_pfn(x))
67#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
b9fb4480
AK
68/*
69 * Select all bits except the pfn
70 */
71static inline pgprot_t pte_pgprot(pte_t pte)
72{
73 unsigned long pte_flags;
74
75 pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
76 return __pgprot(pte_flags);
77}
71087002 78
9c709f3b
DG
79/*
80 * ZERO_PAGE is a global shared page that is always zero: used
81 * for zero-mapped memory areas etc..
82 */
83extern unsigned long empty_zero_page[];
84#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
85
86extern pgd_t swapper_pg_dir[];
87
88extern void paging_init(void);
89
7cd9b317
CL
90extern unsigned long ioremap_bot;
91
9c709f3b
DG
92/*
93 * kern_addr_valid is intended to indicate whether an address is a valid
94 * kernel address. Most 32-bit archs define it as always true (like this)
95 * but most 64-bit archs actually perform a test. What should we do here?
96 */
97#define kern_addr_valid(addr) (1)
98
074c2eae
AK
99#ifndef CONFIG_TRANSPARENT_HUGEPAGE
100#define pmd_large(pmd) 0
074c2eae 101#endif
e9ab1a1c 102
/* TODO: confirm whether KVM code can rely on vmalloc_to_phys() */
e9ab1a1c
AK
104unsigned long vmalloc_to_phys(void *vmalloc_addr);
105
1e03c7e2 106void pgtable_cache_add(unsigned int shift);
029d9252 107
34536d78
CL
108pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
109
3184cc4b 110#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
029d9252
ME
111void mark_initmem_nx(void);
112#else
113static inline void mark_initmem_nx(void) { }
114#endif
115
a74791dd
CL
116/*
117 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
118 * so we are sure it is included when arriving here.
119 */
120#ifdef PTE_FRAG_NR
121static inline void *pte_frag_get(mm_context_t *ctx)
122{
123 return ctx->pte_frag;
124}
125
126static inline void pte_frag_set(mm_context_t *ctx, void *p)
127{
128 ctx->pte_frag = p;
129}
130#else
32ea4c14
CL
131#define PTE_FRAG_NR 1
132#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT
133#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
134
a74791dd
CL
135static inline void *pte_frag_get(mm_context_t *ctx)
136{
137 return NULL;
138}
139
140static inline void pte_frag_set(mm_context_t *ctx, void *p)
141{
142}
143#endif
144
d6eacedd
AK
145#ifndef pmd_is_leaf
146#define pmd_is_leaf pmd_is_leaf
147static inline bool pmd_is_leaf(pmd_t pmd)
148{
149 return false;
150}
151#endif
152
153#ifndef pud_is_leaf
154#define pud_is_leaf pud_is_leaf
155static inline bool pud_is_leaf(pud_t pud)
156{
157 return false;
158}
159#endif
160
2fb47060
MR
161#ifndef p4d_is_leaf
162#define p4d_is_leaf p4d_is_leaf
163static inline bool p4d_is_leaf(p4d_t p4d)
d6eacedd
AK
164{
165 return false;
166}
167#endif
168
9bd3bb67
AK
#ifdef CONFIG_PPC64
#define is_ioremap_addr is_ioremap_addr
/*
 * Report whether @x lies inside the 64-bit ioremap window
 * [IOREMAP_BASE, IOREMAP_END).
 */
static inline bool is_ioremap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	if (addr < IOREMAP_BASE)
		return false;

	return addr < IOREMAP_END;
}
#endif /* CONFIG_PPC64 */
178
1da177e4
LT
179#endif /* __ASSEMBLY__ */
180
047ea784 181#endif /* _ASM_POWERPC_PGTABLE_H */