/*
 * NOTE(review): the "commit | line |" markers throughout this file are
 * git-blame extraction residue, not part of the source itself.
 */
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
17ed9e31 AK |
2 | #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H |
3 | #define _ASM_POWERPC_NOHASH_32_PGTABLE_H | |
f88df14b | 4 | |
d1953c88 | 5 | #include <asm-generic/pgtable-nopmd.h> |
f88df14b DG |
6 | |
7 | #ifndef __ASSEMBLY__ | |
8 | #include <linux/sched.h> | |
9 | #include <linux/threads.h> | |
6b622669 | 10 | #include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */ |
f88df14b | 11 | |
b98ac05d BH |
12 | #ifdef CONFIG_44x |
13 | extern int icache_44x_need_flush; | |
14 | #endif | |
15 | ||
f88df14b DG |
16 | #endif /* __ASSEMBLY__ */ |
17 | ||
9b081e10 CL |
18 | #define PTE_INDEX_SIZE PTE_SHIFT |
19 | #define PMD_INDEX_SIZE 0 | |
20 | #define PUD_INDEX_SIZE 0 | |
21 | #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) | |
22 | ||
23 | #define PMD_CACHE_INDEX PMD_INDEX_SIZE | |
fae22116 | 24 | #define PUD_CACHE_INDEX PUD_INDEX_SIZE |
9b081e10 CL |
25 | |
26 | #ifndef __ASSEMBLY__ | |
27 | #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) | |
28 | #define PMD_TABLE_SIZE 0 | |
29 | #define PUD_TABLE_SIZE 0 | |
30 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | |
974b9b2c MR |
31 | |
32 | #define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1) | |
9b081e10 CL |
33 | #endif /* __ASSEMBLY__ */ |
34 | ||
35 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | |
36 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | |
37 | ||
f88df14b DG |
38 | /* |
39 | * The normal case is that PTEs are 32-bits and we have a 1-page | |
40 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus | |
41 | * | |
42 | * For any >32-bit physical address platform, we can use the following | |
43 | * two level page table layout where the pgdir is 8KB and the MS 13 bits | |
44 | * are an index to the second level table. The combined pgdir/pmd first | |
45 | * level has 2048 entries and the second level has 512 64-bit PTE entries. | |
46 | * -Matt | |
47 | */ | |
f88df14b | 48 | /* PGDIR_SHIFT determines what a top-level page table entry can map */ |
9b081e10 | 49 | #define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) |
f88df14b DG |
50 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
51 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
52 | ||
9b081e10 CL |
53 | /* Bits to mask out from a PGD to get to the PUD page */ |
54 | #define PGD_MASKED_BITS 0 | |
f88df14b DG |
55 | |
56 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) | |
d016bf7e | 57 | #define FIRST_USER_ADDRESS 0UL |
f88df14b | 58 | |
f88df14b | 59 | #define pte_ERROR(e) \ |
a7696b36 | 60 | pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ |
0aeafb0c | 61 | (unsigned long long)pte_val(e)) |
f88df14b | 62 | #define pgd_ERROR(e) \ |
a7696b36 | 63 | pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) |
f88df14b | 64 | |
a67beca0 CL |
65 | #ifndef __ASSEMBLY__ |
66 | ||
67 | int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); | |
68 | ||
69 | #endif /* !__ASSEMBLY__ */ | |
70 | ||
71 | ||
f637a49e BH |
72 | /* |
73 | * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary | |
74 | * value (for now) on others, from where we can start layout kernel | |
75 | * virtual space that goes below PKMAP and FIXMAP | |
76 | */ | |
a67beca0 CL |
77 | #include <asm/fixmap.h> |
78 | ||
f637a49e BH |
79 | /* |
80 | * ioremap_bot starts at that address. Early ioremaps move down from there, | |
81 | * until mem_init() at which point this becomes the top of the vmalloc | |
82 | * and ioremap space | |
83 | */ | |
f2902a2f CH |
84 | #ifdef CONFIG_HIGHMEM |
85 | #define IOREMAP_TOP PKMAP_BASE | |
8b31e49d | 86 | #else |
f2902a2f | 87 | #define IOREMAP_TOP FIXADDR_START |
8b31e49d | 88 | #endif |
f637a49e | 89 | |
4a45b746 CL |
90 | /* PPC32 shares vmalloc area with ioremap */ |
91 | #define IOREMAP_START VMALLOC_START | |
92 | #define IOREMAP_END VMALLOC_END | |
93 | ||
f88df14b DG |
94 | /* |
95 | * Just any arbitrary offset to the start of the vmalloc VM area: the | |
f637a49e | 96 | * current 16MB value just means that there will be a 64MB "hole" after the |
f88df14b DG |
97 | * physical memory until the kernel virtual memory starts. That means that |
98 | * any out-of-bounds memory accesses will hopefully be caught. | |
99 | * The vmalloc() routines leaves a hole of 4kB between each vmalloced | |
100 | * area for the same reason. ;) | |
101 | * | |
102 | * We no longer map larger than phys RAM with the BATs so we don't have | |
103 | * to worry about the VMALLOC_OFFSET causing problems. We do have to worry | |
104 | * about clashes between our early calls to ioremap() that start growing down | |
e974cd4b | 105 | * from IOREMAP_TOP being run into the VM area allocations (growing upwards |
f88df14b DG |
106 | * from VMALLOC_START). For this reason we have ioremap_bot to check when |
107 | * we actually run into our mappings setup in the early boot with the VM | |
108 | * system. This really does become a problem for machines with good amounts | |
109 | * of RAM. -- Cort | |
110 | */ | |
111 | #define VMALLOC_OFFSET (0x1000000) /* 16M */ | |
112 | #ifdef PPC_PIN_SIZE | |
d3f3d3bf | 113 | #define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) |
f88df14b DG |
114 | #else |
115 | #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) | |
116 | #endif | |
3d4247fc CL |
117 | |
118 | #ifdef CONFIG_KASAN_VMALLOC | |
e96d904e | 119 | #define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) |
3d4247fc | 120 | #else |
f88df14b | 121 | #define VMALLOC_END ioremap_bot |
3d4247fc | 122 | #endif |
f88df14b DG |
123 | |
124 | /* | |
125 | * Bits in a linux-style PTE. These match the bits in the | |
126 | * (hardware-defined) PowerPC PTE as closely as possible. | |
127 | */ | |
128 | ||
129 | #if defined(CONFIG_40x) | |
17ed9e31 | 130 | #include <asm/nohash/32/pte-40x.h> |
f88df14b | 131 | #elif defined(CONFIG_44x) |
17ed9e31 | 132 | #include <asm/nohash/32/pte-44x.h> |
76acc2c1 | 133 | #elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) |
17ed9e31 | 134 | #include <asm/nohash/pte-book3e.h> |
f88df14b | 135 | #elif defined(CONFIG_FSL_BOOKE) |
17ed9e31 | 136 | #include <asm/nohash/32/pte-fsl-booke.h> |
968159c0 | 137 | #elif defined(CONFIG_PPC_8xx) |
17ed9e31 | 138 | #include <asm/nohash/32/pte-8xx.h> |
4ee7084e | 139 | #endif |
f88df14b | 140 | |
56623153 CL |
141 | /* |
142 | * Location of the PFN in the PTE. Most 32-bit platforms use the same | |
143 | * as _PAGE_SHIFT here (ie, naturally aligned). | |
144 | * Platform who don't just pre-define the value so we don't override it here. | |
145 | */ | |
146 | #ifndef PTE_RPN_SHIFT | |
147 | #define PTE_RPN_SHIFT (PAGE_SHIFT) | |
148 | #endif | |
149 | ||
150 | /* | |
151 | * The mask covered by the RPN must be a ULL on 32-bit platforms with | |
152 | * 64-bit PTEs. | |
153 | */ | |
154 | #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) | |
155 | #define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1)) | |
156 | #else | |
157 | #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) | |
158 | #endif | |
159 | ||
160 | /* | |
161 | * _PAGE_CHG_MASK masks of bits that are to be preserved across | |
162 | * pgprot changes. | |
163 | */ | |
164 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL) | |
f88df14b DG |
165 | |
166 | #ifndef __ASSEMBLY__ | |
f88df14b | 167 | |
9bf2b5cd | 168 | #define pte_clear(mm, addr, ptep) \ |
06f52524 | 169 | do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0) |
f88df14b | 170 | |
a0da4bc1 | 171 | #ifndef pte_mkwrite |
aa9cd505 CL |
172 | static inline pte_t pte_mkwrite(pte_t pte) |
173 | { | |
a0da4bc1 | 174 | return __pte(pte_val(pte) | _PAGE_RW); |
aa9cd505 | 175 | } |
a0da4bc1 | 176 | #endif |
aa9cd505 CL |
177 | |
178 | static inline pte_t pte_mkdirty(pte_t pte) | |
179 | { | |
180 | return __pte(pte_val(pte) | _PAGE_DIRTY); | |
181 | } | |
182 | ||
183 | static inline pte_t pte_mkyoung(pte_t pte) | |
184 | { | |
185 | return __pte(pte_val(pte) | _PAGE_ACCESSED); | |
186 | } | |
187 | ||
a0da4bc1 | 188 | #ifndef pte_wrprotect |
aa9cd505 CL |
189 | static inline pte_t pte_wrprotect(pte_t pte) |
190 | { | |
a0da4bc1 | 191 | return __pte(pte_val(pte) & ~_PAGE_RW); |
aa9cd505 | 192 | } |
a0da4bc1 | 193 | #endif |
aa9cd505 | 194 | |
daba7902 CL |
195 | static inline pte_t pte_mkexec(pte_t pte) |
196 | { | |
197 | return __pte(pte_val(pte) | _PAGE_EXEC); | |
198 | } | |
199 | ||
f88df14b DG |
200 | #define pmd_none(pmd) (!pmd_val(pmd)) |
201 | #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) | |
202 | #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) | |
f281b5d5 AK |
/* Clear a PMD entry by storing an empty (zero) pmd value. */
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}
207 | ||
f88df14b | 208 | /* |
c605782b BH |
209 | * PTE updates. This function is called whenever an existing |
210 | * valid PTE is updated. This does -not- include set_pte_at() | |
211 | * which nowadays only sets a new PTE. | |
212 | * | |
213 | * Depending on the type of MMU, we may need to use atomic updates | |
214 | * and the PTE may be either 32 or 64 bit wide. In the later case, | |
215 | * when using atomic updates, only the low part of the PTE is | |
216 | * accessed atomically. | |
f88df14b | 217 | * |
c605782b BH |
218 | * In addition, on 44x, we also maintain a global flag indicating |
219 | * that an executable user mapping was modified, which is needed | |
220 | * to properly flush the virtually tagged instruction cache of | |
221 | * those implementations. | |
6ad41bfb CL |
222 | * |
223 | * On the 8xx, the page tables are a bit special. For 16k pages, we have | |
b250c8c0 CL |
224 | * 4 identical entries. For 512k pages, we have 128 entries as if it was |
225 | * 4k pages, but they are flagged as 512k pages for the hardware. | |
226 | * For other page sizes, we have a single entry in the table. | |
f88df14b | 227 | */ |
6ad41bfb | 228 | #ifdef CONFIG_PPC_8xx |
687993cc MR |
229 | static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr); |
230 | ||
6ad41bfb CL |
/*
 * 8xx PTE update: clear the bits in @clr, set the bits in @set, and
 * return the previous value of the (first) PTE.
 *
 * Because 16k and 512k pages are represented by several consecutive 4k
 * entries (see the comment above), the new value must be replicated:
 *  - !huge:        PAGE_SIZE / 4k identical entries (4 for 16k pages),
 *  - huge, 512k:   128 entries flagged as 512k pages,
 *  - huge, 8M:     a single entry.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t *entry = &p->pte;
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	int num, i;
	pmd_t *pmd = pmd_off(mm, addr);

	if (!huge)
		num = PAGE_SIZE / SZ_4K;
	else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) != _PMD_PAGE_8M)
		num = SZ_512K / SZ_4K;
	else
		num = 1;

	/* Each successive 4k sub-entry maps the next 4k of the area. */
	for (i = 0; i < num; i++, entry++, new += SZ_4K)
		*entry = new;

	return old;
}
6ad41bfb | 252 | #else |
06f52524 CL |
253 | static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, |
254 | unsigned long clr, unsigned long set, int huge) | |
f88df14b | 255 | { |
2db99aeb CL |
256 | pte_basic_t old = pte_val(*p); |
257 | pte_basic_t new = (old & ~(pte_basic_t)clr) | set; | |
55c8fc3f | 258 | |
55c8fc3f | 259 | *p = __pte(new); |
1bc54c03 | 260 | |
b98ac05d | 261 | #ifdef CONFIG_44x |
ea3cc330 | 262 | if ((old & _PAGE_USER) && (old & _PAGE_EXEC)) |
b98ac05d BH |
263 | icache_44x_need_flush = 1; |
264 | #endif | |
f88df14b DG |
265 | return old; |
266 | } | |
6ad41bfb | 267 | #endif |
f88df14b | 268 | |
f88df14b | 269 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
c7fa7701 CL |
270 | static inline int __ptep_test_and_clear_young(struct mm_struct *mm, |
271 | unsigned long addr, pte_t *ptep) | |
f88df14b DG |
272 | { |
273 | unsigned long old; | |
06f52524 | 274 | old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); |
f88df14b DG |
275 | return (old & _PAGE_ACCESSED) != 0; |
276 | } | |
277 | #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ | |
c7fa7701 | 278 | __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep) |
f88df14b | 279 | |
f88df14b DG |
280 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
281 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | |
282 | pte_t *ptep) | |
283 | { | |
06f52524 | 284 | return __pte(pte_update(mm, addr, ptep, ~0, 0, 0)); |
f88df14b DG |
285 | } |
286 | ||
c0e1c8c2 CL |
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
#define __HAVE_ARCH_PTEP_GET
/*
 * With 16k pages on the 8xx, a PTE consists of 4 identical 4k entries
 * (see pte_update() above), so reading the first sub-entry with
 * READ_ONCE() is sufficient to obtain the PTE value.
 */
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_t pte = {READ_ONCE(ptep->pte), 0, 0, 0};

	return pte;
}
#endif
296 | ||
f88df14b DG |
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/*
 * Write-protect a PTE.  The clear/set masks are derived from the
 * platform's pte_wrprotect() helper by applying it to all-ones and
 * all-zeroes PTE values, so this works regardless of whether the
 * platform write-protects by clearing or by setting bits.
 */
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(mm, addr, ptep, clr, set, 0);
}
306 | ||
/*
 * Update the access-flag bits of an existing, valid PTE from @entry and
 * flush the TLB entry for @address.
 */
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	/*
	 * Only the write/dirty/accessed/exec bits may change here.  The
	 * masks are built with the pte_mk*() helpers so each platform's
	 * encoding of those bits is honoured.
	 */
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
	/* Any page size above the base MMU page size is a huge mapping. */
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, clr, set, huge);

	flush_tlb_page(vma, address);
}
322 | ||
45201c87 CL |
/* Non-zero iff the accessed (young) bit is set in @pte. */
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}
327 | ||
f88df14b | 328 | #define __HAVE_ARCH_PTE_SAME |
45201c87 | 329 | #define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0) |
f88df14b DG |
330 | |
331 | /* | |
332 | * Note that on Book E processors, the pmd contains the kernel virtual | |
333 | * (lowmem) address of the pte page. The physical address is less useful | |
334 | * because everything runs with translation enabled (even the TLB miss | |
335 | * handler). On everything else the pmd contains the physical address | |
336 | * of the pte page. -- paulus | |
337 | */ | |
338 | #ifndef CONFIG_BOOKE | |
f88df14b | 339 | #define pmd_page(pmd) \ |
43b5fefc | 340 | pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) |
f88df14b DG |
341 | #else |
342 | #define pmd_page_vaddr(pmd) \ | |
32ea4c14 | 343 | ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) |
f88df14b | 344 | #define pmd_page(pmd) \ |
af892e0f | 345 | pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT)) |
f88df14b DG |
346 | #endif |
347 | ||
f88df14b DG |
348 | /* |
349 | * Encode and decode a swap entry. | |
350 | * Note that the bits we use in a PTE for representing a swap entry | |
45201c87 | 351 | * must not include the _PAGE_PRESENT bit. |
780fc564 | 352 | * -- paulus |
f88df14b DG |
353 | */ |
354 | #define __swp_type(entry) ((entry).val & 0x1f) | |
355 | #define __swp_offset(entry) ((entry).val >> 5) | |
356 | #define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) | |
357 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) | |
358 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) | |
359 | ||
f88df14b DG |
360 | #endif /* !__ASSEMBLY__ */ |
361 | ||
17ed9e31 | 362 | #endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */ |