/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/pgtable-nopud.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL

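/*
 * Worked example (hypothetical value): with high_memory == 0xc8000000,
 * VMALLOC_START = (0xc8000000 + 0x800000) & ~0x7fffff = 0xc8800000,
 * i.e. an 8MB-aligned address at least VMALLOC_OFFSET above high_memory.
 */
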
#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

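/*
 * Note: the PAGE_* protections above are built from pgprot_user/pgprot_kernel
 * and so pick up the memory-type bits chosen at boot, while the __PAGE_*
 * variants are the raw templates used to seed protection_map[] before the
 * runtime fixup described above has happened.
 */
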
#define __pgprot_modify(prot,mask,bits)	\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_device(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
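/*
 * The three digits in __Pxxx/__Sxxx are the VM_EXEC, VM_WRITE and VM_READ
 * bits of the protection_map[] index; 'P' entries are for private
 * (copy-on-write) mappings and 'S' entries for shared ones, which is why a
 * writable private mapping maps to __PAGE_COPY rather than __PAGE_SHARED.
 */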
#define __P000	__PAGE_NONE
#define __P001	__PAGE_READONLY
#define __P010	__PAGE_COPY
#define __P011	__PAGE_COPY
#define __P100	__PAGE_READONLY_EXEC
#define __P101	__PAGE_READONLY_EXEC
#define __P110	__PAGE_COPY_EXEC
#define __P111	__PAGE_COPY_EXEC

#define __S000	__PAGE_NONE
#define __S001	__PAGE_READONLY
#define __S010	__PAGE_SHARED
#define __S011	__PAGE_SHARED
#define __S100	__PAGE_READONLY_EXEC
#define __S101	__PAGE_READONLY_EXEC
#define __S110	__PAGE_SHARED_EXEC
#define __S111	__PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

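/*
 * pgd_index() simply takes the top address bits: PGDIR_SHIFT comes from
 * pgtable-2level.h (21, each entry covering 2MB) or pgtable-3level.h (30,
 * under LPAE), so e.g. pgd_index(0xc0000000) is 0x600 in the 2-level case.
 */
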
#define pmd_none(pmd)		(!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

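/*
 * Note the (s32) cast in pmd_page_vaddr(): PAGE_MASK is a 32-bit quantity,
 * so reinterpreting it as signed lets it sign-extend and keep its high bits
 * set when pmd_val() is 64-bit (LPAE); PHYS_MASK then clips the result to
 * the valid physical address bits.
 */
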
#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))

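/*
 * The (u32)(val) == (val) test in pte_isset() checks whether the bit mask
 * fits in 32 bits: if so, the raw AND result can be returned directly, but
 * for masks above bit 31 (possible with 64-bit LPAE pte values) the !! is
 * needed so the truth value survives truncation to a 32-bit result.
 */
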
#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

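/*
 * A write needs L_PTE_RDONLY to be clear, so for the write case below the
 * bit is added to 'mask' but not to 'needed': a read-only pte then fails
 * the equality test.
 */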
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
	pteval_t needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;

	return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted	pte_access_permitted

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval);

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

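/*
 * Write permission is encoded inverted, as the absence of L_PTE_RDONLY, so
 * pte_wrprotect() *sets* a bit and pte_mkwrite() *clears* one; the same
 * inversion applies to L_PTE_XN in pte_mkexec()/pte_mknexec() below.
 */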
static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

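/*
 * Only the access/validity bits listed in 'mask' are taken from newprot by
 * pte_modify(); in particular the memory-type (L_PTE_MT_*) bits of the
 * original pte are left untouched.
 */
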
/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

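/*
 * Worked example: __swp_entry(3, 0x10) packs type 3 into bits [6:2] and
 * offset 0x10 into bits [31:7], giving (3 << 2) | (0x10 << 7) = 0x80c;
 * __swp_type() and __swp_offset() reverse the packing.
 */
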
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK()	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */