/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58)	/* only when !PTE_VALID */

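/*
 * Layout note: bits [58:55] of a VMSAv8-64 descriptor are reserved for
 * software use and ignored by the MMU, which is why PTE_DIRTY,
 * PTE_SPECIAL, PTE_WRITE and PTE_PROT_NONE live there.  PTE_FILE (bit 2)
 * overlaps the hardware AttrIndx field, so it can only carry meaning in
 * a non-present PTE, where the walker ignores everything but bit 0.
 */
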
/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 */
#define VMALLOC_START		(UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END		(PAGE_OFFSET - UL(0x400000000) - SZ_64K)

#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))

#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#endif
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime to
 * include the cacheable and bufferable bits based on memory policy, as well
 * as any architecture dependent bits like global/ASID and SMP shared mapping
 * bits.
 */
#define _PAGE_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)

extern pgprot_t pgprot_default;

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
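
/*
 * Example (illustrative): __pgprot_modify() clears the bits in @mask and
 * then ORs in @bits, so
 *
 *	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
 *
 * swaps only the memory-attribute index of @prot, while _MOD_PROT() (a
 * mask of 0) purely adds bits on top of its first argument, typically
 * the runtime pgprot_default.
 */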

#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)

#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)

#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)

#endif /* __ASSEMBLY__ */

#define __P000	__PAGE_NONE
#define __P001	__PAGE_READONLY
#define __P010	__PAGE_COPY
#define __P011	__PAGE_COPY
#define __P100	__PAGE_READONLY_EXEC
#define __P101	__PAGE_READONLY_EXEC
#define __P110	__PAGE_COPY_EXEC
#define __P111	__PAGE_COPY_EXEC

#define __S000	__PAGE_NONE
#define __S001	__PAGE_READONLY
#define __S010	__PAGE_SHARED
#define __S011	__PAGE_SHARED
#define __S100	__PAGE_READONLY_EXEC
#define __S101	__PAGE_READONLY_EXEC
#define __S110	__PAGE_SHARED_EXEC
#define __S111	__PAGE_SHARED_EXEC
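
/*
 * The __P/__S tables above feed the generic protection_map[]: the index
 * is vm_flags & (VM_READ|VM_WRITE|VM_EXEC), and the three digits in the
 * macro names read as <exec><write><read>.  For example, a private
 * PROT_READ|PROT_WRITE mapping uses __P011 == __PAGE_COPY: PTE_WRITE is
 * left clear so the first store faults and triggers copy-on-write,
 * whereas the shared variant __S011 == __PAGE_SHARED maps the page
 * genuinely writable.
 */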

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_AF)
#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~PTE_WRITE;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~PTE_DIRTY;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= PTE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~PTE_AF;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= PTE_AF;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= PTE_SPECIAL;
	return pte;
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid_user(pte)) {
		if (!pte_special(pte) && pte_exec(pte))
			__sync_icache_dcache(pte, addr);
		if (pte_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
	}

	set_pte(ptep, pte);
}
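
/*
 * Note on the PTE_RDONLY juggling above: this version of the arm64 code
 * has no hardware dirty bit, so dirty state is tracked in software.  A
 * clean but writable user PTE is installed read-only; the first store
 * then takes a permission fault, and the fault handler re-installs the
 * PTE with PTE_DIRTY set, which clears PTE_RDONLY here.
 */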

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
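
/*
 * Bit 1 of a descriptor (PTE_TABLE_BIT) distinguishes a table/page entry
 * (set) from a block entry (clear) in VMSAv8-64, so pte_mkhuge() simply
 * turns a page entry into a block mapping suitable for installation at
 * the PMD level.
 */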

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		2
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
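
/*
 * Worked example, assuming the 4K granule: PMD_SHIFT is 21, so
 * HPAGE_SIZE is 1UL << 21 == 2MB and HUGETLB_PAGE_ORDER is 21 - 12 == 9,
 * i.e. one huge page covers 512 base pages.
 */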

#define __HAVE_ARCH_PTE_SPECIAL

/*
 * Software PMD bits for THP
 */

#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 57)

/*
 * THP definitions.
 */
#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
#endif

#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }

PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
PMD_BIT_FUNC(mkwrite,	&= ~PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty,	|= PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung,	|= PMD_SECT_AF);
PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
			      PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
			      PMD_SECT_VALID;
	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
	return pmd;
}

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)

static inline int has_transparent_hugepage(void)
{
	return 1;
}

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb();
}
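
/*
 * The dsb() above makes the table update visible before any subsequent
 * memory operation, so the page-table walker cannot observe a stale
 * entry once set_pmd() returns; TLB invalidation, where required, is
 * still the caller's responsibility.
 */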

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#ifndef CONFIG_ARM64_64K_PAGES

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

#endif	/* CONFIG_ARM64_64K_PAGES */

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
#endif

/* Find an entry in the third-level page table.. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
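
/*
 * Putting the lookup helpers together, a software walk of the kernel
 * page tables for an address looks roughly like (sketch only, no
 * locking shown):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * With fewer than four levels the pud is folded away generically, so
 * real callers go through pud_offset(), which is effectively a
 * pass-through, rather than casting as above.
 */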

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
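
/*
 * pte_modify() is the mprotect() helper: only the permission/presence
 * bits in @mask are taken from @newprot; the pfn and the memory
 * attribute (AttrIndx) bits of the old PTE are preserved.  For example,
 * pte_modify(pte, __PAGE_READONLY) clears PTE_WRITE but still maps the
 * same page with the same cacheability.
 */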

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		PTE_FILE
 *	bits 3-8:	swap type
 *	bits 9-57:	swap offset
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	49
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
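
/*
 * Worked example: __swp_entry(2, 0x1234) yields
 *	(2 << 3) | (0x1234 << 9) == 0x246810,
 * from which __swp_type() recovers 2 and __swp_offset() recovers 0x1234.
 * Bits 0-1 stay zero, so a swap PTE is never pte_present().
 */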

/*
 * Ensure that there are not more swap files than can be encoded in the
 * kernel PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		PTE_FILE
 *	bits 3-57:	file offset / PAGE_SIZE
 */
#define pte_file(pte)		(pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | PTE_FILE)

#define PTE_FILE_MAX_BITS	55
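
/*
 * Worked example: pgoff_to_pte(0x10) produces (0x10 << 3) | PTE_FILE
 * == 0x84; pte_to_pgoff() shifts the PTE_FILE bit back out and returns
 * 0x10.  Bits 3-57 give the 55 bits advertised by PTE_FILE_MAX_BITS.
 */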
4f04d8f0
CM
430
431extern int kern_addr_valid(unsigned long addr);
432
433#include <asm-generic/pgtable.h>
434
4f04d8f0
CM
435#define pgtable_cache_init() do { } while (0)
436
437#endif /* !__ASSEMBLY__ */
438
439#endif /* __ASM_PGTABLE_H */