/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same, although used in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits
 *   This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and rest 7K is unused
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <linux/bits.h>
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/mmu.h>	/* to propagate CONFIG_ARC_MMU_VER <n> */

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
 *      separate PD0 and PD1, which combined forms a translation entry)
 *      while for PTE perspective, they are 8 and 9 respectively
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif
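
/*
 * Illustrative sketch (not from the original file): with the MMU v3 layout
 * above, a present, cached, user read+write data page carries
 *
 *	_PAGE_PRESENT | _PAGE_CACHEABLE | _PAGE_READ | _PAGE_WRITE
 *	= (1<<9) | (1<<0) | (1<<3) | (1<<2) = 0x20d
 *
 * in its PTE flag bits; the same page under the MMU v2 layout would instead
 * be (1<<10) | (1<<2) | (1<<5) | (1<<4) = 0x434.
 */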

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif

/* Defaults for every user page */
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)

/* More Abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						       _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
 * user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#else
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
#endif
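
/*
 * Sketch of the arithmetic (assumption: PAE40 extends the physical address
 * to 40 bits): 0xff00000000 covers paddr bits [39:32], which under PAE40
 * live in PD1 alongside the PAGE_MASK-aligned pfn bits [31:PAGE_SHIFT] and
 * the cacheable bit; so PTE_BITS_NON_RWX_IN_PD1 is everything PD1 keeps
 * besides the RWX permission bits.
 */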

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable COW mechanism
 */
					/* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R	/* Pvt-W => !W */
#define __P011  PAGE_U_R	/* Pvt-W => !W */
#define __P100  PAGE_U_X_R	/* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R	/* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R	/* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R	/* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R	/* X => R */
#define __S111  PAGE_U_X_W_R
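
/*
 * Illustrative COW sketch (generic mm flow, not code from this file): a
 * private writable mapping starts out as __P011 == PAGE_U_R, so the first
 * store faults; after copying the page, the generic fault handler grants
 * write access roughly via
 *
 *	pte = pte_mkdirty(pte_mkwrite(pte));	// sets _PAGE_WRITE
 *
 * which is why no __Pxxx entry above includes _PAGE_WRITE.
 */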

/****************************************************************
 * 2 tier (PGD:PTE) software page walker
 *
 * [31]		    32 bit virtual address		    [0]
 * -------------------------------------------------------
 * |               | <------------ PGDIR_SHIFT ----------> |
 * |               |                                       |
 * | BITS_FOR_PGD  |  BITS_FOR_PTE  | <-- PAGE_SHIFT -->   |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * In a single page size configuration, only PAGE_SHIFT is fixed
 * So both PGD and PTE sizing can be tweaked
 *  e.g. 8K page (PAGE_SHIFT 13) can have
 *  - PGDIR_SHIFT 21 -> 11:8:13 address split
 *  - PGDIR_SHIFT 24 -> 8:11:13 address split
 *
 * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
 * so the sizing flexibility is gone.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT	24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT	21
#else
/*
 * Only Normal page support so "hackable" (see comment above)
 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
 */
#define PGDIR_SHIFT	21
#endif

#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)

#define PGDIR_SIZE	BIT(PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define	PTRS_PER_PTE	BIT(BITS_FOR_PTE)
#define	PTRS_PER_PGD	BIT(BITS_FOR_PGD)
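
/*
 * Worked example (illustrative): the default PGDIR_SHIFT of 21 with 8K pages
 * (PAGE_SHIFT 13) gives BITS_FOR_PTE = 8 and BITS_FOR_PGD = 11, i.e. the
 * 11:8:13 split above: PTRS_PER_PGD = 2048, PTRS_PER_PTE = 256, and each
 * PGD entry spans PGDIR_SIZE = 2 MB of virtual address space.
 */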

/*
 * Number of entries a user land program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS      0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)			(!pte_val(x))
#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)			(!pmd_val(x))
#define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)			(pmd_val(x))
#define pmd_leaf(x)			(pmd_val(x) & _PAGE_HW_SZ)
#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)		pte_offset(dir, addr)
#define pte_offset_map(dir, addr)		pte_offset(dir, addr)
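
/*
 * Minimal walk sketch (illustrative only; the pud/pmd levels are folded by
 * <asm-generic/pgtable-nopmd.h>): resolving a kernel vaddr boils down to
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pte_t *pte = pte_offset_kernel((pmd_t *)pgd, addr);	// levels folded
 *
 * i.e. the PGD entry directly holds the PTE table pointer (see pmd_set()).
 */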

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));

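/*
 * For instance, PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)) above expands to
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= (_PAGE_DIRTY);
 *		return pte;
 *	}
 */
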
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
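
/*
 * Usage note (sketch of the generic mprotect() path): protection changes do
 *
 *	pte = pte_modify(pte, newprot);
 *
 * and _PAGE_CHG_MASK above is what keeps the pfn plus the ACCESSED, DIRTY
 * and SPECIAL bits intact across the change.
 */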

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in a MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of NON "current"
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifdef ARC_USE_SCRATCH_REG
#define pgd_offset_fast(mm, addr)				\
({								\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);	\
	pgd_base + pgd_index(addr);				\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif

extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * PAGE_PRESENT is zero in a PTE holding swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
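
/*
 * Worked example (illustrative): __swp_entry(5, 0x1000) packs to
 * (5 & 0x1f) | (0x1000 << 13) = 0x2000005; __swp_type() recovers 5 and
 * __swp_offset() recovers 0x1000, while bits 12-5 (which include
 * _PAGE_PRESENT) stay zero, so the entry can never look like a valid
 * translation to the MMU.
 */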

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* __ASSEMBLY__ */

#endif