/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits
 *   This is a must for 4K pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>
#include <linux/const.h>

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined forms a translation entry),
 *      while from the PTE perspective they are 8 and 9 respectively
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif

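/*
 * Worked example (editorial, MMU v3 layout): a present, cached user data
 * page with read+write permission carries
 *
 *	_PAGE_PRESENT | _PAGE_CACHEABLE | _PAGE_READ | _PAGE_WRITE
 *	   = (1<<9)   |     (1<<0)      |   (1<<3)   |   (1<<2)    = 0x20d
 *
 * the (S)oftware-only ACCESSED/DIRTY bits get set later, as the page is
 * referenced and written.
 */
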
/* vmalloc permissions */
#define _K_PAGE_PERMS	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			 _PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif

/* Defaults for every user page */
#define ___DEF		(_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/* More Abbreviated helpers */
#define PAGE_U_NONE	__pgprot(___DEF)
#define PAGE_U_R	__pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R	__pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						 _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While the kernel runs out of untranslated space, vmalloc/modules use a
 * chunk of user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL		__pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE	__pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#else
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
#endif

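/*
 * Editorial note: these masks mirror how the TLB refill code (see
 * create_tlb() in arch/arc/mm/tlb.c) tears the single Linux PTE into the
 * two hardware descriptors - roughly PD0 = vaddr | ASID |
 * (pte & PTE_BITS_IN_PD0), and PD1 = perms derived from PTE_BITS_RWX |
 * (pte & PTE_BITS_NON_RWX_IN_PD1), i.e. the pfn and cacheability.
 */
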
/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable COW mechanism
 */
	/* xwr */
#define __P000	PAGE_U_NONE
#define __P001	PAGE_U_R
#define __P010	PAGE_U_R	/* Pvt-W => !W */
#define __P011	PAGE_U_R	/* Pvt-W => !W */
#define __P100	PAGE_U_X_R	/* X => R */
#define __P101	PAGE_U_X_R
#define __P110	PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111	PAGE_U_X_R	/* Pvt-W => !W */

#define __S000	PAGE_U_NONE
#define __S001	PAGE_U_R
#define __S010	PAGE_U_W_R	/* W => R */
#define __S011	PAGE_U_W_R
#define __S100	PAGE_U_X_R	/* X => R */
#define __S101	PAGE_U_X_R
#define __S110	PAGE_U_X_W_R	/* X => R */
#define __S111	PAGE_U_X_W_R

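/*
 * Editorial example: a MAP_PRIVATE mmap with PROT_READ|PROT_WRITE asks
 * for __P011, which the table above degrades to PAGE_U_R, i.e. not
 * writable. The first store then faults, and the fault handler, seeing
 * VM_WRITE in the vma, breaks COW by handing the task its own copy of
 * the page before making the PTE writable. A MAP_SHARED mapping
 * (__S011 -> PAGE_U_W_R) is writable from the start.
 */
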
/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 *
 *			32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  |
 *       |                  ---> index into Page Table
 *       |
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE	PAGE_SHIFT

/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE	8		/* 11:8:13 */
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE	8		/* 10:8:14 */
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE	9		/* 11:9:12 */
#endif

#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)

#define PGDIR_SHIFT	(32 - BITS_FOR_PGD)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define	PTRS_PER_PTE	_BITUL(BITS_FOR_PTE)
#define	PTRS_PER_PGD	_BITUL(BITS_FOR_PGD)
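
/*
 * Worked example (editorial): with the default 8K page (11:8:13 split)
 * PGDIR_SHIFT = 21, so each PGD entry spans 2M of vaddr, with
 * PTRS_PER_PGD = 2048 and PTRS_PER_PTE = 256. A vaddr such as 0x70654321
 * then decomposes as:
 *
 *	pgd_index = 0x70654321 >> 21          = 0x383
 *	pte_index = (0x70654321 >> 13) & 0xff = 0x2a
 *	offset    = 0x70654321 & 0x1fff       = 0x321
 */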
/*
 * Number of entries a userland program uses.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any
 * user space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS	0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, set up the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)		(!pmd_val(x))
#define	pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x) (mem_map + \
		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
				PAGE_SHIFT)))

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
				 pgprot_val(prot)))
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) + \
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)	pte_offset(dir, addr)
#define pte_offset_map(dir, addr)	pte_offset(dir, addr)

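/*
 * Illustrative sketch (editorial, not part of the original header): a
 * manual software walk from an mm to the PTE of @addr using only the
 * helpers above. With <asm-generic/pgtable-nopmd.h> the PGD entry
 * doubles as the PMD entry, hence the cast. A real walker must also
 * hold the appropriate page table locks:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = (pmd_t *)pgd;
 *	pte_t *pte = NULL;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		pte = pte_offset(pmd, addr);
 */
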
/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));

#define __HAVE_ARCH_PTE_SPECIAL

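/*
 * Usage example (editorial sketch): the generated helpers are pure value
 * transforms - each returns a new pte_t without touching memory, so a
 * fault path that wants the change to stick must write it back, roughly:
 *
 *	pte = pte_mkdirty(pte_mkwrite(pte_mkyoung(pte)));
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 */
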
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

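/*
 * Editorial example: mprotect() style permission changes go through
 * pte_modify(), where _PAGE_CHG_MASK shields the pfn plus the sticky
 * ACCESSED/DIRTY bits while the permission bits are swapped wholesale:
 *
 *	pte = pte_modify(pte, PAGE_U_R);   (perms dropped to R, pfn kept)
 */
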
/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in a MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of a NON "current" task.
 * Use this macro only when you are certain that "current" is indeed the
 * task in question, e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif

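/*
 * Editorial example: a signal delivery path, which by construction runs
 * on behalf of "current", may safely use the cached PGD:
 *
 *	pgd_t *pgd = pgd_offset_fast(current->mm, addr);
 *
 * whereas code walking a foreign task's tables (ptrace, /proc) must use
 * pgd_offset(mm, addr), as SCRATCH_DATA0 only tracks current's PGD.
 */
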
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * PAGE_PRESENT is zero in a PTE holding swap "identifier"
 */
#define __swp_entry(type, off)		((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

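/*
 * Worked example (editorial): __swp_entry(5, 100) encodes to
 * (100 << 13) | 5 = 0xc8005; __swp_type() recovers 5 and __swp_offset()
 * recovers 100. Bits 12-5 stay zero, so _PAGE_PRESENT (bit 9 or 10,
 * depending on MMU version) is never set in a swap "identifier".
 */
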
#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* __ASSEMBLY__ */

#endif