/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although used in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine the PTE flags to 12 bits.
 *   This is a must for the 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise it
 *   has the Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become
 *   simpler
 *
 * vineetg: April 2010
 *  -Switched from an 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have a different value in the TLB.
 * e.g. MMU v2: the K_READ bit is 8 and so is GLOBAL (possible because they
 *      live in the separate PD0 and PD1, which combined form a translation
 *      entry), while from the PTE perspective they are 8 and 9 respectively.
 * with MMU v3: Most bits (except SHARED) sit at their exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)  /* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)  /* Page is cached (H) */
#define _PAGE_U_EXECUTE     (1<<3)  /* Page has user execute perm (H) */
#define _PAGE_U_WRITE       (1<<4)  /* Page has user write perm (H) */
#define _PAGE_U_READ        (1<<5)  /* Page has user read perm (H) */
#define _PAGE_K_EXECUTE     (1<<6)  /* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE       (1<<7)  /* Page has kernel write perm (H) */
#define _PAGE_K_READ        (1<<8)  /* Page has kernel read perm (H) */
#define _PAGE_GLOBAL        (1<<9)  /* Page is global (H) */
#define _PAGE_MODIFIED      (1<<10) /* Page modified (dirty) (S) */
#define _PAGE_FILE          (1<<10) /* page cache/ swap (S) */
#define _PAGE_PRESENT       (1<<11) /* TLB entry is valid (H) */

#else

/* PD1 */
#define _PAGE_CACHEABLE     (1<<0)  /* Page is cached (H) */
#define _PAGE_U_EXECUTE     (1<<1)  /* Page has user execute perm (H) */
#define _PAGE_U_WRITE       (1<<2)  /* Page has user write perm (H) */
#define _PAGE_U_READ        (1<<3)  /* Page has user read perm (H) */
#define _PAGE_K_EXECUTE     (1<<4)  /* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE       (1<<5)  /* Page has kernel write perm (H) */
#define _PAGE_K_READ        (1<<6)  /* Page has kernel read perm (H) */
#define _PAGE_ACCESSED      (1<<7)  /* Page is accessed (S) */

/* PD0 */
#define _PAGE_GLOBAL        (1<<8)  /* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)  /* TLB entry is valid (H) */
#define _PAGE_SHARED_CODE   (1<<10) /* Shared Code page with cmn vaddr
                                       usable for shared TLB entries (H) */

#define _PAGE_MODIFIED      (1<<11) /* Page modified (dirty) (S) */
#define _PAGE_FILE          (1<<12) /* page cache/ swap (S) */

#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
#endif

/* Kernel mode is allowed all permissions on all pages */
#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
                        _PAGE_GLOBAL | _PAGE_PRESENT)

#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
#else
#define _PAGE_DEF_CACHEABLE (0)
#endif

/* Helper for every "user" page
 * -kernel can R/W/X
 * -by default cached, unless configured otherwise
 * -present in memory
 */
#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)

#define _PAGE_READ     (_PAGE_U_READ | _PAGE_K_READ)
#define _PAGE_WRITE    (_PAGE_U_WRITE | _PAGE_K_WRITE)
#define _PAGE_EXECUTE  (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)

/* More abbreviated helpers */
#define PAGE_U_NONE    __pgprot(___DEF)
#define PAGE_U_R       __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R     __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R     __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R   __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
                                _PAGE_EXECUTE)

#define PAGE_SHARED    PAGE_U_W_R

/* While the kernel runs out of untranslated space, vmalloc/modules use a
 * chunk of kernel vaddr space - visible in all addr spaces, but kernel mode
 * only. Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL    __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have a 1:1 mapping,
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED,
 * which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause divergence from the 1:1 mapping
 *
 * 1. Although ARC700 can do exclusive execute/write protection (meaning R
 *    can be tracked independently of X/W, unlike some other CPUs), still to
 *    keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *    This is to enable the COW mechanism
 *
 * (a sketch of how generic mm consumes these tables follows them below)
 */
	/* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R        /* Pvt-W => !W */
#define __P011  PAGE_U_R        /* Pvt-W => !W */
#define __P100  PAGE_U_X_R      /* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R      /* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R      /* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R      /* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R      /* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R    /* X => R */
#define __S111  PAGE_U_X_W_R

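/*
 * Illustrative sketch, assuming the generic protection_map[] machinery:
 * mm builds protection_map[] out of the __P/__S entries above and picks
 * a vma's pgprot roughly as
 *
 *	prot = protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 */
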
/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 *
 *                  32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  |
 *       |                  ---> index into Page Table
 *       |
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE    PAGE_SHIFT

/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE    8
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE    8
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE    9
#endif

#define BITS_FOR_PGD    (32 - BITS_FOR_PTE - BITS_IN_PAGE)

#define PGDIR_SHIFT     (BITS_FOR_PTE + BITS_IN_PAGE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)    /* vaddr span, not PGD sz */
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
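
/*
 * Worked example, default 8K page (PAGE_SHIFT == 13): BITS_FOR_PTE == 8,
 * so BITS_FOR_PGD == 32 - 8 - 13 == 11, giving the 11:8:13 split noted in
 * the changelog above: PTRS_PER_PGD == 2048, PTRS_PER_PTE == 256, and a
 * page table body is 256 entries * 4 bytes == 1K (the alloc is still a
 * full page, hence the unused-7K TODO above).
 */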

#ifdef __ASSEMBLY__
#define PTRS_PER_PTE    (1 << BITS_FOR_PTE)
#define PTRS_PER_PGD    (1 << BITS_FOR_PGD)
#else
#define PTRS_PER_PTE    (1UL << BITS_FOR_PTE)
#define PTRS_PER_PGD    (1UL << BITS_FOR_PGD)
#endif
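/* two variants since the assembler can't digest the "UL" suffix */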
/*
 * Number of PGD entries a userland program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)

/*
 * There are no special requirements for the lowest virtual address we
 * permit any user space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS  0


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)    (virt_to_page(empty_zero_page))

#define pte_unmap(pte)          do { } while (0)
#define pte_unmap_nested(pte)   do { } while (0)

#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref'd by a PMD entry */
#define pmd_page(pmd)       virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref'd by a PMD entry */
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, set up the PGD entry with the PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}
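
/*
 * Illustrative note: the pgalloc side (pmd_populate_kernel() and friends)
 * is the expected caller of pmd_set(), planting a freshly allocated page
 * table into its PGD slot.
 */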

#define pte_none(x)     (!pte_val(x))
#define pte_present(x)  (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)   set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)     (!pmd_val(x))
#define pmd_bad(x)      ((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)  (pmd_val(x))
#define pmd_clear(xp)   do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x) (mem_map + \
		(unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))

#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
	pte;								\
})

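/*
 * Illustrative note: mk_pte() can simply add phys addr and pgprot because
 * the PTE flags are confined below PAGE_SHIFT (the "12 bits" changelog
 * item above), so for a page-aligned address the two never collide.
 */
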
/* TBD: Non linear mapping stuff */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}

#define PTE_FILE_MAX_BITS   30
#define pgoff_to_pte(x)     __pte(x)
#define pte_to_pgoff(x)     (pte_val(x) >> 2)
#define pte_pfn(pte)        (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)  (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr)   (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset takes a @dir ptr to a PMD entry (a PGD entry in our 2-tier
 * paging system) and returns a ptr to the PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so the following are the same */
#define pte_offset_kernel(dir, addr)    pte_offset(dir, addr)
#define pte_offset_map(dir, addr)       pte_offset(dir, addr)

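/*
 * Illustrative sketch, assuming the folded pud/pmd from pgtable-nopmd.h:
 * a full software walk of @addr in @mm chains the helpers above as
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	// folded: no-op
 *	pmd_t *pmd = pmd_offset(pud, addr);	// folded: no-op
 *	pte_t *pte = pte_offset(pmd, addr);
 */
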
/* Zoo of pte_xxx functions */
#define pte_read(pte)       (pte_val(pte) & _PAGE_READ)
#define pte_write(pte)      (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)      (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)      (pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)    (0)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,   |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,   &= ~(_PAGE_MODIFIED));
PTE_BIT_FUNC(mkdirty,   |= (_PAGE_MODIFIED));
PTE_BIT_FUNC(mkold,     &= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,   |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,    |= (_PAGE_EXECUTE));

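/*
 * e.g. PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)) above expands to
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) &= ~(_PAGE_WRITE);
 *		return pte;
 *	}
 */
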
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

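/*
 * e.g. pte_modify(pte, PAGE_U_R) keeps the pfn and the accessed/dirty
 * state (_PAGE_CHG_MASK) of @pte and takes every other bit from the new
 * protection: the mprotect() path in a nutshell.
 */
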
/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)
#define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)    (((mm)->pgd)+pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to the Page Directory of the "current" task
 * in an MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of a NON "current" task.
 * Use this macro only when you are certain that "current" is current,
 * e.g. when dealing with signal frame setup code etc.
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif
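
/*
 * Illustrative only: in code guaranteed to be running on behalf of
 * "current", e.g. signal frame setup, the fast variant saves the pointer
 * chasing:
 *
 *	pgd_t *pgd = pgd_offset_fast(current->mm, addr);
 */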

extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);
/* Encode a swap {type,off} tuple into a PTE.
 * We reserve the bottom 13 bits for the 5-bit @type, keeping bits 12..5
 * zero; this ensures that both _PAGE_FILE and _PAGE_PRESENT are zero in a
 * PTE holding a swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into its constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

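/*
 * Worked example: __swp_entry(3, 0x10) packs to 3 | (0x10 << 13); bits
 * 12..5 stay zero, so neither _PAGE_PRESENT nor _PAGE_FILE is set, and
 * __swp_type()/__swp_offset() peel the fields back out losslessly.
 */
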
/* NOPs, to keep the generic kernel happy */
#define __pte_to_swp_entry(pte)     ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)       ((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma, from, pfn, size, prot) \
			remap_pfn_range(vma, from, pfn, size, prot)

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()   do { } while (0)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_ARC_PGTABLE_H */