/*
 * linux/include/asm-arm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/memory.h>
#include <asm/proc-fns.h>
#include <asm/arch/vmalloc.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a 4kB hole between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
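
/*
 * Illustrative sketch only: a platform's <asm/arch/vmalloc.h> might pin
 * the vmalloc space like this (the value below is hypothetical, not
 * taken from any particular machine class):
 *
 *	#define VMALLOC_END	(PAGE_OFFSET + 0x18000000)
 */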

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries. Each entry
 * is one 32-bit word. Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux, on the other hand, has a three level page table structure, which
 * can be wrapped to fit a two level page table structure easily - using the
 * PGD and PTE only. However, Linux also expects one "PTE" table per page,
 * and at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.) The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs. We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE. This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_establish() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set. Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry. Again, ptep_establish() will ensure that the TLB is up to
 * date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE. Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048

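/*
 * A quick arithmetic check of the layout above (illustrative note, not
 * from the original source): 2048 pgd entries of 8 bytes each fill the
 * 16kB ARM first-level table, and each "PTE" page carries 2 x 1024
 * bytes of hardware tables followed by 2 x 1024 bytes of Linux tables,
 * so a Linux pte always lives exactly 2048 bytes above its h/w shadow.
 */
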
/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT 21
#define PGDIR_SHIFT 21

#define LIBRARY_TEXT_START 0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
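
/*
 * Worked example (illustrative): with PMD_SHIFT == PGDIR_SHIFT == 21,
 * PMD_SIZE and PGDIR_SIZE are both 1 << 21 == 2MB, so the 2048 pgd
 * entries cover 2048 * 2MB == 4GB, the whole 32-bit address space.
 */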

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping to be placed. This is particularly important for CPUs whose
 * exception vectors are not located high (non-high vector CPUs).
 */
#define FIRST_USER_ADDRESS PAGE_SIZE

#define FIRST_USER_PGD_NR 1
#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT 24
#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))
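
/*
 * Illustrative note: 1 << 24 == 16MB per supersection; on ARMv6 a
 * supersection mapping is replicated across 16 consecutive first-level
 * entries, which is why only 16MB-aligned base addresses are usable.
 */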

/*
 * Hardware page table definitions.
 *
 * + Level 1 descriptor (PMD)
 *   - common
 */
#define PMD_TYPE_MASK (3 << 0)
#define PMD_TYPE_FAULT (0 << 0)
#define PMD_TYPE_TABLE (1 << 0)
#define PMD_TYPE_SECT (2 << 0)
#define PMD_BIT4 (1 << 4)
#define PMD_DOMAIN(x) ((x) << 5)
#define PMD_PROTECTION (1 << 9) /* v5 */
/*
 *   - section
 */
#define PMD_SECT_BUFFERABLE (1 << 2)
#define PMD_SECT_CACHEABLE (1 << 3)
#define PMD_SECT_AP_WRITE (1 << 10)
#define PMD_SECT_AP_READ (1 << 11)
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_APX (1 << 15) /* v6 */
#define PMD_SECT_S (1 << 16) /* v6 */
#define PMD_SECT_nG (1 << 17) /* v6 */
#define PMD_SECT_SUPER (1 << 18) /* v6 */

#define PMD_SECT_UNCACHED (0)
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)

/*
 *   - coarse table (not used)
 */

/*
 * + Level 2 descriptor (PTE)
 *   - common
 */
#define PTE_TYPE_MASK (3 << 0)
#define PTE_TYPE_FAULT (0 << 0)
#define PTE_TYPE_LARGE (1 << 0)
#define PTE_TYPE_SMALL (2 << 0)
#define PTE_TYPE_EXT (3 << 0) /* v5 */
#define PTE_BUFFERABLE (1 << 2)
#define PTE_CACHEABLE (1 << 3)

/*
 *   - extended small page/tiny page
 */
#define PTE_EXT_XN (1 << 0) /* v6 */
#define PTE_EXT_AP_MASK (3 << 4)
#define PTE_EXT_AP0 (1 << 4)
#define PTE_EXT_AP1 (2 << 4)
#define PTE_EXT_AP_UNO_SRO (0 << 4)
#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
#define PTE_EXT_APX (1 << 9) /* v6 */
#define PTE_EXT_SHARED (1 << 10) /* v6 */
#define PTE_EXT_NG (1 << 11) /* v6 */

/*
 *   - small page
 */
#define PTE_SMALL_AP_MASK (0xff << 4)
#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
#define PTE_SMALL_AP_URW_SRW (0xff << 4)

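/*
 * Worked example (illustrative): a small page has four 2-bit AP fields,
 * one per 1kB subpage, in bits 4-11. Replicating the 2-bit value 0b01
 * (user none / system r/w) into all four fields gives 0b01010101 ==
 * 0x55; likewise 0b10 -> 0xaa and 0b11 -> 0xff, which is where the
 * constants above come from.
 */
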
/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the Linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer in the pmd refers to the hardware entries; the
 * "Linux" entries are stored 2048 bytes further on (see the layout
 * above and pmd_page_kernel() below).
 */
#define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
#define L_PTE_YOUNG (1 << 1)
#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
#define L_PTE_USER (1 << 4)
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
#define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */
#define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */

#ifndef __ASSEMBLY__

#include <asm/domain.h>

#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))

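/*
 * Illustrative sketch only (the real translation lives in the per-CPU
 * proc-*.S set_pte implementations; the helper name below is
 * hypothetical): the dirty/young emulation described at the top of this
 * file boils down to something like the following when a Linux pte is
 * turned into its hardware shadow.
 */
static inline unsigned long example_linux_to_hw_pte(unsigned long lpte)
{
        unsigned long hw = lpte & (PTE_CACHEABLE | PTE_BUFFERABLE);

        /* not present or not young: leave a faulting hardware entry */
        if ((lpte & (L_PTE_PRESENT | L_PTE_YOUNG)) !=
            (L_PTE_PRESENT | L_PTE_YOUNG))
                return PTE_TYPE_FAULT;

        hw |= PTE_TYPE_SMALL;

        /* grant hardware write permission iff writable AND dirty */
        if ((lpte & (L_PTE_WRITE | L_PTE_DIRTY)) == (L_PTE_WRITE | L_PTE_DIRTY))
                hw |= (lpte & L_PTE_USER) ? PTE_SMALL_AP_URW_SRW
                                          : PTE_SMALL_AP_UNO_SRW;
        else if (lpte & L_PTE_USER)
                hw |= PTE_SMALL_AP_URO_SRW;

        return hw;
}
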
/*
 * The following macros handle the cache and bufferable bits...
 */
#define _L_PTE_DEFAULT (L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE)
#define _L_PTE_READ (L_PTE_USER | L_PTE_EXEC)

extern pgprot_t pgprot_kernel;

#define PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL pgprot_kernel

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version. These get translated into the best that the
 * architecture can perform. Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED

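/*
 * Worked example (illustrative): the digits in __Pxwr/__Sxwr are the
 * exec/write/read bits of the requested protection. A PROT_READ |
 * PROT_WRITE private mapping therefore selects __P011 == PAGE_COPY, so
 * the first write faults and is satisfied by copy-on-write, while the
 * shared case selects __S011 == PAGE_SHARED, which is directly
 * writable.
 */
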
#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)

#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

#define set_pte(ptep, pte) cpu_set_pte(ptep,pte)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte) (pte_val(pte) & L_PTE_USER)
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)

/*
 * The following only work if pte_present() is false.
 */
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS 30

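/*
 * Worked example (illustrative): pgoff_to_pte() stores the file offset
 * in bits 2-31, giving PTE_FILE_MAX_BITS == 30. Bit 0 (L_PTE_PRESENT)
 * stays clear, so the entry faults on access, and bit 1 (L_PTE_FILE)
 * marks it as a nonlinear file entry rather than a swap entry.
 */
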
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec, |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);

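/*
 * For reference (illustrative expansion): PTE_BIT_FUNC(wrprotect,
 * &= ~L_PTE_WRITE) above expands to
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{ pte_val(pte) &= ~L_PTE_WRITE; return pte; }
 */
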
/*
 * Mark the prot value as uncacheable and unbufferable (noncached), or
 * as merely uncacheable but still bufferable (writecombine).
 */
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)           \
        do {                            \
                pmdpd[0] = pmdps[0];    \
                pmdpd[1] = pmdps[1];    \
                flush_pmd_entry(pmdpd); \
        } while (0)

#define pmd_clear(pmdp)                 \
        do {                            \
                pmdp[0] = __pmd(0);     \
                pmdp[1] = __pmd(0);     \
                clean_pmd_entry(pmdp);  \
        } while (0)

static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
        unsigned long ptr;

        /* round down to the base of the 2 x 1kB hardware pte tables */
        ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
        /* the Linux pte tables sit 2048 bytes above their h/w shadow */
        ptr += PTRS_PER_PTE * sizeof(void *);

        return __va(ptr);
}

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))

/*
 * Convert a count of pages to megabytes. (We never have highmem, so
 * the permanent address of a page is always its trivial direct
 * mapping.)
 */
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_present(pgd) (1)
#define pgd_clear(pgdp) do { } while (0)
#define set_pgd(pgd,pgdp) do { } while (0)

#define page_pte_prot(page,prot) mk_pte(page, prot)
#define page_pte(page) mk_pte(page, __pgprot(0))

/* to find an entry in a page-table-directory */
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr) ((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

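/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header): a complete walk using the macros above. On this folded
 * two-level layout the pmd step is a simple cast and the pte step
 * indexes the 512-entry "Linux" table.
 */
static inline pte_t *example_pte_lookup(pgd_t *pgd_base, unsigned long addr)
{
        pgd_t *pgd = pgd_base + pgd_index(addr);        /* one of 2048 */
        pmd_t *pmd = pmd_offset(pgd, addr);             /* folded: same entry */

        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return (pte_t *)0;
        return pte_offset_kernel(pmd, addr);            /* Linux pte, not h/w */
}
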
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        /* only the protection bits move; DIRTY and YOUNG are preserved */
        const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
}
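
/*
 * Illustrative use (hypothetical call site): an mprotect()-style
 * permission change keeps the emulated dirty/young state intact:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */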

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x) (((x).val >> 2) & 0x7f)
#define __swp_offset(x) ((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })

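/*
 * Worked example (illustrative): bits 0-1 stay clear so a swap entry
 * is neither present nor a file pte, the type occupies the 7 bits 2-8,
 * and the offset occupies the 23 bits 9-31: 2^23 pages * 4kB == 32GB,
 * matching the limit quoted above.
 */
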
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical range of `size' bytes, starting at page `pfn', with
 * page protection `prot', into the virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
                remap_pfn_range(vma, from, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASMARM_PGTABLE_H */