/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0. When memory is low
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * or empty_bad_page_table is returned to the higher-level code, so
 * that the failure is recognized later on. Linux does not seem to
 * handle these failures very well, though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */
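
/*
 * Illustrative virtual address decomposition for the default 4kB-page,
 * 3-level configuration (PGD_ORDER 1, PMD_ORDER 0, PTE_ORDER 0), as it
 * follows from the shift definitions below; a sketch, not an ABI:
 *
 *	bits 39..30	pgd index   (10 bits, 1024 entries in the 8kB pgd)
 *	bits 29..21	pmd index   ( 9 bits,  512 entries in a 4kB pmd)
 *	bits 20..12	pte index   ( 9 bits,  512 entries in a 4kB pte table)
 *	bits 11..0	page offset (12 bits)
 */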
/* PGDIR_SHIFT determines the size of the area a top-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
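
/*
 * Worked example, assuming CONFIG_PAGE_SIZE_4KB (PAGE_SHIFT == 12,
 * PMD_ORDER == 0, PTE_ORDER == 0):
 *
 *	PMD_SHIFT   = 12 + (12 + 0 - 3) = 21, so PMD_SIZE   = 2MB
 *	PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30, so PGDIR_SIZE = 1GB
 *
 * With the pmd folded, e.g. CONFIG_PAGE_SIZE_64KB without
 * CONFIG_MIPS_VA_BITS_48 (PAGE_SHIFT == 16):
 *
 *	PGDIR_SHIFT = 16 + 16 + 0 - 3 = 29, so PGDIR_SIZE = 512MB
 */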

/*
 * For 4kB page size we use a 3 level page tree with an 8kB top-level
 * directory (PGD_ORDER 1), which permits us to map 40 bits of virtual
 * address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space. Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space. We could add a third level, but at
 * the moment there seems to be no need for it.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER		1
#else
#define PGD_ORDER		0
#endif
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER		0
#else
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
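
/*
 * Worked example: with 4kB pages (PGD_ORDER 1, PMD_ORDER 0, PTE_ORDER 0)
 * and 8-byte entries this yields
 *
 *	PTRS_PER_PGD = (4096 << 1) / 8 = 1024
 *	PTRS_PER_PMD =  4096       / 8 =  512
 *	PTRS_PER_PTE =  4096       / 8 =  512
 *
 * i.e. 1024 * 512 * 512 * 4kB = 2^40 bytes of virtual address space,
 * matching the 40 bits quoted above.
 */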

#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg. Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
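
/*
 * Example (a sketch, assuming 4kB pages and a CPU implementing 40
 * virtual address bits, i.e. cpu_vmbits == 40): the page table tree
 * spans 2^40 bytes, min(2^40, 1UL << 40) == 2^40, and so
 * VMALLOC_END == MAP_BASE + 2^40 - 2^32. The min() clamps the range
 * on CPUs that implement fewer VA bits than the tree could map.
 */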

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
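
/*
 * Illustrative lookup using the helpers above (a sketch; "mm" and
 * "addr" are hypothetical, and the none/bad checks a real walk
 * needs are elided):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */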

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 16) | (offset << 24);
	return pte;
}

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
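
/*
 * Worked example: __swp_entry(1, 2) encodes to (1 << 16) | (2 << 24)
 * == 0x2010000; __swp_type() recovers 1 and __swp_offset() recovers 2.
 * The low 16 bits stay zero, so a swap entry is never mistaken for a
 * present pte.
 */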

#endif /* _ASM_PGTABLE_64_H */