]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle | |
7 | * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. | |
8 | */ | |
9 | #ifndef _ASM_PGTABLE_64_H | |
10 | #define _ASM_PGTABLE_64_H | |
11 | ||
1da177e4 LT |
12 | #include <linux/linkage.h> |
13 | ||
14 | #include <asm/addrspace.h> | |
15 | #include <asm/page.h> | |
16 | #include <asm/cachectl.h> | |
17 | ||
c6e8b587 RB |
18 | #include <asm-generic/pgtable-nopud.h> |
19 | ||
1da177e4 LT |
20 | /* |
21 | * Each address space has 2 4K pages as its page directory, giving 1024 | |
22 | * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a | |
c6e8b587 RB |
23 | * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page |
24 | * tables. Each page table is also a single 4K page, giving 512 (== | |
25 | * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to | |
26 | * invalid_pmd_table, each pmd entry is initialized to point to | |
1da177e4 LT |
27 | * invalid_pte_table, each pte is initialized to 0. When memory is low, |
28 | * and a pmd table or a page table allocation fails, empty_bad_pmd_table | |
29 | * and empty_bad_page_table is returned back to higher layer code, so | |
30 | * that the failure is recognized later on. Linux does not seem to | |
31 | * handle these failures very well though. The empty_bad_page_table has | |
32 | * invalid pte entries in it, to force page faults. | |
33 | * | |
34 | * Kernel mappings: kernel mappings are held in the swapper_pg_table. | |
35 | * The layout is identical to userspace except it's indexed with the | |
36 | * fault address - VMALLOC_START. | |
37 | */ | |
38 | ||
/* PMD_SHIFT determines the size of the area a second-level page table can map */
/*
 * The "+ ORDER - 3" term: a table occupies (PAGE_SIZE << ORDER) bytes and
 * each entry is an 8-byte (2^3) pointer, so that many address bits are
 * resolved per level (see the layout comment at the top of this file).
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* bytes mapped by one pmd entry */
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* bytes mapped by one pgd entry */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
48 | ||
49 | /* | |
c6e8b587 | 50 | * For 4kB page size we use a 3 level page tree and an 8kB pud, which |
1da177e4 LT |
51 | * permits us mapping 40 bits of virtual address space. |
52 | * | |
53 | * We used to implement 41 bits by having an order 1 pmd level but that seemed | |
54 | * rather pointless. | |
55 | * | |
56 | * For 8kB page size we use a 3 level page tree which permits a total of | |
57 | * 8TB of address space. Alternatively a 33-bit / 8GB organization using | |
58 | * two levels would be easy to implement. | |
59 | * | |
60 | * For 16kB page size we use a 2 level page tree which permits a total of | |
f29244a5 | 61 | * 36 bits of virtual address space. We could add a third level but it seems |
1da177e4 LT |
62 | * like at the moment there's no need for this. |
63 | * | |
64 | * For 64kB page size we use a 2 level page table tree for a total of 42 bits | |
65 | * of virtual address space. | |
66 | */ | |
/*
 * Per-page-size allocation orders for each page-table level.
 *
 * PUD_ORDER expands to an undeclared identifier on purpose: the pud level
 * is folded away via <asm-generic/pgtable-nopud.h> (included above), so
 * any code that tries to allocate a pud fails loudly at build time.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1	/* 2 pages, per the comment at the top */
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
91 | ||
/* Entries per table at each level: table bytes / 8-byte entry size. */
#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

/*
 * The vmalloc arena begins at MAP_BASE and ends where the three-level
 * tree runs out of coverage (entries per level times the page size).
 */
#define VMALLOC_START		MAP_BASE
#define VMALLOC_END	\
	(VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE)
102 | ||
/* Log a corrupted table entry together with the file/line that found it. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Shared invalid/empty-bad tables that vacant or failed entries point at
 * (see the layout comment at the top of this file).
 */
extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
1da177e4 LT |
114 | |
115 | /* | |
1b3a6e97 | 116 | * Empty pgd/pmd entries point to the invalid_pte_table. |
1da177e4 LT |
117 | */ |
118 | static inline int pmd_none(pmd_t pmd) | |
119 | { | |
120 | return pmd_val(pmd) == (unsigned long) invalid_pte_table; | |
121 | } | |
122 | ||
/* Non-zero when the pmd value has bits set below the page boundary. */
#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)
124 | ||
125 | static inline int pmd_present(pmd_t pmd) | |
126 | { | |
127 | return pmd_val(pmd) != (unsigned long) invalid_pte_table; | |
128 | } | |
129 | ||
/* Mark a pmd entry empty again by pointing it back at invalid_pte_table. */
static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
134 | ||
135 | /* | |
f29244a5 | 136 | * Empty pud entries point to the invalid_pmd_table. |
1da177e4 | 137 | */ |
c6e8b587 | 138 | static inline int pud_none(pud_t pud) |
1da177e4 | 139 | { |
c6e8b587 | 140 | return pud_val(pud) == (unsigned long) invalid_pmd_table; |
1da177e4 LT |
141 | } |
142 | ||
c6e8b587 RB |
143 | static inline int pud_bad(pud_t pud) |
144 | { | |
145 | return pud_val(pud) & ~PAGE_MASK; | |
146 | } | |
1da177e4 | 147 | |
c6e8b587 | 148 | static inline int pud_present(pud_t pud) |
1da177e4 | 149 | { |
c6e8b587 | 150 | return pud_val(pud) != (unsigned long) invalid_pmd_table; |
1da177e4 LT |
151 | } |
152 | ||
/* Mark a pud entry empty again by pointing it back at invalid_pmd_table. */
static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
157 | ||
1b3a6e97 TS |
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
/* VR41xx keeps the pfn two bits higher in the pte than the generic layout. */
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif
167 | ||
/* Per-level index of a virtual address within its table. */
#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
/*
 * NOTE(review): the address argument is deliberately unused here —
 * kernel mappings are looked up via "fault address - VMALLOC_START"
 * per the comment at the top of this file; confirm before changing.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, 0)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm,addr)	((mm)->pgd + pgd_index(addr))
180 | ||
/* A pud entry stores the pmd table's virtual address directly. */
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		(pud_val(pud) - PAGE_OFFSET)
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
1da177e4 LT |
187 | |
188 | /* Find an entry in the second-level page table.. */ | |
c6e8b587 | 189 | static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address) |
1da177e4 | 190 | { |
46a82b2d | 191 | return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address); |
1da177e4 LT |
192 | } |
193 | ||
/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
/* The map variants are plain address arithmetic; unmap is a no-op. */
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
#define pte_unmap_nested(pte) ((void)(pte))
207 | ||
/*
 * Initialize a new pgd / pmd table with invalid pointers
 * (every entry set to invalid_pmd_table / invalid_pte_table respectively,
 * matching the empty-entry convention described at the top of this file).
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);
213 | ||
214 | /* | |
215 | * Non-present pages: high 24 bits are offset, next 8 bits type, | |
216 | * low 32 bits zero. | |
217 | */ | |
218 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | |
219 | { pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; } | |
220 | ||
221 | #define __swp_type(x) (((x).val >> 32) & 0xff) | |
222 | #define __swp_offset(x) ((x).val >> 40) | |
223 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) | |
224 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | |
225 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | |
226 | ||
/*
 * Bits 0, 4, 6, and 7 are taken.  Let's leave bits 1, 2, 3, and 5 alone to
 * make things easier, and only use the upper 56 bits for the page offset...
 */
#define PTE_FILE_MAX_BITS	56

/* File offset lives in pte bits 8..63 — keep in sync with the bits above. */
#define pte_to_pgoff(_pte)	((_pte).pte >> 8)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 8) | _PAGE_FILE })
1da177e4 LT |
235 | |
236 | #endif /* _ASM_PGTABLE_64_H */ |