/* NOTE(review): git-blame table residue ("Commit | Line | Data" markup) removed;
 * the content below is the header text itself. */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
9 | #ifndef _ASM_PGTABLE_32_H | |
10 | #define _ASM_PGTABLE_32_H | |
11 | ||
1da177e4 LT |
12 | #include <asm/addrspace.h> |
13 | #include <asm/page.h> | |
14 | ||
15 | #include <linux/linkage.h> | |
16 | #include <asm/cachectl.h> | |
17 | #include <asm/fixmap.h> | |
18 | ||
9849a569 | 19 | #define __ARCH_USE_5LEVEL_HACK |
c6e8b587 RB |
20 | #include <asm-generic/pgtable-nopmd.h> |
21 | ||
/* Index of the next free "temporary" wired TLB entry; counts down from the top. */
extern int temp_tlb_entry;

/*
 * - add_temporary_entry() add a temporary TLB entry. We use TLB entries
 *	starting at the top and working down.  This is for populating the
 *	TLB before trap_init() puts the TLB miss handler in place.  It
 *	should be used only for entries matching the actual page tables,
 *	to prevent inconsistencies.
 *
 * Returns 0 on success, negative errno when no wired slot is available.
 * (NOTE(review): return convention inferred from kernel style — confirm in tlb-r4k.c.)
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);
33 | ||
/*
 * Basically we have the same two-level (which is the logical three level
 * Linux page table layout folded) page tables as the i386.  Some day
 * when we have proper page coloring support we can have a 1% quicker
 * tlb refill handling mechanism, but for now it is a bit slower but
 * works even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER	(__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
/* PUD is folded away; expanding PUD_ORDER is a deliberate compile error. */
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	1
#define PTE_ORDER	0

/* PGD covers both user (low 2 GiB) and kernel halves of the address space. */
#define PTRS_PER_PGD	(USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL
#define VMALLOC_START	  MAP_BASE

/* Base of the persistent-kmap window when CONFIG_HIGHMEM is enabled. */
#define PKMAP_BASE		(0xfe000000UL)

/* Leave a two-page guard gap between the vmalloc area and what follows it. */
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
72 | ||
/* Diagnostic printouts for corrupt page-table entries; the pte format
 * width depends on whether physical addresses are 64-bit. */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
82 | ||
83 | extern void load_pgd(unsigned long pg_dir); | |
84 | ||
85 | extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)]; | |
86 | ||
87 | /* | |
88 | * Empty pgd/pmd entries point to the invalid_pte_table. | |
89 | */ | |
90 | static inline int pmd_none(pmd_t pmd) | |
91 | { | |
92 | return pmd_val(pmd) == (unsigned long) invalid_pte_table; | |
93 | } | |
94 | ||
95 | #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) | |
96 | ||
97 | static inline int pmd_present(pmd_t pmd) | |
98 | { | |
99 | return pmd_val(pmd) != (unsigned long) invalid_pte_table; | |
100 | } | |
101 | ||
102 | static inline void pmd_clear(pmd_t *pmdp) | |
103 | { | |
104 | pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); | |
105 | } | |
106 | ||
7b2cb64f | 107 | #if defined(CONFIG_XPA) |
745f3558 | 108 | |
c5b36783 | 109 | #define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT)) |
1da177e4 LT |
110 | static inline pte_t |
111 | pfn_pte(unsigned long pfn, pgprot_t prot) | |
112 | { | |
113 | pte_t pte; | |
c5b36783 SH |
114 | |
115 | pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) | | |
116 | (pgprot_val(prot) & ~_PFNX_MASK); | |
117 | pte.pte_high = (pfn << _PFN_SHIFT) | | |
118 | (pgprot_val(prot) & ~_PFN_MASK); | |
1da177e4 LT |
119 | return pte; |
120 | } | |
121 | ||
7b2cb64f PB |
122 | #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) |
123 | ||
124 | #define pte_pfn(x) ((unsigned long)((x).pte_high >> 6)) | |
125 | ||
126 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) | |
127 | { | |
128 | pte_t pte; | |
129 | ||
130 | pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f); | |
131 | pte.pte_low = pgprot_val(prot); | |
132 | ||
133 | return pte; | |
134 | } | |
135 | ||
1da177e4 LT |
136 | #else |
137 | ||
1da177e4 LT |
138 | #ifdef CONFIG_CPU_VR41XX |
139 | #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2))) | |
140 | #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot)) | |
141 | #else | |
6dd9344c DD |
142 | #define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) |
143 | #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot)) | |
1da177e4 | 144 | #endif |
34adb28d | 145 | #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */ |
1da177e4 | 146 | |
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

/* No highmem kmap needed for page tables here; map/unmap are trivial. */
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
1da177e4 LT |
172 | |
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)			(((x).val >> 10) & 0x1f)
#define __swp_offset(x)			((x).val >> 15)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#else

#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 4) & 0x1f)
#define __swp_offset(x)			((x).val >> 9)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
/* Swap state lives in pte_high; pte_low is zeroed on conversion back. */
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			((x).val >> 7)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })

#else
/*
 * Constraints:
 *      _PAGE_PRESENT at bit 0
 *      _PAGE_MODIFIED at bit 4
 *      _PAGE_GLOBAL at bit 6
 *      _PAGE_VALID at bit 7
 */
#define __swp_type(x)			(((x).val >> 8) & 0x1f)
#define __swp_offset(x)			((x).val >> 13)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */
1da177e4 LT |
219 | |
220 | #endif /* _ASM_PGTABLE_32_H */ |