/*
 * arch/arm/include/asm/pgtable-3level.h
 *
 * Copyright (C) 2011 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
20 | #ifndef _ASM_PGTABLE_3LEVEL_H | |
21 | #define _ASM_PGTABLE_3LEVEL_H | |
22 | ||
23 | /* | |
24 | * With LPAE, there are 3 levels of page tables. Each level has 512 entries of | |
25 | * 8 bytes each, occupying a 4K page. The first level table covers a range of | |
26 | * 512GB, each entry representing 1GB. Since we are limited to 4GB input | |
27 | * address range, only 4 entries in the PGD are used. | |
28 | * | |
29 | * There are enough spare bits in a page table entry for the kernel specific | |
30 | * state. | |
31 | */ | |
32 | #define PTRS_PER_PTE 512 | |
33 | #define PTRS_PER_PMD 512 | |
34 | #define PTRS_PER_PGD 4 | |
35 | ||
e38a5175 | 36 | #define PTE_HWTABLE_PTRS (0) |
dcfdae04 CM |
37 | #define PTE_HWTABLE_OFF (0) |
38 | #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) | |
39 | ||
/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map.
 */
#define PGDIR_SHIFT		30

/*
 * PMD_SHIFT determines the size a middle-level page table entry can map.
 */
#define PMD_SHIFT		21

/*
 * NOTE(review): the masks are deliberately built from a plain (signed) int
 * rather than PMD_SIZE/PGDIR_SIZE so that, when applied to a 64-bit
 * pmdval_t/pgdval_t, sign extension propagates the mask into the upper
 * 32 bits as well.
 */
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		21
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))

/* Number of first-level (PGD) entries covering the user address space. */
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
/*
 * Hugetlb definitions: a huge page is one PMD-level block mapping
 * (2MB with the shifts above).
 */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
/*
 * "Linux" PTE definitions for LPAE.
 *
 * These bits overlap with the hardware bits but the naming is preserved for
 * consistency with the classic page table format.
 */
#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */

/* PMD bits used for huge-page / THP state tracking. */
#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56)
#define PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)

/*
 * To be used in assembly code with the upper page attributes
 * (bit positions relative to the high 32-bit word).
 */
#define L_PTE_XN_HIGH		(1 << (54 - 32))
#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))
/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)

/*
 * Software PGD flags.
 */
#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */
/*
 * 2nd stage PTE definitions for LPAE.
 */
#define L_PTE_S2_MT_UNCACHED	 (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK	 (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_RDONLY		 (_AT(pteval_t, 1) << 6)   /* HAP[1] */
#define L_PTE_S2_RDWR		 (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */

#define L_PMD_S2_RDWR		 (_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */

/*
 * Hyp-mode PL2 PTE definitions for LPAE.
 */
#define L_PTE_HYP		L_PTE_USER
da028779 CM |
136 | #ifndef __ASSEMBLY__ |
137 | ||
138 | #define pud_none(pud) (!pud_val(pud)) | |
139 | #define pud_bad(pud) (!(pud_val(pud) & 2)) | |
140 | #define pud_present(pud) (pud_val(pud)) | |
cc577c26 CD |
141 | #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ |
142 | PMD_TYPE_TABLE) | |
143 | #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | |
144 | PMD_TYPE_SECT) | |
1fd15b87 | 145 | #define pmd_large(pmd) pmd_sect(pmd) |
da028779 CM |
146 | |
147 | #define pud_clear(pudp) \ | |
148 | do { \ | |
149 | *pudp = __pud(0); \ | |
150 | clean_pmd_entry(pudp); \ | |
151 | } while (0) | |
152 | ||
153 | #define set_pud(pudp, pud) \ | |
154 | do { \ | |
155 | *pudp = pud; \ | |
156 | flush_pmd_entry(pudp); \ | |
157 | } while (0) | |
158 | ||
159 | static inline pmd_t *pud_page_vaddr(pud_t pud) | |
160 | { | |
161 | return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); | |
162 | } | |
163 | ||
164 | /* Find an entry in the second-level page table.. */ | |
165 | #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) | |
166 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |
167 | { | |
168 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); | |
169 | } | |
170 | ||
171 | #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) | |
172 | ||
173 | #define copy_pmd(pmdpd,pmdps) \ | |
174 | do { \ | |
175 | *pmdpd = *pmdps; \ | |
176 | flush_pmd_entry(pmdpd); \ | |
177 | } while (0) | |
178 | ||
179 | #define pmd_clear(pmdp) \ | |
180 | do { \ | |
181 | *pmdp = __pmd(0); \ | |
182 | clean_pmd_entry(pmdp); \ | |
183 | } while (0) | |
184 | ||
/*
 * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
 * that are written to a page table but not for ptes created with mk_pte.
 *
 * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
 * hugetlb_cow, where it is compared with an entry in a page table.
 * This comparison test fails erroneously leading ultimately to a memory leak.
 *
 * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
 * present before running the comparison.
 */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
					: pte_val(pte_a))				\
				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
					: pte_val(pte_b)))
/* Write a pte; the extension bits are OR-ed in before the processor hook. */
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))

/* A huge pte is a non-empty entry with the table bit clear (block mapping). */
#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))

/* A huge pmd faults on access if it is old or read-only. */
#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
8d962507 CM |
215 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
216 | #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) | |
217 | #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) | |
218 | #endif | |
219 | ||
220 | #define PMD_BIT_FUNC(fn,op) \ | |
221 | static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } | |
222 | ||
223 | PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); | |
224 | PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); | |
225 | PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); | |
226 | PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); | |
227 | PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); | |
228 | PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); | |
229 | ||
230 | #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) | |
231 | ||
232 | #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) | |
233 | #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) | |
234 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) | |
235 | ||
236 | /* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ | |
237 | #define pmd_mknotpresent(pmd) (__pmd(0)) | |
238 | ||
239 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | |
240 | { | |
241 | const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY | | |
242 | PMD_SECT_VALID | PMD_SECT_NONE; | |
243 | pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); | |
244 | return pmd; | |
245 | } | |
246 | ||
247 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, | |
248 | pmd_t *pmdp, pmd_t pmd) | |
249 | { | |
250 | BUG_ON(addr >= TASK_SIZE); | |
251 | ||
252 | /* create a faulting entry if PROT_NONE protected */ | |
253 | if (pmd_val(pmd) & PMD_SECT_NONE) | |
254 | pmd_val(pmd) &= ~PMD_SECT_VALID; | |
255 | ||
256 | *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); | |
257 | flush_pmd_entry(pmdp); | |
258 | } | |
259 | ||
260 | static inline int has_transparent_hugepage(void) | |
261 | { | |
262 | return 1; | |
263 | } | |
264 | ||
da028779 CM |
265 | #endif /* __ASSEMBLY__ */ |
266 | ||
dcfdae04 | 267 | #endif /* _ASM_PGTABLE_3LEVEL_H */ |