#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

/*
 * We save the slot number & secondary bit in the second half of the
 * PTE page. We use 8 bytes for each PTE entry.
 */
#define PTE_PAGE_HIDX_OFFSET	(PTRS_PER_PTE * 8)

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

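/*
 * Illustrative use of the accessors above (a sketch, not taken from this
 * file): generic mm code combines these predicates to decide what a
 * mapping currently permits, e.g.
 *
 *	if (pte_present(pte) && pte_write(pte))
 *		ptep_set_access_flags(vma, addr, ptep, pte_mkdirty(pte), 1);
 *
 * pte_present() and ptep_set_access_flags() are defined/declared later in
 * this header.
 */
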
#ifdef CONFIG_NUMA_BALANCING

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
}

#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t pte)
{
	return pte_val(pte) & (_PAGE_PRESENT);
}

#define pte_numa pte_numa
static inline int pte_numa(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
}

#define pte_mknonnuma pte_mknonnuma
static inline pte_t pte_mknonnuma(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_NUMA;
	pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
	return pte;
}

#define pte_mknuma pte_mknuma
static inline pte_t pte_mknuma(pte_t pte)
{
	/*
	 * We should not set _PAGE_NUMA on non-present PTEs. Also clear the
	 * present bit so that hash_page will return 1 and we collect this
	 * as a NUMA fault.
	 */
	if (pte_present(pte)) {
		pte_val(pte) |= _PAGE_NUMA;
		pte_val(pte) &= ~_PAGE_PRESENT;
	} else
		VM_BUG_ON(1);
	return pte;
}

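/*
 * Hedged sketch of the cycle these helpers implement (assuming the generic
 * CONFIG_NUMA_BALANCING flow of this era): the NUMA scanner marks a range
 * with pte_mknuma(), which clears _PAGE_PRESENT so the next access faults;
 * the fault handler records the access for page-placement decisions and
 * then restores the mapping with pte_mknonnuma().
 */
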
#define ptep_set_numa ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
		VM_BUG_ON(1);

	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
	return;
}

#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
	return pte_numa(pmd_pte(pmd));
}

#define pmdp_set_numa pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
		VM_BUG_ON(1);

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
	return;
}

#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
	return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
}

#define pmd_mknuma pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
	return pte_pmd(pte_mknuma(pmd_pte(pmd)));
}

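/*
 * Note (an observation, hedged): the pmd_* NUMA helpers above simply reuse
 * the pte_* versions via pmd_pte()/pte_pmd(), which relies on a huge-page
 * PMD sharing the PTE bit layout on this platform.
 */
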
#else

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
#endif /* CONFIG_NUMA_BALANCING */

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte)	{
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

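/*
 * Usage sketch for pte_modify() (hedged; the exact call chain is an
 * assumption): mprotect()-style code changes protections with roughly
 *
 *	pte_t ptent = pte_modify(*ptep, newprot);
 *	set_pte_at(mm, addr, ptep, ptent);
 *
 * so the bits within _PAGE_CHG_MASK (pfn, dirty/accessed state) are
 * preserved and everything else comes from newprot.
 */
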
/* Insert a PTE, top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors; it's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update() which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE.
	 * If it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (i.e., same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}

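/*
 * Usage sketch (an assumption about a typical caller, not taken from this
 * file): a fault handler builds a PTE and installs it through the
 * out-of-line wrapper, e.g.
 *
 *	set_pte_at(vma->vm_mm, address, ptep, mk_pte(page, vma->vm_page_prot));
 *
 * set_pte_at() calls __set_pte_at() with percpu == 0; the percpu == 1 path
 * is reserved for per-CPU mappings such as 32-bit kmap_atomic slots.
 */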

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

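/*
 * Example (a sketch; the driver shown is hypothetical): a driver mapping
 * MMIO into userspace would typically strip the inherited cache bits, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 *
 * Each macro above first masks out _PAGE_CACHE_CTL so the cache-control
 * attributes never combine inconsistently.
 */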

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);

static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
				       unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return NULL;
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;

	if (ps > *pte_sizep)
		return NULL;

	return ptep;
}

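/*
 * Usage sketch for lookup_linux_ptep() (hedged; the caller shown is
 * illustrative): pass the minimum mapping size needed in *pte_sizep; on
 * success it is updated to the actual size backing hva:
 *
 *	unsigned long psize = PAGE_SIZE;
 *	pte_t *ptep = lookup_linux_ptep(pgdir, hva, &psize);
 *
 * A NULL return means either no translation exists or the mapping is
 * smaller than requested.
 */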
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */