#ifndef _PPC64_PGTABLE_H
#define _PPC64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/stddef.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#endif /* __ASSEMBLY__ */

/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD, PUD and PGD levels each store a
 * page-aligned kernel virtual pointer to the next level down in each
 * 64b entry.
 */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  7
#define PGD_INDEX_SIZE  9

#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
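
/*
 * Worked example (assuming the 4K PAGE_SHIFT of 12 from asm/page.h):
 * PGTABLE_EADDR_SIZE is 9 + 7 + 7 + 9 + 12 = 44, so these page tables
 * can map a 2^44 byte (16TB) effective address range.
 */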

#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START (0xD000000000000000ul)
#define VMALLOC_SIZE  (0x80000000000UL)
#define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
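
/* VMALLOC_SIZE is 0x80000000000 = 2^43 bytes, i.e. an 8TB vmalloc area. */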

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
#define _PAGE_USER	0x0002 /* matches one of the PP bits */
#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED	0x0008
#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
#define _PAGE_DIRTY	0x0080 /* C: page changed */
#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
#define _PAGE_RW	0x0200 /* software: user write access allowed */
#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
#define _PAGE_SECONDARY	0x8000 /* software: HPTE is in secondary group */
#define _PAGE_GROUP_IX	0x7000 /* software: HPTE index within group */
#define _PAGE_HUGE	0x10000 /* 16MB page */
/* Bits 0x7000 identify the index within an HPT Group */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)

/* PAGE_MASK gives the right answer below, but only by accident */
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)

#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)

/* __pgprot defined in asm-ppc64/page.h */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)

#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP

/*
 * This bit in a hardware PTE indicates that the page is *not* executable.
 */
#define HW_NO_EXEC	_PAGE_EXEC

/*
 * POWER4 and newer have per-page execute protection; older chips can only
 * do this on a segment (256MB) basis.
 *
 * Also, write permission implies read permission.
 * This is the closest we can get..
 *
 * Note that, due to the way the vm flags are laid out, the bits are XWR.
 * (A worked example follows the tables below.)
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X
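
/*
 * Example: a private PROT_READ|PROT_EXEC mapping has vm flags X-R, i.e.
 * __P101, and so gets PAGE_READONLY_X; a shared PROT_READ|PROT_WRITE
 * mapping (__S011) gets PAGE_SHARED, while the private equivalent
 * (__P011) gets PAGE_COPY and is made writable via copy-on-write.
 */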

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

/* shift to put page number into pte */
#define PTE_SHIFT (17)
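
/*
 * The flag bits above occupy pte bits 0-16 (_PAGE_HUGE, 0x10000, is
 * bit 16), so the physical page number is stored from bit PTE_SHIFT up.
 */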

#ifdef CONFIG_HUGETLB_PAGE

#ifndef __ASSEMBLY__
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local);
#endif /* __ASSEMBLY__ */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#else

#define hash_huge_page(mm,a,ea,vsid,local)	-1

#endif

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte takes a (struct page *) as input
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
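
/*
 * The directory levels store kernel virtual pointers to the next level
 * down; the BUG_ON in pmd_set catches callers that try to install a
 * pointer below KERNELBASE, which cannot be a valid page table address.
 */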
#define pmd_set(pmdp, ptep)	({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) == 0)
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd)	(pmd_val(pmd))
#define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))

#define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (unsigned long)(pmdp))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		((pud_val(pud)) == 0)
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page(pud)		(pud_val(pud))

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
#define pgd_present(pgd)	(pgd_val(pgd) != 0)
#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0)
#define pgd_page(pgd)		(pgd_val(pgd))

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)	\
	(((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)
#define pte_unmap_nested(pte)		do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_HUGE; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	pte_val(pte) |= _PAGE_HUGE; return pte; }

/* Atomic PTE updates */
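/*
 * pte_update clears the bits in 'clr' with a ldarx/stdcx. loop: it
 * spins while _PAGE_BUSY is set (the hash miss/flush code owns the
 * PTE), then atomically stores the cleared value and returns the old
 * PTE value.
 */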
static inline unsigned long pte_update(pte_t *p, unsigned long clr)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
	: "cc" );
	return old;
}

/* hpte_update puts the PTE into the flush batch; it doesn't actually
 * trigger the hash flush immediately, you need to call
 * flush_tlb_pending() for that.
 */
extern void hpte_update(struct mm_struct *mm, unsigned long addr, unsigned long pte,
			int wrprot);

static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_ACCESSED);
	if (old & _PAGE_HASHPTE) {
		hpte_update(mm, addr, old, 0);
		flush_tlb_pending();
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

/*
 * On RW/DIRTY bit transitions we can avoid flushing the hpte.  For the
 * moment we always flush, but we need to fix hpte_update and test if the
 * optimisation is worth it.
 */
static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_DIRTY);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
	return (old & _PAGE_DIRTY) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;
	old = pte_update(ptep, _PAGE_RW);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.  The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
						  __ptep);		\
	flush_tlb_page(__vma, __address);				\
	__dirty;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
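/*
 * If a (hashed) PTE is already present, it is cleared first and any
 * pending hash flushes are pushed out; _PAGE_HPTEFLAGS are masked off
 * so the new PTE starts with no stale hash state.
 */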
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		pte_clear(mm, addr, ptep);
		flush_tlb_pending();
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

/* Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
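/*
 * Like pte_update, the ldarx/stdcx. loop below waits until _PAGE_BUSY
 * is clear, then atomically ORs in the new DIRTY/ACCESSED/RW/EXEC bits.
 */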
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	or	%0,%3,%0\n\
	stdcx.	%0,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	: "cc");
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								\
		__ptep_set_access_flags(__ptep, __entry, __dirty);	\
		flush_tlb_page_nohash(__vma, __address);		\
	} while(0)

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

#ifdef CONFIG_HUGETLB_PAGE
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
	free_pgd_range(tlb, addr, end, floor, ceiling)
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */
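/*
 * A swap PTE keeps the flag bits (0 .. PTE_SHIFT-1) clear so it is never
 * seen as present; the swp_entry_t value lives in what is normally the
 * pfn field.  Within that value, the type occupies six bits starting at
 * bit 1 and the offset starts at bit 8.
 */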
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns NULL.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt = NULL;
	pte_t pte;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, ea);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, ea);
			if (pmd_present(*pm)) {
				pt = pte_offset_kernel(pm, ea);
				pte = *pt;
				if (!pte_present(pte))
					pt = NULL;
			}
		}
	}

	return pt;
}

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_PGTABLE_H */