/*
 * arch/powerpc/include/asm/book3s/64/pgtable.h
 * (recovered from a gitweb blob view of the mirror_ubuntu-zesty-kernel tree;
 *  stray viewer markup and embedded line numbers remain elsewhere in the file)
 */
1 #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
2 #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */
8 #include <asm/book3s/64/hash.h>
9 #include <asm/barrier.h>
/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space:
 *
 *  ISA_IO_BASE  = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE  = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
/* Kernel IO space starts at the midpoint of the kernel virtual region. */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)

/* Virtually-contiguous array of struct page at VMEMMAP_BASE (sparse memmap). */
#define vmemmap		((struct page *)VMEMMAP_BASE)

/* Advertise special mapping type for AGP */
/* NOTE(review): a #define (HAVE_PAGE_AGP?) appears to have been lost in
 * extraction right here -- confirm against the upstream header. */

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL
/*
 * NOTE(review): damaged extraction.  The leading decimal on each line is the
 * original file's line number.  Several source lines were dropped here: the
 * enclosing "#ifndef __real_pte" (implied by the trailing #endif marker), the
 * "#else" separating the STRICT_MM_TYPECHECKS and plain variants of
 * __real_pte/__rpte_to_pte, and the body of pte_iterate_hashed_subpages.
 * Do not compile as-is; restore from the upstream header.
 */
40 * This is the default implementation of various PTE accessors, it's
41 * used in all cases except Book3S with 64K pages where we have a
42 * concept of sub-pages
46 #ifdef CONFIG_STRICT_MM_TYPECHECKS
47 #define __real_pte(e,p) ((real_pte_t){(e)})
48 #define __rpte_to_pte(r) ((r).pte)
50 #define __real_pte(e,p) (e)
51 #define __rpte_to_pte(r) (__pte(r))
53 #define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >>_PAGE_F_GIX_SHIFT)
55 #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
58 shift = mmu_psize_defs[psize].shift; \
60 #define pte_iterate_hashed_end() } while(0)
63 * We expect this to be called only for user addresses or kernel virtual
64 * addresses other than the linear mapping.
66 #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
68 #endif /* __real_pte */
/*
 * NOTE(review): the extraction dropped the bodies of pmd_set, pmd_clear,
 * pud_set and pud_clear (original lines 71-74, 76-79, 84-87, 89-92), and the
 * signatures are split mid-token across lines.  Upstream, the setters store
 * val into the entry and the clearers zero it -- TODO confirm before use.
 * The pmd_none/pmd_present and pud_none/pud_present macros below are intact.
 */
70 static inline void pmd_set(pmd_t
*pmdp
, unsigned long val
)
75 static inline void pmd_clear(pmd_t
*pmdp
)
80 #define pmd_none(pmd) (!pmd_val(pmd))
81 #define pmd_present(pmd) (!pmd_none(pmd))
83 static inline void pud_set(pud_t
*pudp
, unsigned long val
)
88 static inline void pud_clear(pud_t
*pudp
)
93 #define pud_none(pud) (!pud_val(pud))
94 #define pud_present(pud) (pud_val(pud) != 0)
/*
 * NOTE(review): declarations here are split mid-token by the extraction.
 * pud_page/pmd_page externs are complete; pud_pte/pte_pud have their return
 * statements visible but their surrounding braces were dropped; pgd_set's
 * body (original lines 110-113) is entirely missing.  pgd_write relies on
 * pgd_pte(), which is not visible in this chunk.
 */
96 extern struct page
*pud_page(pud_t pud
);
97 extern struct page
*pmd_page(pmd_t pmd
);
98 static inline pte_t
pud_pte(pud_t pud
)
100 return __pte(pud_val(pud
));
103 static inline pud_t
pte_pud(pte_t pte
)
105 return __pud(pte_val(pte
));
107 #define pud_write(pud) pte_write(pud_pte(pud))
108 #define pgd_write(pgd) pte_write(pgd_pte(pgd))
109 static inline void pgd_set(pgd_t
*pgdp
, unsigned long val
)
/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *)pud_page_vaddr(*(pudp))) + pmd_index(addr))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *)pmd_page_vaddr(*(dir))) + pte_index(addr))

/* 64-bit kernel page tables are always resident: "map" is just the kernel
 * lookup and unmap is a no-op. */
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
/* Diagnostics for corrupted page-table entries at each level: log the raw
 * entry value with the source location that detected it. */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * NOTE(review): the extraction dropped the closing "} while (0)" of
 * MAX_SWAPFILES_CHECK (original line ~150) and the comment delimiters around
 * the embedded notes, so this macro is truncated here.  The visible
 * BUILD_BUG_ONs assert that swap-type bits and the swap soft-dirty bit do
 * not overlap _PAGE_HPTEFLAGS (which set_pte filters out).
 */
141 /* Encode and de-code a swap entry */
142 #define MAX_SWAPFILES_CHECK() do { \
143 BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
145 * Don't have overlapping bits with _PAGE_HPTEFLAGS \
146 * We filter HPTEFLAGS on set_pte. \
148 BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
149 BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
152 * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
/* Swap-entry encoding: the swap type lives above _PAGE_BIT_SWAP_TYPE and the
 * swap offset lives in the RPN (real page number) field. */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) << _PAGE_BIT_SWAP_TYPE) \
					| ((offset) << PTE_RPN_SHIFT) })
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * swap type and offset we get from swap and convert that to pte to find a
 * matching pte in linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
/*
 * Swap-pte soft-dirty bit: stored just above the swap-type field when
 * CONFIG_MEM_SOFT_DIRTY is enabled, otherwise 0 so it compiles away.
 *
 * Fix: the extraction dropped the "#else" between the two definitions,
 * leaving both arms unconditionally defining the macro (a redefinition that
 * forces the value to 0UL).  Restore the conditional structure.
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
/* Set the soft-dirty bit in a swap pte. */
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

/* Test the soft-dirty bit of a swap pte. */
static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

/* Clear the soft-dirty bit in a swap pte. */
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/* Register a kmem cache for page-table fragments of size 2^shift; ctor (may
 * be NULL) initializes new objects. */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

/* Look up the struct page for a pfn while in real mode.
 * NOTE(review): failure semantics are not visible in this chunk. */
struct page *realmode_pfn_to_page(unsigned long pfn);
196 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
197 extern pmd_t
pfn_pmd(unsigned long pfn
, pgprot_t pgprot
);
198 extern pmd_t
mk_pmd(struct page
*page
, pgprot_t pgprot
);
199 extern pmd_t
pmd_modify(pmd_t pmd
, pgprot_t newprot
);
/*
 * NOTE(review): set_pmd_at and has_transparent_hugepage are complete, but
 * the final parameter line of update_mmu_cache_pmd (original line 203,
 * presumably "pmd_t *pmd);") was dropped by the extraction, leaving that
 * prototype unterminated.
 */
200 extern void set_pmd_at(struct mm_struct
*mm
, unsigned long addr
,
201 pmd_t
*pmdp
, pmd_t pmd
);
202 extern void update_mmu_cache_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
204 extern int has_transparent_hugepage(void);
205 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
208 static inline pte_t
pmd_pte(pmd_t pmd
)
210 return __pte(pmd_val(pmd
));
213 static inline pmd_t
pte_pmd(pte_t pte
)
215 return __pmd(pte_val(pte
));
/*
 * NOTE(review): the body of pmdp_ptep (original lines 219-222) was dropped
 * by the extraction; only the split signature remains.  Presumably it casts
 * the pmd pointer to a pte pointer -- confirm against the upstream header.
 */
218 static inline pte_t
*pmdp_ptep(pmd_t
*pmd
)
/*
 * pmd accessors for THP entries, all delegating to the pte-level helpers via
 * pmd_pte()/pte_pmd() since a huge pmd uses pte-format bits.
 *
 * Fix: pmd_dirty was defined twice (original lines 224 and 226, identical
 * expansions); the duplicate is removed.
 */
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
/* Soft-dirty tracking on huge pmds, routed through the pte-level helpers. */
#define pmd_soft_dirty(pmd)		pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)		pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd)	pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
#ifdef CONFIG_NUMA_BALANCING
/* NUMA-balancing "prot none" test for a huge pmd, via the pte helper. */
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
250 static inline pmd_t
pmd_mkhuge(pmd_t pmd
)
252 return __pmd(pmd_val(pmd
) | (_PAGE_PTE
| _PAGE_THP_HUGE
));
255 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
256 extern int pmdp_set_access_flags(struct vm_area_struct
*vma
,
257 unsigned long address
, pmd_t
*pmdp
,
258 pmd_t entry
, int dirty
);
260 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
261 extern int pmdp_test_and_clear_young(struct vm_area_struct
*vma
,
262 unsigned long address
, pmd_t
*pmdp
);
263 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
264 extern int pmdp_clear_flush_young(struct vm_area_struct
*vma
,
265 unsigned long address
, pmd_t
*pmdp
);
267 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
268 extern pmd_t
pmdp_huge_get_and_clear(struct mm_struct
*mm
,
269 unsigned long addr
, pmd_t
*pmdp
);
271 extern pmd_t
pmdp_collapse_flush(struct vm_area_struct
*vma
,
272 unsigned long address
, pmd_t
*pmdp
);
273 #define pmdp_collapse_flush pmdp_collapse_flush
/*
 * NOTE(review): the final parameter line of pgtable_trans_huge_deposit
 * (original line 277, presumably "pgtable_t pgtable);") was dropped by the
 * extraction, leaving this prototype unterminated.
 */
275 #define __HAVE_ARCH_PGTABLE_DEPOSIT
276 extern void pgtable_trans_huge_deposit(struct mm_struct
*mm
, pmd_t
*pmdp
,
278 #define __HAVE_ARCH_PGTABLE_WITHDRAW
279 extern pgtable_t
pgtable_trans_huge_withdraw(struct mm_struct
*mm
, pmd_t
*pmdp
);
/*
 * NOTE(review): the tail of pmdp_invalidate's prototype (original line 283)
 * and most of pmd_move_must_withdraw's body (the return statement and braces)
 * were dropped by the extraction.  Per the surviving comment, ppc64 stores
 * per-pmd information in the deposited pgtable, so moving a pmd must also
 * withdraw/deposit that pgtable -- presumably the function returns true
 * whenever the page-table locks differ; confirm against upstream.
 */
281 #define __HAVE_ARCH_PMDP_INVALIDATE
282 extern void pmdp_invalidate(struct vm_area_struct
*vma
, unsigned long address
,
285 #define pmd_move_must_withdraw pmd_move_must_withdraw
287 static inline int pmd_move_must_withdraw(struct spinlock
*new_pmd_ptl
,
288 struct spinlock
*old_pmd_ptl
)
291 * Archs like ppc64 use pgtable to store per pmd
292 * specific information. So when we switch the pmd,
293 * we should also withdraw and deposit the pgtable
297 #endif /* __ASSEMBLY__ */
298 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */