#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

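/*
 * Recover the kernel virtual address of the huge PTE table from a huge
 * page directory entry: mask off the encoded shift bits and set PD_HUGE
 * back in the pointer.
 */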
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

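/* The huge page shift encoded in the low bits of the hugepd entry. */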
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're all
	 * identical. So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}

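/*
 * Look up the huge PTE mapping @addr and report the page-size shift it
 * is mapped with via @shift.
 */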
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

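/* Flush dcache/icache for each sub-page of a huge page (icache coherency). */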
void flush_dcache_icache_hugepage(struct page *page);

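/*
 * With MM slices (or subpage protection), parts of the address space may
 * be usable for huge pages only; without them, no range is restricted.
 */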
#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

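/* Book3E: write a TLB entry for a just-installed huge page mapping. */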
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

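/* Free the page-table pages backing a huge page region being torn down. */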
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

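/* No arch-specific work is needed when hugetlb pages are prefaulted. */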
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

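/* Huge PTEs use the regular PTE format on powerpc, so the normal helper works. */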
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

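/*
 * Atomically clear the huge PTE and return its old value; the 64-bit
 * pte_update() variant takes an extra "huge" argument.
 */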
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

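/* Clear the huge PTE and flush the corresponding TLB entry. */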
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

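/* Huge PTEs share the normal PTE encoding, so reuse the generic accessors. */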
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

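/* A huge PTE is a normal PTE, so it can be read with a plain load. */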
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

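/* powerpc needs no extra per-page preparation, release, or flag clearing. */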
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */