]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/powerpc/include/asm/hugetlb.h
powerpc/mm/radix: Drop unneeded NULL check
[mirror_ubuntu-bionic-kernel.git] / arch / powerpc / include / asm / hugetlb.h
CommitLineData
6d779079
GS
1#ifndef _ASM_POWERPC_HUGETLB_H
2#define _ASM_POWERPC_HUGETLB_H
3
41151e77 4#ifdef CONFIG_HUGETLB_PAGE
6d779079 5#include <asm/page.h>
106c992a 6#include <asm-generic/hugetlb.h>
6d779079 7
41151e77 8extern struct kmem_cache *hugepte_cache;
41151e77 9
cf9427b8 10#ifdef CONFIG_PPC_BOOK3S_64
48483760 11
bee8b3b5 12#include <asm/book3s/64/hugetlb.h>
cf9427b8
AK
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64bit book3s.
 *
 * Returns the kernel virtual address of the hugepte table that this
 * hugepage-directory entry points to (__va() of the masked value).
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode, MMU page size
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}
26
/*
 * MMU page-size index (an MMU_PAGE_* value) encoded in the hugepd.
 * The index sits within the bits covered by HUGEPD_SHIFT_MASK,
 * shifted left by 2; only four bits are available for it (see the
 * BUILD_BUG_ON in hugepd_page()).
 */
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}
31
/* Address shift (log2 of the huge page size) for this hugepd. */
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
/*
 * Flush the TLB entry for a huge page.  On book3s64 only the radix
 * MMU needs an explicit flush here; when !radix_enabled() (hash MMU)
 * this function deliberately does nothing.
 */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}
cf9427b8
AK
42
43#else
44
/*
 * Non-book3s64 layout: the hugepte table address is packed directly
 * into the hugepd value, with format-specific low/high bits to strip.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	/* 8xx: mask off the PMD size/present bits, then convert to a VA. */
	return (pte_t *)__va(hpd_val(hpd) &
			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	/*
	 * Others: strip the shift field from the low bits and OR the
	 * PD_HUGE marker back in.  NOTE(review): presumably the stored
	 * value is already a kernel address here (no __va) - confirm
	 * against the FSL BookE hugepd encoding.
	 */
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}
56
/* Address shift (log2 of the huge page size) for this hugepd. */
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	/*
	 * 8xx stores an encoded size field in _PMD_PAGE_MASK; the >> 1
	 * and +17 bias map it to an address shift.  TODO(review):
	 * confirm the bias against the 8xx MMU page-size encoding.
	 */
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	/* The shift is stored directly in the low bits of the hugepd. */
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
65
cf9427b8
AK
66#endif /* CONFIG_PPC_BOOK3S_64 */
67
68
/*
 * Return a pointer to the hugepte slot covering @addr inside the
 * hugepte table addressed by the directory entry @hpd.  @pdshift is
 * the address shift covered by one entry at the directory level the
 * hugepd was found at.
 */
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're all
	 * identical. So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	/* Huge-page number of @addr within the pdshift-sized region. */
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
86
/*
 * Look up the huge PTE for @addr, reporting the mapping's page-size
 * shift through @shift (out-of-line helper).
 */
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

/* Keep the d-cache/i-cache coherent for a huge page (out of line). */
void flush_dcache_icache_hugepage(struct page *page);
#if defined(CONFIG_PPC_MM_SLICES)
/*
 * With the slices address-space layout, the out-of-line version
 * decides whether [addr, addr+len) lies in a hugepage-only region.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
/* No slices: a range is never restricted to huge pages only. */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif
103
/* Preload a huge-page translation for @ea on Book3E (out of line). */
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

#ifdef CONFIG_PPC_8xx
/* On 8xx a huge mapping is flushed like a normal page. */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
/* Other non-book3s platforms provide an out-of-line implementation. */
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

/* Out-of-line page-table teardown for a hugepage range. */
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 *
 * Returns 0 on success, -EINVAL if either the length or the start
 * address is not a multiple of the huge page size of @file's hstate.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
140
/* Install a huge PTE; no handling needed beyond plain set_pte_at(). */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
146
/*
 * Clear a huge PTE and return its previous contents via pte_update().
 * The (~0UL, 0) arguments look like clear-all/set-none masks, and the
 * trailing 1 on the 64-bit variant presumably marks the entry as huge
 * - confirm against the per-platform pte_update() signatures.
 */
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
156
8fe627ec
GS
157static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
158 unsigned long addr, pte_t *ptep)
159{
0895ecda
DG
160 pte_t pte;
161 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
13dce033 162 flush_hugetlb_page(vma, addr);
8fe627ec
GS
163}
164

/* A huge PTE is empty under the same test as a normal PTE. */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
169
/* Write-protect a huge PTE value; same operation as a normal PTE. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
174
/*
 * Update access/dirty bits on a huge PTE.  A nonzero return tells the
 * caller to update the MMU cache / TLB for this address.
 */
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
191
/* Huge PTEs are read with a plain dereference on powerpc. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
196
/* No powerpc-specific page flags to clear for huge pages. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
200
41151e77 201#else /* ! CONFIG_HUGETLB_PAGE */
41151e77
BB
202static inline void flush_hugetlb_page(struct vm_area_struct *vma,
203 unsigned long vmaddr)
204{
205}
a6146888 206
29409997 207#define hugepd_shift(x) 0
b30e7590 208static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
29409997
AK
209 unsigned pdshift)
210{
211 return 0;
212}
213#endif /* CONFIG_HUGETLB_PAGE */
a6146888 214
6d779079 215#endif /* _ASM_POWERPC_HUGETLB_H */