/*
 * arch/powerpc/mm/hash64_64k.c
 * (recovered from a gitweb extraction of mirror_ubuntu-zesty-kernel)
 */
/*
 * Copyright IBM Corporation, 2015
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <asm/machdep.h>
21 bool __rpte_sub_valid(real_pte_t rpte
, unsigned long index
)
24 unsigned long ptev
= pte_val(rpte
.pte
);
26 g_idx
= (ptev
& _PAGE_COMBO_VALID
) >> _PAGE_F_GIX_SHIFT
;
28 if (g_idx
& (0x1 << index
))
36 static unsigned long mark_subptegroup_valid(unsigned long ptev
, unsigned long index
)
40 if (!(ptev
& _PAGE_COMBO
))
45 return ptev
| (g_idx
<< _PAGE_F_GIX_SHIFT
);
48 int __hash_page_4K(unsigned long ea
, unsigned long access
, unsigned long vsid
,
49 pte_t
*ptep
, unsigned long trap
, unsigned long flags
,
50 int ssize
, int subpg_prot
)
54 unsigned long hpte_group
;
55 unsigned int subpg_index
;
56 unsigned long rflags
, pa
, hidx
;
57 unsigned long old_pte
, new_pte
, subpg_pte
;
58 unsigned long vpn
, hash
, slot
;
59 unsigned long shift
= mmu_psize_defs
[MMU_PAGE_4K
].shift
;
62 * atomically mark the linux large page PTE busy and dirty
65 pte_t pte
= READ_ONCE(*ptep
);
67 old_pte
= pte_val(pte
);
68 /* If PTE busy, retry the access */
69 if (unlikely(old_pte
& _PAGE_BUSY
))
71 /* If PTE permissions don't match, take page fault */
72 if (unlikely(access
& ~old_pte
))
75 * Try to lock the PTE, add ACCESSED and DIRTY if it was
76 * a write access. Since this is 4K insert of 64K page size
77 * also add _PAGE_COMBO
79 new_pte
= old_pte
| _PAGE_BUSY
| _PAGE_ACCESSED
| _PAGE_COMBO
;
80 if (access
& _PAGE_RW
)
81 new_pte
|= _PAGE_DIRTY
;
82 } while (old_pte
!= __cmpxchg_u64((unsigned long *)ptep
,
85 * Handle the subpage protection bits
87 subpg_pte
= new_pte
& ~subpg_prot
;
88 rflags
= htab_convert_pte_flags(subpg_pte
);
90 if (!cpu_has_feature(CPU_FTR_NOEXECUTE
) &&
91 !cpu_has_feature(CPU_FTR_COHERENT_ICACHE
)) {
94 * No CPU has hugepages but lacks no execute, so we
95 * don't need to worry about that case
97 rflags
= hash_page_do_lazy_icache(rflags
, __pte(old_pte
), trap
);
100 subpg_index
= (ea
& (PAGE_SIZE
- 1)) >> shift
;
101 vpn
= hpt_vpn(ea
, vsid
, ssize
);
102 rpte
= __real_pte(__pte(old_pte
), ptep
);
104 *None of the sub 4k page is hashed
106 if (!(old_pte
& _PAGE_HASHPTE
))
107 goto htab_insert_hpte
;
109 * Check if the pte was already inserted into the hash table
110 * as a 64k HW page, and invalidate the 64k HPTE if so.
112 if (!(old_pte
& _PAGE_COMBO
)) {
113 flush_hash_page(vpn
, rpte
, MMU_PAGE_64K
, ssize
, flags
);
114 old_pte
&= ~_PAGE_HASHPTE
| _PAGE_F_GIX
| _PAGE_F_SECOND
;
115 goto htab_insert_hpte
;
118 * Check for sub page valid and update
120 if (__rpte_sub_valid(rpte
, subpg_index
)) {
123 hash
= hpt_hash(vpn
, shift
, ssize
);
124 hidx
= __rpte_to_hidx(rpte
, subpg_index
);
125 if (hidx
& _PTEIDX_SECONDARY
)
127 slot
= (hash
& htab_hash_mask
) * HPTES_PER_GROUP
;
128 slot
+= hidx
& _PTEIDX_GROUP_IX
;
130 ret
= ppc_md
.hpte_updatepp(slot
, rflags
, vpn
,
131 MMU_PAGE_4K
, MMU_PAGE_4K
,
134 *if we failed because typically the HPTE wasn't really here
135 * we try an insertion.
138 goto htab_insert_hpte
;
140 *ptep
= __pte(new_pte
& ~_PAGE_BUSY
);
146 * handle _PAGE_4K_PFN case
148 if (old_pte
& _PAGE_4K_PFN
) {
150 * All the sub 4k page have the same
153 pa
= pte_pfn(__pte(old_pte
)) << HW_PAGE_SHIFT
;
155 pa
= pte_pfn(__pte(old_pte
)) << PAGE_SHIFT
;
156 pa
+= (subpg_index
<< shift
);
158 hash
= hpt_hash(vpn
, shift
, ssize
);
160 hpte_group
= ((hash
& htab_hash_mask
) * HPTES_PER_GROUP
) & ~0x7UL
;
162 /* Insert into the hash table, primary slot */
163 slot
= ppc_md
.hpte_insert(hpte_group
, vpn
, pa
, rflags
, 0,
164 MMU_PAGE_4K
, MMU_PAGE_4K
, ssize
);
166 * Primary is full, try the secondary
168 if (unlikely(slot
== -1)) {
169 hpte_group
= ((~hash
& htab_hash_mask
) * HPTES_PER_GROUP
) & ~0x7UL
;
170 slot
= ppc_md
.hpte_insert(hpte_group
, vpn
, pa
,
171 rflags
, HPTE_V_SECONDARY
,
172 MMU_PAGE_4K
, MMU_PAGE_4K
, ssize
);
175 hpte_group
= ((hash
& htab_hash_mask
) *
176 HPTES_PER_GROUP
) & ~0x7UL
;
177 ppc_md
.hpte_remove(hpte_group
);
179 * FIXME!! Should be try the group from which we removed ?
185 * Hypervisor failure. Restore old pmd and return -1
186 * similar to __hash_page_*
188 if (unlikely(slot
== -2)) {
189 *ptep
= __pte(old_pte
);
190 hash_failure_debug(ea
, access
, vsid
, trap
, ssize
,
191 MMU_PAGE_4K
, MMU_PAGE_4K
, old_pte
);
195 * Insert slot number & secondary bit in PTE second half,
196 * clear _PAGE_BUSY and set appropriate HPTE slot bit
197 * Since we have _PAGE_BUSY set on ptep, we can be sure
198 * nobody is undating hidx.
200 hidxp
= (unsigned long *)(ptep
+ PTRS_PER_PTE
);
201 rpte
.hidx
&= ~(0xfUL
<< (subpg_index
<< 2));
202 *hidxp
= rpte
.hidx
| (slot
<< (subpg_index
<< 2));
203 new_pte
= mark_subptegroup_valid(new_pte
, subpg_index
);
204 new_pte
|= _PAGE_HASHPTE
;
206 * check __real_pte for details on matching smp_rmb()
209 *ptep
= __pte(new_pte
& ~_PAGE_BUSY
);
213 int __hash_page_64K(unsigned long ea
, unsigned long access
,
214 unsigned long vsid
, pte_t
*ptep
, unsigned long trap
,
215 unsigned long flags
, int ssize
)
218 unsigned long hpte_group
;
219 unsigned long rflags
, pa
;
220 unsigned long old_pte
, new_pte
;
221 unsigned long vpn
, hash
, slot
;
222 unsigned long shift
= mmu_psize_defs
[MMU_PAGE_64K
].shift
;
225 * atomically mark the linux large page PTE busy and dirty
228 pte_t pte
= READ_ONCE(*ptep
);
230 old_pte
= pte_val(pte
);
231 /* If PTE busy, retry the access */
232 if (unlikely(old_pte
& _PAGE_BUSY
))
234 /* If PTE permissions don't match, take page fault */
235 if (unlikely(access
& ~old_pte
))
238 * Check if PTE has the cache-inhibit bit set
239 * If so, bail out and refault as a 4k page
241 if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE
) &&
242 unlikely(old_pte
& _PAGE_NO_CACHE
))
245 * Try to lock the PTE, add ACCESSED and DIRTY if it was
246 * a write access. Since this is 4K insert of 64K page size
247 * also add _PAGE_COMBO
249 new_pte
= old_pte
| _PAGE_BUSY
| _PAGE_ACCESSED
;
250 if (access
& _PAGE_RW
)
251 new_pte
|= _PAGE_DIRTY
;
252 } while (old_pte
!= __cmpxchg_u64((unsigned long *)ptep
,
255 rflags
= htab_convert_pte_flags(new_pte
);
257 if (!cpu_has_feature(CPU_FTR_NOEXECUTE
) &&
258 !cpu_has_feature(CPU_FTR_COHERENT_ICACHE
))
259 rflags
= hash_page_do_lazy_icache(rflags
, __pte(old_pte
), trap
);
261 vpn
= hpt_vpn(ea
, vsid
, ssize
);
262 if (unlikely(old_pte
& _PAGE_HASHPTE
)) {
264 * There MIGHT be an HPTE for this pte
266 hash
= hpt_hash(vpn
, shift
, ssize
);
267 if (old_pte
& _PAGE_F_SECOND
)
269 slot
= (hash
& htab_hash_mask
) * HPTES_PER_GROUP
;
270 slot
+= (old_pte
& _PAGE_F_GIX
) >> _PAGE_F_GIX_SHIFT
;
272 if (ppc_md
.hpte_updatepp(slot
, rflags
, vpn
, MMU_PAGE_64K
,
273 MMU_PAGE_64K
, ssize
, flags
) == -1)
274 old_pte
&= ~_PAGE_HPTEFLAGS
;
277 if (likely(!(old_pte
& _PAGE_HASHPTE
))) {
279 pa
= pte_pfn(__pte(old_pte
)) << PAGE_SHIFT
;
280 hash
= hpt_hash(vpn
, shift
, ssize
);
283 hpte_group
= ((hash
& htab_hash_mask
) * HPTES_PER_GROUP
) & ~0x7UL
;
285 /* Insert into the hash table, primary slot */
286 slot
= ppc_md
.hpte_insert(hpte_group
, vpn
, pa
, rflags
, 0,
287 MMU_PAGE_64K
, MMU_PAGE_64K
, ssize
);
289 * Primary is full, try the secondary
291 if (unlikely(slot
== -1)) {
292 hpte_group
= ((~hash
& htab_hash_mask
) * HPTES_PER_GROUP
) & ~0x7UL
;
293 slot
= ppc_md
.hpte_insert(hpte_group
, vpn
, pa
,
294 rflags
, HPTE_V_SECONDARY
,
295 MMU_PAGE_64K
, MMU_PAGE_64K
, ssize
);
298 hpte_group
= ((hash
& htab_hash_mask
) *
299 HPTES_PER_GROUP
) & ~0x7UL
;
300 ppc_md
.hpte_remove(hpte_group
);
302 * FIXME!! Should be try the group from which we removed ?
308 * Hypervisor failure. Restore old pmd and return -1
309 * similar to __hash_page_*
311 if (unlikely(slot
== -2)) {
312 *ptep
= __pte(old_pte
);
313 hash_failure_debug(ea
, access
, vsid
, trap
, ssize
,
314 MMU_PAGE_64K
, MMU_PAGE_64K
, old_pte
);
317 new_pte
= (new_pte
& ~_PAGE_HPTEFLAGS
) | _PAGE_HASHPTE
;
318 new_pte
|= (slot
<< _PAGE_F_GIX_SHIFT
) & (_PAGE_F_SECOND
| _PAGE_F_GIX
);
320 *ptep
= __pte(new_pte
& ~_PAGE_BUSY
);