#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *	PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/page.h>
#include <asm/bug.h>
#include <asm/asm-const.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/bug.h>
#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		2
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
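
/*
 * Illustrative example (not part of this header): a bolted kernel SLB
 * entry's VSID word is composed roughly as
 *
 *	vsid_data = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_KERNEL | llp |
 *		    ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
 *
 * i.e. the scrambled VSID in the upper bits, Kp set (the protection
 * convention assumed by the PP values elsewhere in this file), the
 * page-size (L/LP) bits, and the segment-size selector in the top two
 * bits.  The actual bolting code lives in arch/powerpc/mm/slb.c; the
 * variable names above are made up for the example.
 */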

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_KEY_BIT0		ASM_CONST(0x2000000000000000)
#define HPTE_R_KEY_BIT1		ASM_CONST(0x1000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
#define HPTE_R_KEY_BIT2		ASM_CONST(0x0000000000000800)
#define HPTE_R_KEY_BIT3		ASM_CONST(0x0000000000000400)
#define HPTE_R_KEY_BIT4		ASM_CONST(0x0000000000000200)
#define HPTE_R_KEY		(HPTE_R_KEY_LO | HPTE_R_KEY_HI)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void	(*hpte_invalidate)(unsigned long slot,
				   unsigned long vpn,
				   int bpsize, int apsize,
				   int ssize, int local);
	long	(*hpte_updatepp)(unsigned long slot,
				 unsigned long newpp,
				 unsigned long vpn,
				 int bpsize, int apsize,
				 int ssize, unsigned long flags);
	void	(*hpte_updateboltedpp)(unsigned long newpp,
				       unsigned long ea,
				       int psize, int ssize);
	long	(*hpte_insert)(unsigned long hpte_group,
			       unsigned long vpn,
			       unsigned long prpn,
			       unsigned long rflags,
			       unsigned long vflags,
			       int psize, int apsize,
			       int ssize);
	long	(*hpte_remove)(unsigned long hpte_group);
	int	(*hpte_removebolted)(unsigned long ea,
				     int psize, int ssize);
	void	(*flush_hash_range)(unsigned long number, int local);
	void	(*hugepage_invalidate)(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local);
	int	(*resize_hpt)(unsigned long shift);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void	(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;


static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
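
/*
 * Illustrative usage (assuming the usual page-size table): a 4K page has
 * shift 12 and a 64K page shift 16, so shift_to_mmu_psize(12) returns
 * MMU_PAGE_4K and shift_to_mmu_psize(16) returns MMU_PAGE_64K when those
 * sizes are supported; mmu_psize_to_shift() is the inverse and BUG()s on
 * a page size the platform never filled in.
 */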

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}
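
/*
 * Worked example (assuming the common 64K encoding where
 * mmu_psize_defs[MMU_PAGE_64K].sllp == SLB_VSID_L | SLB_VSID_LP_01):
 *
 *	(0x100 >> 6) | (0x10 >> 4) == 0x4 | 0x1 == 0x5
 *
 * i.e. the SLB L bit lands in bit 2 and the LP bits in bits 1:0, which is
 * the packed L||LP form the TLB invalidation code wants.
 */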

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encode the page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash we can ignore the page size bits of the VA, and for
 * HPTE encoding we ignore up to 23 bits of it. So ignoring the lower
 * 12 bits works in all cases, including a 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
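
/*
 * Decoding summary (illustrative): each hpte_page_sizes[] entry packs two
 * MMU_PAGE_* indices, the actual page size in the high nibble and the base
 * page size in the low nibble, so for a large-page HPTE
 *
 *	hpte_page_size(v, r)      == 1ul << mmu_psize_defs[entry >> 4].shift
 *	hpte_base_page_size(v, r) == 1ul << mmu_psize_defs[entry & 0xf].shift
 *
 * while an entry of 0 means the LP value is not a valid encoding.
 */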

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
		(((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}

static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is, properly
 * aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
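
/*
 * Illustrative sketch (not built): roughly how an HPTE insertion path
 * combines the two encode helpers above into the dword pair it writes,
 * before any ISA v3.0 conversion.  The variable names are made up for
 * the example.
 */
#if 0
	hpte_v = hpte_encode_v(vpn, base_psize, actual_psize, ssize) |
		 vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, base_psize, actual_psize) | rflags;

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}
#endif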

/*
 * Build a VPN_SHIFT-bit shifted VA given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
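
/*
 * Worked example: for a 256M segment, segment_shift() is SID_SHIFT (28),
 * so the mask covers 28 - VPN_SHIFT = 16 bits and the result is
 *
 *	vpn = (vsid << 16) | ((ea >> 12) & 0xffff)
 *
 * i.e. the VSID followed by the offset within the segment, with the whole
 * 78-bit VA already shifted right by VPN_SHIFT bits.
 */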

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
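
/*
 * Illustrative sketch (not built): how the fault paths typically turn this
 * hash into primary and secondary PTEG slot groups; variable names are
 * made up for the example.
 */
#if 0
	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);

	/* primary bucket */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	/* secondary bucket uses the ones' complement of the hash */
	hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
#endif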

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

struct slb_entry {
	u64	esid;
	u64	vsid;
};

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to MAX_USER_CONTEXT.
 *
 * For kernel space, we use context ids 1-4 to map addresses as below:
 * NOTE: each context only supports 64TB now.
 * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We use VSID 0 to indicate an invalid VSID. This means we can't use context
 * id 0, because a context id of 0 and an EA of 0 gives a proto-VSID of 0,
 * which will produce a VSID of 0.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in vsid scramble.
 */

/*
 * The maximum VA bits we support at present is 68. We want a 19-bit
 * context ID.
 * Restrictions:
 * The GPU cannot access beyond 128TB (47-bit effective address). We also
 * cannot do more than a 20-bit PID.
 * For p4 and p5, which can only do a 65-bit VA, we restrict CONTEXT_BITS
 * to 16 bits (i.e. we can only have 2^16 pids at the same time).
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
 * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^49 bytes (512TB).
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in vsid scramble.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)
#define MIN_USER_CONTEXT	(5)

/* Would be nice to use KERNEL_REGION_ID here */
#define KERNEL_REGION_CONTEXT_OFFSET	(0xc - 1)

/*
 * For platforms that support only a 65-bit VA we limit the context bits.
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. The vsid_multiplier should also be
 * co-prime to vsid_modulus. We also need to make sure that the number
 * of bits in the multiplied result (dividend) is less than twice the
 * number of protovsid bits for our modulus optimization to work.
 *
 * The below table shows the current values used.
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2* proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   25 |         49 |                 50 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   37 |         61 |                 74 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2* proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   28 |         52 |                 56 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   40 |         64 |                 80 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)
/*
 * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
 */
#define VSID_MULINV_256M	ASM_CONST(665548017062)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
#define VSID_MULINV_1T		ASM_CONST(209034062)

/* 1TB VSID reserved for VRMA */
#define VRMA_VSID		0x1ffffffUL
#define USER_VSID_RANGE		(1UL << (ESID_BITS + SID_SHIFT))

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->context.slb_addr_limit >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS	(PAGE_SHIFT - 2)
#define SBP_L2_BITS	(PAGE_SHIFT - 3)
#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
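
/*
 * Worked sizes with 64K pages (PAGE_SHIFT == 16): SBP_L1_BITS is 14, so one
 * page holds 16384 protection words and, at 64K covered per 4-byte word,
 * protects 1GB; SBP_L2_BITS is 13, so one page of pointers covers
 * 8192 * 1GB = 8TB, matching the comment above.
 */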

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else /* 1 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
				  unsigned long vsid_multiplier, int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We have the same multiplier for both 256M and 1T segments now
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}
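
/*
 * Why the fold above works (sketch): the modulus M is 2^vsid_bits - 1, so
 * 2^vsid_bits == 1 (mod M) and a product x = hi * 2^vsid_bits + lo is
 * congruent to hi + lo.  The second-to-last line performs that fold; the
 * return line handles the sum possibly still being >= M, since
 * (vsid + 1) >> vsid_bits becomes 1 once vsid reaches M and the final
 * mask maps M itself back to 0.
 */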

#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}
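
/*
 * Worked numbers: with the full 68-bit VA, vsid_bits is 40 for 256M
 * segments (68 - 28) and 28 for 1T segments (68 - 40); without
 * MMU_FTR_68_BIT_VA it drops to 37 and 25 respectively, which is what
 * the VSID_BITS_65_* constants above describe.
 */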

/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	/*
	 * For kernel space, we use context ids 1-4 to map the address space as
	 * below:
	 *
	 * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
	 * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
	 * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
	 * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
	 *
	 * So we can compute the context from the region (top nibble) by
	 * subtracting 11, or 0xc - 1.
	 */
	context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET;

	return get_vsid(context, ea, ssize);
}
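
/*
 * Worked example: for ea = 0xc000000000000000 the top nibble is 0xc, so
 * context = 0xc - KERNEL_REGION_CONTEXT_OFFSET = 1, matching the table
 * above; an address in the 0xd region maps to context 2, and so on.
 */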

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */