#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x6
#define STAB0_OFFSET	(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* !__ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
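
/*
 * For illustration only: an SLB "vsid" doubleword is built by shifting
 * a scrambled VSID up by SLB_VSID_SHIFT and or'ing in the flag bits
 * above plus the page-size encoding (see mmu_psize_defs[].sllp below).
 * A hypothetical kernel-segment entry might be composed as:
 *
 *	vsid_data = (get_kernel_vsid(ea) << SLB_VSID_SHIFT)
 *			| SLB_VSID_KERNEL | mmu_psize_defs[psize].sllp;
 *
 * which is roughly the shape of what the SLB setup and miss-handling
 * code ultimately loads with the slbmte instruction.
 */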

#define SLBIE_C			(0x08000000)

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
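
/*
 * Illustrative use of the comparison helper (a hypothetical snippet in
 * the style of a native hash-table update routine): an HPTE matches a
 * target virtual address only if its AVPN bits agree and it is valid:
 *
 *	want_v = hpte_encode_v(va, psize);
 *	if (!HPTE_V_COMPARE(hpte->v, want_v) || !(hpte->v & HPTE_V_VALID))
 *		return -1;	// not the entry we are looking for
 */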

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read, User read */

#ifndef __ASSEMBLY__

typedef struct {
	unsigned long v;
	unsigned long r;
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
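
/*
 * For illustration only: the mmu_psize_defs[] array declared below is
 * populated at boot by the platform MMU setup code.  Assuming the
 * standard POWER-style encodings, a 16M large-page entry might look
 * like:
 *
 *	mmu_psize_defs[MMU_PAGE_16M] = (struct mmu_psize_def) {
 *		.shift	= 24,			// 16M = 2^24
 *		.penc	= 0,			// LP encoding for 16M
 *		.tlbiel	= 0,			// no tlbiel for 16M pages
 *		.avpnm	= 0x1UL,		// mask low AVPN bit
 *		.sllp	= SLB_VSID_L,		// L=1, LP=00 in the SLB
 *	};
 *
 * The exact values are per-CPU-family and live in the MMU setup code,
 * not in this header.
 */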

#endif /* __ASSEMBLY__ */

/*
 * The kernel uses the constants below to index into the page size array.
 * Using fixed constants for this purpose is better for the performance
 * of the low level hash refill handlers.
 *
 * An unsupported page size has its "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 */

#define MMU_PAGE_4K		0	/* 4K */
#define MMU_PAGE_64K		1	/* 64K */
#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
#define MMU_PAGE_1M		3	/* 1M */
#define MMU_PAGE_16M		4	/* 16M */
#define MMU_PAGE_16G		5	/* 16G */
#define MMU_PAGE_COUNT		6

#ifndef __ASSEMBLY__

/*
 * The current system page sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_io_psize;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The page size index of the huge pages for use by hugetlbfs
 */
extern int mmu_huge_psize;

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize)
{
	unsigned long v;

	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN, and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is
 * properly aligned for the requested page size
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
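
/*
 * For illustration only: a hash-table insertion path would combine the
 * two encoders above with the flag bits, roughly as follows
 * (hypothetical snippet, not a definitive implementation):
 *
 *	hpte_t hpte;
 *
 *	hpte.v = hpte_encode_v(va, psize) | HPTE_V_VALID;
 *	hpte.r = hpte_encode_r(pa, psize) | PP_RWXX;	// kernel RW, user none
 *
 * The real insertion routines also handle HPTE_V_SECONDARY,
 * HPTE_V_BOLTED and the other HPTE_R_* flags as needed.
 */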

/*
 * This hashes a virtual address for a 256MB segment only, for now
 */

static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
{
	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
}
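
/*
 * Worked example (for illustration): for a 4K page, shift is 12, so
 * the hash xors the VSID portion of the VA (va >> 28) with the page
 * index within the 256MB segment ((va & 0x0fffffff) >> 12).  Callers
 * then mask the result with htab_hash_mask to pick a hash group.
 */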

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local);
struct mm_struct;
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
			  unsigned long ea, unsigned long vsid, int local,
			  unsigned long trap);

extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long mode,
			     int psize);

extern void htab_initialize(void);
extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID". For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *
 *	(context << 16) | (esid & 0xffff)
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 200730139 (a 28-bit prime)
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved. VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * - We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * - We allow for 16 significant bits of ESID and 19 bits of
 * context for user addresses, i.e. 16T (44 bits) of address space for
 * up to 512K contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 * - The scramble function gives robust scattering in the hash
 * table (at least based on some initial results). The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
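
/*
 * Why the mod 2^36-1 needs no divide (for illustration): since
 * 2^36 == 1 (mod 2^36-1), a 64-bit product x can be folded as
 *
 *	x mod (2^36-1) == ((x >> 36) + (x & (2^36-1))) mod (2^36-1)
 *
 * i.e. the high bits are simply added into the low bits.  One fold
 * leaves a value of at most 2^36-2 + 2^28-1 here (the multiplier is
 * below 2^28), so a single conditional fold of the carry bit finishes
 * the reduction.  ASM_VSID_SCRAMBLE and vsid_scramble() below both
 * implement exactly this.
 */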
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	19
#define USER_ESID_BITS	16

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if rt >=	\
	 * 2^36-1, then rt+1 has the 2^36 bit set.  So, if rt+1 has	\
	 * the bit clear, rt already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of rt+1.  So in all	\
	 * cases the answer is the low 36 bits of (rt + ((rt+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx

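/*
 * Hypothetical usage sketch (the real callers live in the SLB and
 * segment-table miss handlers; register choices here are illustrative):
 *
 *	// rX holds the proto-VSID on entry, rY is free as scratch
 *	ASM_VSID_SCRAMBLE(rX, rY)
 *	// the low 36 bits of rX now hold the VSID
 *
 * vsid_scramble() below is the equivalent C implementation.
 */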

#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;			/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;		/* SLB page size encodings */
	u64 high_slices_psize;		/* 4 bits per slice for now */
#else
	u16 sllp;			/* SLB page size encoding */
#endif
	unsigned long vdso_base;
} mm_context_t;
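
/*
 * For illustration only: with CONFIG_PPC_MM_SLICES, each psize field
 * packs one 4-bit page-size index (MMU_PAGE_*) per address-space
 * slice.  A hypothetical lookup of the psize for low slice n would be:
 *
 *	psize = (ctx->low_slices_psize >> (n * 4)) & 0xf;
 *
 * The slice geometry itself (how many slices, and their sizes) is
 * defined by the slice support code, not by this header.
 */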

static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}

/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (below USER_VSID_RANGE, i.e. 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}

#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

/* Physical address used by some IO functions */
typedef unsigned long phys_addr_t;

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */