// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 */

#undef DEBUG
#undef DEBUG_LOW

#define pr_fmt(fmt) "hash-mmu: " fmt
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>
#include <linux/libfdt.h>
#include <linux/pkeys.h>
#include <linux/hugetlb.h>
#include <linux/cpu.h>

#include <asm/debugfs.h>
#include <asm/processor.h>
#include <linux/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/copro.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
#include <asm/tm.h>
#include <asm/trace.h>
#include <asm/ps3.h>
#include <asm/pte-walk.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor.h>

#include <mm/mmu_decl.h>

#include "internal.h"


#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte  --> Linux PTE
 *        HPTE --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

u8 hpte_page_sizes[1 << LP_BITS];
EXPORT_SYMBOL_GPL(hpte_page_sizes);

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
struct mmu_hash_ops mmu_hash_ops;
EXPORT_SYMBOL(mmu_hash_ops);

/*
 * These are definitions of page sizes arrays to be used when none
 * is provided by the firmware.
 */

/*
 * Fallback (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
                .avpnm  = 0,
                .tlbiel = 0,
        },
};
/*
 * POWER4, GPUL, POWER5
 *
 * Support for 16MB large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
                           [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};

/*
 * 'R' and 'C' update notes:
 *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
 *    create writeable HPTEs without C set, because the hcall H_PROTECT
 *    that we use in that case will not update C
 *  - The above is however not a problem, because we also don't do that
 *    fancy "no flush" variant of eviction and we use H_REMOVE which will
 *    do the right thing and thus we don't have the race I described earlier
 *
 *  - Under bare metal, we do have the race, so we need R and C set
 *  - We make sure R is always set and never lost
 *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
 */
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
        unsigned long rflags = 0;

        /* _PAGE_EXEC -> NOEXEC */
        if ((pteflags & _PAGE_EXEC) == 0)
                rflags |= HPTE_R_N;
        /*
         * PPP bits:
         * Linux uses slb key 0 for kernel and 1 for user.
         * kernel RW areas are mapped with PPP=0b000
         * User area is mapped with PPP=0b010 for read/write
         * or PPP=0b011 for read-only (including writeable but clean pages).
         */
        if (pteflags & _PAGE_PRIVILEGED) {
                /*
                 * Kernel read only mapped with ppp bits 0b110
                 */
                if (!(pteflags & _PAGE_WRITE)) {
                        if (mmu_has_feature(MMU_FTR_KERNEL_RO))
                                rflags |= (HPTE_R_PP0 | 0x2);
                        else
                                rflags |= 0x3;
                }
        } else {
                if (pteflags & _PAGE_RWX)
                        rflags |= 0x2;
                if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
                        rflags |= 0x1;
        }
        /*
         * We can't allow hardware to update hpte bits. Hence always
         * set 'R' bit and set 'C' if it is a write fault
         */
        rflags |= HPTE_R_R;

        if (pteflags & _PAGE_DIRTY)
                rflags |= HPTE_R_C;
        /*
         * Add in WIG bits
         */
        if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
                rflags |= HPTE_R_I;
        else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
                rflags |= (HPTE_R_I | HPTE_R_G);
        else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
                rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
        else
                /*
                 * Add memory coherence if cache inhibited is not set
                 */
                rflags |= HPTE_R_M;

        rflags |= pte_to_hpte_pkey_bits(pteflags);
        return rflags;
}
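
/*
 * Worked example (illustrative, following the rules above): a writeable,
 * dirty kernel mapping has _PAGE_PRIVILEGED, _PAGE_WRITE, _PAGE_EXEC and
 * _PAGE_DIRTY set, so no PP bits are added (PPP = 0b000), HPTE_R_N stays
 * clear, and an ordinary cacheable page ends up with
 * HPTE_R_R | HPTE_R_C | HPTE_R_M.
 */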

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long prot,
                      int psize, int ssize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        prot = htab_convert_pte_flags(prot);

        DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
            vstart, vend, pstart, prot, psize, ssize);

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr, ssize);
                unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
                unsigned long tprot = prot;
                bool secondary_hash = false;

                /*
                 * If we hit a bad address return error.
                 */
                if (!vsid)
                        return -1;
                /* Make kernel text executable */
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;

                /*
                 * If relocatable, check if it overlaps interrupt vectors that
                 * are copied down to real 0. For a relocatable kernel
                 * (e.g. the kdump case) we copy interrupt vectors down to real
                 * address 0. Mark that region as executable. This is
                 * because on p8 systems with the relocation-on-exception
                 * feature enabled, exceptions are raised with MMU (IR=DR=1)
                 * ON. Hence, in order to execute the interrupt handlers in
                 * virtual mode, the vector region needs to be marked as
                 * executable.
                 */
                if ((PHYSICAL_START > MEMORY_START) &&
                    overlaps_interrupt_vector_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;

                hash = hpt_hash(vpn, shift, ssize);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                BUG_ON(!mmu_hash_ops.hpte_insert);
repeat:
                ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
                                               HPTE_V_BOLTED, psize, psize,
                                               ssize);
                if (ret == -1) {
                        /*
                         * Try to keep bolted entries in the primary group.
                         * Remove non-bolted entries and try the insert again.
                         */
                        ret = mmu_hash_ops.hpte_remove(hpteg);
                        if (ret != -1)
                                ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
                                                               HPTE_V_BOLTED, psize, psize,
                                                               ssize);
                        if (ret == -1 && !secondary_hash) {
                                secondary_hash = true;
                                hpteg = ((~hash & htab_hash_mask) * HPTES_PER_GROUP);
                                goto repeat;
                        }
                }

                if (ret < 0)
                        break;

                cond_resched();
#ifdef CONFIG_DEBUG_PAGEALLOC
                if (debug_pagealloc_enabled() &&
                    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
        }
        return ret < 0 ? ret : 0;
}

int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                        int psize, int ssize)
{
        unsigned long vaddr;
        unsigned int step, shift;
        int rc;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        if (!mmu_hash_ops.hpte_removebolted)
                return -ENODEV;

        for (vaddr = vstart; vaddr < vend; vaddr += step) {
                rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
                if (rc == -ENOENT) {
                        ret = -ENOENT;
                        continue;
                }
                if (rc < 0)
                        return rc;
        }

        return ret;
}

static bool disable_1tb_segments = false;

static int __init parse_disable_1tb_segments(char *p)
{
        disable_1tb_segments = true;
        return 0;
}
early_param("disable_1tb_segments", parse_disable_1tb_segments);

static int __init htab_dt_scan_seg_sizes(unsigned long node,
                                         const char *uname, int depth,
                                         void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be32 *prop;
        int size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
        if (prop == NULL)
                return 0;
        for (; size >= 4; size -= 4, ++prop) {
                if (be32_to_cpu(prop[0]) == 40) {
                        DBG("1T segment support detected\n");

                        if (disable_1tb_segments) {
                                DBG("1T segments disabled by command line\n");
                                break;
                        }

                        cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
                        return 1;
                }
        }
        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 0;
}

static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x14:
                idx = MMU_PAGE_1M;
                break;
        case 0x18:
                idx = MMU_PAGE_16M;
                break;
        case 0x22:
                idx = MMU_PAGE_16G;
                break;
        }
        return idx;
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be32 *prop;
        int size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
        if (!prop)
                return 0;

        pr_info("Page sizes from device-tree:\n");
        size /= 4;
        cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
        while (size > 0) {
                unsigned int base_shift = be32_to_cpu(prop[0]);
                unsigned int slbenc = be32_to_cpu(prop[1]);
                unsigned int lpnum = be32_to_cpu(prop[2]);
                struct mmu_psize_def *def;
                int idx, base_idx;

                size -= 3; prop += 3;
                base_idx = get_idx_from_shift(base_shift);
                if (base_idx < 0) {
                        /* skip the pte encoding also */
                        prop += lpnum * 2; size -= lpnum * 2;
                        continue;
                }
                def = &mmu_psize_defs[base_idx];
                if (base_idx == MMU_PAGE_16M)
                        cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;

                def->shift = base_shift;
                if (base_shift <= 23)
                        def->avpnm = 0;
                else
                        def->avpnm = (1 << (base_shift - 23)) - 1;
                def->sllp = slbenc;
                /*
                 * We don't know for sure what's up with tlbiel, so
                 * for now we only set it for 4K and 64K pages
                 */
                if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
                        def->tlbiel = 1;
                else
                        def->tlbiel = 0;

                while (size > 0 && lpnum) {
                        unsigned int shift = be32_to_cpu(prop[0]);
                        int penc = be32_to_cpu(prop[1]);

                        prop += 2; size -= 2;
                        lpnum--;

                        idx = get_idx_from_shift(shift);
                        if (idx < 0)
                                continue;

                        if (penc == -1)
                                pr_err("Invalid penc for base_shift=%d "
                                       "shift=%d\n", base_shift, shift);

                        def->penc[idx] = penc;
                        pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
                                " avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
                                base_shift, shift, def->sllp,
                                def->avpnm, def->tlbiel, def->penc[idx]);
                }
        }

        return 1;
}
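
/*
 * For reference, the "ibm,segment-page-sizes" property parsed above is a
 * flat array of cells laid out as repeated groups of:
 *   base_shift, slbenc, lpnum, then lpnum pairs of { shift, penc }.
 * A (hypothetical) group <0x18 0x100 0x1 0x18 0x0> would describe a 16M
 * base page size (shift 0x18) with SLB encoding SLB_VSID_L and a single
 * supported actual size, 16M, with penc 0.
 */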

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
                                               const char *uname, int depth,
                                               void *data) {
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be64 *addr_prop;
        const __be32 *page_count_prop;
        unsigned int expected_pages;
        long unsigned int phys_addr;
        long unsigned int block_size;

        /* We are scanning "memory" nodes only */
        if (type == NULL || strcmp(type, "memory") != 0)
                return 0;

        /*
         * This property is the log base 2 of the number of virtual pages that
         * will represent this memory block.
         */
        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
        if (page_count_prop == NULL)
                return 0;
        expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
        if (addr_prop == NULL)
                return 0;
        phys_addr = be64_to_cpu(addr_prop[0]);
        block_size = be64_to_cpu(addr_prop[1]);
        if (block_size != (16 * GB))
                return 0;
        printk(KERN_INFO "Huge page(16GB) memory: "
               "addr = 0x%lX size = 0x%lX pages = %d\n",
               phys_addr, block_size, expected_pages);
        if (phys_addr + block_size * expected_pages <= memblock_end_of_DRAM()) {
                memblock_reserve(phys_addr, block_size * expected_pages);
                pseries_add_gpage(phys_addr, block_size, expected_pages);
        }
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void mmu_psize_set_default_penc(void)
{
        int bpsize, apsize;
        for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
                for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
                        mmu_psize_defs[bpsize].penc[apsize] = -1;
}

#ifdef CONFIG_PPC_64K_PAGES

static bool might_have_hea(void)
{
        /*
         * The HEA ethernet adapter requires awareness of the
         * GX bus. Without that awareness we can easily assume
         * we will never see an HEA ethernet device.
         */
#ifdef CONFIG_IBMEBUS
        return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
                firmware_has_feature(FW_FEATURE_SPLPAR);
#else
        return false;
#endif
}

#endif /* #ifdef CONFIG_PPC_64K_PAGES */

static void __init htab_scan_page_sizes(void)
{
        int rc;

        /* set the invalid penc to -1 */
        mmu_psize_set_default_penc();

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults,
               sizeof(mmu_psize_defaults));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
                /*
                 * Nothing in the device-tree, but the CPU supports 16M pages,
                 * so fall back to a known size list for 16M-capable CPUs.
                 */
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
        }

#ifdef CONFIG_HUGETLB_PAGE
        if (!hugetlb_disabled) {
                /* Reserve 16G huge page memory sections for huge pages */
                of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
        }
#endif /* CONFIG_HUGETLB_PAGE */
}

/*
 * Fill in the hpte_page_sizes[] array.
 * We go through the mmu_psize_defs[] array looking for all the
 * supported base/actual page size combinations.  Each combination
 * has a unique pagesize encoding (penc) value in the low bits of
 * the LP field of the HPTE.  For actual page sizes less than 1MB,
 * some of the upper LP bits are used for RPN bits, meaning that
 * we need to fill in several entries in hpte_page_sizes[].
 *
 * In diagrammatic form, with r = RPN bits and z = page size bits:
 *        PTE LP     actual page size
 *    rrrr rrrz        >=8KB
 *    rrrr rrzz        >=16KB
 *    rrrr rzzz        >=32KB
 *    rrrr zzzz        >=64KB
 *    ...
 *
 * The zzzz bits are implementation-specific but are chosen so that
 * no encoding for a larger page size uses the same value in its
 * low-order N bits as the encoding for the 2^(12+N) byte page size
 * (if it exists).
 */
static void init_hpte_page_sizes(void)
{
        long int ap, bp;
        long int shift, penc;

        for (bp = 0; bp < MMU_PAGE_COUNT; ++bp) {
                if (!mmu_psize_defs[bp].shift)
                        continue;       /* not a supported page size */
                for (ap = bp; ap < MMU_PAGE_COUNT; ++ap) {
                        penc = mmu_psize_defs[bp].penc[ap];
                        if (penc == -1 || !mmu_psize_defs[ap].shift)
                                continue;
                        shift = mmu_psize_defs[ap].shift - LP_SHIFT;
                        if (shift <= 0)
                                continue;       /* should never happen */
                        /*
                         * For page sizes less than 1MB, this loop
                         * replicates the entry for all possible values
                         * of the rrrr bits.
                         */
                        while (penc < (1 << LP_BITS)) {
                                hpte_page_sizes[penc] = (ap << 4) | bp;
                                penc += 1 << shift;
                        }
                }
        }
}
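
/*
 * Worked example (illustrative): with LP_BITS == 8 and LP_SHIFT == 12,
 * an actual page size of 64K (shift 16) gives shift == 4. If its penc
 * were e.g. 1, the loop above would store (ap << 4) | bp at
 * hpte_page_sizes[1], [17], [33], ... i.e. at every LP value whose low
 * four bits match the 64K encoding, whatever the rrrr bits contain.
 */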

static void __init htab_init_page_sizes(void)
{
        bool aligned = true;
        init_hpte_page_sizes();

        if (!debug_pagealloc_enabled()) {
                /*
                 * Pick a size for the linear mapping. Currently, we only
                 * support 16M, 1M and 4K which is the default
                 */
                if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) &&
                    (unsigned long)_stext % 0x1000000) {
                        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                                pr_warn("Kernel not 16M aligned, "
                                        "disabling 16M linear map alignment");
                        aligned = false;
                }

                if (mmu_psize_defs[MMU_PAGE_16M].shift && aligned)
                        mmu_linear_psize = MMU_PAGE_16M;
                else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                        mmu_linear_psize = MMU_PAGE_1M;
        }

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
         * 64K for user mappings and vmalloc if supported by the processor.
         * We only use 64k for ioremap if the processor
         * (and firmware) support cache-inhibited large pages.
         * If not, we use 4k and set mmu_ci_restrictions so that
         * hash_page knows to switch processes that use cache-inhibited
         * mappings to 4k pages.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmalloc_psize = MMU_PAGE_64K;
                if (mmu_linear_psize == MMU_PAGE_4K)
                        mmu_linear_psize = MMU_PAGE_64K;
                if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
                        /*
                         * When running on pSeries using 64k pages for ioremap
                         * would stop us accessing the HEA ethernet. So if we
                         * have the chance of ever seeing one, stay at 4k.
                         */
                        if (!might_have_hea())
                                mmu_io_psize = MMU_PAGE_64K;
                } else
                        mmu_ci_restrictions = 1;
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /*
         * We try to use 16M pages for vmemmap if that is supported
         * and we have at least 1G of RAM at boot
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
            memblock_phys_mem_size() >= 0x40000000)
                mmu_vmemmap_psize = MMU_PAGE_16M;
        else
                mmu_vmemmap_psize = mmu_virtual_psize;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
               "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               ", vmemmap = %d"
#endif
               "\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
               mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               , mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
               );
}

static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const __be32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = be32_to_cpu(prop[1]);
                return 1;
        }
        return 0;
}

unsigned htab_shift_for_mem_size(unsigned long mem_size)
{
        unsigned memshift = __ilog2(mem_size);
        unsigned pshift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned pteg_shift;

        /* round mem_size up to next power of 2 */
        if ((1UL << memshift) < mem_size)
                memshift += 1;

        /* aim for 2 pages / pteg */
        pteg_shift = memshift - (pshift + 1);

        /*
         * 2^11 PTEGS of 128 bytes each, ie. 2^18 bytes is the minimum htab
         * size permitted by the architecture.
         */
        return max(pteg_shift + 7, 18U);
}
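
/*
 * Worked example (illustrative): for mem_size = 1GB with 4K base pages,
 * memshift = 30 and pshift = 12, so pteg_shift = 30 - 13 = 17; with
 * 128-byte (2^7) PTEGs that gives 17 + 7 = 24, i.e. a 16MB hash table.
 * The same 1GB with 64K base pages gives 30 - 17 + 7 = 20, i.e. a 1MB
 * table, still above the 2^18 architectural minimum.
 */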

static unsigned long __init htab_get_table_size(void)
{
        /*
         * If the hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there either, we
         * calculate it now based on the total RAM size.
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        return 1UL << htab_shift_for_mem_size(memblock_phys_mem_size());
}

#ifdef CONFIG_MEMORY_HOTPLUG
int resize_hpt_for_hotplug(unsigned long new_mem_size)
{
        unsigned target_hpt_shift;

        if (!mmu_hash_ops.resize_hpt)
                return 0;

        target_hpt_shift = htab_shift_for_mem_size(new_mem_size);

        /*
         * To avoid lots of HPT resizes if memory size is fluctuating
         * across a boundary, we deliberately have some hysteresis
         * here: we immediately increase the HPT size if the target
         * shift exceeds the current shift, but we won't attempt to
         * reduce unless the target shift is at least 2 below the
         * current shift.
         */
        if (target_hpt_shift > ppc64_pft_size ||
            target_hpt_shift < ppc64_pft_size - 1)
                return mmu_hash_ops.resize_hpt(target_hpt_shift);

        return 0;
}
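
/*
 * Worked example (illustrative): with a current ppc64_pft_size of 24,
 * the HPT is grown as soon as the target shift reaches 25, but only
 * shrunk once the target shift drops to 22 or below; targets of 23
 * or 24 leave the table alone.
 */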

int hash__create_section_mapping(unsigned long start, unsigned long end,
                                 int nid, pgprot_t prot)
{
        int rc;

        if (end >= H_VMALLOC_START) {
                pr_warn("Outside the supported range\n");
                return -1;
        }

        rc = htab_bolt_mapping(start, end, __pa(start),
                               pgprot_val(prot), mmu_linear_psize,
                               mmu_kernel_ssize);

        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
                                              mmu_kernel_ssize);
                BUG_ON(rc2 && (rc2 != -ENOENT));
        }
        return rc;
}

int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
        int rc = htab_remove_mapping(start, end, mmu_linear_psize,
                                     mmu_kernel_ssize);
        WARN_ON(rc < 0);
        return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __init hash_init_partition_table(phys_addr_t hash_table,
                                             unsigned long htab_size)
{
        mmu_partition_table_init();

        /*
         * PS field (VRMA page size) is not used for LPID 0, hence set to 0.
         * For now, UPRT is 0 and we have no segment table.
         */
        htab_size = __ilog2(htab_size) - 18;
        mmu_partition_table_set_entry(0, hash_table | htab_size, 0, false);
        pr_info("Partition table %p\n", partition_tb);
}

static void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long prot;
        unsigned long base = 0, size = 0;
        struct memblock_region *reg;

        DBG(" -> htab_initialize()\n");

        if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
                mmu_kernel_ssize = MMU_SEGSIZE_1T;
                mmu_highuser_ssize = MMU_SEGSIZE_1T;
                printk(KERN_INFO "Using 1TB segments\n");
        }

        if (stress_slb_enabled)
                static_branch_enable(&stress_slb_key);

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR) ||
            firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
#ifdef CONFIG_FA_DUMP
                /*
                 * If firmware assisted dump is active firmware preserves
                 * the contents of htab along with entire partition memory.
                 * Clear the htab if firmware assisted dump is active so
                 * that we don't end up using old mappings.
                 */
                if (is_fadump_active() && mmu_hash_ops.hpte_clear_all)
                        mmu_hash_ops.hpte_clear_all();
#endif
        } else {
                unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;

#ifdef CONFIG_PPC_CELL
                /*
                 * Cell may require the hash table down low when using the
                 * Axon IOMMU in order to fit the dynamic region over it, see
                 * comments in cell/iommu.c
                 */
                if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
                        limit = 0x80000000;
                        pr_info("Hash table forced below 2G for Axon IOMMU\n");
                }
#endif /* CONFIG_PPC_CELL */

                table = memblock_phys_alloc_range(htab_size_bytes,
                                                  htab_size_bytes,
                                                  0, limit);
                if (!table)
                        panic("ERROR: Failed to allocate %pa bytes below %pa\n",
                              &htab_size_bytes, &limit);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = __va(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(htab_size_bytes) - 18;

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                if (!cpu_has_feature(CPU_FTR_ARCH_300))
                        /* Set SDR1 */
                        mtspr(SPRN_SDR1, _SDR1);
                else
                        hash_init_partition_table(table, htab_size_bytes);
        }

        prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
        if (debug_pagealloc_enabled()) {
                linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
                linear_map_hash_slots = memblock_alloc_try_nid(
                                linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
                                ppc64_rma_size, NUMA_NO_NODE);
                if (!linear_map_hash_slots)
                        panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
                              __func__, linear_map_hash_count, &ppc64_rma_size);
        }
#endif /* CONFIG_DEBUG_PAGEALLOC */

        /* create the bolted linear mapping in the hash table */
        for_each_memblock(memory, reg) {
                base = (unsigned long)__va(reg->base);
                size = reg->size;

                DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
                    base, size, prot);

                if ((base + size) >= H_VMALLOC_START) {
                        pr_warn("Outside the supported range\n");
                        continue;
                }

                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                prot, mmu_linear_psize, mmu_kernel_ssize));
        }
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM. We also cope with the
         * case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), prot,
                                         mmu_linear_psize, mmu_kernel_ssize));
        }

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void __init hash__early_init_devtree(void)
{
        /* Initialize segment sizes */
        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);

        /* Initialize page sizes */
        htab_scan_page_sizes();
}

static struct hash_mm_context init_hash_mm_context;
void __init hash__early_init_mmu(void)
{
#ifndef CONFIG_PPC_64K_PAGES
        /*
         * We have code in __hash_page_4K() and elsewhere, which assumes it can
         * do the following:
         *   new_pte |= (slot << H_PAGE_F_GIX_SHIFT) & (H_PAGE_F_SECOND | H_PAGE_F_GIX);
         *
         * Where the slot number is between 0-15, and values of 8-15 indicate
         * the secondary bucket. For that code to work H_PAGE_F_SECOND and
         * H_PAGE_F_GIX must occupy four contiguous bits in the PTE, and
         * H_PAGE_F_SECOND must be placed above H_PAGE_F_GIX. Assert that here
         * with a BUILD_BUG_ON().
         */
        BUILD_BUG_ON(H_PAGE_F_SECOND != (1ul << (H_PAGE_F_GIX_SHIFT + 3)));
#endif /* CONFIG_PPC_64K_PAGES */

        htab_init_page_sizes();

        /*
         * initialize page table size
         */
        __pte_frag_nr = H_PTE_FRAG_NR;
        __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
        __pmd_frag_nr = H_PMD_FRAG_NR;
        __pmd_frag_size_shift = H_PMD_FRAG_SIZE_SHIFT;

        __pte_index_size = H_PTE_INDEX_SIZE;
        __pmd_index_size = H_PMD_INDEX_SIZE;
        __pud_index_size = H_PUD_INDEX_SIZE;
        __pgd_index_size = H_PGD_INDEX_SIZE;
        __pud_cache_index = H_PUD_CACHE_INDEX;
        __pte_table_size = H_PTE_TABLE_SIZE;
        __pmd_table_size = H_PMD_TABLE_SIZE;
        __pud_table_size = H_PUD_TABLE_SIZE;
        __pgd_table_size = H_PGD_TABLE_SIZE;
        /*
         * 4k uses the hugepd format, so for hash set these to
         * zero
         */
        __pmd_val_bits = HASH_PMD_VAL_BITS;
        __pud_val_bits = HASH_PUD_VAL_BITS;
        __pgd_val_bits = HASH_PGD_VAL_BITS;

        __kernel_virt_start = H_KERN_VIRT_START;
        __vmalloc_start = H_VMALLOC_START;
        __vmalloc_end = H_VMALLOC_END;
        __kernel_io_start = H_KERN_IO_START;
        __kernel_io_end = H_KERN_IO_END;
        vmemmap = (struct page *)H_VMEMMAP_START;
        ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
        pci_io_base = ISA_IO_BASE;
#endif

        /* Select appropriate backend */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1))
                ps3_early_mm_init();
        else if (firmware_has_feature(FW_FEATURE_LPAR))
                hpte_init_pseries();
        else if (IS_ENABLED(CONFIG_PPC_NATIVE))
                hpte_init_native();

        if (!mmu_hash_ops.hpte_insert)
                panic("hash__early_init_mmu: No MMU hash ops defined!\n");

        /*
         * Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before SLB initialization as this is
         * currently where the page size encoding is obtained.
         */
        htab_initialize();

        init_mm.context.hash_context = &init_hash_mm_context;
        mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);

        pr_info("Initializing hash mmu with SLB\n");
        /* Initialize SLB management */
        slb_initialize();

        if (cpu_has_feature(CPU_FTR_ARCH_206)
                        && cpu_has_feature(CPU_FTR_HVMODE))
                tlbiel_all();
}

#ifdef CONFIG_SMP
void hash__early_init_mmu_secondary(void)
{
        /* Initialize hash table for that CPU */
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {

                if (!cpu_has_feature(CPU_FTR_ARCH_300))
                        mtspr(SPRN_SDR1, _SDR1);
                else
                        set_ptcr_when_no_uv(__pa(partition_tb) |
                                            (PATB_SIZE_SHIFT - 12));
        }
        /* Initialize SLB */
        slb_initialize();

        if (cpu_has_feature(CPU_FTR_ARCH_206)
                        && cpu_has_feature(CPU_FTR_HVMODE))
                tlbiel_all();
}
#endif /* CONFIG_SMP */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}

#ifdef CONFIG_PPC_MM_SLICES
static unsigned int get_paca_psize(unsigned long addr)
{
        unsigned char *psizes;
        unsigned long index, mask_index;

        if (addr < SLICE_LOW_TOP) {
                psizes = get_paca()->mm_ctx_low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
                psizes = get_paca()->mm_ctx_high_slices_psize;
                index = GET_HIGH_SLICE_INDEX(addr);
        }
        mask_index = index & 0x1;
        return (psizes[index >> 1] >> (mask_index * 4)) & 0xF;
}
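
/*
 * Illustrative note: the slice psize arrays pack one 4-bit page size
 * index per slice, two slices per byte. For, say, low-slice index 5,
 * the value lives in the high nibble of psizes[2] (mask_index == 1),
 * which is exactly what the shift-and-mask above extracts.
 */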

#else
unsigned int get_paca_psize(unsigned long addr)
{
        return get_paca()->mm_ctx_user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
        if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
                return;
        slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
        copro_flush_all_slbs(mm);
        if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {

                copy_mm_to_paca(mm);
                slb_flush_and_restore_bolted();
        }
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_WRITE: read-only,
 * _PAGE_RWX: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
        struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
        u32 spp = 0;
        u32 **sbpm, *sbpp;

        if (!spt)
                return 0;

        if (ea >= spt->maxaddr)
                return 0;
        if (ea < 0x100000000UL) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
                if (!sbpm)
                        return 0;
        }
        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
        if (!sbpp)
                return 0;
        spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

        /* extract 2-bit bitfield for this 4k subpage */
        spp >>= 30 - 2 * ((ea >> 12) & 0xf);

        /*
         * 0 -> full permission
         * 1 -> Read only
         * 2 -> no access.
         * We return the flags that need to be cleared.
         */
        spp = ((spp & 2) ? _PAGE_RWX : 0) | ((spp & 1) ? _PAGE_WRITE : 0);
        return spp;
}
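
/*
 * Worked example (illustrative): the 16 subpage codes of one 64K page
 * are packed two bits each into a 32-bit word, highest bits first.
 * For subpage 0 ((ea >> 12) & 0xf == 0) the shift is 30, selecting the
 * top two bits; for subpage 15 it is 0, selecting the bottom two.
 * A code of 1 yields _PAGE_WRITE (read-only); any code with bit 1 set
 * also yields _PAGE_RWX (no access).
 */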

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
        return 0;
}
#endif

void hash_failure_debug(unsigned long ea, unsigned long access,
                        unsigned long vsid, unsigned long trap,
                        int ssize, int psize, int lpsize, unsigned long pte)
{
        if (!printk_ratelimit())
                return;
        pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
                ea, access, current->comm);
        pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
                trap, vsid, ssize, psize, lpsize, pte);
}

static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
                             int psize, bool user_region)
{
        if (user_region) {
                if (psize != get_paca_psize(ea)) {
                        copy_mm_to_paca(mm);
                        slb_flush_and_restore_bolted();
                }
        } else if (get_paca()->vmalloc_sllp !=
                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                get_paca()->vmalloc_sllp =
                        mmu_psize_defs[mmu_vmalloc_psize].sllp;
                slb_vmalloc_update();
        }
}

/*
 * Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                 unsigned long access, unsigned long trap,
                 unsigned long flags)
{
        bool is_thp;
        enum ctx_state prev_state = exception_enter();
        pgd_t *pgdir;
        unsigned long vsid;
        pte_t *ptep;
        unsigned hugeshift;
        int rc, user_region = 0;
        int psize, ssize;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                ea, access, trap);
        trace_hash_fault(ea, access, trap);

        /* Get region & vsid */
        switch (get_region_id(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                if (!mm) {
                        DBG_LOW(" user region with no mm !\n");
                        rc = 1;
                        goto bail;
                }
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
                vsid = get_user_vsid(&mm->context, ea, ssize);
                break;
        case VMALLOC_REGION_ID:
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                psize = mmu_vmalloc_psize;
                ssize = mmu_kernel_ssize;
                break;

        case IO_REGION_ID:
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                psize = mmu_io_psize;
                ssize = mmu_kernel_ssize;
                break;
        default:
                /*
                 * Not a valid range
                 * Send the problem up to do_page_fault()
                 */
                rc = 1;
                goto bail;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Bad address. */
        if (!vsid) {
                DBG_LOW("Bad address!\n");
                rc = 1;
                goto bail;
        }
        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL) {
                rc = 1;
                goto bail;
        }

        /* Check CPU locality */
        if (user_region && mm_is_thread_local(mm))
                flags |= HPTE_LOCAL_UPDATE;

#ifndef CONFIG_PPC_64K_PAGES
        /*
         * If we use 4K pages and our psize is not 4K, then we might
         * be hitting a special driver mapping, and need to align the
         * address before we fetch the PTE.
         *
         * It could also be a hugepage mapping, in which case this is
         * not necessary, but it's not harmful, either.
         */
        if (psize != MMU_PAGE_4K)
                ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get PTE and page size from page tables */
        ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                rc = 1;
                goto bail;
        }

        /*
         * Add _PAGE_PRESENT to the required access perm. If there are parallel
         * updates to the pte that can possibly clear _PAGE_PTE, catch that too.
         *
         * We can safely use the returned pte address in the rest of the
         * function because we do set H_PAGE_BUSY, which prevents further
         * updates to the pte from generic code.
         */
        access |= _PAGE_PRESENT | _PAGE_PTE;

        /*
         * Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path
         */
        if (!check_pte_access(access, pte_val(*ptep))) {
                DBG_LOW(" no access !\n");
                rc = 1;
                goto bail;
        }

        if (hugeshift) {
                if (is_thp)
                        rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
                                             trap, flags, ssize, psize);
#ifdef CONFIG_HUGETLB_PAGE
                else
                        rc = __hash_page_huge(ea, access, vsid, ptep, trap,
                                              flags, ssize, hugeshift, psize);
#else
                else {
                        /*
                         * if we have hugeshift, and is not transhuge with
                         * hugetlb disabled, something is really wrong.
                         */
                        rc = 1;
                        WARN_ON(1);
                }
#endif
                if (current->mm == mm)
                        check_paca_psize(ea, mm, psize, user_region);

                goto bail;
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
        /* If H_PAGE_4K_PFN is set, make sure this is a 4k segment */
        if ((pte_val(*ptep) & H_PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }

        /*
         * If this PTE is non-cacheable and we have restrictions on
         * using non-cacheable large pages, then we switch to 4k
         */
        if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
                if (user_region) {
                        demote_segment_4k(mm, ea);
                        psize = MMU_PAGE_4K;
                } else if (ea < VMALLOC_END) {
                        /*
                         * some driver did a non-cacheable mapping
                         * in vmalloc space, so switch vmalloc
                         * to 4k pages
                         */
                        printk(KERN_ALERT "Reducing vmalloc segment "
                               "to 4kB pages because of "
                               "non-cacheable mapping\n");
                        psize = mmu_vmalloc_psize = MMU_PAGE_4K;
                        copro_flush_all_slbs(mm);
                }
        }

#endif /* CONFIG_PPC_64K_PAGES */

        if (current->mm == mm)
                check_paca_psize(ea, mm, psize, user_region);

#ifdef CONFIG_PPC_64K_PAGES
        if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap,
                                     flags, ssize);
        else
#endif /* CONFIG_PPC_64K_PAGES */
        {
                int spp = subpage_protection(mm, ea);
                if (access & spp)
                        rc = -2;
                else
                        rc = __hash_page_4K(ea, access, vsid, ptep, trap,
                                            flags, ssize, spp);
        }

        /*
         * Dump some info in case of hash insertion failure, they should
         * never happen so it is really useful to know if/when they do
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize, psize,
                                   psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);

bail:
        exception_exit(prev_state);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page_mm);

int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
              unsigned long dsisr)
{
        unsigned long flags = 0;
        struct mm_struct *mm = current->mm;

        if ((get_region_id(ea) == VMALLOC_REGION_ID) ||
            (get_region_id(ea) == IO_REGION_ID))
                mm = &init_mm;

        if (dsisr & DSISR_NOHPTE)
                flags |= HPTE_NOHPTE_UPDATE;

        return hash_page_mm(mm, ea, access, trap, flags);
}
EXPORT_SYMBOL_GPL(hash_page);

int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
                unsigned long msr)
{
        unsigned long access = _PAGE_PRESENT | _PAGE_READ;
        unsigned long flags = 0;
        struct mm_struct *mm = current->mm;
        unsigned int region_id = get_region_id(ea);

        if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
                mm = &init_mm;

        if (dsisr & DSISR_NOHPTE)
                flags |= HPTE_NOHPTE_UPDATE;

        if (dsisr & DSISR_ISSTORE)
                access |= _PAGE_WRITE;
        /*
         * We set _PAGE_PRIVILEGED only when
         * kernel mode accesses kernel space.
         *
         * _PAGE_PRIVILEGED is NOT set
         * 1) when kernel mode accesses user space
         * 2) when user space accesses kernel space.
         */
        access |= _PAGE_PRIVILEGED;
        if ((msr & MSR_PR) || (region_id == USER_REGION_ID))
                access &= ~_PAGE_PRIVILEGED;

        if (trap == 0x400)
                access |= _PAGE_EXEC;

        return hash_page_mm(mm, ea, access, trap, flags);
}

#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
        int psize = get_slice_psize(mm, ea);

        /* We only prefault standard pages for now */
        if (unlikely(psize != mm_ctx_user_psize(&mm->context)))
                return false;

        /*
         * Don't prefault if subpage protection is enabled for the EA.
         */
        if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea)))
                return false;

        return true;
}
#else
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
        return true;
}
#endif

static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
                         bool is_exec, unsigned long trap)
{
        unsigned long vsid;
        pgd_t *pgdir;
        int rc, ssize, update_flags = 0;
        unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);

        BUG_ON(get_region_id(ea) != USER_REGION_ID);

        if (!should_hash_preload(mm, ea))
                return;

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx\n", mm, mm->pgd, ea, access, trap);

        /* Get Linux PTE if available */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;

        /* Get VSID */
        ssize = user_segment_size(ea);
        vsid = get_user_vsid(&mm->context, ea, ssize);
        if (!vsid)
                return;

#ifdef CONFIG_PPC_64K_PAGES
        /* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here.
         * Called with PTL held, hence can be sure the value won't change in
         * between.
         */
        if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
                return;
#endif /* CONFIG_PPC_64K_PAGES */

        /* Is that local to this CPU ? */
        if (mm_is_thread_local(mm))
                update_flags |= HPTE_LOCAL_UPDATE;

        /* Hash it in */
#ifdef CONFIG_PPC_64K_PAGES
        if (mm_ctx_user_psize(&mm->context) == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap,
                                     update_flags, ssize);
        else
#endif /* CONFIG_PPC_64K_PAGES */
                rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
                                    ssize, subpage_protection(mm, ea));

        /*
         * Dump some info in case of hash insertion failure, they should
         * never happen so it is really useful to know if/when they do
         */
        if (rc == -1)
                hash_failure_debug(ea, access, vsid, trap, ssize,
                                   mm_ctx_user_psize(&mm->context),
                                   mm_ctx_user_psize(&mm->context),
                                   pte_val(*ptep));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
        /*
         * We don't need to worry about _PAGE_PRESENT here because we are
         * called with either mm->page_table_lock held or ptl lock held
         */
        unsigned long trap;
        bool is_exec;

        if (radix_enabled())
                return;

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /*
         * We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */

        trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
        switch (trap) {
        case 0x300:
                is_exec = false;
                break;
        case 0x400:
                is_exec = true;
                break;
        default:
                return;
        }

        hash_preload(vma->vm_mm, ptep, address, is_exec, trap);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_flush_hash_page(int local)
{
        /*
         * Transactions are not aborted by tlbiel, only tlbie. Without, syncing a
         * page back to a block device w/PIO could pick up transactional data
         * (bad!) so we force an abort here. Before the sync the page will be
         * made read-only, which will flush_hash_page. BIG ISSUE here: if the
         * kernel uses a page from userspace without unmapping it first, it may
         * see the speculated version.
         */
        if (local && cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
            MSR_TM_ACTIVE(current->thread.regs->msr)) {
                tm_enable();
                tm_abort(TM_CAUSE_TLBI);
        }
}
#else
static inline void tm_flush_hash_page(int local)
{
}
#endif

/*
 * Return the global hash slot, corresponding to the given PTE, which contains
 * the HPTE.
 */
unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
                int ssize, real_pte_t rpte, unsigned int subpg_index)
{
        unsigned long hash, gslot, hidx;

        hash = hpt_hash(vpn, shift, ssize);
        hidx = __rpte_to_hidx(rpte, subpg_index);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        gslot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        gslot += hidx & _PTEIDX_GROUP_IX;
        return gslot;
}
1708
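/*
 * A minimal userspace sketch (not kernel code) of the slot arithmetic in
 * pte_get_hash_gslot() above, using made-up values: the hash, hidx and
 * htab_hash_mask below are assumptions for illustration only. The
 * _PTEIDX_SECONDARY bit of hidx selects the secondary bucket (~hash) and
 * the low bits pick the entry within the 8-slot group.
 */
#include <stdio.h>

#define HPTES_PER_GROUP		8
#define PTEIDX_SECONDARY	0x8UL
#define PTEIDX_GROUP_IX		0x7UL

int main(void)
{
	unsigned long htab_hash_mask = 0x7ffffUL;	/* assumed: 512K groups */
	unsigned long hash = 0x12345UL;			/* assumed hash value */
	unsigned long hidx = PTEIDX_SECONDARY | 0x3UL;	/* secondary, index 3 */
	unsigned long gslot;

	if (hidx & PTEIDX_SECONDARY)
		hash = ~hash;				/* use the secondary bucket */
	gslot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	gslot += hidx & PTEIDX_GROUP_IX;
	printf("gslot = 0x%lx\n", gslot);
	return 0;
}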
47d99948
CL
1709/*
1710	 * WARNING: This is called from hash_low_64.S; if you change this prototype,
f6ab0b92
BH
1711	 * do not forget to update the assembly call site!
1712 */
5524a27d 1713void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
aefa5688 1714 unsigned long flags)
3c726f8d 1715{
a8548686 1716 unsigned long index, shift, gslot;
aefa5688 1717 int local = flags & HPTE_LOCAL_UPDATE;
3c726f8d 1718
5524a27d
AK
1719 DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
1720 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
a8548686
RP
1721 gslot = pte_get_hash_gslot(vpn, shift, ssize, pte, index);
1722 DBG_LOW(" sub %ld: gslot=%lx\n", index, gslot);
db3d8534
AK
1723 /*
1724	 * We use the same base page size and actual psize because we don't
1725	 * use these functions for hugepages.
1726 */
a8548686 1727 mmu_hash_ops.hpte_invalidate(gslot, vpn, psize, psize,
7025776e 1728 ssize, local);
3c726f8d 1729 } pte_iterate_hashed_end();
bc2a9408 1730
f1a55ce0 1731 tm_flush_hash_page(local);
1da177e4
LT
1732}
1733
f1581bf1
AK
1734#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1735void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
aefa5688
AK
1736 pmd_t *pmdp, unsigned int psize, int ssize,
1737 unsigned long flags)
f1581bf1
AK
1738{
1739 int i, max_hpte_count, valid;
1740 unsigned long s_addr;
1741 unsigned char *hpte_slot_array;
1742 unsigned long hidx, shift, vpn, hash, slot;
aefa5688 1743 int local = flags & HPTE_LOCAL_UPDATE;
f1581bf1
AK
1744
1745 s_addr = addr & HPAGE_PMD_MASK;
1746 hpte_slot_array = get_hpte_slot_array(pmdp);
1747 /*
1748	 * If we try to do a huge PTE update after a withdraw is done, we
1749	 * will find hpte_slot_array below to be NULL. This happens when we
9ef258ba 1750	 * do a split_huge_pmd.
f1581bf1
AK
1751 */
1752 if (!hpte_slot_array)
1753 return;
1754
7025776e
BH
1755 if (mmu_hash_ops.hugepage_invalidate) {
1756 mmu_hash_ops.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
1757 psize, ssize, local);
d557b098
AK
1758 goto tm_abort;
1759 }
f1581bf1
AK
1760 /*
1761	 * No bulk HPTE removal support; invalidate each entry.
1762 */
1763 shift = mmu_psize_defs[psize].shift;
1764 max_hpte_count = HPAGE_PMD_SIZE >> shift;
1765 for (i = 0; i < max_hpte_count; i++) {
1766 /*
1767	 * 8 bits per hpte entry:
1768 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
1769 */
1770 valid = hpte_valid(hpte_slot_array, i);
1771 if (!valid)
1772 continue;
1773 hidx = hpte_hash_index(hpte_slot_array, i);
1774
1775 /* get the vpn */
1776 addr = s_addr + (i * (1ul << shift));
1777 vpn = hpt_vpn(addr, vsid, ssize);
1778 hash = hpt_hash(vpn, shift, ssize);
1779 if (hidx & _PTEIDX_SECONDARY)
1780 hash = ~hash;
1781
1782 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1783 slot += hidx & _PTEIDX_GROUP_IX;
7025776e
BH
1784 mmu_hash_ops.hpte_invalidate(slot, vpn, psize,
1785 MMU_PAGE_16M, ssize, local);
d557b098
AK
1786 }
1787tm_abort:
f1a55ce0 1788 tm_flush_hash_page(local);
f1581bf1
AK
1789}
1790#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1791
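/*
 * A minimal userspace sketch of the hpte_slot_array encoding walked by the
 * loop above: bit 0 is the valid bit and the next four bits carry hidx,
 * whose top bit selects the secondary hash group. The helper names and the
 * sample entry value are made up for illustration; the kernel's real
 * accessors are hpte_valid() and hpte_hash_index().
 */
#include <stdio.h>

static int slot_valid(unsigned char entry)
{
	return entry & 0x1;			/* valid bit */
}

static unsigned int slot_hidx(unsigned char entry)
{
	return (entry >> 1) & 0xf;		/* secondary bit + 3-bit index */
}

int main(void)
{
	unsigned char entry = 0x17;		/* assumed: valid, hidx = 0xb */

	if (slot_valid(entry))
		printf("hidx = 0x%x (secondary: %u, group index: %u)\n",
		       slot_hidx(entry), (slot_hidx(entry) >> 3) & 1,
		       slot_hidx(entry) & 0x7);
	return 0;
}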
61b1a942 1792void flush_hash_range(unsigned long number, int local)
1da177e4 1793{
7025776e
BH
1794 if (mmu_hash_ops.flush_hash_range)
1795 mmu_hash_ops.flush_hash_range(number, local);
3c726f8d 1796 else {
1da177e4 1797 int i;
61b1a942 1798 struct ppc64_tlb_batch *batch =
69111bac 1799 this_cpu_ptr(&ppc64_tlb_batch);
1da177e4
LT
1800
1801 for (i = 0; i < number; i++)
5524a27d 1802 flush_hash_page(batch->vpn[i], batch->pte[i],
1189be65 1803 batch->psize, batch->ssize, local);
1da177e4
LT
1804 }
1805}
1806
1da177e4
LT
1807/*
1808	 * low_hash_fault is called when the low-level hash code failed
1809	 * to insert a PTE due to a hypervisor error.
1810 */
fa28237c 1811void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
1da177e4 1812{
ba12eede
LZ
1813 enum ctx_state prev_state = exception_enter();
1814
1da177e4 1815 if (user_mode(regs)) {
fa28237c
PM
1816#ifdef CONFIG_PPC_SUBPAGE_PROT
1817 if (rc == -2)
1818 _exception(SIGSEGV, regs, SEGV_ACCERR, address);
1819 else
1820#endif
1821 _exception(SIGBUS, regs, BUS_ADRERR, address);
1822 } else
1823 bad_page_fault(regs, address, SIGBUS);
ba12eede
LZ
1824
1825 exception_exit(prev_state);
1da177e4 1826}
370a908d 1827
b170bd3d
LZ
1828long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
1829 unsigned long pa, unsigned long rflags,
1830 unsigned long vflags, int psize, int ssize)
1831{
1832 unsigned long hpte_group;
1833 long slot;
1834
1835repeat:
1531cff4 1836 hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
b170bd3d
LZ
1837
1838 /* Insert into the hash table, primary slot */
7025776e
BH
1839 slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
1840 psize, psize, ssize);
b170bd3d
LZ
1841
1842 /* Primary is full, try the secondary */
1843 if (unlikely(slot == -1)) {
1531cff4 1844 hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
7025776e
BH
1845 slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
1846 vflags | HPTE_V_SECONDARY,
1847 psize, psize, ssize);
b170bd3d
LZ
1848 if (slot == -1) {
1849 if (mftb() & 0x1)
1531cff4
AK
1850 hpte_group = (hash & htab_hash_mask) *
1851 HPTES_PER_GROUP;
b170bd3d 1852
7025776e 1853 mmu_hash_ops.hpte_remove(hpte_group);
b170bd3d
LZ
1854 goto repeat;
1855 }
1856 }
1857
1858 return slot;
1859}
1860
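/*
 * Design note on the retry path above: when both the primary and the
 * secondary group are full, an entry has to be evicted before retrying.
 * The low bit of the timebase (mftb() & 0x1) serves as a cheap coin flip
 * to decide whether the eviction happens in the primary or the secondary
 * group, spreading evictions across both buckets.
 */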
370a908d
BH
1861#ifdef CONFIG_DEBUG_PAGEALLOC
1862static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
1863{
016af59f 1864 unsigned long hash;
1189be65 1865 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
5524a27d 1866 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
09f3f326 1867 unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
016af59f 1868 long ret;
370a908d 1869
5524a27d 1870 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
370a908d 1871
c60ac569
AK
1872	/* Don't create HPTE entries for a bad address */
1873 if (!vsid)
1874 return;
016af59f
LZ
1875
1876 ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
1877 HPTE_V_BOLTED,
1878 mmu_linear_psize, mmu_kernel_ssize);
1879
370a908d
BH
1880	BUG_ON(ret < 0);
1881 spin_lock(&linear_map_hash_lock);
1882 BUG_ON(linear_map_hash_slots[lmi] & 0x80);
1883 linear_map_hash_slots[lmi] = ret | 0x80;
1884 spin_unlock(&linear_map_hash_lock);
1885}
1886
1887static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1888{
1189be65
PM
1889 unsigned long hash, hidx, slot;
1890 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
5524a27d 1891 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
370a908d 1892
5524a27d 1893 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
370a908d
BH
1894 spin_lock(&linear_map_hash_lock);
1895 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
1896 hidx = linear_map_hash_slots[lmi] & 0x7f;
1897 linear_map_hash_slots[lmi] = 0;
1898 spin_unlock(&linear_map_hash_lock);
1899 if (hidx & _PTEIDX_SECONDARY)
1900 hash = ~hash;
1901 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1902 slot += hidx & _PTEIDX_GROUP_IX;
7025776e
BH
1903 mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
1904 mmu_linear_psize,
1905 mmu_kernel_ssize, 0);
370a908d
BH
1906}
1907
031bc574 1908void __kernel_map_pages(struct page *page, int numpages, int enable)
370a908d
BH
1909{
1910 unsigned long flags, vaddr, lmi;
1911 int i;
1912
1913 local_irq_save(flags);
1914 for (i = 0; i < numpages; i++, page++) {
1915 vaddr = (unsigned long)page_address(page);
1916 lmi = __pa(vaddr) >> PAGE_SHIFT;
1917 if (lmi >= linear_map_hash_count)
1918 continue;
1919 if (enable)
1920 kernel_map_linear_page(vaddr, lmi);
1921 else
1922 kernel_unmap_linear_page(vaddr, lmi);
1923 }
1924 local_irq_restore(flags);
1925}
1926#endif /* CONFIG_DEBUG_PAGEALLOC */
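/*
 * A minimal userspace sketch of the per-page bookkeeping used by the
 * DEBUG_PAGEALLOC code above: each linear_map_hash_slots[] byte holds
 * 0x80 ("currently mapped") OR'd with the low 7 bits of the slot returned
 * by hpte_insert_repeating(); unmapping checks the flag, recovers the
 * slot index and clears the byte. The slot value below is made up.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned char slots[1] = { 0 };
	long ret = 0x25;			/* assumed insert result */
	unsigned long hidx;

	slots[0] = (ret & 0x7f) | 0x80;		/* map: record slot + flag */
	assert(slots[0] & 0x80);		/* unmap: must be mapped */
	hidx = slots[0] & 0x7f;			/* recover the slot index */
	slots[0] = 0;				/* now unmapped */
	printf("hidx = 0x%lx\n", hidx);
	return 0;
}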
cd3db0c4 1927
756d08d1 1928void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
cd3db0c4
BH
1929 phys_addr_t first_memblock_size)
1930{
47d99948
CL
1931 /*
1932	 * We don't currently support the first MEMBLOCK not mapping physical
cd3db0c4
BH
1933	 * address 0 on these processors.
1934 */
1935 BUG_ON(first_memblock_base != 0);
1936
1513c33d
NP
1937 /*
1938	 * On virtualized systems the first entry is our RMA region, aka VRMA;
1939	 * non-virtualized 64-bit hash MMU systems don't have a limitation
1940	 * on real mode access.
1941 *
c610d65c
NP
1942	 * For guests on platforms before POWER9, we clamp the limit to 1G
1943 * to avoid some funky things such as RTAS bugs etc...
da0ef933
SJS
1944 *
1945 * On POWER9 we limit to 1TB in case the host erroneously told us that
1946 * the RMA was >1TB. Effective address bits 0:23 are treated as zero
1947 * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
1948 * for virtual real mode addressing and so it doesn't make sense to
1949 * have an area larger than 1TB as it can't be addressed.
cd3db0c4 1950 */
1513c33d 1951 if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
c610d65c
NP
1952 ppc64_rma_size = first_memblock_size;
1953 if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
1954 ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
da0ef933
SJS
1955 else
1956 ppc64_rma_size = min_t(u64, ppc64_rma_size,
1957 1UL << SID_SHIFT_1T);
cd3db0c4 1958
1513c33d
NP
1959 /* Finally limit subsequent allocations */
1960 memblock_set_current_limit(ppc64_rma_size);
1961 } else {
1962 ppc64_rma_size = ULONG_MAX;
1963 }
cd3db0c4 1964}
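/*
 * A minimal userspace sketch of the guest-side clamping above (the
 * !CPU_FTR_HVMODE branch): pre-POWER9 the RMA is capped at 1G, POWER9
 * and later at 1T (1UL << SID_SHIFT_1T, i.e. 1UL << 40). The
 * first_memblock_size value below is an assumption for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long first_memblock_size = 0x100000000UL;	/* assumed 4G */
	unsigned long rma_pre_p9 = first_memblock_size;
	unsigned long rma_p9 = first_memblock_size;

	if (rma_pre_p9 > 0x40000000UL)		/* clamp to 1G */
		rma_pre_p9 = 0x40000000UL;
	if (rma_p9 > (1UL << 40))		/* clamp to 1T */
		rma_p9 = 1UL << 40;

	printf("pre-POWER9 RMA: 0x%lx, POWER9 RMA: 0x%lx\n",
	       rma_pre_p9, rma_p9);
	return 0;
}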
dbcf929c
DG
1965
1966#ifdef CONFIG_DEBUG_FS
1967
1968static int hpt_order_get(void *data, u64 *val)
1969{
1970 *val = ppc64_pft_size;
1971 return 0;
1972}
1973
1974static int hpt_order_set(void *data, u64 val)
1975{
c784be43
GS
1976 int ret;
1977
dbcf929c
DG
1978 if (!mmu_hash_ops.resize_hpt)
1979 return -ENODEV;
1980
c784be43
GS
1981 cpus_read_lock();
1982 ret = mmu_hash_ops.resize_hpt(val);
1983 cpus_read_unlock();
1984
1985 return ret;
dbcf929c
DG
1986}
1987
7cd4774f 1988DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");
dbcf929c
DG
1989
1990static int __init hash64_debugfs(void)
1991{
08f6a797
GKH
1992 debugfs_create_file("hpt_order", 0600, powerpc_debugfs_root, NULL,
1993 &fops_hpt_order);
dbcf929c
DG
1994 return 0;
1995}
1996machine_device_initcall(pseries, hash64_debugfs);
dbcf929c 1997#endif /* CONFIG_DEBUG_FS */
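/*
 * Usage sketch for the hpt_order file created above, assuming debugfs is
 * mounted at /sys/kernel/debug and powerpc_debugfs_root is the usual
 * "powerpc" directory. The value is the log2 of the hash table size in
 * bytes, so writing 27 asks for a 128MB HPT (the write only succeeds when
 * the platform provides mmu_hash_ops.resize_hpt):
 *
 *	# cat /sys/kernel/debug/powerpc/hpt_order
 *	# echo 27 > /sys/kernel/debug/powerpc/hpt_order
 */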
e4dccf90
CL
1998
1999void __init print_system_hash_info(void)
2000{
2001 pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
2002
2003 if (htab_hash_mask)
2004 pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
e4dccf90 2005}