/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

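/*
 * Set up the process table in partition table entry 0: patb1 carries the
 * process table base, its size encoding and PATB_GR (radix) while the
 * existing first doubleword is preserved. Used on bare metal, where the
 * kernel owns the partition table.
 */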
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

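/*
 * Boot-time page table allocator: grab a naturally aligned block straight
 * from memblock and zero it, for use before the slab allocator is up.
 */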
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

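/*
 * Map one kernel page of the given size (PAGE_SIZE, PMD_SIZE or PUD_SIZE)
 * at effective address 'ea', backed by physical address 'pa'. Intermediate
 * levels come from the slab when available, else from early_alloc_pgtable().
 */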
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max address
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}

#ifdef CONFIG_STRICT_KERNEL_RWX
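/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every page
 * in [start, end), stopping at whichever level actually maps each address,
 * then flush the kernel TLB for the range.
 */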
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	/*
	 * mark_rodata_ro() will mark itself as !writable at some point.
	 * Due to DD1 workaround in radix__pte_update(), we'll end up with
	 * an invalid pte and the system will crash quite severely.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
		return;
	}

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
}

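/*
 * Map a physical range into the linear mapping using the largest page size
 * possible at each step: 1G when alignment, remaining gap and MMU support
 * allow, then 2M, else base pages. With STRICT_KERNEL_RWX, mappings that
 * would span the kernel text boundary are retried with smaller sizes so
 * text permissions can differ from data.
 */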
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

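/*
 * Boot-time radix setup for the host: create the linear mapping for all
 * of memory, size the PID space, then allocate the process table and hook
 * it up via register_process_table().
 */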
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * allows us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}

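/*
 * Bare metal only: initialize the partition table and point entry 0 at the
 * kernel's radix tree, with PATB_HR selecting host radix translation.
 */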
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

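/*
 * Translate a page size shift from the device-tree (log2 of the size in
 * bytes) into an MMU_PAGE_* array index.
 */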
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * let's assume we have page 4k and 64k support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

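/*
 * POWER9 DD1 boot path: flush all cached translations for both process-
 * and partition-scoped lookups (ric = 2, is = 3), then set the radix mode
 * bit in HID0 and spin until the hardware reflects it.
 */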
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);

	/*
	 * now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

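/*
 * Main boot-time radix setup: pick page sizes, point the generic pgtable
 * geometry and kernel virtual layout variables at their radix values, and
 * initialize the partition/process tables (directly on bare metal, via
 * radix_init_pseries() under an LPAR).
 */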
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}

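/*
 * Undo the MMU register setup on bare metal: disable process-table
 * translation (LPCR[UPRT]), clear the partition table pointer for both
 * the core and nest MMU, and flush the TLB.
 */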
void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On radix config we really don't have a limitation
	 * on real mode access. But keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
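/*
 * The free_*_table() helpers drop a lower-level table and clear the entry
 * above it, but only once every slot in that table has become none.
 */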
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

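/*
 * Unmap [start, end) from the kernel page tables under init_mm's
 * page_table_lock: huge entries are cleared in place, otherwise we recurse
 * a level down, freeing tables that become empty, and flush the TLB once
 * at the end.
 */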
static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */