/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

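/*
 * Set the second doubleword (process-table pointer) of partition-table
 * entry 0: process-table base, size encoding, and PATB_GR to mark the
 * process table as radix. pg_sz is unused in the native case.
 */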
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb1 = base | table_size | PATB_GR;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

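/*
 * Early (boot-time) page table allocator: grab a naturally aligned,
 * zeroed block from memblock, for use before slab is available.
 */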
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

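/*
 * Install a kernel mapping for 'ea' -> 'pa' at the level implied by
 * map_page_size (PUD_SIZE, PMD_SIZE or PAGE_SIZE), allocating
 * intermediate levels from slab when available, else from memblock.
 */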
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}

#ifdef CONFIG_STRICT_KERNEL_RWX
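/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every
 * mapping in [start, end), whatever its page size, then flush the
 * kernel TLB for the range.
 */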
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}

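/*
 * Map a physical range into the kernel linear mapping, preferring the
 * largest page size (1G, then 2M, then base page size) that alignment,
 * remaining length and the MMU's supported page sizes allow. With
 * STRICT_KERNEL_RWX the mapping around the kernel text is split so the
 * text can later be given different permissions.
 */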
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

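/*
 * Build the kernel's radix page tables: linear-map every memblock
 * region, allocate and fill in the process table, register it in the
 * partition table, and invalidate any stale TLB entries.
 */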
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support SLB with radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now.
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));
	/*
	 * Allocate the partition table and process table for the host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}

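/*
 * Allocate the partition table and point entry 0's first doubleword at
 * the kernel's radix tree, with PATB_HR marking host-radix mode.
 */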
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

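/* Map a page-size shift from the device tree to an MMU_PAGE_* index. */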
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

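/*
 * Flat device-tree callback: parse the supported radix page sizes and
 * their AP encodings from the cpu node's
 * "ibm,processor-radix-AP-encodings" property.
 */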
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* The top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree.
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have 4K and 64K page support.
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * Map vmemmap using 2M pages if available.
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

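/*
 * POWER9 DD1 workaround: invalidate both process- and partition-scoped
 * translations, then set the radix bit in HID0 and spin until the
 * change takes effect.
 */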
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);

	/*
	 * Now switch the HID.
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so
	 * that the hypervisor and guest can set up IAMR (Instruction
	 * Authority Mask Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

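/*
 * Boot-CPU MMU setup for radix: choose page sizes, publish the radix
 * table geometry via the __p*_index/_table_size variables, and (on bare
 * metal) set LPCR_UPRT/LPCR_HR and install the partition table.
 */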
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * Initialize page table sizes.
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size.
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * Update the partition table control register (PTCR) and enable
	 * UPRT/HR mode.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * With radix we really don't have any limitation on
	 * real-mode access, but keeping the clamp as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME: should
	 * we even limit at all?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
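/*
 * Free a lower-level table page and clear the entry that pointed to it,
 * but only once every slot in the table is empty (free_pte_table() for
 * a PTE page under a PMD, free_pmd_table() for a PMD page under a PUD).
 */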
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

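/*
 * The remove_*_table() helpers below clear present leaf entries at each
 * level for [addr, end), then free any page-table pages left empty.
 */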
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

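/*
 * Tear down the kernel mapping for [start, end): walk the PGD under
 * init_mm's page_table_lock, remove entries at every level and flush
 * the TLB for the whole range.
 */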
static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

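/*
 * Atomically clear and set the given bits in a huge-page PMD and emit a
 * trace event; callers must hold the page table lock (asserted under
 * DEBUG_VM).
 */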
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for a normal pmd.
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/* FIXME!! Verify whether we need this kick below */
	kick_all_cpus_sync();

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us, pgtable_t is pte_t *. In order to save the deposited page table,
 * we consider the allocated page table as a list head. On withdraw we need
 * to make sure we zero out the used list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_linux_pte_or_hugepte, which does a lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t, we want to prevent a transition from a pmd pointing to a page
	 * table to a pmd pointing to a huge page (and back) while interrupts
	 * are disabled. We clear the pmd to possibly replace it with a page
	 * table pointer in different code paths. So make sure we wait for the
	 * parallel find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix, 2M at the PMD level means THP */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */