/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>

#include <trace/events/thp.h>

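/*
 * On bare metal we can write the partition table directly. patb1 is the
 * second doubleword of partition table entry 0 and points at the process
 * table; patb0 (the radix tree root) is set up separately in
 * radix_init_partition_table().
 */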
static int native_update_partition_table(u64 patb1)
{
        partition_tb->patb1 = cpu_to_be64(patb1);
        return 0;
}

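/*
 * Boot-time page table allocator: returns zeroed memory from memblock,
 * aligned to its own size (align == size), since the radix table base
 * encodings assume naturally aligned tables.
 */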
static __ref void *early_alloc_pgtable(unsigned long size)
{
        void *pt;

        pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
        memset(pt, 0, size);

        return pt;
}

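/*
 * Install a kernel mapping of @ea -> @pa at the level implied by
 * @map_page_size: a leaf PTE at the PUD level for 1G, at the PMD level
 * for 2M, or at the PTE level otherwise. Once slab is up we use the
 * regular page table allocators; before that, intermediate levels are
 * carved out of memblock via early_alloc_pgtable().
 */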
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        /*
         * Make sure task size is correct as per the max addr
         */
        BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                if (map_page_size == PUD_SIZE) {
                        ptep = (pte_t *)pudp;
                        goto set_the_pte;
                }
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                if (map_page_size == PMD_SIZE) {
                        ptep = (pte_t *)pmdp;
                        goto set_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
        } else {
                pgdp = pgd_offset_k(ea);
                if (pgd_none(*pgdp)) {
                        pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
                        BUG_ON(pudp == NULL);
                        pgd_populate(&init_mm, pgdp, pudp);
                }
                pudp = pud_offset(pgdp, ea);
                if (map_page_size == PUD_SIZE) {
                        ptep = (pte_t *)pudp;
                        goto set_the_pte;
                }
                if (pud_none(*pudp)) {
                        pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
                        BUG_ON(pmdp == NULL);
                        pud_populate(&init_mm, pudp, pmdp);
                }
                pmdp = pmd_offset(pudp, ea);
                if (map_page_size == PMD_SIZE) {
                        ptep = (pte_t *)pmdp;
                        goto set_the_pte;
                }
                if (!pmd_present(*pmdp)) {
                        ptep = early_alloc_pgtable(PAGE_SIZE);
                        BUG_ON(ptep == NULL);
                        pmd_populate_kernel(&init_mm, pmdp, ptep);
                }
                ptep = pte_offset_kernel(pmdp, ea);
        }

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
        smp_wmb();
        return 0;
}

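/*
 * Build the kernel page tables: create the linear mapping of every
 * memblock region, preferring 1G and then 2M mappings where the
 * hardware supports them and the range is suitably aligned, then
 * allocate the process table and hook it into the partition table.
 */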
static void __init radix_init_pgtable(void)
{
        int loop_count;
        u64 base, end, start_addr;
        unsigned long rts_field;
        struct memblock_region *reg;
        unsigned long linear_page_size;

        /* We don't support slb for radix */
        mmu_slb_size = 0;
        /*
         * Create the linear mapping, using standard page size for now
         */
        loop_count = 0;
        for_each_memblock(memory, reg) {

                start_addr = reg->base;

redo:
                if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
                        linear_page_size = PUD_SIZE;
                else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
                        linear_page_size = PMD_SIZE;
                else
                        linear_page_size = PAGE_SIZE;

                base = _ALIGN_UP(start_addr, linear_page_size);
                end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);

                pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
                        (unsigned long)base, (unsigned long)end,
                        linear_page_size);

                while (base < end) {
                        radix__map_kernel_page((unsigned long)__va(base),
                                               base, PAGE_KERNEL_X,
                                               linear_page_size);
                        base += linear_page_size;
                }
                /*
                 * map the rest using lower page size
                 */
                if (end < reg->base + reg->size) {
                        start_addr = end;
                        loop_count++;
                        goto redo;
                }
        }
        /*
         * Allocate the process table for the host.
         */
        BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 23), "Process table size too large.");
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
        /*
         * Fill in the process table.
         * We support 52 bits, hence 52 - 28 = 24, 0b11000.
         */
        rts_field = 3ull << PPC_BITLSHIFT(2);
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
        /*
         * Fill in the partition table. We are supposed to use the effective
         * address of the process table here, but our linear mapping also
         * allows us to use the physical address.
         */
        ppc_md.update_partition_table(__pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR);
        pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
}

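/*
 * Allocate the partition table and point entry 0 at the kernel's radix
 * root. Only used on bare metal; under a hypervisor (LPAR) the
 * partition table is owned by the host.
 */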
static void __init radix_init_partition_table(void)
{
        unsigned long rts_field;
        /*
         * We support 52 bits, hence 52 - 28 = 24, 0b11000.
         */
        rts_field = 3ull << PPC_BITLSHIFT(2);

        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
        partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
        partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
                                          RADIX_PGD_INDEX_SIZE | PATB_HR);
        pr_info("Partition table %p\n", partition_tb);

        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
        /*
         * Update the partition table control register with the table
         * base and its size encoding (64K table).
         */
        mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}

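/*
 * Bare metal: partition table updates are plain stores, so install the
 * native hook.
 */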
void __init radix_init_native(void)
{
        ppc_md.update_partition_table = native_update_partition_table;
}

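/*
 * Translate a page size shift from the device tree into a Linux MMU
 * page size index (4K, 64K, 2M or 1G); returns -1 for sizes we don't
 * use.
 */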
static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x15:
                idx = MMU_PAGE_2M;
                break;
        case 0x1e:
                idx = MMU_PAGE_1G;
                break;
        }
        return idx;
}

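/*
 * Parse "ibm,processor-radix-AP-encodings" from the cpu node: each
 * 32-bit cell carries the AP (actual page size) encoding in its top 3
 * bits and the page size shift in the remaining bits.
 */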
static int __init radix_dt_scan_page_sizes(unsigned long node,
                                           const char *uname, int depth,
                                           void *data)
{
        int size = 0;
        int shift, idx;
        unsigned int ap;
        const __be32 *prop;
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
        if (!prop)
                return 0;

        pr_info("Page sizes from device-tree:\n");
        for (; size >= 4; size -= 4, ++prop) {

                struct mmu_psize_def *def;

                /* top 3 bits are the AP encoding */
                shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
                ap = be32_to_cpu(prop[0]) >> 29;
                pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

                idx = get_idx_from_shift(shift);
                if (idx < 0)
                        continue;

                def = &mmu_psize_defs[idx];
                def->shift = shift;
                def->ap = ap;
        }

        /* needed ? */
        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 1;
}

static void __init radix_init_page_sizes(void)
{
        int rc;

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
        if (rc != 0) /* Found */
                goto found;
        /*
         * No encodings in the device-tree: assume 4K and 64K page support.
         */
        mmu_psize_defs[MMU_PAGE_4K].shift = 12;
        mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

        mmu_psize_defs[MMU_PAGE_64K].shift = 16;
        mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        if (mmu_psize_defs[MMU_PAGE_2M].shift) {
                /*
                 * map vmemmap using 2M if available
                 */
                mmu_vmemmap_psize = MMU_PAGE_2M;
        }
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
        return;
}

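/*
 * Boot-CPU MMU setup for radix: pick the base and vmemmap page sizes,
 * point the generic page table geometry at the radix values, read the
 * supported page sizes from the device tree, and on bare metal create
 * the partition table before building the kernel page tables.
 */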
void __init radix__early_init_mmu(void)
{
        unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
        mmu_virtual_psize = MMU_PAGE_64K;
#else
        mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* vmemmap mapping */
        mmu_vmemmap_psize = mmu_virtual_psize;
#endif
        /*
         * initialize page table size
         */
        __pte_index_size = RADIX_PTE_INDEX_SIZE;
        __pmd_index_size = RADIX_PMD_INDEX_SIZE;
        __pud_index_size = RADIX_PUD_INDEX_SIZE;
        __pgd_index_size = RADIX_PGD_INDEX_SIZE;
        __pmd_cache_index = RADIX_PMD_INDEX_SIZE;
        __pte_table_size = RADIX_PTE_TABLE_SIZE;
        __pmd_table_size = RADIX_PMD_TABLE_SIZE;
        __pud_table_size = RADIX_PUD_TABLE_SIZE;
        __pgd_table_size = RADIX_PGD_TABLE_SIZE;

        __pmd_val_bits = RADIX_PMD_VAL_BITS;
        __pud_val_bits = RADIX_PUD_VAL_BITS;
        __pgd_val_bits = RADIX_PGD_VAL_BITS;

        __kernel_virt_start = RADIX_KERN_VIRT_START;
        __kernel_virt_size = RADIX_KERN_VIRT_SIZE;
        __vmalloc_start = RADIX_VMALLOC_START;
        __vmalloc_end = RADIX_VMALLOC_END;
        vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
        ioremap_bot = IOREMAP_BASE;
        /*
         * For now radix also uses the same frag size
         */
        __pte_frag_nr = H_PTE_FRAG_NR;
        __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

        radix_init_page_sizes();
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
                radix_init_partition_table();
        }

        radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
        unsigned long lpcr;
        /*
         * update partition table control register and UPRT
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);

                mtspr(SPRN_PTCR,
                      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
        }
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                       phys_addr_t first_memblock_size)
{
        /*
         * We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors.
         */
        BUG_ON(first_memblock_base != 0);
        /*
         * We limit the allocations that depend on ppc64_rma_size
         * to first_memblock_size. We also clamp it to 1GB to
         * avoid some funky things such as RTAS bugs.
         *
         * On a radix config we really don't have a limitation
         * on real mode access. But keeping it as above works
         * well enough.
         */
        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
        /*
         * Finally limit subsequent allocations. We really don't want
         * to limit the memblock allocations to rma_size. FIXME!! should
         * we even limit at all ?
         */
        memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
                                            unsigned long page_size,
                                            unsigned long phys)
{
        /* Create a PTE encoding */
        unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

        BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
        /* FIXME!! Intel does more. We should free page tables mapping vmemmap ? */
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

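/*
 * THP helpers. A huge PMD is updated with the same primitive as a PTE;
 * radix__pte_update() does the actual clear/set of the bits.
 */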
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                         pmd_t *pmdp, unsigned long clr,
                                         unsigned long set)
{
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!radix__pmd_trans_huge(*pmdp));
        assert_spin_locked(&mm->page_table_lock);
#endif

        old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
        trace_hugepage_update(addr, old, clr, set);

        return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                                 pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
        /*
         * khugepaged calls this for a normal pmd
         */
        pmd = *pmdp;
        pmd_clear(pmdp);
        /* FIXME!! Verify whether we need this kick below */
        kick_all_cpus_sync();
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pte_t *ptep;
        pgtable_t pgtable;
        struct list_head *lh;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        *ptep = __pte(0);
        ptep++;
        *ptep = __pte(0);
        return pgtable;
}


pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                     unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        unsigned long old;

        old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        /*
         * Serialize against find_linux_pte_or_hugepte which does lock-less
         * lookup in page tables with local interrupts disabled. For huge pages
         * it casts pmd_t to pte_t. Since the format of pte_t is different from
         * pmd_t we want to prevent transit from pmd pointing to page table
         * to pmd pointing to huge page (and back) while interrupts are disabled.
         * We clear the pmd to possibly replace it with a page table pointer in
         * different code paths. So make sure we wait for the parallel
         * find_linux_pte_or_hugepte to finish.
         */
        kick_all_cpus_sync();
        return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
        /* For radix 2M at PMD level means thp */
        if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
                return 1;
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */