/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>

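/*
 * Bare-metal hook to update PATB1 (the second doubleword of the host's
 * partition table entry), which points the hardware at the process table.
 */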
static int native_update_partition_table(u64 patb1)
{
        partition_tb->patb1 = cpu_to_be64(patb1);
        return 0;
}

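/*
 * Early page table allocator: before the slab is available, hand out
 * zeroed, naturally aligned blocks straight from memblock.
 */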
static __ref void *early_alloc_pgtable(unsigned long size)
{
        void *pt;

        pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
        memset(pt, 0, size);

        return pt;
}

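/*
 * Install a kernel mapping of size map_page_size at effective address ea
 * for physical address pa, allocating any missing intermediate levels
 * (from the slab once it is available, from memblock during early boot).
 */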
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                           pgprot_t flags,
                           unsigned int map_page_size)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        /*
         * Make sure the task size is correct as per the maximum address.
         */
        BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                if (map_page_size == PUD_SIZE) {
                        ptep = (pte_t *)pudp;
                        goto set_the_pte;
                }
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                if (map_page_size == PMD_SIZE) {
                        ptep = (pte_t *)pmdp;
                        goto set_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
        } else {
                pgdp = pgd_offset_k(ea);
                if (pgd_none(*pgdp)) {
                        pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
                        BUG_ON(pudp == NULL);
                        pgd_populate(&init_mm, pgdp, pudp);
                }
                pudp = pud_offset(pgdp, ea);
                if (map_page_size == PUD_SIZE) {
                        ptep = (pte_t *)pudp;
                        goto set_the_pte;
                }
                if (pud_none(*pudp)) {
                        pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
                        BUG_ON(pmdp == NULL);
                        pud_populate(&init_mm, pudp, pmdp);
                }
                pmdp = pmd_offset(pudp, ea);
                if (map_page_size == PMD_SIZE) {
                        ptep = (pte_t *)pmdp;
                        goto set_the_pte;
                }
                if (!pmd_present(*pmdp)) {
                        ptep = early_alloc_pgtable(PAGE_SIZE);
                        BUG_ON(ptep == NULL);
                        pmd_populate_kernel(&init_mm, pmdp, ptep);
                }
                ptep = pte_offset_kernel(pmdp, ea);
        }

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
        smp_wmb();
        return 0;
}

static void __init radix_init_pgtable(void)
{
        int loop_count;
        u64 base, end, start_addr;
        unsigned long rts_field;
        struct memblock_region *reg;
        unsigned long linear_page_size;

        /* We don't support SLB for radix */
        mmu_slb_size = 0;
        /*
         * Create the linear mapping, using standard page size for now.
         */
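        /*
         * For each memblock region, map the naturally aligned bulk with
         * 1G pages when available, then retry whatever is left with 2M
         * pages and finally with the base page size.
         */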
        loop_count = 0;
        for_each_memblock(memory, reg) {

                start_addr = reg->base;

redo:
                if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
                        linear_page_size = PUD_SIZE;
                else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
                        linear_page_size = PMD_SIZE;
                else
                        linear_page_size = PAGE_SIZE;

                base = _ALIGN_UP(start_addr, linear_page_size);
                end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);

                pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
                        (unsigned long)base, (unsigned long)end,
                        linear_page_size);

                while (base < end) {
                        radix__map_kernel_page((unsigned long)__va(base),
                                               base, PAGE_KERNEL_X,
                                               linear_page_size);
                        base += linear_page_size;
                }
                /*
                 * Map the rest using a lower page size.
                 */
                if (end < reg->base + reg->size) {
                        start_addr = end;
                        loop_count++;
                        goto redo;
                }
        }
        /*
         * Allocate the process table for the host.
         */
        BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 23), "Process table size too large.");
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
        /*
         * Fill in the process table.
         * We support 52 bits, hence 52 - 28 = 24 (0b11000).
         */
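        /*
         * 24 is 0b11000; the 3ull below is its top two bits (0b11),
         * shifted to PPC_BITLSHIFT(2), i.e. IBM bits 1:2 of the entry.
         */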
        rts_field = 3ull << PPC_BITLSHIFT(2);
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
        /*
         * Fill in the partition table. We are supposed to use the effective
         * address of the process table here, but our linear mapping also
         * allows us to use the physical address.
         */
        ppc_md.update_partition_table(__pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR);
        pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
}

static void __init radix_init_partition_table(void)
{
        unsigned long rts_field;
        /*
         * We support 52 bits, hence 52 - 28 = 24 (0b11000).
         */
        rts_field = 3ull << PPC_BITLSHIFT(2);

        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
        partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
        partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
                                          RADIX_PGD_INDEX_SIZE | PATB_HR);
        printk("Partition table %p\n", partition_tb);

        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
        /*
         * Update the partition table control register; the table is
         * 64K in size.
         */
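        /*
         * The size field ORed into the low bits of PTCR is
         * log2(table size) - 12, i.e. PATB_SIZE_SHIFT - 12.
         */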
        mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}

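/*
 * Register the bare-metal implementation of the partition table
 * update hook.
 */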
void __init radix_init_native(void)
{
        ppc_md.update_partition_table = native_update_partition_table;
}

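/*
 * Translate a device-tree page-size shift into an mmu_psize_defs index:
 * 0xc = 12 -> 4K, 0x10 = 16 -> 64K, 0x15 = 21 -> 2M, 0x1e = 30 -> 1G.
 */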
static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x15:
                idx = MMU_PAGE_2M;
                break;
        case 0x1e:
                idx = MMU_PAGE_1G;
                break;
        }
        return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
                                           const char *uname, int depth,
                                           void *data)
{
        int size = 0;
        int shift, idx;
        unsigned int ap;
        const __be32 *prop;
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
        if (!prop)
                return 0;

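        /*
         * Each 32-bit cell of "ibm,processor-radix-AP-encodings" holds
         * the AP encoding in its top 3 bits and the page-size shift in
         * the remaining bits, which is how it is split apart below.
         */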
        pr_info("Page sizes from device-tree:\n");
        for (; size >= 4; size -= 4, ++prop) {

                struct mmu_psize_def *def;

                /* The top 3 bits are the AP encoding */
                shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
                ap = be32_to_cpu(prop[0]) >> 29;
                pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

                idx = get_idx_from_shift(shift);
                if (idx < 0)
                        continue;

                def = &mmu_psize_defs[idx];
                def->shift = shift;
                def->ap = ap;
        }

        /* needed ? */
        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 1;
}

static void __init radix_init_page_sizes(void)
{
        int rc;

        /*
         * Try to find the available page sizes in the device tree.
         */
        rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
        if (rc != 0) /* Found */
                goto found;
        /*
         * Let's assume we have 4K and 64K page support.
         */
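        /*
         * The AP values 0x0 and 0x5 below are assumed to be the
         * hardware encodings for 4K and 64K pages respectively.
         */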
        mmu_psize_defs[MMU_PAGE_4K].shift = 12;
        mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

        mmu_psize_defs[MMU_PAGE_64K].shift = 16;
        mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        if (mmu_psize_defs[MMU_PAGE_2M].shift) {
                /*
                 * Map vmemmap using 2M pages if available.
                 */
                mmu_vmemmap_psize = MMU_PAGE_2M;
        }
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
        return;
}

void __init radix__early_init_mmu(void)
{
        unsigned long lpcr;
        /*
         * Set up LPCR[UPRT] based on mmu_features.
         */
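        /*
         * LPCR_UPRT ("use process table") must be set for radix
         * translation via the process table to take effect.
         */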
        lpcr = mfspr(SPRN_LPCR);
        mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);

#ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
        mmu_virtual_psize = MMU_PAGE_64K;
#else
        mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* vmemmap mapping */
        mmu_vmemmap_psize = mmu_virtual_psize;
#endif
        /*
         * Initialize the page table geometry (index and table sizes).
         */
        __pte_index_size = RADIX_PTE_INDEX_SIZE;
        __pmd_index_size = RADIX_PMD_INDEX_SIZE;
        __pud_index_size = RADIX_PUD_INDEX_SIZE;
        __pgd_index_size = RADIX_PGD_INDEX_SIZE;
        __pmd_cache_index = RADIX_PMD_INDEX_SIZE;
        __pte_table_size = RADIX_PTE_TABLE_SIZE;
        __pmd_table_size = RADIX_PMD_TABLE_SIZE;
        __pud_table_size = RADIX_PUD_TABLE_SIZE;
        __pgd_table_size = RADIX_PGD_TABLE_SIZE;

        radix_init_page_sizes();

        if (!firmware_has_feature(FW_FEATURE_LPAR))
                radix_init_partition_table();

        radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
        unsigned long lpcr;
        /*
         * Set up LPCR[UPRT] based on mmu_features.
         */
        lpcr = mfspr(SPRN_LPCR);
        mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
        /*
         * Update the partition table control register; the table is
         * 64K in size.
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_PTCR,
                      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                       phys_addr_t first_memblock_size)
{
        /*
         * We don't currently support the first MEMBLOCK not mapping
         * physical address 0.
         */
        BUG_ON(first_memblock_base != 0);
        /*
         * We limit allocations that depend on ppc64_rma_size
         * to first_memblock_size. We also clamp it to 1GB to
         * avoid some funky things such as RTAS bugs.
         *
         * On a radix config we really don't have a limitation
         * on real mode access, but keeping it as above works
         * well enough.
         */
        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
        /*
         * Finally limit subsequent allocations. We really don't want
         * to limit the memblock allocations to rma_size. FIXME!! should
         * we even limit at all ?
         */
        memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
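/*
 * Back the virtual memmap (the struct page array) starting at 'start'
 * with physical memory at 'phys', using a single mapping of 'page_size'.
 */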
int __meminit radix__vmemmap_create_mapping(unsigned long start,
                                            unsigned long page_size,
                                            unsigned long phys)
{
        /* Create a PTE encoding */
        unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

        BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
        /* FIXME!! x86 does more here. Should we free the page tables mapping vmemmap? */
}
#endif
#endif