/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

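/*
 * kmem_cache constructors for the page-table caches created below.
 * Each one simply zero-fills a newly constructed table so that fresh
 * tables start out with no valid entries.
 */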
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pud_ctor(void *addr)
{
	memset(addr, 0, PUD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this were a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so this has to stay a runtime check. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	kfree(name);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}


void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index, except with THP
	 * enabled on book3s 64.
	 */
	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);

	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
		panic("Couldn't allocate pud pgtable caches");
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

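/*
 * Each vmemmap_backing entry records the physical page backing one
 * vmemmap block.  Entries are handed out in chunks from a page-sized
 * allocation via "next"/"num_left"; entries released by
 * vmemmap_list_free() are chained back onto "next" and counted in
 * "num_freed" for reuse.
 */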
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

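/*
 * Populate the vmemmap for the range [start, end): for each chunk of
 * the mapping page size, allocate backing memory, record it on
 * vmemmap_list and create the virtual-to-physical mapping.
 */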
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning(
				"vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it, keeping track of the previous entry */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make "next" point to this freed entry so it can be reused */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means some other
		 * section still maps into this page - skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it is
					 * the case, leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif /* CONFIG_MEMORY_HOTPLUG */
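
/*
 * Stub required by the generic memory hotplug code; powerpc has no
 * extra bootmem bookkeeping to do here, so it is deliberately empty.
 */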
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fallback to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in the real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* entries may have been freed from vmemmap_list, so check them all */
		if ((pg_va + sizeof(struct page)) <=
				(vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
				vmem_back->virt_addr);
			return page;
		}
	}

	/* the page struct is probably split across real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

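/*
 * With FLATMEM the memmap is one contiguous array, so the struct page
 * for any pfn can be reached with plain pfn_to_page() arithmetic and
 * no vmemmap_list walk is needed.
 */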
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */

#ifdef CONFIG_PPC_STD_MMU_64
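/*
 * "disable_radix" on the kernel command line forces the hash MMU:
 * mmu_early_init_devtree() below clears the radix feature bit.
 */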
static bool disable_radix;
static int __init parse_disable_radix(char *p)
{
	disable_radix = true;
	return 0;
}
early_param("disable_radix", parse_disable_radix);

void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
}
#endif /* CONFIG_PPC_STD_MMU_64 */