/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */

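/*
 * Filled in during early boot: the physical address at which memory starts
 * and the physical address at which the kernel image was loaded.
 */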
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within. Note that we have to
 * do this by hand, as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised. If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional page
 * table to track which virtual struct pages are backed by a physical
 * mapping. The virtual to physical mappings are tracked in a simple linked
 * list format. 'vmemmap_list' maintains the entire vmemmap physical mapping
 * at all times, whereas the 'next' list maintains the available
 * vmemmap_backing structures which have been deleted from the
 * 'vmemmap_list' during system runtime (memory hotplug remove operation).
 * The freed 'vmemmap_backing' structures are reused later when new requests
 * come in without allocating fresh memory. This pointer also tracks the
 * allocated 'vmemmap_backing' structures as we allocate one full page of
 * memory at a time when we don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot time and again tracks the freed nodes during
 * runtime. This is racy, but it does not matter as the two uses are
 * separated by the boot process. It would create problems if we somehow
 * had a memory hotplug operation during boot!
 */
static int num_left;
static int num_freed;

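/*
 * Hand out one vmemmap_backing structure: reuse a previously freed entry
 * when one is available, otherwise carve the next chunk out of the current
 * page, allocating a fresh page when that runs out.
 */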
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

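/*
 * Record that the vmemmap range starting at 'start' is backed by the
 * physical block at 'phys', by pushing a new entry onto vmemmap_list.
 */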
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

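/*
 * Populate the vmemmap for [start, end): for each linear-map-sized chunk
 * that is not already backed, allocate a block (honouring any altmap),
 * record it in vmemmap_list and create the kernel mapping for it.
 */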
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		struct vmem_altmap *altmap;
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		/* altmap lookups only work at section boundaries */
		altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start));

		p = __vmemmap_alloc_block_buf(page_size, node, altmap);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning("vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				   rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
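/*
 * Unlink the vmemmap_backing entry for 'start' from vmemmap_list, push it
 * onto the free list and return the physical address it described
 * (0 if no entry was found).
 */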
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make 'next' point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

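/*
 * Tear down the vmemmap for [start, end): for each chunk whose section is
 * no longer in use, return the backing block via the altmap, via
 * free_reserved_page() (bootmem case) or via the page allocator, then
 * remove the kernel mapping.
 */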
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct vmem_altmap *altmap;
		struct page *section_base;
		struct page *page;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means some other
		 * section still maps into this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		section_base = pfn_to_page(vmemmap_section_start(start));
		nr_pages = 1 << page_order;

		altmap = to_vmem_altmap((unsigned long) section_base);
		if (altmap) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif /* CONFIG_MEMORY_HOTPLUG */
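
/*
 * Empty stub: the generic memory-hotplug code expects this hook, but
 * powerpc has nothing to register here.
 */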
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks, so
 * get_page/put_page may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Since vmemmap_list entries can be freed, we must check them all */
		if ((pg_va + sizeof(struct page)) <=
		    (vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
						vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#else

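/*
 * Without SPARSEMEM_VMEMMAP there is no vmemmap list to walk; the generic
 * pfn_to_page() translation is used directly.
 */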
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

static int __init parse_disable_radix(char *p)
{
	bool val;

	if (strlen(p) == 0)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix. If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
		      OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}

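/*
 * Decide between the hash and radix MMU at boot, based on the kernel
 * command line and hypervisor support (ibm,architecture-vec-5), then run
 * the matching early devtree setup.
 */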
void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */