// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/set_memory.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"

#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)

#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)
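
/*
 * Illustrative sketch (not a definition used by this file): with the macro
 * above, DEFINE_ENTRY(pte, pte, init) expands roughly to
 *
 *	static inline void set_pte_init(pte_t *arg1, pte_t arg2, bool init)
 *	{
 *		if (init)
 *			set_pte_safe(arg1, arg2);
 *		else
 *			set_pte(arg1, arg2);
 *	}
 *
 * i.e. the "_safe" variants (which warn if an already-present entry would be
 * changed) are only used while the mapping is first being created.
 */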

/*
 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;

/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;

EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
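
/*
 * Illustrative sketch (an assumption about the header side, not code from
 * this file): the PAGE_KERNEL_* macros mask their protection bits with
 * __default_kernel_pte_mask, so clearing e.g. _PAGE_GLOBAL in this mask makes
 * newly created kernel mappings non-global:
 *
 *	pgprot_t prot = __pgprot(__PAGE_KERNEL & __default_kernel_pte_mask);
 */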

int force_personality32;

/*
 * noexec32=on|off
 * Control non executable heap for 32bit processes.
 * To control the stack too use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
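
/*
 * Usage sketch: booting with "noexec32=off" on the kernel command line sets
 * READ_IMPLIES_EXEC in force_personality32, so 32-bit processes get PROT_EXEC
 * implied by PROT_READ (the pre-NX legacy behaviour).
 */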

static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, we need to
		 * handle synchronization on p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_pgtable(*p4d)
				       != p4d_pgtable(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * When memory was added make sure all the processes MM have
 * suitable PGD entries in the local PGD level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	flush_tlb_one_kernel(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}
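
/*
 * Illustrative usage (an assumed call site, not code from this file): the
 * fixmap code maps a single page at a fixed virtual slot roughly like
 *
 *	set_pte_vaddr(fix_to_virt(idx),
 *		      pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
 *
 * which walks (and, if needed, fills) the kernel page tables down to the PTE
 * and flushes the single TLB entry for that address.
 */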

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);

	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		protval_4k_2_large(cachemode2protval(cache));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pte_init(pte, __pte(0), init);
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume someone who pre-setup
		 * these mappings are more intelligent.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pmd_init(pmd, __pmd(0), init);
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot,
							   init);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping,
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte_init((pte_t *)pmd,
				     pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					     __pgprot(pgprot_val(prot) | _PAGE_PSE)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = _prot;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pud_init(pud, __pud(0), init);
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot, init);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);

			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);

			set_pte_init((pte_t *)pud,
				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					     prot),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_init(&init_mm, pud, pmd, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr);
	vaddr_end = (unsigned long)__va(paddr_end);

	if (!pgtable_l5_enabled())
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
				     page_size_mask, prot, init);

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		p4d_t *p4d = p4d_page + p4d_index(vaddr);
		pud_t *pud;

		vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
		paddr = __pa(vaddr);

		if (paddr >= paddr_end) {
			paddr_next = __pa(vaddr_next);
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_p4d_init(p4d, __p4d(0), init);
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
						   page_size_mask, prot, init);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate_init(&init_mm, p4d, pud, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	return paddr_last;
}

static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
			       pgprot_t prot, bool init)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask,
						   prot, init);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled())
			pgd_populate_init(&init_mm, pgd, p4d, init);
		else
			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
					  (pud_t *) p4d, init);

		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	return paddr_last;
}

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * variants when updating the mapping. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}

void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init paging_init(void)
{
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_pmd_start to next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start __meminitdata;

static void __meminit vmemmap_flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	/*
	 * Clears (unused_pmd_start, PMD_END]
	 */
	memset((void *)unused_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	unused_pmd_start = 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
	unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

	/*
	 * Flush the unused range cache to ensure that memchr_inv() will work
	 * for the whole range.
	 */
	vmemmap_flush_unused_pmd();
	memset((void *)addr, PAGE_UNUSED, end - addr);

	return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
}
#endif

static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed when removing some other adjacent memmap (just in
	 * case the first memmap never gets initialized e.g., because the memory
	 * block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_pmd_start == start) {
		if (likely(IS_ALIGNED(end, PMD_SIZE)))
			unused_pmd_start = 0;
		else
			unused_pmd_start = end;
		return;
	}

	/*
	 * If the range does not contiguously follow the previous one, make sure
	 * to mark the unused range of the previous one so it can be removed.
	 */
	vmemmap_flush_unused_pmd();
	__vmemmap_use_sub_pmd(start);
}

static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	vmemmap_flush_unused_pmd();

	/*
	 * Could be our memmap page is filled with PAGE_UNUSED already from a
	 * previous remove. Make sure to reset it.
	 */
	__vmemmap_use_sub_pmd(start);

	/*
	 * Mark with PAGE_UNUSED the unused parts of the new memmap range
	 */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)ALIGN_DOWN(start, PMD_SIZE), PAGE_UNUSED,
		       start - ALIGN_DOWN(start, PMD_SIZE));

	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD where the
	 * unused range begins.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_pmd_start = end;
}
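
/*
 * Illustrative sketch (layout only, the values are examples): one 2M vmemmap
 * PMD covers the "struct page" arrays of several memory sections. When only
 * part of that range is in use, the remaining bytes are poisoned so a later
 * remove can tell whether the whole PMD is still needed:
 *
 *	|<------------------- 2M vmemmap PMD -------------------->|
 *	| PAGE_UNUSED ... | struct pages of added section | unused |
 *	                  ^ start                          ^ end
 *
 * vmemmap_pmd_is_unused() reports the PMD as freeable only once every byte
 * of it reads back as PAGE_UNUSED.
 */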

#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size, params->pgprot);

	return add_pages(nid, start_pfn, nr_pages, params);
}

static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (!direct)
			free_pagetable(pte_page(*pte), 0);

		spin_lock(&init_mm.page_table_lock);
		pte_clear(&init_mm, addr, pte);
		spin_unlock(&init_mm.page_table_lock);

		/* For non-direct mapping, pages means nothing. */
		pages++;
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
			else if (vmemmap_pmd_is_unused(addr, next)) {
				free_hugepage_table(pmd_page(*pmd),
						    altmap);
				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
			}
#endif
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud) &&
		    IS_ALIGNED(addr, PUD_SIZE) &&
		    IS_ALIGNED(next, PUD_SIZE)) {
			spin_lock(&init_mm.page_table_lock);
			pud_clear(pud);
			spin_unlock(&init_mm.page_table_lock);
			pages++;
			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_large(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (pgtable_l5_enabled())
			free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		 struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	remove_pagetable(start, end, false, altmap);
}

static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

/*
 * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
 * Only the level which needs to be synchronized between all page-tables is
 * allocated because the synchronization can be expensive.
 */
static void __init preallocate_vmalloc_pages(void)
{
	unsigned long addr;
	const char *lvl;

	for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd = pgd_offset_k(addr);
		p4d_t *p4d;
		pud_t *pud;

		lvl = "p4d";
		p4d = p4d_alloc(&init_mm, pgd, addr);
		if (!p4d)
			goto failed;

		if (pgtable_l5_enabled())
			continue;

		/*
		 * The goal here is to allocate all possibly required
		 * hardware page tables pointed to by the top hardware
		 * level.
		 *
		 * On 4-level systems, the P4D layer is folded away and
		 * the above code does no preallocation.  Below, go down
		 * to the pud _software_ level to ensure the second
		 * hardware level is allocated on 4-level systems too.
		 */
		lvl = "pud";
		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)
			goto failed;
	}

	return;

failed:

	/*
	 * The pages have to be there now or they will be missing in
	 * process page-tables later.
	 */
	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already clear the empty_zero_page */

	/* this will put all memory onto the freelists */
	memblock_free_all();
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and memblock_free_all() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	if (get_gate_vma(&init_mm))
		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

	preallocate_vmalloc_pages();
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	/*
	 * More CPUs always led to greater speedups on tested systems, up to
	 * all the nodes' CPUs.  Use all since the system is otherwise idle
	 * now.
	 */
	return max_t(int, cpumask_weight(node_cpumask), 1);
}
#endif

int kernel_set_to_readonly;

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long)__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(_etext);
	unsigned long rodata_end = PFN_ALIGN(__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	set_ftrace_ops_ro();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_kernel_image_pages("unused kernel image (text/rodata gap)",
				(void *)text_end, (void *)rodata_start);
	free_kernel_image_pages("unused kernel image (rodata/data gap)",
				(void *)rodata_end, (void *)_sdata);
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return 0;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
	unsigned long size = 1UL << order;

	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
		return -EINVAL;

	set_memory_block_size = size;
	return 0;
}
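
/*
 * Usage sketch (hypothetical caller, not a call site in this file): platform
 * code that wants 128M memory blocks would call, early in boot,
 *
 *	set_memory_block_size_order(27);	/* 1UL << 27 == 128M */
 *
 * and probe_memory_block_size() below then returns that value instead of
 * probing.
 */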

static unsigned long probe_memory_block_size(void)
{
	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
	unsigned long bz;

	/* If memory block size has been set, then use it */
	bz = set_memory_block_size;
	if (bz)
		goto done;

	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
		bz = MIN_MEMORY_BLOCK_SIZE;
		goto done;
	}

	/*
	 * Use max block size to minimize overhead on bare metal, where
	 * alignment for memory hotplug isn't a concern.
	 */
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		bz = MAX_BLOCK_SIZE;
		goto done;
	}

	/* Find the largest allowed block size that aligns to memory end */
	for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
		if (IS_ALIGNED(boot_mem_end, bz))
			break;
	}
done:
	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}
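
/*
 * Worked example (assumed numbers): on a bare-metal box with 192 GiB of RAM,
 * boot_mem_end is above MEM_SIZE_FOR_LARGE_BLOCK (64 GiB) and
 * X86_FEATURE_HYPERVISOR is clear, so bz becomes MAX_BLOCK_SIZE (2 GiB).
 * On a 48 GiB guest the small-memory check fires first and bz stays at
 * MIN_MEMORY_BLOCK_SIZE (128 MiB on x86-64).
 */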

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;

				if (!IS_ALIGNED(addr, PMD_SIZE) ||
				    !IS_ALIGNED(next, PMD_SIZE))
					vmemmap_use_new_sub_pmd(addr, next);

				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		if (vmemmap_populate_basepages(addr, next, node, NULL))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int err;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
		err = vmemmap_populate_basepages(start, end, node, NULL);
	else if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node, NULL);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pmd_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pmd_pages = 1 << get_order(PMD_SIZE);
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif