// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>
#include <linux/hugetlb.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"
#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
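/*
 * For illustration: DEFINE_POPULATE(pud_populate, pud, pmd, init) above
 * expands to
 *
 *	static inline void pud_populate_init(struct mm_struct *mm,
 *			pud_t *arg1, pmd_t *arg2, bool init)
 *	{
 *		if (init)
 *			pud_populate_safe(mm, arg1, arg2);
 *		else
 *			pud_populate(mm, arg1, arg2);
 *	}
 *
 * i.e. each *_init() wrapper picks the _safe() variant (which warns if a
 * present entry would be overwritten) only while a table is being built
 * for the first time (init == true).
 */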
#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)
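/*
 * Likewise, DEFINE_ENTRY(pte, pte, init) expands to
 *
 *	static inline void set_pte_init(pte_t *arg1, pte_t arg2, bool init)
 *	{
 *		if (init)
 *			set_pte_safe(arg1, arg2);
 *		else
 *			set_pte(arg1, arg2);
 *	}
 */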
/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;
/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
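/*
 * Example: booting with "noexec32=off" on the kernel command line sets
 * READ_IMPLIES_EXEC in force_personality32, so PROT_READ mappings of
 * 32-bit processes become executable as well.
 */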
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With a folded p4d, pgd_none() is always false, so we need
		 * to handle the synchronization at the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d)
				       != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
/*
 * When memory is added, make sure that all process mms have a suitable
 * PGD entry in their local PGD-level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}
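/*
 * Note that 'end' is inclusive here: both callers in this file pass
 * "end - 1" (see __kernel_physical_mapping_init() and vmemmap_populate()
 * below), which the "addr <= end" loops above rely on.
 */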
/*
 * NOTE: This function is marked __ref because it calls the __init
 * allocator (memblock_alloc()). Doing so is safe ONLY while
 * after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}
static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}
static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	flush_tlb_one_kernel(vaddr);
}
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
		       "PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);

	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}
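/*
 * Worked example: populate_extra_pte(vaddr) walks pgd -> p4d -> pud -> pmd,
 * allocating any missing intermediate table from spp_getpage() on the way
 * down, and finally returns the address of the pte slot for vaddr. No pte
 * is written here; the caller installs the actual mapping.
 */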
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		protval_4k_2_large(cachemode2protval(cache));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}
void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}
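/*
 * Usage sketch (illustrative; the real callers live outside this file):
 * a platform needing an uncached mapping of a PMD-aligned MMIO window
 * would call
 *
 *	init_extra_mapping_uc(mmr_phys_base, PMD_SIZE);
 *
 * where mmr_phys_base is a hypothetical 2MB-aligned physical address.
 * Both phys and size must be PMD-aligned, as the BUG_ON() in
 * __init_extra_mapping() enforces.
 */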
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has a valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}
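/*
 * Example of the arithmetic: with PMD_SIZE == 2MB and KERNEL_IMAGE_SIZE
 * typically 512MB (1GB with KASLR), the loop above scans a few hundred
 * pmds in level2_kernel_pgt and zeroes every one that lies outside
 * [_text, roundup(_brk_end, 2MB)).
 */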
/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pte_init(pte, __pte(0), init);
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up these
		 * mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}
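/*
 * Example: phys_pte_init(pte_page, 0x201000, 0x203000, prot, init) starts
 * at pte_index(0x201000) == 1, installs ptes for 0x201000 and 0x202000,
 * clears (when the e820 map permits) the remaining slots past paddr_end,
 * and returns 0x203000 as the last physical address mapped.
 */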
/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pmd_init(pmd, __pmd(0), init);
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot,
							   init);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte_init((pte_t *)pmd,
				     pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					     __pgprot(pgprot_val(prot) | _PAGE_PSE)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}
/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = _prot;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pud_init(pud, __pud(0), init);
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot, init);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);

			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);

			set_pte_init((pte_t *)pud,
				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					     prot),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_init(&init_mm, pud, pmd, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}
static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr);
	vaddr_end = (unsigned long)__va(paddr_end);

	if (!pgtable_l5_enabled())
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
				     page_size_mask, prot, init);

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		p4d_t *p4d = p4d_page + p4d_index(vaddr);
		pud_t *pud;

		vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
		paddr = __pa(vaddr);

		if (paddr >= paddr_end) {
			paddr_next = __pa(vaddr_next);
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_p4d_init(p4d, __p4d(0), init);
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
						   page_size_mask, prot, init);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate_init(&init_mm, p4d, pud, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	return paddr_last;
}
static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
			       pgprot_t prot, bool init)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask,
						   prot, init);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled())
			pgd_populate_init(&init_mm, pgd, p4d, init);
		else
			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
					  (pud_t *) p4d, init);

		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	return paddr_last;
}
/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}
/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * functions when updating the mapping. The caller is responsible for
 * flushing the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}
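/*
 * Per the comment above, the caller of kernel_physical_mapping_change()
 * must flush the TLB itself once it is done, e.g.:
 *
 *	kernel_physical_mapping_change(pa_start, pa_end, page_size_mask);
 *	flush_tlb_all();
 *
 * (pa_start/pa_end are placeholder names; the PMD alignment rules of
 * kernel_physical_mapping_init() apply here as well.)
 */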
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_init();

	/*
	 * Clear the default setting with node 0.
	 * Note: don't use nodes_clear here; that really clears only when
	 *	 NUMA support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start __meminitdata;

static void __meminit vmemmap_flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	/*
	 * Clears (unused_pmd_start, PMD_END]
	 */
	memset((void *)unused_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	unused_pmd_start = 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
	unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

	/*
	 * Flush the unused range cache to ensure that memchr_inv() will work
	 * for the whole range.
	 */
	vmemmap_flush_unused_pmd();
	memset((void *)addr, PAGE_UNUSED, end - addr);

	return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
}
#endif
static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page
	 * from getting removed when removing some other adjacent memmap
	 * (just in case the first memmap never gets initialized e.g., because
	 * the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_pmd_start == start) {
		if (likely(IS_ALIGNED(end, PMD_SIZE)))
			unused_pmd_start = 0;
		else
			unused_pmd_start = end;
		return;
	}

	/*
	 * If the range does not contiguously follow the previous one, make
	 * sure to mark the unused range of the previous one so it can be
	 * removed.
	 */
	vmemmap_flush_unused_pmd();
	__vmemmap_use_sub_pmd(start);
}

static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_pmd();

	/*
	 * Could be our memmap page is filled with PAGE_UNUSED already from a
	 * previous remove. Make sure to reset it.
	 */
	__vmemmap_use_sub_pmd(start);

	/*
	 * Mark with PAGE_UNUSED the unused parts of the new memmap range.
	 * The marking has to start at the PMD-aligned beginning of the
	 * range, covering everything up to 'start'.
	 */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);

	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD where the
	 * unused range begins.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_pmd_start = end;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size, params->pgprot);

	return add_pages(nid, start_pfn, nr_pages, params);
}
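/*
 * Hotplug flow sketch: the generic add_memory() path ends up here, so a
 * newly added range is first wired into the direct mapping via
 * init_memory_mapping() and only then handed to __add_pages() for
 * section/memmap creation, with max_pfn and friends updated afterwards.
 */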
static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_hugepage_table(struct page *page,
					  struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (!direct)
			free_pagetable(pte_page(*pte), 0);

		spin_lock(&init_mm.page_table_lock);
		pte_clear(&init_mm, addr, pte);
		spin_unlock(&init_mm.page_table_lock);

		/* For non-direct mapping, pages means nothing. */
		pages++;
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}
static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
			else if (vmemmap_pmd_is_unused(addr, next)) {
				free_hugepage_table(pmd_page(*pmd),
						    altmap);
				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
			}
#endif
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}
static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud) &&
		    IS_ALIGNED(addr, PUD_SIZE) &&
		    IS_ALIGNED(next, PUD_SIZE)) {
			spin_lock(&init_mm.page_table_lock);
			pud_clear(pud);
			spin_unlock(&init_mm.page_table_lock);
			pages++;
			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}
static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_large(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in
		 * the 5-level case we should free them. This code will have to
		 * change to adapt for boot-time switching between 4 and 5
		 * level page tables.
		 */
		if (pgtable_l5_enabled())
			free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}
/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		 struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
			struct vmem_altmap *altmap)
{
	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	remove_pagetable(start, end, false, altmap);
}

static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}
/*
 * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
 * Only the level which needs to be synchronized between all page-tables is
 * allocated because the synchronization can be expensive.
 */
static void __init preallocate_vmalloc_pages(void)
{
	unsigned long addr;
	const char *lvl;

	for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd = pgd_offset_k(addr);
		p4d_t *p4d;
		pud_t *pud;

		lvl = "p4d";
		p4d = p4d_alloc(&init_mm, pgd, addr);
		if (!p4d)
			goto failed;

		if (pgtable_l5_enabled())
			continue;

		/*
		 * The goal here is to allocate all possibly required
		 * hardware page tables pointed to by the top hardware
		 * level.
		 *
		 * On 4-level systems, the P4D layer is folded away and
		 * the above code does no preallocation.  Below, go down
		 * to the pud _software_ level to ensure the second
		 * hardware level is allocated on 4-level systems too.
		 */
		lvl = "pud";
		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)
			goto failed;
	}

	return;

failed:

	/*
	 * The pages have to be there now or they will be missing in
	 * process page-tables later.
	 */
	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}
void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	/* this will put all memory onto the freelists */
	memblock_free_all();
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and memblock_free_all() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	if (get_gate_vma(&init_mm))
		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

	preallocate_vmalloc_pages();
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	/*
	 * More CPUs always led to greater speedups on tested systems, up to
	 * all the nodes' CPUs.  Use all since the system is otherwise idle
	 * now.
	 */
	return max_t(int, cpumask_weight(node_cpumask), 1);
}
#endif
int kernel_set_to_readonly;

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long)__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(_etext);
	unsigned long rodata_end = PFN_ALIGN(__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	set_ftrace_ops_ro();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_kernel_image_pages("unused kernel image (text/rodata gap)",
				(void *)text_end, (void *)rodata_start);
	free_kernel_image_pages("unused kernel image (rodata/data gap)",
				(void *)rodata_end, (void *)_sdata);

	debug_checkwx();
}
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
	unsigned long size = 1UL << order;

	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
		return -EINVAL;

	set_memory_block_size = size;
	return 0;
}

static unsigned long probe_memory_block_size(void)
{
	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
	unsigned long bz;

	/* If memory block size has been set, then use it */
	bz = set_memory_block_size;
	if (bz)
		goto done;

	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
		bz = MIN_MEMORY_BLOCK_SIZE;
		goto done;
	}

	/*
	 * Use max block size to minimize overhead on bare metal, where
	 * alignment for memory hotplug isn't a concern.
	 */
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		bz = MAX_BLOCK_SIZE;
		goto done;
	}

	/* Find the largest allowed block size that aligns to memory end */
	for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
		if (IS_ALIGNED(boot_mem_end, bz))
			break;
	}
done:
	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}
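/*
 * Worked example for probe_memory_block_size(): on a 65GB guest
 * (X86_FEATURE_HYPERVISOR set, boot_mem_end == 65GB >= 64GB), the loop
 * halves bz starting from MAX_BLOCK_SIZE: 2GB does not divide 65GB, but
 * 1GB does, so the memory block size becomes 1GB. On bare metal the same
 * machine would simply use MAX_BLOCK_SIZE (2GB).
 */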
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
							 addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;

				if (!IS_ALIGNED(addr, PMD_SIZE) ||
				    !IS_ALIGNED(next, PMD_SIZE))
					vmemmap_use_new_sub_pmd(addr, next);

				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		if (vmemmap_populate_basepages(addr, next, node, NULL))
			return -ENOMEM;
	}
	return 0;
}
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	int err;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	if ((is_hugetlb_free_vmemmap_enabled() && !altmap) ||
	    end - start < PAGES_PER_SECTION * sizeof(struct page))
		err = vmemmap_populate_basepages(start, end, node, NULL);
	else if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
			    __func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node, NULL);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}
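/*
 * The size check above, made concrete: with 128MB sections
 * (PAGES_PER_SECTION == 32768) and the usual 64-byte struct page, a full
 * section's memmap is 32768 * 64 bytes == 2MB, i.e. exactly one PMD; only
 * smaller (sub-section) requests fall back to base pages on that account.
 */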
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int nr_pmd_pages;
	struct page *page;
	bool base_mapping = !boot_cpu_has(X86_FEATURE_PSE) ||
			    is_hugetlb_free_vmemmap_enabled();

	for (; addr < end; addr = next) {
		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (base_mapping) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pmd_pages = 1 << get_order(PMD_SIZE);
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif
void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			 addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif