// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
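/*
 * As a sketch of that base-offset idea (illustrative, not code from this
 * file): with a global vmemmap base pointer the conversions reduce to
 * pointer arithmetic, roughly
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */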
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

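/*
 * Early boot path: the request is handed to memblock. Note that the _raw
 * variant does not zero the returned memory, and @goal (passed as
 * __pa(MAX_DMA_ADDRESS) by the caller below) acts as the preferred lower
 * bound so vmemmap allocations stay out of the DMA range where possible.
 */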
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* Need to make sure the size is the same for all allocations during the early boot stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr = sparse_buffer_alloc(size);

	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

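/* Next pfn that can be handed out from the device page map. */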
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

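/* Number of pfns still available for allocation in the device page map. */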
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @altmap:	device page map
 * @size:	size (in bytes) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
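	/*
	 * Align the start pfn to the natural alignment of the request:
	 * the largest power-of-two number of pages that divides nr_pfns.
	 * nr_align then holds how many pfns are skipped to reach that
	 * boundary.
	 */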
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

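/* Warn if this part of the vmemmap was backed by memory from a distant node. */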
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

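/*
 * Map one vmemmap page at @addr: if the pte is empty, allocate a page to
 * hold the struct pages and install it with PAGE_KERNEL permissions;
 * already-populated entries are left untouched.
 */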
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

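/* Allocate a block and zero it; used below for page-table pages. */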
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

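/*
 * The helpers below each populate one page-table level for a vmemmap
 * address: if the entry is empty, a zeroed page is allocated for the next
 * level and hooked up; the entry (new or pre-existing) is then returned.
 */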
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

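/*
 * Map the vmemmap range [start, end) with base pages, populating every
 * page-table level for each PAGE_SIZE step.
 */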
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

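/*
 * Build the virtual memory map for one memory section and return its first
 * struct page, or NULL if the architecture's vmemmap_populate() fails.
 */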
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return map;
}