// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
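
/*
 * Illustrative sketch (hedged; the real definitions live in
 * asm-generic/memory_model.h under CONFIG_SPARSEMEM_VMEMMAP): with a
 * virtually mapped memmap the pfn <-> page conversions reduce to pointer
 * arithmetic against the fixed vmemmap base, roughly:
 *
 *        #define __pfn_to_page(pfn)        (vmemmap + (pfn))
 *        #define __page_to_pfn(page)       (unsigned long)((page) - vmemmap)
 *
 * i.e. a base offset calculation with no memory access.
 */
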
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
                                unsigned long size,
                                unsigned long align,
                                unsigned long goal)
{
        return memblock_alloc_try_nid_raw(size, align, goal,
                                          MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
        /* If the main allocator is up use that, fallback to bootmem. */
        if (slab_is_available()) {
                gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
                int order = get_order(size);
                static bool warned;
                struct page *page;

                page = alloc_pages_node(node, gfp_mask, order);
                if (page)
                        return page_address(page);

                if (!warned) {
                        warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
                                   "vmemmap alloc failure: order:%u", order);
                        warned = true;
                }
                return NULL;
        } else
                return __earlyonly_bootmem_alloc(node, size, size,
                                __pa(MAX_DMA_ADDRESS));
}
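
/*
 * A note on the gfp_mask above (a reading of the flags, not new behaviour):
 * __GFP_RETRY_MAYFAIL makes the allocator try hard but return NULL instead
 * of triggering the OOM killer, and __GFP_NOWARN suppresses the generic
 * failure warning so that the explicit warn_alloc() above, issued at most
 * once, is the only report.
 */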

/* Need to make sure the size is the same across all early-stage allocations. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
        void *ptr = sparse_buffer_alloc(size);

        if (!ptr)
                ptr = vmemmap_alloc_block(size, node);
        return ptr;
}
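
/*
 * Background, per mm/sparse.c (an assumption about the caller, not code in
 * this file): sparse_buffer_init() preallocates one large memblock buffer
 * per node before the memmap is built, and sparse_buffer_alloc() carves
 * size-aligned chunks from it, so the common early-boot path avoids a
 * separate memblock allocation per call; vmemmap_alloc_block() is only the
 * fallback once that buffer is exhausted (or absent).
 */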

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
        return altmap->base_pfn + altmap->reserve + altmap->alloc
                + altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
        unsigned long allocated = altmap->alloc + altmap->align;

        if (altmap->free > allocated)
                return altmap->free - allocated;
        return 0;
}

/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @size: size (in bytes) of the allocation
 * @altmap: device page map
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
                                        struct vmem_altmap *altmap)
{
        unsigned long pfn, nr_pfns, nr_align;

        if (size & ~PAGE_MASK) {
                pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
                                __func__, size);
                return NULL;
        }

        pfn = vmem_altmap_next_pfn(altmap);
        nr_pfns = size >> PAGE_SHIFT;
        nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
        nr_align = ALIGN(pfn, nr_align) - pfn;
        if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
                return NULL;

        altmap->alloc += nr_pfns;
        altmap->align += nr_align;
        pfn += nr_align;

        pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
                        __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
        return __va(__pfn_to_phys(pfn));
}
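
/*
 * Worked example (illustrative numbers): with 4K pages a 2MB request gives
 * nr_pfns = 512. find_first_bit() returns the lowest set bit of nr_pfns,
 * so nr_align is first the largest power-of-two divisor of the request
 * (512 here), and the second assignment turns it into the number of
 * padding pfns needed to align the next free device pfn. If
 * vmem_altmap_next_pfn() returned 0x1100, ALIGN(0x1100, 0x200) = 0x1200,
 * so 0x100 pfns are skipped and accounted in altmap->align.
 */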

void __meminit vmemmap_verify(pte_t *pte, int node,
                                unsigned long start, unsigned long end)
{
        unsigned long pfn = pte_pfn(*pte);
        int actual_node = early_pfn_to_nid(pfn);

        if (node_distance(actual_node, node) > LOCAL_DISTANCE)
                pr_warn("[%lx-%lx] potential offnode page_structs\n",
                        start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
                                       struct vmem_altmap *altmap)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte)) {
                pte_t entry;
                void *p;

                if (altmap)
                        p = altmap_alloc_block_buf(PAGE_SIZE, altmap);
                else
                        p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
                set_pte_at(&init_mm, addr, pte, entry);
        }
        return pte;
}
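
/*
 * Note: the altmap branch above is what allows ZONE_DEVICE users to place
 * the struct pages that describe device memory in the device memory
 * itself instead of consuming regular RAM; either way the new vmemmap pte
 * simply points at whichever backing page was obtained.
 */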

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
        void *p = vmemmap_alloc_block(size, node);

        if (!p)
                return NULL;
        memset(p, 0, size);

        return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
        pmd_t *pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pmd_populate_kernel(&init_mm, pmd, p);
        }
        return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
        pud_t *pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pud_populate(&init_mm, pud, p);
        }
        return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
        p4d_t *p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                p4d_populate(&init_mm, p4d, p);
        }
        return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
        pgd_t *pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pgd_populate(&init_mm, pgd, p);
        }
        return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
                                         int node, struct vmem_altmap *altmap)
{
        unsigned long addr = start;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        for (; addr < end; addr += PAGE_SIZE) {
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;
                p4d = vmemmap_p4d_populate(pgd, addr, node);
                if (!p4d)
                        return -ENOMEM;
                pud = vmemmap_pud_populate(p4d, addr, node);
                if (!pud)
                        return -ENOMEM;
                pmd = vmemmap_pmd_populate(pud, addr, node);
                if (!pmd)
                        return -ENOMEM;
                pte = vmemmap_pte_populate(pmd, addr, node, altmap);
                if (!pte)
                        return -ENOMEM;
                vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
        }

        return 0;
}
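
/*
 * Caller sketch (illustrative, modelled on architectures that map the
 * vmemmap with base pages rather than huge pages): the per-arch hook can
 * be a direct wrapper around the helper above:
 *
 *        int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *                                       int node, struct vmem_altmap *altmap)
 *        {
 *                return vmemmap_populate_basepages(start, end, node, altmap);
 *        }
 */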

struct page * __meminit __populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
        unsigned long start;
        unsigned long end;

        /*
         * The minimum granularity of memmap extensions is
         * PAGES_PER_SUBSECTION as allocations are tracked in the
         * 'subsection_map' bitmap of the section.
         */
        end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
        pfn &= PAGE_SUBSECTION_MASK;
        nr_pages = end - pfn;

        start = (unsigned long) pfn_to_page(pfn);
        end = start + nr_pages * sizeof(struct page);

        if (vmemmap_populate(start, end, nid, altmap))
                return NULL;

        return pfn_to_page(pfn);
}
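
/*
 * Worked example for the subsection rounding above (illustrative numbers,
 * assuming PAGES_PER_SUBSECTION == 512 == 0x200): a request with
 * pfn = 0x1234 and nr_pages = 0x100 becomes end = ALIGN(0x1334, 0x200) =
 * 0x1400 and pfn = 0x1234 & PAGE_SUBSECTION_MASK = 0x1200, so the memmap
 * is populated for the whole 0x200-pfn subsection containing the request.
 */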