// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid_raw(size, align, goal,
					       BOOTMEM_ALLOC_ACCESSIBLE, node);
}

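/*
 * Scratch buffer set up by sparse_mem_maps_populate_node(): early vmemmap
 * allocations are carved from this per-node block before falling back to
 * the regular allocators in vmemmap_alloc_block().
 */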
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, otherwise fall back to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* Early-stage callers must all request the same size. */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* Carve the allocation from the preallocated buffer. */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

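/*
 * Next free pfn in the altmap pool: the pool starts at base_pfn, then
 * skips the reserved area and everything already handed out (alloc)
 * plus alignment padding (align).
 */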
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

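/* Number of pfns still available for allocation from the altmap pool. */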
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the largest power-of-two factor of the
 * requested size.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	unsigned long pfn = vmem_altmap_next_pfn(altmap);
	unsigned long nr_align;

	/* Largest power of two that divides nr_pfns. */
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return ULONG_MAX;
	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	return pfn + nr_align;
}

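/*
 * Illustrative example (hypothetical numbers): a 2MB request gives
 * nr_pfns = 0x200 with 4K pages.  If the pool's next free pfn is 0x1100,
 * vmem_altmap_alloc() rounds it up to 0x1200, charging 0x100 pfns of
 * padding to altmap->align and 0x200 pfns to altmap->alloc.
 */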
static void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be a multiple of PAGE_SIZE (%lu)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %lu align: %lu nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}

/* Early-stage callers must all request the same size. */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
		struct vmem_altmap *altmap)
{
	if (altmap)
		return altmap_alloc_block_buf(size, altmap);
	return alloc_block_buf(size, node);
}

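/*
 * Warn when the pte now backing this vmemmap range points at memory on a
 * distant node, since off-node page structs make struct page access slower.
 */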
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

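/*
 * The vmemmap_*_populate() helpers below each fill in one level of the
 * kernel page tables for a vmemmap address, allocating a backing page
 * when the entry is empty and returning NULL on allocation failure.
 */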
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

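/* Allocate a zero-filled block, as required for fresh page-table pages. */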
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

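/*
 * Populate the vmemmap range [start, end) with base (PAGE_SIZE) pages,
 * walking and allocating every page-table level for each page in turn.
 */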
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
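
/*
 * Sketch (not part of this file): an architecture without special
 * huge-page vmemmap support could implement its required
 * vmemmap_populate() hook as a thin wrapper:
 *
 *	int __meminit vmemmap_populate(unsigned long start,
 *				       unsigned long end, int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */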
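
/* Build the memory map for one sparsemem section on node @nid. */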
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}
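
/*
 * Populate the memory maps for a range of sections on node @nodeid using a
 * single PMD-aligned early allocation, so the per-section maps are carved
 * out contiguously; any unused tail of the buffer is freed at the end.
 * As an illustration (assuming x86_64: a 64-byte struct page and 32768
 * pages per 128MB section), each section's map is exactly 2MB, i.e. one
 * PMD_SIZE block.
 */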
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* Free whatever is left of the buffer. */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}