// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
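
/*
 * With CONFIG_SPARSEMEM_EXTREME the table above is only an array of root
 * pointers (allocated in memory_present()); each root's block of
 * SECTIONS_PER_ROOT mem_sections is allocated on demand by
 * sparse_index_init(), which keeps the footprint small when the physical
 * address space is sparsely populated. Without it, the whole table is a
 * statically sized two-dimensional array.
 */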

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
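
/*
 * A sketch of the lookup that the above supports (see __nr_to_section()
 * in include/linux/mmzone.h for the authoritative version):
 *
 *	root    = SECTION_NR_TO_ROOT(section_nr);
 *	section = &mem_section[root][section_nr & SECTION_ROOT_MASK];
 *
 * sparse_index_init() only guarantees that mem_section[root] is valid for
 * roots that contain at least one present section.
 */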

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
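
/*
 * For example, with MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12 (a common
 * x86-64 configuration), max_sparsemem_pfn above is 1UL << 34, i.e. pfns
 * covering 64TiB of physical address space; ranges beyond that are
 * clamped and warned about.
 */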

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif
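
/*
 * Rough numbers for the subsection map, assuming an x86-64 style layout
 * (128MiB sections, 2MiB subsections, 4KiB pages): PAGES_PER_SUBSECTION
 * is 512 and SUBSECTIONS_PER_SECTION is 64, so subsection_map is a
 * 64-bit bitmap with one bit per 2MiB chunk of the section.
 */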

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present(). This is a
 * convenience function that is useful for a number of arches
 * to mark all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
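
/*
 * In other words, what gets stored is (mem_map - section_start_pfn), so in
 * the classic (non-VMEMMAP) SPARSEMEM model pfn_to_page() can resolve a
 * page with just:
 *
 *	page = (struct page *)(ms->section_mem_map & SECTION_MAP_MASK) + pfn;
 *
 * i.e. without subtracting the section's starting pfn again; this is a
 * sketch of what __section_mem_map_addr()/__pfn_to_page() do.
 */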

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
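
/*
 * The PMD_SIZE alignment in the VMEMMAP flavour of section_map_size() is
 * what lets the virtual memmap be mapped with huge pages: assuming a
 * 64-byte struct page, a 128MiB section has 32768 pages and its memmap is
 * 64 * 32768 = 2MiB, i.e. exactly one PMD on x86-64; other configurations
 * simply round up to the next PMD_SIZE multiple.
 */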

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
					addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}
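
/*
 * sparse_buffer_alloc() is a simple bump allocator over the per-node
 * buffer set up by sparse_buffer_init(): the cursor is rounded up to the
 * requested size (so section-sized requests stay section aligned), any
 * skipped-over prefix is handed back to memblock, and NULL is returned
 * once the buffer is exhausted - callers such as
 * __populate_section_memmap() then fall back to a direct memblock
 * allocation.
 */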

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}
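
/*
 * sparse_init() batches work per node rather than per section: it walks
 * the present sections in order, counts how many consecutive ones share
 * the same early nid, and hands each such run to sparse_init_nid() so
 * that the usemaps and the memmap buffer for a node can be allocated in
 * one go.
 */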

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in a logically offlined state, i.e. all of its pages are
		 * isolated from the page allocator. If the removed section's
		 * memmap is placed in that same section, it must not be
		 * freed; if it were, the page allocator could hand it out
		 * even though it is about to be removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
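
/*
 * fill_subsection_map() and clear_subsection_map() bracket a (sub)section's
 * lifetime: section_activate() fills the bits for the pfn range being
 * added and section_deactivate() clears them again. Only the VMEMMAP
 * variants above actually track sub-sections; the stubs in the !VMEMMAP
 * branch make every hotplug operation whole-section sized.
 */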

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For case 1, when the subsection_map does not become empty we will not
 * free the usage map, but we still need to free the vmemmap range.
 *
 * For cases 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	if (section_is_early && memmap)
		free_map_bootmem(memmap);
	else
		depopulate_section_memmap(pfn, nr_pages, altmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced.  If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */