// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif
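
/*
 * With SPARSEMEM_EXTREME the mem_section table is a dynamically
 * allocated array of root pointers, each root covering
 * SECTIONS_PER_ROOT sections. Roots are populated on demand: from the
 * slab once it is available, or from memblock during early boot.
 */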
#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}
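
/*
 * Install the root array that covers @section_nr, allocating it on
 * node @nid. Returns -EEXIST if the root slot is already populated
 * and -ENOMEM if the allocation fails.
 */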
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
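
/*
 * The inverse of __nr_to_section(): scan the section roots for the one
 * that contains @ms and derive the section number from the offset
 * within that root. In the non-EXTREME case the table is a single flat
 * array, so plain pointer arithmetic suffices.
 */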
#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
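
/*
 * The shift keeps the node id above the low section_mem_map flag bits:
 * memory_present() below ORs the encoded nid with SECTION_IS_ONLINE,
 * and section_mark_present() adds SECTION_MARKED_PRESENT, so a section
 * carries both its early node id and its flags at the same time.
 */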

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr <= __highest_present_section_nr));

	return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}
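
/*
 * Note: next_present_section_nr() returns -1 (as an unsigned long)
 * when no further present section exists; for_each_present_section_nr()
 * tests both for that sentinel and against
 * __highest_present_section_nr to terminate.
 */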

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present(). This is a
 * convenience function that is useful for a number of arches
 * to mark all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
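
/*
 * Worked example: for a section whose first pfn is S and whose struct
 * pages live at mem_map, the encoded value is mem_map - S (pointer
 * arithmetic in units of struct page). For any pfn within the section,
 * (struct page *)coded_mem_map + pfn == &mem_map[pfn - S], which is
 * how the (non-vmemmap) SPARSEMEM pfn_to_page() resolves a pfn without
 * any further lookup.
 */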

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}
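
/*
 * The usemap is the per-section bitmap of pageblock flags (hence the
 * SECTION_BLOCKFLAGS_BITS sizing above); it is carried at the end of
 * struct mem_section_usage, which is why mem_section_usage_size() adds
 * the two together.
 */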
#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section from being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
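
/*
 * With SPARSEMEM_VMEMMAP the memmap lives in the vmemmap virtual
 * region, so each node's allocation is rounded up to PMD_SIZE to let
 * the architecture back it with huge-page mappings where possible.
 * Without vmemmap, page granularity is sufficient.
 */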
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid(size,
					  PAGE_SIZE, addr,
					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
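
/*
 * A simple boot-time bump allocator: one large memblock region is
 * reserved per node, successive memmap allocations are carved out of
 * it, and whatever remains is handed back in sparse_buffer_fini().
 * This keeps a node's memmaps physically contiguous.
 */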
static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	sparsemap_buf =
		memblock_alloc_try_nid_raw(size, PAGE_SIZE,
						addr,
						MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		memblock_free_early(__pa(sparsemap_buf), size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = PTR_ALIGN(sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else
			sparsemap_buf = ptr + size;
	}
	return ptr;
}

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		if (pnum >= pnum_end)
			break;

		map = sparse_mem_map_populate(pnum, nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate; mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO: this needs double checking. The offlining code
		 * makes sure to check pfn_valid, but those checks might
		 * be bogus.
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif
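
/*
 * Hot-added sections get their memmap at runtime: through the vmemmap
 * populate path when SPARSEMEM_VMEMMAP is enabled, otherwise from the
 * page allocator with a vmalloc fallback. The free paths below mirror
 * the corresponding allocation paths.
 */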
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid, altmap);
}
static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state. This means all pages are
		 * isolated from the page allocator. If the removing
		 * section's memmap is placed on the same section, it must
		 * not be freed: if it were, the page allocator could hand
		 * it out again even though it will be removed physically
		 * soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/**
 * sparse_add_one_section - add a memory section
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
				     struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section_usage *usage;
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	/*
	 * No locking here: sparse_index_init() does its own locking,
	 * and it may allocate with kmalloc.
	 */
	ret = sparse_index_init(section_nr, nid);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	ret = 0;
	memmap = kmalloc_section_memmap(section_nr, nid, altmap);
	if (!memmap)
		return -ENOMEM;
	usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
	if (!usage) {
		__kfree_section_memmap(memmap, altmap);
		return -ENOMEM;
	}

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * PAGES_PER_SECTION);

	set_section_nid(section_nr, nid);
	section_mark_present(ms);
	sparse_init_one_section(ms, section_nr, memmap, usage);

out:
	if (ret < 0) {
		kfree(usage);
		__kfree_section_memmap(memmap, altmap);
	}
	return ret;
}
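
/*
 * On section removal, any pages still accounted as hardware-poisoned
 * must be cleared and subtracted from the global num_poisoned_pages
 * counter before their struct pages disappear.
 */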
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages. But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usage(struct page *memmap,
		struct mem_section_usage *usage, struct vmem_altmap *altmap)
{
	struct page *usage_page;

	if (!usage)
		return;

	usage_page = virt_to_page(usage);
	/*
	 * Check to see if the allocation came from hot-plug-add
	 */
	if (PageSlab(usage_page) || PageCompound(usage_page)) {
		kfree(usage);
		if (memmap)
			__kfree_section_memmap(memmap, altmap);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed with other usemaps
	 * on the section which holds the pgdat, so just keep it as is
	 * for now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}
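
/*
 * Tear down one section: detach the mem_map and usage from the
 * mem_section, then free them along the path that matches how they
 * were allocated (hot-added via kmalloc/vmemmap vs. packed bootmem;
 * see free_section_usage() above).
 */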
void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
			       struct vmem_altmap *altmap)
{
	struct page *memmap = NULL;
	struct mem_section_usage *usage = NULL;

	if (ms->section_mem_map) {
		usage = ms->usage;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->usage = NULL;
	}

	clear_hwpoisoned_pages(memmap + map_offset,
			       PAGES_PER_SECTION - map_offset);
	free_section_usage(memmap, usage, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */