// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"


/*
 * memory_hotplug.memmap_on_memory parameter
 */
static bool memmap_on_memory __ro_after_init;
#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
module_param(memmap_on_memory, bool, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");
#endif
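
/*
 * Being built-in, the parameter is set via the kernel command line
 * ("memory_hotplug.memmap_on_memory=1") and, with mode 0444, is visible
 * read-only under /sys/module/memory_hotplug/parameters/.
 */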

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
int mhp_default_online_type = MMOP_OFFLINE;
#else
int mhp_default_online_type = MMOP_ONLINE;
#endif

static int __init setup_memhp_default_state(char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type >= 0)
		mhp_default_online_type = online_type;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);
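
/*
 * Example: booting with "memhp_default_state=online" makes hotplugged
 * memory blocks come up online immediately; mhp_online_type_from_str()
 * also accepts "offline", "online_kernel" and "online_movable".
 */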

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size,
						 const char *resource_name)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (strcmp(resource_name, "System RAM"))
		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);

	/*
	 * Make sure the value parsed from 'mem=' only restricts memory adding
	 * while booting, so that memory hotplug won't be impacted. Please
	 * refer to the documentation of 'mem=' in kernel-parameters.txt for
	 * more details.
	 */
	if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
			 start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
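/*
 * get_page_bootmem()/put_page_bootmem() tag memmap and usage pages that
 * were allocated from bootmem with a type (stashed in page->freelist)
 * plus an extra reference, so hot-remove can recognize them and release
 * them via free_reserved_page() when the last reference is dropped.
 */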
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other nodes.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
			  const char *reason)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn, min_align)
			|| !IS_ALIGNED(nr_pages, min_align)) {
		WARN(1, "Misaligned __%s_pages start: %#lx end: %#lx\n",
		     reason, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}
	return 0;
}

/*
 * Return page for the valid pfn only if the page is online. All pfn
 * walkers which rely on the fully initialized page->flags and others
 * should use this rather than pfn_valid && pfn_to_page
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	struct dev_pagemap *pgmap;
	struct mem_section *ms;

	if (nr >= NR_MEM_SECTIONS)
		return NULL;

	ms = __nr_to_section(nr);
	if (!online_section(ms))
		return NULL;

	/*
	 * Save some code text when online_section() +
	 * pfn_section_valid() are sufficient.
	 */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
		return NULL;

	if (!pfn_section_valid(ms, pfn))
		return NULL;

	if (!online_device_section(ms))
		return pfn_to_page(pfn);

	/*
	 * Slowpath: when ZONE_DEVICE collides with
	 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
	 * the section may be 'offline' but 'valid'. Only
	 * get_dev_pagemap() can determine sub-section online status.
	 */
	pgmap = get_dev_pagemap(pfn, NULL);
	put_dev_pagemap(pgmap);

	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
	if (pgmap)
		return NULL;

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);
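
/*
 * Typical pfn-walker usage of the above (sketch only):
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		...page is online with fully initialized page->flags...
 *	}
 */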

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		      struct mhp_params *params)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	int err;
	struct vmem_altmap *altmap = params->altmap;

	if (WARN_ON_ONCE(!params->pgprot.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	err = check_pfn_span(pfn, nr_pages, "add");
	if (err)
		return err;

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
		if (err)
			break;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}

/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
					      unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(pfn)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

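/*
 * Shrink the zone span when [start_pfn, end_pfn) is removed from one end
 * of the zone, rescanning for the new first/last pfn that is still online
 * and belongs to this zone and node; the helpers above return 0 when no
 * such pfn remains, in which case the zone is marked empty.
 */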
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone->zone_start_pfn == start_pfn) {
		/*
		 * If the removed range starts at the smallest pfn in the
		 * zone, we need to shrink zone->zone_start_pfn and
		 * zone->spanned_pages. In this case, we find the second
		 * smallest valid mem_section for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn(zone));
		if (pfn) {
			zone->spanned_pages = zone_end_pfn(zone) - pfn;
			zone->zone_start_pfn = pfn;
		} else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	} else if (zone_end_pfn(zone) == end_pfn) {
		/*
		 * If the removed range ends at the biggest pfn in the zone,
		 * we need to shrink zone->spanned_pages. In this case, we
		 * find the second biggest valid mem_section for shrinking
		 * the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
		else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	}
	zone_span_writeunlock(zone);
}

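/*
 * Recompute the node span as the union of the spans of its non-empty
 * zones; if all zones are empty, the node span becomes empty as well.
 */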
static void update_pgdat_span(struct pglist_data *pgdat)
{
	unsigned long node_start_pfn = 0, node_end_pfn = 0;
	struct zone *zone;

	for (zone = pgdat->node_zones;
	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
		unsigned long end_pfn = zone_end_pfn(zone);

		/* No need to lock the zones, they can't change. */
		if (!zone->spanned_pages)
			continue;
		if (!node_end_pfn) {
			node_start_pfn = zone->zone_start_pfn;
			node_end_pfn = end_pfn;
			continue;
		}

		if (end_pfn > node_end_pfn)
			node_end_pfn = end_pfn;
		if (zone->zone_start_pfn < node_start_pfn)
			node_start_pfn = zone->zone_start_pfn;
	}

	pgdat->node_start_pfn = node_start_pfn;
	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}

void __ref remove_pfn_range_from_zone(struct zone *zone,
				      unsigned long start_pfn,
				      unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long pfn, cur_nr_pages, flags;

	/* Poison struct pages because they are now uninitialized again. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();

		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages =
			min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
		page_init_poison(pfn_to_page(pfn),
				 sizeof(struct page) * cur_nr_pages);
	}

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
	 * we will not try to shrink the zones - which is okay as
	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
	 */
	if (zone_idx(zone) == ZONE_DEVICE)
		return;
#endif

	clear_zone_contiguous(zone);

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	update_pgdat_span(pgdat);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	set_zone_contiguous(zone);
}

static void __remove_section(unsigned long pfn, unsigned long nr_pages,
			     unsigned long map_offset,
			     struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
}

/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
		    struct vmem_altmap *altmap)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	unsigned long map_offset = 0;

	map_offset = vmem_altmap_offset(altmap);

	if (check_pfn_span(pfn, nr_pages, "remove"))
		return;

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		__remove_section(pfn, cur_nr_pages, map_offset, altmap);
		map_offset = 0;
	}
}

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void generic_online_page(struct page *page, unsigned int order)
{
	/*
	 * Freeing the page with debug_pagealloc enabled will try to unmap it,
	 * so we should map it first. This is better than introducing a special
	 * case in page freeing fast path.
	 */
	debug_pagealloc_map_pages(page, 1 << order);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(1UL << order);
#endif
}
EXPORT_SYMBOL_GPL(generic_online_page);

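/*
 * Expose a pfn range to the buddy via the registered online_page_callback,
 * in the largest naturally aligned chunks possible, and mark all involved
 * memory sections online.
 */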
static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	/*
	 * Online the pages in MAX_ORDER - 1 aligned chunks. The callback might
	 * decide to not expose all pages to the buddy (e.g., expose them
	 * later). We account all pages as being online and belonging to this
	 * zone ("present").
	 * When using memmap_on_memory, the range might not be aligned to
	 * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
	 * this and the first chunk to online will be pageblock_nr_pages.
	 */
	for (pfn = start_pfn; pfn < end_pfn;) {
		int order = min(MAX_ORDER - 1UL, __ffs(pfn));

		(*online_page_callback)(pfn_to_page(pfn), order);
		pfn += (1UL << order);
	}

	/* mark all involved sections as online */
	online_mem_sections(start_pfn, end_pfn);
}

/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
					     struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
#ifdef CONFIG_HIGHMEM
	if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
#endif
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
					unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
					 unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

static void section_taint_zone_device(unsigned long pfn)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
}

/*
 * Associate the pfn range with the given zone, initializing the memmaps
 * and resizing the pgdat/zone data to span the added pages. After this
 * call, all affected pages are PG_reserved.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				  unsigned long nr_pages,
				  struct vmem_altmap *altmap, int migratetype)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * Subsection population requires care in pfn_to_online_page().
	 * Set the taint to enable the slow path detection of
	 * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
	 * section.
	 */
	if (zone_is_zone_device(zone)) {
		if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn);
		if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn + nr_pages);
	}

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
			  MEMINIT_HOTPLUG, altmap, migratetype);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
						unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
						unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
								nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in a simple case where zones do not
	 * overlap in the given range
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone or two zones overlap in the
	 * given range then we use movable zone only if movable_node is
	 * enabled because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone *zone_for_pfn_range(int online_type, int nid,
				unsigned long start_pfn, unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}

/*
 * This function should only be called by memory_block_{online,offline},
 * and {online,offline}_pages.
 */
void adjust_present_page_count(struct zone *zone, long nr_pages)
{
	unsigned long flags;

	zone->present_pages += nr_pages;
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += nr_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
			      struct zone *zone)
{
	unsigned long end_pfn = pfn + nr_pages;
	int ret;

	ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
	if (ret)
		return ret;

	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections online here as otherwise they will be
	 * left offline.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	return ret;
}

void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = pfn + nr_pages;

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections offline here as otherwise they will be
	 * left online.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	/*
	 * The pages associated with this vmemmap have been offlined, so
	 * we can reset its state here.
	 */
	remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
	kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}

int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone)
{
	unsigned long flags;
	int need_zonelists_rebuild = 0;
	const int nid = zone_to_nid(zone);
	int ret;
	struct memory_notify arg;

	/*
	 * {on,off}lining is constrained to full memory sections (or more
	 * precisely to memory blocks from the user space POV).
	 * memmap_on_memory is an exception because it reserves the initial
	 * part of the physical memory space for vmemmaps. That space is
	 * pageblock aligned.
	 */
	if (WARN_ON_ONCE(!nr_pages ||
			 !IS_ALIGNED(pfn, pageblock_nr_pages) ||
			 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
		return -EINVAL;

	mem_hotplug_begin();

	/* associate pfn range with the zone */
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * Fixup the number of isolated pageblocks before marking the sections
	 * onlining, such that undo_isolate_page_range() works correctly.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	online_pages_range(pfn, nr_pages);
	adjust_present_page_count(zone, nr_pages);

	node_states_set_node(nid, &arg);
	if (need_zonelists_rebuild)
		build_all_zonelists(NULL);
	zone_pcp_update(zone);

	/* Basic onlining is complete, allow allocation of onlined pages. */
	undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);

	/*
	 * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
	 * the tail of the freelist when undoing isolation). Shuffle the whole
	 * zone to make sure the just onlined pages are properly distributed
	 * across the whole freelist - to create an initial shuffle.
	 */
	shuffle_zone(zone);

	init_per_zone_wmark_min();

	kswapd_run(nid);
	kcompactd_run(nid);

	writeback_set_ratelimit();

	memory_notify(MEM_ONLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	remove_pfn_range_from_zone(zone, pfn, nr_pages);
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid)
{
	struct pglist_data *pgdat;

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
		arch_refresh_nodedata(nid, pgdat);
	} else {
		int cpu;
		/*
		 * Reset the nr_zones, order and highest_zoneidx before reuse.
		 * Note that kswapd will init kswapd_highest_zoneidx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_highest_zoneidx = 0;
		for_each_online_cpu(cpu) {
			struct per_cpu_nodestat *p;

			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
			memset(p, 0, sizeof(*p));
		}
	}

	/* we can use NODE_DATA(nid) from here */
	pgdat->node_id = nid;
	pgdat->node_start_pfn = 0;

	/* init node's zones as empty zones, we don't have any present pages. */
	free_area_init_core_hotplug(nid);

	/*
	 * The node we allocated has no zone fallback lists. For avoiding
	 * to access not-initialized zonelist, build here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_managed_pages(pgdat);
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
}


/**
 * try_online_node - online a node if offlined
 * @nid: the node ID
 * @set_node_online: Whether we want to online the node
 *
 * Called by cpu_up() to online a node without onlined memory.
 *
 * Returns:
 * 1 -> a new node has been allocated
 * 0 -> the node is already online
 * -ENOMEM -> the node could not be allocated
 */
static int __try_online_node(int nid, bool set_node_online)
{
	pg_data_t *pgdat;
	int ret = 1;

	if (node_online(nid))
		return 0;

	pgdat = hotadd_new_pgdat(nid);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}

	if (set_node_online) {
		node_set_online(nid);
		ret = register_one_node(nid);
		BUG_ON(ret);
	}
out:
	return ret;
}

/*
 * Users of this function always want to online/register the node
 */
int try_online_node(int nid)
{
	int ret;

	mem_hotplug_begin();
	ret = __try_online_node(nid, true);
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	/* memory range must be block size aligned */
	if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes())) {
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
		       memory_block_size_bytes(), start, size);
		return -EINVAL;
	}

	return 0;
}
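/*
 * For example, on x86-64 memory_block_size_bytes() is typically 128 MiB
 * (it can be larger on big machines), so hotplugged ranges must be
 * 128 MiB aligned there.
 */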

static int online_memory_block(struct memory_block *mem, void *arg)
{
	mem->online_type = mhp_default_online_type;
	return device_online(&mem->dev);
}

bool mhp_supports_memmap_on_memory(unsigned long size)
{
	unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
	unsigned long remaining_size = size - vmemmap_size;

	/*
	 * Besides having arch support and the feature enabled at runtime, we
	 * need a few more assumptions to hold true:
	 *
	 * a) We span a single memory block: memory onlining/offlining happens
	 *    in memory block granularity. We don't want the vmemmap of online
	 *    memory blocks to reside on offline memory blocks. In the future,
	 *    we might want to support variable-sized memory blocks to make the
	 *    feature more versatile.
	 *
	 * b) The vmemmap pages span complete PMDs: We don't want vmemmap code
	 *    to populate memory from the altmap for unrelated parts (i.e.,
	 *    other memory blocks)
	 *
	 * c) The vmemmap pages (and thereby the pages that will be exposed to
	 *    the buddy) have to cover full pageblocks: memory onlining/offlining
	 *    code requires applicable ranges to be page-aligned, for example, to
	 *    set the migratetypes properly.
	 *
	 * TODO: Although we have a check here to make sure that vmemmap pages
	 *       fully populate a PMD, it is not the right place to check for
	 *       this. A much better solution involves improving vmemmap code
	 *       to fallback to base pages when trying to populate vmemmap using
	 *       altmap as an alternative source of memory, and we do not exactly
	 *       populate a single PMD.
	 */
	return memmap_on_memory &&
	       IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
	       size == memory_block_size_bytes() &&
	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
}
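/*
 * Worked example (assuming x86-64 defaults: 4 KiB pages, 64-byte struct
 * page, 2 MiB PMDs and pageblocks): a 128 MiB memory block has 32768
 * pages, so the vmemmap needs 32768 * 64 = 2 MiB, exactly one PMD; the
 * remaining 126 MiB is a multiple of the 2 MiB pageblock size, so such a
 * block qualifies.
 */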

/*
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations (triggered e.g. by sysfs).
 *
 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
 */
int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
	struct vmem_altmap mhp_altmap = {};
	u64 start, size;
	bool new_node = false;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	if (!node_possible(nid)) {
		WARN(1, "node %d was absent from the node_possible_map\n", nid);
		return -EINVAL;
	}

	mem_hotplug_begin();

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_add_node(start, size, nid);

	ret = __try_online_node(nid, false);
	if (ret < 0)
		goto error;
	new_node = ret;

	/*
	 * Self hosted memmap array
	 */
	if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
		if (!mhp_supports_memmap_on_memory(size)) {
			ret = -EINVAL;
			goto error;
		}
		mhp_altmap.free = PHYS_PFN(size);
		mhp_altmap.base_pfn = PHYS_PFN(start);
		params.altmap = &mhp_altmap;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, &params);
	if (ret < 0)
		goto error;

	/* create memory block devices after memory was added */
	ret = create_memory_block_devices(start, size, mhp_altmap.alloc);
	if (ret) {
		arch_remove_memory(nid, start, size, NULL);
		goto error;
	}

	if (new_node) {
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no rollback path
		 * now, so catch it with BUG_ON(), reluctantly.
		 * We online the node here; we can't roll back from here.
		 */
		node_set_online(nid);
		ret = __register_one_node(nid);
		BUG_ON(ret);
	}

	/* link memory sections under this node. */
	link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
			  MEMINIT_HOTPLUG);

	/* create new memmap entry */
	if (!strcmp(res->name, "System RAM"))
		firmware_map_add_hotplug(start, start + size, "System RAM");

	/* device_online() will take the lock when calling online_pages() */
	mem_hotplug_done();

	/*
	 * In case we're allowed to merge the resource, flag it and trigger
	 * merging now that adding succeeded.
	 */
	if (mhp_flags & MHP_MERGE_RESOURCE)
		merge_system_ram_resource(res);

	/* online pages if requested */
	if (mhp_default_online_type != MMOP_OFFLINE)
		walk_memory_blocks(start, size, NULL, online_memory_block);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_node)
		rollback_node_hotadd(nid);
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_remove(start, size);
	mem_hotplug_done();
	return ret;
}

/* requires device_hotplug_lock, see add_memory_resource() */
int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size, "System RAM");
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res, mhp_flags);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}

int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	int rc;

	lock_device_hotplug();
	rc = __add_memory(nid, start, size, mhp_flags);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(add_memory);
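
/*
 * Illustrative caller sketch (not from this file): a driver hot-adding one
 * block-aligned range it discovered, with no special flags:
 *
 *	rc = add_memory(nid, start, memory_block_size_bytes(), MHP_NONE);
 *	if (rc)
 *		pr_err("hot-add failed: %d\n", rc);
 */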

/*
 * Add special, driver-managed memory to the system as system RAM. Such
 * memory is not exposed via the raw firmware-provided memmap as system
 * RAM, instead, it is detected and added by a driver - during cold boot,
 * after a reboot, and after kexec.
 *
 * Reasons why this memory should not be used for the initial memmap of a
 * kexec kernel or for placing kexec images:
 * - The booting kernel is in charge of determining how this memory will be
 *   used (e.g., use persistent memory as system RAM)
 * - Coordination with a hypervisor is required before this memory
 *   can be used (e.g., inaccessible parts).
 *
 * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
 * memory map") are created. Also, the created memory resource is flagged
 * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
 * this memory as well (esp., not place kexec images onto it).
 *
 * The resource_name (visible via /proc/iomem) has to have the format
 * "System RAM ($DRIVER)".
 */
int add_memory_driver_managed(int nid, u64 start, u64 size,
			      const char *resource_name, mhp_t mhp_flags)
{
	struct resource *res;
	int rc;

	if (!resource_name ||
	    strstr(resource_name, "System RAM (") != resource_name ||
	    resource_name[strlen(resource_name) - 1] != ')')
		return -EINVAL;

	lock_device_hotplug();

	res = register_memory_resource(start, size, resource_name);
	if (IS_ERR(res)) {
		rc = PTR_ERR(res);
		goto out_unlock;
	}

	rc = add_memory_resource(nid, res, mhp_flags);
	if (rc < 0)
		release_memory_resource(res);

out_unlock:
	unlock_device_hotplug();
	return rc;
}
EXPORT_SYMBOL_GPL(add_memory_driver_managed);

/*
 * Platforms should define arch_get_mappable_range() that provides
 * maximum possible addressable physical memory range for which the
 * linear mapping could be created. The platform returned address
 * range must adhere to these following semantics.
 *
 * - range.start <= range.end
 * - Range includes both end points [range.start..range.end]
 *
 * There is also a fallback definition provided here, allowing the
 * entire possible physical address range in case any platform does
 * not define arch_get_mappable_range().
 */
struct range __weak arch_get_mappable_range(void)
{
	struct range mhp_range = {
		.start = 0UL,
		.end = -1ULL,
	};
	return mhp_range;
}
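/*
 * Sketch of an arch override (illustrative only; MAX_LINEAR_END is a
 * made-up constant): an architecture whose linear map covers
 * [0, MAX_LINEAR_END) would provide:
 *
 *	struct range arch_get_mappable_range(void)
 *	{
 *		return (struct range) {
 *			.start = 0,
 *			.end = MAX_LINEAR_END - 1,
 *		};
 *	}
 */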

struct range mhp_get_pluggable_range(bool need_mapping)
{
	const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
	struct range mhp_range;

	if (need_mapping) {
		mhp_range = arch_get_mappable_range();
		if (mhp_range.start > max_phys) {
			mhp_range.start = 0;
			mhp_range.end = 0;
		}
		mhp_range.end = min_t(u64, mhp_range.end, max_phys);
	} else {
		mhp_range.start = 0;
		mhp_range.end = max_phys;
	}
	return mhp_range;
}
EXPORT_SYMBOL_GPL(mhp_get_pluggable_range);

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
{
	struct range mhp_range = mhp_get_pluggable_range(need_mapping);
	u64 end = start + size;

	if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end)
		return true;

	pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
		start, end, mhp_range.start, mhp_range.end);
	return false;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Confirm all pages in a range [start, end) belong to the same zone (skipping
 * memory holes). When true, return the zone.
 */
struct zone *test_pages_in_a_zone(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	unsigned long pfn, sec_end_pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
	     pfn < end_pfn;
	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
		/* Make sure the memory section is present first */
		if (!present_section_nr(pfn_to_section_nr(pfn)))
			continue;
		for (; pfn < sec_end_pfn && pfn < end_pfn;
		     pfn += MAX_ORDER_NR_PAGES) {
			i = 0;
			/* This is just a CONFIG_HOLES_IN_ZONE check. */
			while ((i < MAX_ORDER_NR_PAGES) &&
			       !pfn_valid_within(pfn + i))
				i++;
			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
				continue;
			/* Check if we got outside of the zone */
			if (zone && !zone_spans_pfn(zone, pfn + i))
				return NULL;
			page = pfn_to_page(pfn + i);
			if (zone && page_zone(page) != zone)
				return NULL;
			zone = page_zone(page);
		}
	}

	return zone;
}

/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
 * non-lru movable pages and hugepages). Will skip over most unmovable
 * pages (esp., pages that can be skipped when offlining), but bail out on
 * definitely unmovable pages.
 *
 * Returns:
 *	0 in case a movable page is found and movable_pfn was updated.
 *	-ENOENT in case no movable page was found.
 *	-EBUSY in case a definitely unmovable page was found.
 */
static int scan_movable_pages(unsigned long start, unsigned long end,
			      unsigned long *movable_pfn)
{
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn++) {
		struct page *page, *head;
		unsigned long skip;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (PageLRU(page))
			goto found;
		if (__PageMovable(page))
			goto found;

		/*
		 * PageOffline() pages that are not marked __PageMovable() and
		 * have a reference count > 0 (after MEM_GOING_OFFLINE) are
		 * definitely unmovable. If their reference count would be 0,
		 * they could at least be skipped when offlining memory.
		 */
		if (PageOffline(page) && page_count(page))
			return -EBUSY;

		if (!PageHuge(page))
			continue;
		head = compound_head(page);
		/*
		 * This test is racy as we hold no reference or lock. The
		 * hugetlb page could have been free'ed and head is no longer
		 * a hugetlb page before the following check. In such unlikely
		 * cases false positives and negatives are possible. Calling
		 * code must deal with these scenarios.
		 */
		if (HPageMigratable(head))
			goto found;
		skip = compound_nr(head) - (page - head);
		pfn += skip - 1;
	}
	return -ENOENT;
found:
	*movable_pfn = pfn;
	return 0;
}

static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page, *head;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		head = compound_head(page);

		if (PageHuge(page)) {
			pfn = page_to_pfn(head) + compound_nr(head) - 1;
			isolate_huge_page(head, &source);
			continue;
		} else if (PageTransHuge(page))
			pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;

		/*
		 * HWPoison pages have elevated reference counts so the migration would
		 * fail on them. It also doesn't make any sense to migrate them in the
		 * first place. Still try to unmap such a page in case it is still mapped
		 * (e.g. the current hwpoison implementation doesn't unmap KSM pages but
		 * keeps the unmap as the catch-all safety net).
1544 | */ | |
1545 | if (PageHWPoison(page)) { | |
1546 | if (WARN_ON(PageLRU(page))) | |
1547 | isolate_lru_page(page); | |
1548 | if (page_mapped(page)) | |
1549 | try_to_unmap(page, TTU_IGNORE_MLOCK); | |
1550 | continue; | |
1551 | } | |
1552 | ||
1553 | if (!get_page_unless_zero(page)) | |
1554 | continue; | |
1555 | /* | |
1556 | * We can skip free pages. And we can deal with pages on | |
1557 | * LRU and non-lru movable pages. | |
1558 | */ | |
1559 | if (PageLRU(page)) | |
1560 | ret = isolate_lru_page(page); | |
1561 | else | |
1562 | ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); | |
1563 | if (!ret) { /* Success */ | |
1564 | list_add_tail(&page->lru, &source); | |
1565 | if (!__PageMovable(page)) | |
1566 | inc_node_page_state(page, NR_ISOLATED_ANON + | |
1567 | page_is_file_lru(page)); | |
1568 | ||
1569 | } else { | |
1570 | pr_warn("failed to isolate pfn %lx\n", pfn); | |
1571 | dump_page(page, "isolation failed"); | |
1572 | } | |
1573 | put_page(page); | |
1574 | } | |
1575 | if (!list_empty(&source)) { | |
1576 | nodemask_t nmask = node_states[N_MEMORY]; | |
1577 | struct migration_target_control mtc = { | |
1578 | .nmask = &nmask, | |
1579 | .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, | |
1580 | }; | |
1581 | ||
1582 | /* | |
1583 | * We have checked that migration range is on a single zone so | |
1584 | * we can use the nid of the first page to all the others. | |
1585 | */ | |
1586 | mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru)); | |
1587 | ||
1588 | /* | |
1589 | * try to allocate from a different node but reuse this node | |
1590 | * if there are no other online nodes to be used (e.g. we are | |
1591 | * offlining a part of the only existing node) | |
1592 | */ | |
1593 | node_clear(mtc.nid, nmask); | |
1594 | if (nodes_empty(nmask)) | |
1595 | node_set(mtc.nid, nmask); | |
1596 | ret = migrate_pages(&source, alloc_migration_target, NULL, | |
1597 | (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG); | |
1598 | if (ret) { | |
1599 | list_for_each_entry(page, &source, lru) { | |
1600 | pr_warn("migrating pfn %lx failed ret:%d ", | |
1601 | page_to_pfn(page), ret); | |
1602 | dump_page(page, "migration failure"); | |
1603 | } | |
1604 | putback_movable_pages(&source); | |
1605 | } | |
1606 | } | |
1607 | ||
1608 | return ret; | |
1609 | } | |
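
/*
 * Editor's sketch (not part of the original file): the nodemask dance above
 * generalizes to "prefer any node but X, fall back to X if it is the only
 * one with memory". A minimal standalone illustration; the helper name
 * pick_migration_nid() is hypothetical:
 *
 *	static int pick_migration_nid(int src_nid)
 *	{
 *		nodemask_t nmask = node_states[N_MEMORY];
 *
 *		node_clear(src_nid, nmask);
 *		if (nodes_empty(nmask))
 *			node_set(src_nid, nmask);
 *		return first_node(nmask);
 *	}
 */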

static int __init cmdline_parse_movable_node(char *p)
{
	movable_node_enabled = true;
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);
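
/*
 * Editor's note (not part of the original file): "movable_node" is a plain
 * boot flag with no argument, passed on the kernel command line, e.g.:
 *
 *	linux ... movable_node
 *
 * With it set, memory that firmware reports as hotpluggable is kept for
 * movable allocations only (ZONE_MOVABLE), so it can be offlined and
 * removed later; see Documentation/admin-guide/kernel-parameters.txt.
 */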

/* check which state of node_states will be changed when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	/*
	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is within the range
	 * [0..ZONE_NORMAL], and it is the last present memory there,
	 * the zones in that range will become empty after the offlining,
	 * thus we can determine that we need to clear the node from
	 * node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);

#ifdef CONFIG_HIGHMEM
	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which
	 * have normal memory or high memory.
	 * Here we add the present_pages belonging to ZONE_HIGHMEM.
	 * If the zone is within the range [0..ZONE_HIGHMEM], and
	 * the zones in that range become empty after the offlining,
	 * we need to clear the node for N_HIGH_MEMORY.
	 */
	present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
#endif

	/*
	 * We have accounted the pages from [0..ZONE_NORMAL], and
	 * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM
	 * as well.
	 * Here we count the possible pages from ZONE_MOVABLE.
	 * If, after having accounted all the pages, we see that the
	 * nr_pages to be offlined is over or equal to the accounted pages,
	 * we know that the node will become empty, and so, we can clear
	 * it for N_MEMORY as well.
	 */
	present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;

	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
}
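
/*
 * Editor's worked example (not part of the original file), to make the
 * accounting above concrete. Assume a node with 1024 present pages in
 * ZONE_NORMAL, 512 in ZONE_MOVABLE, and no highmem. Offlining 1024 pages
 * of ZONE_NORMAL gives nr_pages >= 1024, so status_change_nid_normal is
 * set (N_NORMAL_MEMORY will be cleared for the node), but 1024 < 1536
 * once ZONE_MOVABLE is added in, so status_change_nid stays NUMA_NO_NODE:
 * the node still has movable memory and remains in node_states[N_MEMORY].
 */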

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_clear_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_clear_state(node, N_MEMORY);
}

static int count_system_ram_pages_cb(unsigned long start_pfn,
				     unsigned long nr_pages, void *data)
{
	unsigned long *nr_system_ram_pages = data;

	*nr_system_ram_pages += nr_pages;
	return 0;
}

int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn, system_ram_pages = 0;
	unsigned long flags;
	struct zone *zone;
	struct memory_notify arg;
	int ret, node;
	char *reason;

	/*
	 * {on,off}lining is constrained to full memory sections (or more
	 * precisely to memory blocks from the user space POV).
	 * memmap_on_memory is an exception because it reserves the initial
	 * part of the physical memory space for vmemmaps. That space is
	 * pageblock aligned.
	 */
	if (WARN_ON_ONCE(!nr_pages ||
			 !IS_ALIGNED(start_pfn, pageblock_nr_pages) ||
			 !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
		return -EINVAL;

	mem_hotplug_begin();

	/*
	 * Don't allow offlining of memory blocks that contain holes.
	 * Consequently, memory blocks with holes can never get onlined
	 * via the hotplug path - online_pages() - as hotplugged memory has
	 * no holes. This way, we e.g., don't have to worry about marking
	 * memory holes PG_reserved, don't need pfn_valid() checks, and can
	 * avoid using walk_system_ram_range() later.
	 */
	walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
			      count_system_ram_pages_cb);
	if (system_ram_pages != nr_pages) {
		ret = -EINVAL;
		reason = "memory holes";
		goto failed_removal;
	}

	/*
	 * We only support offlining of ranges that lie within a single
	 * zone; this assumption makes hotplug much easier and the code
	 * more readable.
	 */
	zone = test_pages_in_a_zone(start_pfn, end_pfn);
	if (!zone) {
		ret = -EINVAL;
		reason = "multizone range";
		goto failed_removal;
	}
	node = zone_to_nid(zone);

	/*
	 * Disable pcplists so that page isolation cannot race with freeing
	 * in a way that pages from the isolated pageblock are left on
	 * pcplists.
	 */
	zone_pcp_disable(zone);
	lru_cache_disable();

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE,
				       MEMORY_OFFLINE | REPORT_FAILURE);
	if (ret) {
		reason = "failure to isolate range";
		goto failed_removal_pcplists_disabled;
	}

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		reason = "notifier failure";
		goto failed_removal_isolated;
	}

	do {
		pfn = start_pfn;
		do {
			if (signal_pending(current)) {
				ret = -EINTR;
				reason = "signal backoff";
				goto failed_removal_isolated;
			}

			cond_resched();

			ret = scan_movable_pages(pfn, end_pfn, &pfn);
			if (!ret) {
				/*
				 * TODO: fatal migration failures should bail
				 * out
				 */
				do_migrate_range(pfn, end_pfn);
			}
		} while (!ret);

		if (ret != -ENOENT) {
			reason = "unmovable page";
			goto failed_removal_isolated;
		}

		/*
		 * Dissolve free hugepages in the memory block before actually
		 * offlining, in order to keep hugetlbfs's object counting
		 * consistent.
		 */
		ret = dissolve_free_huge_pages(start_pfn, end_pfn);
		if (ret) {
			reason = "failure to dissolve huge pages";
			goto failed_removal_isolated;
		}

		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);

	} while (ret);

	/* Mark all sections offline and remove free pages from the buddy. */
	__offline_isolated_pages(start_pfn, end_pfn);
	pr_debug("Offlined Pages %lu\n", nr_pages);

	/*
	 * The memory sections are marked offline, and the pageblock flags
	 * are effectively stale; nobody should be touching them. Fix up the
	 * number of isolated pageblocks; memory onlining will properly
	 * revert this.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);

	lru_cache_enable();
	zone_pcp_enable(zone);

	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
	adjust_present_page_count(zone, -nr_pages);

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		build_all_zonelists(NULL);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0) {
		kswapd_stop(node);
		kcompactd_stop(node);
	}

	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
	mem_hotplug_done();
	return 0;

failed_removal_isolated:
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
failed_removal_pcplists_disabled:
	lru_cache_enable();
	zone_pcp_enable(zone);
failed_removal:
	pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
		 (unsigned long long) start_pfn << PAGE_SHIFT,
		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
		 reason);
	/* pushback to free area */
	mem_hotplug_done();
	return ret;
}
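
/*
 * Editor's usage sketch (not part of the original file): user space
 * normally reaches offline_pages() through the memory block devices,
 * one block at a time, e.g.:
 *
 *	# cat /sys/devices/system/memory/block_size_bytes
 *	8000000
 *	# echo offline > /sys/devices/system/memory/memory32/state
 *
 * which passes a section-aligned [start_pfn, start_pfn + nr_pages) range
 * covering exactly that block. The block size shown is an example value;
 * it varies by architecture and configuration.
 */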

static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = beginpa + memory_block_size_bytes() - 1;
		pr_warn("removing memory fails, because memory [%pa-%pa] is still online\n",
			&beginpa, &endpa);

		return -EBUSY;
	}
	return 0;
}

static int get_nr_vmemmap_pages_cb(struct memory_block *mem, void *arg)
{
	/*
	 * Return the number of vmemmap pages in this block. A non-zero
	 * return value stops the walk; zero continues with the next block.
	 */
	return mem->nr_vmemmap_pages;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * A CPU on this node hasn't been removed yet, so we
			 * can't offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
{
	int nid = *(int *)arg;

	/*
	 * If a memory block belongs to multiple nodes, the stored nid is not
	 * reliable. However, such blocks are always online (i.e., they cannot
	 * get offlined) and, therefore, are still spanned by the node.
	 */
	return mem->nid == nid ? -EEXIST : 0;
}

/**
 * try_offline_node - offline a node, if possible
 * @nid: the node ID
 *
 * Offline a node if all memory sections and CPUs of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int rc;

	/*
	 * If the node still spans pages (especially ZONE_DEVICE), don't
	 * offline it. A node spans memory after move_pfn_range_to_zone(),
	 * e.g., after the memory block was onlined.
	 */
	if (pgdat->node_spanned_pages)
		return;

	/*
	 * Especially offline memory blocks might not be spanned by the
	 * node. They will get spanned by the node once they get onlined.
	 * However, they link to the node in sysfs and can get onlined later.
	 */
	rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
	if (rc)
		return;

	if (check_cpu_on_node(pgdat))
		return;

	/*
	 * All memory/CPUs of this node have been removed; we can offline
	 * the node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);
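
/*
 * Editor's sketch (not part of the original file): the locking contract
 * from the NOTE above, as a caller would honor it:
 *
 *	lock_device_hotplug();
 *	try_offline_node(nid);
 *	unlock_device_hotplug();
 *
 * Of the removal paths below, remove_memory() takes the lock itself,
 * while __remove_memory() requires its caller to hold it.
 */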

static int __ref try_remove_memory(int nid, u64 start, u64 size)
{
	int rc = 0;
	struct vmem_altmap mhp_altmap = {};
	struct vmem_altmap *altmap = NULL;
	unsigned long nr_vmemmap_pages;

	BUG_ON(check_hotplug_memory_range(start, size));

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and return error
	 * if this is not the case.
	 */
	rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
	if (rc)
		return rc;

	/*
	 * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in
	 * the same granularity it was added - a single memory block.
	 */
	if (memmap_on_memory) {
		nr_vmemmap_pages = walk_memory_blocks(start, size, NULL,
						      get_nr_vmemmap_pages_cb);
		if (nr_vmemmap_pages) {
			if (size != memory_block_size_bytes()) {
				pr_warn("Refuse to remove %#llx - %#llx, wrong granularity\n",
					start, start + size);
				return -EINVAL;
			}

			/*
			 * Let remove_pmd_table->free_hugepage_table do the
			 * right thing if we used vmem_altmap when hot-adding
			 * the range.
			 */
			mhp_altmap.alloc = nr_vmemmap_pages;
			altmap = &mhp_altmap;
		}
	}

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	/*
	 * Memory block device removal under the device_hotplug_lock is
	 * a barrier against racing online attempts.
	 */
	remove_memory_block_devices(start, size);

	mem_hotplug_begin();

	arch_remove_memory(nid, start, size, altmap);

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
		memblock_free(start, size);
		memblock_remove(start, size);
	}

	release_mem_region_adjustable(start, size);

	try_offline_node(nid);

	mem_hotplug_done();
	return 0;
}
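
/*
 * Editor's worked example (not part of the original file): with a 128 MiB
 * memory block size, a range added with MHP_MEMMAP_ON_MEMORY must also be
 * removed as a single 128 MiB block. Calling try_remove_memory() with a
 * 256 MiB size over two such blocks fails with -EINVAL ("wrong
 * granularity"), while two 128 MiB calls succeed, each passing the
 * per-block vmemmap page count via the altmap so that the backing pages
 * are freed correctly.
 */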

/**
 * __remove_memory - remove memory that was already offlined
 * @nid: the node ID
 * @start: physical address of the region to remove
 * @size: size of the region to remove
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __remove_memory(int nid, u64 start, u64 size)
{
	/*
	 * Trigger BUG() if some memory is not offlined prior to calling
	 * this function.
	 */
	if (try_remove_memory(nid, start, size))
		BUG();
}

/*
 * Remove memory if every memory block is offline; otherwise, return -EBUSY
 * if some memory is not offline.
 */
int remove_memory(int nid, u64 start, u64 size)
{
	int rc;

	lock_device_hotplug();
	rc = try_remove_memory(nid, start, size);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(remove_memory);
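
/*
 * Editor's usage sketch (not part of the original file): a driver that
 * previously hot-added a range (e.g. via add_memory()) and has offlined
 * it can tear it down like this; my_nid/my_start/my_size are hypothetical
 * placeholders:
 *
 *	rc = remove_memory(my_nid, my_start, my_size);
 *	if (rc == -EBUSY)
 *		pr_warn("range still online, offline it first\n");
 */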

static int try_offline_memory_block(struct memory_block *mem, void *arg)
{
	uint8_t online_type = MMOP_ONLINE_KERNEL;
	uint8_t **online_types = arg;
	struct page *page;
	int rc;

	/*
	 * Sense the online_type via the zone of the memory block. Offlining
	 * with multiple zones within one memory block will be rejected
	 * by the offlining code ... so we don't care about that.
	 */
	page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
	if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
		online_type = MMOP_ONLINE_MOVABLE;

	rc = device_offline(&mem->dev);
	/*
	 * Default is MMOP_OFFLINE - change it only if offlining succeeded,
	 * so try_reonline_memory_block() can do the right thing.
	 */
	if (!rc)
		**online_types = online_type;

	(*online_types)++;
	/* Ignore if already offline. */
	return rc < 0 ? rc : 0;
}

static int try_reonline_memory_block(struct memory_block *mem, void *arg)
{
	uint8_t **online_types = arg;
	int rc;

	if (**online_types != MMOP_OFFLINE) {
		mem->online_type = **online_types;
		rc = device_online(&mem->dev);
		if (rc < 0)
			pr_warn("%s: Failed to re-online memory: %d\n",
				__func__, rc);
	}

	/* Continue processing all remaining memory blocks. */
	(*online_types)++;
	return 0;
}

/*
 * Try to offline and remove memory. Might take a long time to finish in case
 * memory is still in use. Primarily useful for memory devices that logically
 * unplugged all memory (so it's no longer in use) and want to offline and
 * remove that memory.
 */
int offline_and_remove_memory(int nid, u64 start, u64 size)
{
	const unsigned long mb_count = size / memory_block_size_bytes();
	uint8_t *online_types, *tmp;
	int rc;

	if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes()) || !size)
		return -EINVAL;

	/*
	 * We'll remember the old online type of each memory block, so we can
	 * try to revert whatever we did when offlining one memory block fails
	 * after offlining some others succeeded.
	 */
	online_types = kmalloc_array(mb_count, sizeof(*online_types),
				     GFP_KERNEL);
	if (!online_types)
		return -ENOMEM;
	/*
	 * Initialize all states to MMOP_OFFLINE, so when we abort processing
	 * in try_offline_memory_block(), we'll skip all unprocessed blocks in
	 * try_reonline_memory_block().
	 */
	memset(online_types, MMOP_OFFLINE, mb_count);

	lock_device_hotplug();

	tmp = online_types;
	rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);

	/*
	 * If we succeeded in offlining all memory, remove it. This cannot
	 * fail as the memory cannot get onlined again in the meantime.
	 */
	if (!rc) {
		rc = try_remove_memory(nid, start, size);
		if (rc)
			pr_err("%s: Failed to remove memory: %d\n", __func__, rc);
	}

	/*
	 * Roll back what we did. While memory onlining might theoretically
	 * fail (nacked by a notifier), it rarely ever happens.
	 */
	if (rc) {
		tmp = online_types;
		walk_memory_blocks(start, size, &tmp,
				   try_reonline_memory_block);
	}
	unlock_device_hotplug();

	kfree(online_types);
	return rc;
}
EXPORT_SYMBOL_GPL(offline_and_remove_memory);
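
/*
 * Editor's usage sketch (not part of the original file): a paravirtualized
 * memory device (virtio-mem is one in-tree user) that has logically
 * unplugged a block can hand it back like this:
 *
 *	rc = offline_and_remove_memory(nid, addr,
 *				       memory_block_size_bytes());
 *	if (rc)
 *		pr_warn("block at %#llx still in use (%d)\n", addr, rc);
 *
 * On failure, blocks that were already offlined have been re-onlined with
 * their previous online type, so the caller can simply retry later.
 */
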
#endif /* CONFIG_MEMORY_HOTREMOVE */