// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"
/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page, unsigned int order);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
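
/*
 * Readers of memory hotplug state take mem_hotplug_lock for reading via
 * the helpers below; the hotplug paths themselves take it for writing
 * via mem_hotplug_begin()/mem_hotplug_done().
 */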
void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}
bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);
void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

u64 max_mem_size = U64_MAX;
/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	unsigned long flags =  IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	char *resource_name = "System RAM";

	if (start + size > max_mem_size)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
				start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}
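
/*
 * get_page_bootmem()/put_page_bootmem() tag pages that back
 * bootmem-allocated memmap or section-usage data with a type stored in
 * page->freelist plus an elevated refcount, so such pages can be
 * identified and freed again when the section is hot-removed.
 */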
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes, we check that this pfn does not already
		 * reside on some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
		const char *reason)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn, min_align)
			|| !IS_ALIGNED(nr_pages, min_align)) {
		WARN(1, "Misaligned __%s_pages start: %#lx end: %#lx\n",
				reason, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}
	return 0;
}
/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_restrictions *restrictions)
{
	int err;
	unsigned long nr, start_sec, end_sec;
	struct vmem_altmap *altmap = restrictions->altmap;

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	err = check_pfn_span(pfn, nr_pages, "add");
	if (err)
		return err;

	start_sec = pfn_to_section_nr(pfn);
	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	for (nr = start_sec; nr <= end_sec; nr++) {
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		err = sparse_add_section(nid, pfn, pfns, altmap);
		if (err)
			break;
		pfn += pfns;
		nr_pages -= pfns;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_valid(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}
/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_valid(pfn)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}
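
/*
 * Shrink the zone's spanned range after the pfns in [start_pfn, end_pfn)
 * have been removed from it, re-deriving zone_start_pfn/spanned_pages
 * from the remaining valid sections and clearing the span entirely if
 * only holes remain.
 */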
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, find the second biggest valid mem_section for
		 * shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is not the biggest or smallest mem_section in the
	 * zone, it only creates a hole in the zone. So in this case, we need
	 * not change the zone. But perhaps the zone now contains only holes,
	 * so check whether any valid section is left.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_valid(pfn)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* Skip range to be removed */
		if (pfn >= start_pfn && pfn < end_pfn)
			continue;

		/* If we find valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}
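
/*
 * Same as shrink_zone_span(), but operating on the node's
 * node_start_pfn/node_spanned_pages instead of a single zone's span.
 */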
static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we need
		 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * for shrinking the pgdat.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we need
		 * to shrink pgdat->node_spanned_pages.
		 * In this case, find the second biggest valid mem_section for
		 * shrinking the pgdat.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is not the biggest or smallest mem_section in the
	 * pgdat, it only creates a hole in the pgdat. So in this case, we
	 * need not change the pgdat.
	 * But perhaps the pgdat now contains only holes, so check whether
	 * any valid section is left.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_valid(pfn)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* Skip range to be removed */
		if (pfn >= start_pfn && pfn < end_pfn)
			continue;

		/* If we find valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}
static void __remove_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long flags;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}
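
/*
 * Remove one range of pages within a section: shrink the zone/node spans
 * and tear down the sparse memmap for those pages.
 */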
static void __remove_section(struct zone *zone, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	__remove_zone(zone, pfn, nr_pages);
	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
}
/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(struct zone *zone, unsigned long pfn,
		    unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long map_offset = 0;
	unsigned long nr, start_sec, end_sec;

	if (altmap)
		map_offset = vmem_altmap_offset(altmap);

	clear_zone_contiguous(zone);

	if (check_pfn_span(pfn, nr_pages, "remove"))
		return;

	start_sec = pfn_to_section_nr(pfn);
	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	for (nr = start_sec; nr <= end_sec; nr++) {
		unsigned long pfns;

		cond_resched();
		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		__remove_section(zone, pfn, pfns, map_offset, altmap);
		pfn += pfns;
		nr_pages -= pfns;
		map_offset = 0;
	}

	set_zone_contiguous(zone);
}
int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);
void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);
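
/*
 * Default online_page_callback: hand the freshly onlined pages to the
 * buddy allocator and update the global memory accounting.
 */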
static void generic_online_page(struct page *page, unsigned int order)
{
	kernel_map_pages(page, 1 << order, 1);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(1UL << order);
#endif
}
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;
	int order;

	/*
	 * Online the pages. The callback might decide to keep some pages
	 * PG_reserved (to add them to the buddy later), but we still account
	 * them as being online/belonging to this zone ("present").
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) {
		order = min(MAX_ORDER - 1, get_order(PFN_PHYS(end_pfn - pfn)));
		/* __free_pages_core() wants pfns to be aligned to the order */
		if (WARN_ON_ONCE(!IS_ALIGNED(pfn, 1ul << order)))
			order = 0;
		(*online_page_callback)(pfn_to_page(pfn), order);
	}

	/* mark all involved sections as online */
	online_mem_sections(start_pfn, end_pfn);

	*(unsigned long *)arg += nr_pages;
	return 0;
}
/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
#ifdef CONFIG_HIGHMEM
	if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
#endif
}
static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_set_state(node, N_MEMORY);
}
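
/*
 * Grow the zone span so that it covers [start_pfn, start_pfn + nr_pages).
 * Called under zone_span_writelock() from move_pfn_range_to_zone().
 */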
static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}
static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}
/*
 * Associate the pfn range with the given zone, initializing the memmaps
 * and resizing the pgdat/zone data to span the added pages. After this
 * call, all affected pages are PG_reserved.
 */
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
			MEMMAP_HOTPLUG, altmap);

	set_zone_contiguous(zone);
}
/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}
static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in a simple case where zones do not
	 * overlap in the given range
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone or two zones overlap in the
	 * given range then we use movable zone only if movable_node is
	 * enabled because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}
struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}
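
/*
 * Online a previously added pfn range: pick a zone, grow it over the
 * range, hand the pages to the buddy allocator and update zonelists,
 * watermarks and node states. Called through device_online() with the
 * device hotplug lock held.
 */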
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;
	struct memory_block *mem;

	mem_hotplug_begin();

	/*
	 * We can't use pfn_to_nid() because nid might be stored in struct page
	 * which is not yet initialized. Instead, we find nid from memory block.
	 */
	mem = find_memory_block(__pfn_to_section(pfn));
	nid = mem->nid;
	put_device(&mem->dev);

	/* associate pfn range with the zone */
	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		goto failed_addition;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	shuffle_zone(zone);

	node_states_set_node(nid, &arg);
	if (need_zonelists_rebuild)
		build_all_zonelists(NULL);
	else
		zone_pcp_update(zone);

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(nid);
		kcompactd_run(nid);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
		arch_refresh_nodedata(nid, pgdat);
	} else {
		int cpu;
		/*
		 * Reset the nr_zones, order and classzone_idx before reuse.
		 * Note that kswapd will init kswapd_classzone_idx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = 0;
		for_each_online_cpu(cpu) {
			struct per_cpu_nodestat *p;

			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
			memset(p, 0, sizeof(*p));
		}
	}

	/* we can use NODE_DATA(nid) from here */

	pgdat->node_id = nid;
	pgdat->node_start_pfn = start_pfn;

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_core_hotplug(nid);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing an uninitialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_managed_pages(pgdat);
	reset_node_present_pages(pgdat);

	return pgdat;
}
static void rollback_node_hotadd(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
}
/**
 * try_online_node - online a node if offlined
 * @nid: the node ID
 * @start: start addr of the node
 * @set_node_online: Whether we want to online the node
 * called by cpu_up() to online a node without onlined memory.
 *
 * Returns:
 * 1 -> a new node has been allocated
 * 0 -> the node is already online
 * -ENOMEM -> the node could not be allocated
 */
static int __try_online_node(int nid, u64 start, bool set_node_online)
{
	pg_data_t *pgdat;
	int ret = 1;

	if (node_online(nid))
		return 0;

	pgdat = hotadd_new_pgdat(nid, start);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}

	if (set_node_online) {
		node_set_online(nid);
		ret = register_one_node(nid);
		BUG_ON(ret);
	}
out:
	return ret;
}

/*
 * Users of this function always want to online/register the node
 */
int try_online_node(int nid)
{
	int ret;

	mem_hotplug_begin();
	ret = __try_online_node(nid, 0, true);
	mem_hotplug_done();
	return ret;
}
static int check_hotplug_memory_range(u64 start, u64 size)
{
	/* memory range must be block size aligned */
	if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes())) {
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
		       memory_block_size_bytes(), start, size);
		return -EINVAL;
	}

	return 0;
}
static int online_memory_block(struct memory_block *mem, void *arg)
{
	return device_online(&mem->dev);
}
/*
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations (triggered e.g. by sysfs).
 *
 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
 */
int __ref add_memory_resource(int nid, struct resource *res)
{
	struct mhp_restrictions restrictions = {};
	u64 start, size;
	bool new_node = false;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	mem_hotplug_begin();

	/*
	 * Add new range to memblock so that when hotadd_new_pgdat() is called
	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
	 * this new range and calculate total pages correctly. The range will
	 * be removed at hot-remove time.
	 */
	memblock_add_node(start, size, nid);

	ret = __try_online_node(nid, start, false);
	if (ret < 0)
		goto error;
	new_node = ret;

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, &restrictions);
	if (ret < 0)
		goto error;

	/* create memory block devices after memory was added */
	ret = create_memory_block_devices(start, size);
	if (ret) {
		arch_remove_memory(nid, start, size, NULL);
		goto error;
	}

	if (new_node) {
		/* If sysfs file of new node can't be created, cpu on the node
		 * can't be hot-added. There is no rollback way now.
		 * So, check by BUG_ON() to catch it reluctantly..
		 * We online node here. We can't roll back from here.
		 */
		node_set_online(nid);
		ret = __register_one_node(nid);
		BUG_ON(ret);
	}

	/* link memory sections under this node.*/
	ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
	BUG_ON(ret);

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	/* device_online() will take the lock when calling online_pages() */
	mem_hotplug_done();

	/* online pages if requested */
	if (memhp_auto_online)
		walk_memory_blocks(start, size, NULL, online_memory_block);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_node)
		rollback_node_hotadd(nid);
	memblock_remove(start, size);
	mem_hotplug_done();
	return ret;
}
/* requires device_hotplug_lock, see add_memory_resource() */
int __ref __add_memory(int nid, u64 start, u64 size)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}
int add_memory(int nid, u64 start, u64 size)
{
	int rc;

	lock_device_hotplug();
	rc = __add_memory(nid, start, size);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(add_memory);
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock
 * will be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}
/* Return the pfn of the start of the next active pageblock after a given pfn */
static unsigned long next_active_pageblock(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(pfn & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return pfn + (1 << order);
	}

	return pfn + pageblock_nr_pages;
}
static bool is_pageblock_removable_nolock(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	struct zone *zone;

	/*
	 * We have to be careful here because we are iterating over memory
	 * sections which are not zone aware so we might end up outside of
	 * the zone but still within the section.
	 * We have to take care about the node as well. If the node is offline
	 * its NODE_DATA will be NULL - see page_zone.
	 */
	if (!node_online(page_to_nid(page)))
		return false;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	if (!zone_spans_pfn(zone, pfn))
		return false;

	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
}
/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long end_pfn, pfn;

	end_pfn = min(start_pfn + nr_pages,
			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));

	/* Check the starting page of each pageblock within the range */
	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
		if (!is_pageblock_removable_nolock(pfn))
			return false;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return true;
}
/*
 * Confirm all pages in a range [start, end) belong to the same zone.
 * When true, return its valid [start, end).
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned long *valid_start, unsigned long *valid_end)
{
	unsigned long pfn, sec_end_pfn;
	unsigned long start, end;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
	     pfn < end_pfn;
	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
		/* Make sure the memory section is present first */
		if (!present_section_nr(pfn_to_section_nr(pfn)))
			continue;
		for (; pfn < sec_end_pfn && pfn < end_pfn;
		     pfn += MAX_ORDER_NR_PAGES) {
			i = 0;
			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
			while ((i < MAX_ORDER_NR_PAGES) &&
				!pfn_valid_within(pfn + i))
				i++;
			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
				continue;
			/* Check if we got outside of the zone */
			if (zone && !zone_spans_pfn(zone, pfn + i))
				return 0;
			page = pfn_to_page(pfn + i);
			if (zone && page_zone(page) != zone)
				return 0;
			if (!zone)
				start = pfn + i;
			zone = page_zone(page);
			end = pfn + MAX_ORDER_NR_PAGES;
		}
	}

	if (zone) {
		*valid_start = start;
		*valid_end = min(end, end_pfn);
		return 1;
	}

	return 0;
}
/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
 * non-lru movable pages and hugepages). We scan pfn because it's much
 * easier than scanning over linked list. This function returns the pfn
 * of the first found movable page if it's found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn++) {
		struct page *page, *head;
		unsigned long skip;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (PageLRU(page))
			return pfn;
		if (__PageMovable(page))
			return pfn;

		if (!PageHuge(page))
			continue;
		head = compound_head(page);
		if (page_huge_active(head))
			return pfn;
		skip = compound_nr(head) - (page - head);
		pfn += skip - 1;
	}
	return 0;
}
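
/* Migration target allocation callback for do_migrate_range(). */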
static struct page *new_node_page(struct page *page, unsigned long private)
{
	int nid = page_to_nid(page);
	nodemask_t nmask = node_states[N_MEMORY];

	/*
	 * try to allocate from a different node but reuse this node if there
	 * are no other online nodes to be used (e.g. we are offlining a part
	 * of the only existing node)
	 */
	node_clear(nid, nmask);
	if (nodes_empty(nmask))
		node_set(nid, nmask);

	return new_page_nodemask(page, nid, &nmask);
}
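
/*
 * Migrate all movable pages in [start_pfn, end_pfn) off the range that is
 * about to be offlined; anything that cannot be isolated is reported.
 */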
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		if (PageHuge(page)) {
			struct page *head = compound_head(page);
			pfn = page_to_pfn(head) + compound_nr(head) - 1;
			isolate_huge_page(head, &source);
			continue;
		} else if (PageTransHuge(page))
			pfn = page_to_pfn(compound_head(page))
				+ hpage_nr_pages(page) - 1;

		/*
		 * HWPoison pages have elevated reference counts so the migration would
		 * fail on them. It also doesn't make any sense to migrate them in the
		 * first place. Still try to unmap such a page in case it is still mapped
		 * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
		 * the unmap as the catch all safety net).
		 */
		if (PageHWPoison(page)) {
			if (WARN_ON(PageLRU(page)))
				isolate_lru_page(page);
			if (page_mapped(page))
				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
			continue;
		}

		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can deal with pages on
		 * LRU and non-lru movable pages.
		 */
		if (PageLRU(page))
			ret = isolate_lru_page(page);
		else
			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
		if (!ret) { /* Success */
			list_add_tail(&page->lru, &source);
			if (!__PageMovable(page))
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_cache(page));

		} else {
			pr_warn("failed to isolate pfn %lx\n", pfn);
			dump_page(page, "isolation failed");
		}
		put_page(page);
	}
	if (!list_empty(&source)) {
		/* Allocate a new page from the nearest neighbor node */
		ret = migrate_pages(&source, new_node_page, NULL, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret) {
			list_for_each_entry(page, &source, lru) {
				pr_warn("migrating pfn %lx failed ret:%d ",
				       page_to_pfn(page), ret);
				dump_page(page, "migration failure");
			}
			putback_movable_pages(&source);
		}
	}

	return ret;
}
/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	unsigned long *offlined_pages = (unsigned long *)data;

	*offlined_pages += __offline_isolated_pages(start, start + nr_pages);
	return 0;
}

/*
 * Check all pages in range, recorded as memory resource, are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
}
static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	movable_node_enabled = true;
#else
	pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);
/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	/*
	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offline is within the range
	 * [0..ZONE_NORMAL], and it is the last present memory there,
	 * the zones in that range will become empty after the offlining,
	 * thus we can determine that we need to clear the node from
	 * node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);

#ifdef CONFIG_HIGHMEM
	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which
	 * have normal memory or high memory.
	 * Here we add the present_pages belonging to ZONE_HIGHMEM.
	 * If the zone is within the range of [0..ZONE_HIGHMEM), and
	 * we determine that the zones in that range become empty,
	 * we need to clear the node for N_HIGH_MEMORY.
	 */
	present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
#endif

	/*
	 * We have accounted the pages from [0..ZONE_NORMAL), and
	 * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM
	 * as well.
	 * Here we count the possible pages from ZONE_MOVABLE.
	 * If after having accounted all the pages, we see that the nr_pages
	 * to be offlined is over or equal to the accounted pages,
	 * we know that the node will become empty, and so, we can clear
	 * it for N_MEMORY as well.
	 */
	present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;

	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_clear_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_clear_state(node, N_MEMORY);
}
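
/*
 * Core of memory offlining: isolate the range, migrate everything
 * movable away, dissolve huge pages, then pull the pages out of the
 * buddy allocator and update zone/node accounting.
 */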
static int __ref __offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn)
{
	unsigned long pfn, nr_pages;
	unsigned long offlined_pages = 0;
	int ret, node, nr_isolate_pageblock;
	unsigned long flags;
	unsigned long valid_start, valid_end;
	struct zone *zone;
	struct memory_notify arg;
	char *reason;

	mem_hotplug_begin();

	/* This makes hotplug much easier...and readable.
	   We assume this for now. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
				  &valid_end)) {
		ret = -EINVAL;
		reason = "multizone range";
		goto failed_removal;
	}

	zone = page_zone(pfn_to_page(valid_start));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE,
				       SKIP_HWPOISON | REPORT_FAILURE);
	if (ret < 0) {
		reason = "failure to isolate range";
		goto failed_removal;
	}
	nr_isolate_pageblock = ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		reason = "notifier failure";
		goto failed_removal_isolated;
	}

	do {
		for (pfn = start_pfn; pfn;) {
			if (signal_pending(current)) {
				ret = -EINTR;
				reason = "signal backoff";
				goto failed_removal_isolated;
			}

			cond_resched();
			lru_add_drain_all();

			pfn = scan_movable_pages(pfn, end_pfn);
			if (pfn) {
				/*
				 * TODO: fatal migration failures should bail
				 * out
				 */
				do_migrate_range(pfn, end_pfn);
			}
		}

		/*
		 * Dissolve free hugepages in the memory block before doing
		 * offlining actually in order to make hugetlbfs's object
		 * counting consistent.
		 */
		ret = dissolve_free_huge_pages(start_pfn, end_pfn);
		if (ret) {
			reason = "failure to dissolve huge pages";
			goto failed_removal_isolated;
		}
		/* check again */
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
					    NULL, check_pages_isolated_cb);
	} while (ret);

	/* Ok, all of our target is isolated.
	   We cannot do rollback at this point. */
	walk_system_ram_range(start_pfn, end_pfn - start_pfn,
			      &offlined_pages, offline_isolated_pages_cb);
	pr_info("Offlined Pages %ld\n", offlined_pages);
	/*
	 * Onlining will reset pagetype flags and makes migrate type
	 * MOVABLE, so just need to decrease the number of isolated
	 * pageblocks zone counter here.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock -= nr_isolate_pageblock;
	spin_unlock_irqrestore(&zone->lock, flags);

	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
	zone->present_pages -= offlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		build_all_zonelists(NULL);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0) {
		kswapd_stop(node);
		kcompactd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_removal_isolated:
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
failed_removal:
	pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
		 (unsigned long long) start_pfn << PAGE_SHIFT,
		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
		 reason);
	/* pushback to free area */
	mem_hotplug_done();
	return ret;
}
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return __offline_pages(start_pfn, start_pfn + nr_pages);
}
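
/*
 * walk_memory_blocks() callback: fails with -EBUSY if a memory block in
 * the range to be removed is still online.
 */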
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = beginpa + memory_block_size_bytes() - 1;
		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
			&beginpa, &endpa);

		return -EBUSY;
	}
	return 0;
}
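
/* Returns -EBUSY if any present CPU still maps to this node. */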
static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * the cpu on this node isn't removed, and we can't
			 * offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}
/**
 * try_offline_node
 * @nid: the node ID
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * some memory sections of this node are not removed, and we
		 * can't offline node now.
		 */
		return;
	}

	if (check_cpu_on_node(pgdat))
		return;

	/*
	 * all memory/cpu of this node are removed, we can offline this
	 * node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);
static void __release_memory_resource(resource_size_t start,
				      resource_size_t size)
{
	int ret;

	/*
	 * When removing memory in the same granularity as it was added,
	 * this function never fails. It might only fail if resources
	 * have to be adjusted or split. We'll ignore the error, as
	 * removing of memory cannot fail.
	 */
	ret = release_mem_region_adjustable(&iomem_resource, start, size);
	if (ret) {
		resource_size_t endres = start + size - 1;

		pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
			&start, &endres, ret);
	}
}
static int __ref try_remove_memory(int nid, u64 start, u64 size)
{
	int rc = 0;

	BUG_ON(check_hotplug_memory_range(start, size));

	mem_hotplug_begin();

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and return error
	 * if this is not the case.
	 */
	rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
	if (rc)
		goto done;

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");
	memblock_free(start, size);
	memblock_remove(start, size);

	/* remove memory block devices before removing memory */
	remove_memory_block_devices(start, size);

	arch_remove_memory(nid, start, size, NULL);
	__release_memory_resource(start, size);

	try_offline_node(nid);

done:
	mem_hotplug_done();
	return rc;
}
/**
 * remove_memory
 * @nid: the node ID
 * @start: physical address of the region to remove
 * @size: size of the region to remove
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __remove_memory(int nid, u64 start, u64 size)
{
	/*
	 * trigger BUG() if some memory is not offlined prior to calling this
	 * function
	 */
	if (try_remove_memory(nid, start, size))
		BUG();
}
/*
 * Remove memory if every memory block is offline, otherwise return -EBUSY
 * if some memory is not offline.
 */
int remove_memory(int nid, u64 start, u64 size)
{
	int rc;

	lock_device_hotplug();
	rc = try_remove_memory(nid, start, size);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */