/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
        percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
        percpu_up_read(&mem_hotplug_lock);
}

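/*
 * Illustrative note (not original source text): readers that must keep
 * memory from being hot-added or removed while they walk it bracket the
 * walk with get_online_mems()/put_online_mems() above, while the hotplug
 * paths themselves take mem_hotplug_lock for write via
 * mem_hotplug_begin()/mem_hotplug_done() below.
 */
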
bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
        if (!strcmp(str, "online"))
                memhp_auto_online = true;
        else if (!strcmp(str, "offline"))
                memhp_auto_online = false;

        return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

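/*
 * Usage sketch (illustrative, mirrors the parser above): booting with
 * "memhp_default_state=online" on the kernel command line makes newly
 * hot-added memory come up online by default, overriding the
 * CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE build-time default; "offline"
 * does the opposite.
 */
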
void mem_hotplug_begin(void)
{
        cpus_read_lock();
        percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
        percpu_up_write(&mem_hotplug_lock);
        cpus_read_unlock();
}

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        if (!res)
                return ERR_PTR(-ENOMEM);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                pr_debug("System RAM resource %pR cannot be added\n", res);
                kfree(res);
                return ERR_PTR(-EEXIST);
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
        return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
                      unsigned long type)
{
        page->freelist = (void *)type;
        SetPagePrivate(page);
        set_page_private(page, info);
        page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
        unsigned long type;

        type = (unsigned long) page->freelist;
        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
               type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

        if (page_ref_dec_return(page) == 1) {
                page->freelist = NULL;
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
                free_reserved_page(page);
        }
}

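/*
 * Illustrative note (assumption, not original source text): bootmem info
 * pages are reference counted through get_page_bootmem()/put_page_bootmem()
 * above; the object type (SECTION_INFO, MIX_SECTION_INFO, NODE_INFO) lives
 * in page->freelist and extra info in page->private, and the page only goes
 * back to the buddy allocator via free_reserved_page() once the last
 * reference is dropped.
 */
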
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usemap = __nr_to_section(section_nr)->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        if (!pfn_valid(start_pfn))
                return;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

        usemap = __nr_to_section(section_nr)->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        pfn = pgdat->node_start_pfn;
        end_pfn = pgdat_end_pfn(pgdat);

        /* register section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /*
                 * Some platforms can assign the same pfn to multiple nodes - on
                 * node0 as well as nodeN. To avoid registering a pfn against
                 * multiple nodes we check that this pfn does not already
                 * reside in some other node.
                 */
                if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
                        register_page_bootmem_info_section(pfn);
        }
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
                bool want_memblock)
{
        int ret;
        int i;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
        if (ret < 0)
                return ret;

        /*
         * Make all the pages reserved so that nobody will stumble over
         * half-initialized state.
         * FIXME: We also have to associate it with a node because pfn_to_node
         * relies on having a page with the proper node.
         */
        for (i = 0; i < PAGES_PER_SECTION; i++) {
                unsigned long pfn = phys_start_pfn + i;
                struct page *page;
                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                set_page_node(page, nid);
                SetPageReserved(page);
        }

        if (!want_memblock)
                return 0;

        return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long phys_start_pfn,
                        unsigned long nr_pages, bool want_memblock)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;
        struct vmem_altmap *altmap;

        /* while initializing the mem_map, align the hot-added range to sections */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
        if (altmap) {
                /*
                 * Validate altmap is within bounds of the total request
                 */
                if (altmap->base_pfn != phys_start_pfn
                                || vmem_altmap_offset(altmap) > nr_pages) {
                        pr_warn_once("memory add fail, invalid altmap\n");
                        err = -EINVAL;
                        goto out;
                }
                altmap->alloc = 0;
        }

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(nid, section_nr_to_pfn(i), want_memblock);

                /*
                 * EEXIST is finally dealt with by the ioresource collision
                 * check. see add_memory() => register_memory_resource()
                 * A warning will be printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }
        vmemmap_populate_print_last();
out:
        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

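/*
 * Illustrative example (assumption, not original source text): on x86-64 a
 * memory section is 128MB, so hot-adding a section-aligned 256MB range makes
 * the loop in __add_pages() call __add_section() twice; -EEXIST from an
 * already-present section is tolerated, any other error aborts the loop.
 */
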
#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
                                     unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        struct mem_section *ms;

        for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(start_pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (unlikely(pfn_to_nid(start_pfn) != nid))
                        continue;

                if (zone && zone != page_zone(pfn_to_page(start_pfn)))
                        continue;

                return start_pfn;
        }

        return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
                                    unsigned long start_pfn,
                                    unsigned long end_pfn)
{
        struct mem_section *ms;
        unsigned long pfn;

        /* pfn is the end pfn of a memory section. */
        pfn = end_pfn - 1;
        for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (unlikely(pfn_to_nid(pfn) != nid))
                        continue;

                if (zone && zone != page_zone(pfn_to_page(pfn)))
                        continue;

                return pfn;
        }

        return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
{
        unsigned long zone_start_pfn = zone->zone_start_pfn;
        unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
        unsigned long zone_end_pfn = z;
        unsigned long pfn;
        struct mem_section *ms;
        int nid = zone_to_nid(zone);

        zone_span_writelock(zone);
        if (zone_start_pfn == start_pfn) {
                /*
                 * If the section is the smallest section in the zone, we need
                 * to shrink zone->zone_start_pfn and zone->spanned_pages.
                 * In this case, find the second smallest valid mem_section
                 * for shrinking the zone.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
                                                zone_end_pfn);
                if (pfn) {
                        zone->zone_start_pfn = pfn;
                        zone->spanned_pages = zone_end_pfn - pfn;
                }
        } else if (zone_end_pfn == end_pfn) {
                /*
                 * If the section is the biggest section in the zone, we need
                 * to shrink zone->spanned_pages.
                 * In this case, find the second biggest valid mem_section for
                 * shrinking the zone.
                 */
                pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
                                               start_pfn);
                if (pfn)
                        zone->spanned_pages = pfn - zone_start_pfn + 1;
        }

        /*
         * If the section is not the biggest or smallest mem_section in the
         * zone, it only creates a hole in the zone. In that case we need not
         * change the zone. But perhaps the zone contains nothing but holes,
         * so check whether the zone still has any valid section.
         */
        pfn = zone_start_pfn;
        for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (page_zone(pfn_to_page(pfn)) != zone)
                        continue;

                /* If the section is the current section, continue the loop */
                if (start_pfn == pfn)
                        continue;

                /* If we find a valid section, we have nothing to do */
                zone_span_writeunlock(zone);
                return;
        }

        /* The zone has no valid section */
        zone->zone_start_pfn = 0;
        zone->spanned_pages = 0;
        zone_span_writeunlock(zone);
}

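/*
 * Illustrative example (assumption, not original source text): if a zone
 * spans sections A..D, removing A makes shrink_zone_span() advance
 * zone_start_pfn to the next valid section, removing D only shrinks
 * spanned_pages, and removing an interior section leaves the span untouched
 * unless no valid section remains at all.
 */
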
static void shrink_pgdat_span(struct pglist_data *pgdat,
                              unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
        unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
        unsigned long pgdat_end_pfn = p;
        unsigned long pfn;
        struct mem_section *ms;
        int nid = pgdat->node_id;

        if (pgdat_start_pfn == start_pfn) {
                /*
                 * If the section is the smallest section in the pgdat, we need
                 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
                 * In this case, find the second smallest valid mem_section
                 * for shrinking the pgdat.
                 */
                pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
                                                pgdat_end_pfn);
                if (pfn) {
                        pgdat->node_start_pfn = pfn;
                        pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
                }
        } else if (pgdat_end_pfn == end_pfn) {
                /*
                 * If the section is the biggest section in the pgdat, we need
                 * to shrink pgdat->node_spanned_pages.
                 * In this case, find the second biggest valid mem_section for
                 * shrinking the pgdat.
                 */
                pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
                                               start_pfn);
                if (pfn)
                        pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
        }

        /*
         * If the section is not the biggest or smallest mem_section in the
         * pgdat, it only creates a hole in the pgdat. In that case we need
         * not change the pgdat. But perhaps the pgdat contains nothing but
         * holes, so check whether it still has any valid section.
         */
        pfn = pgdat_start_pfn;
        for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (pfn_to_nid(pfn) != nid)
                        continue;

                /* If the section is the current section, continue the loop */
                if (start_pfn == pfn)
                        continue;

                /* If we find a valid section, we have nothing to do */
                return;
        }

        /* The pgdat has no valid section */
        pgdat->node_start_pfn = 0;
        pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        unsigned long flags;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
        shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset)
{
        unsigned long start_pfn;
        int scn_nr;
        int ret = -EINVAL;

        if (!valid_section(ms))
                return ret;

        ret = unregister_memory_section(ms);
        if (ret)
                return ret;

        scn_nr = __section_nr(ms);
        start_pfn = section_nr_to_pfn(scn_nr);
        __remove_zone(zone, start_pfn);

        sparse_remove_one_section(zone, ms, map_offset);
        return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                 unsigned long nr_pages)
{
        unsigned long i;
        unsigned long map_offset = 0;
        int sections_to_remove, ret = 0;

        /* In the ZONE_DEVICE case device driver owns the memory region */
        if (is_dev_zone(zone)) {
                struct page *page = pfn_to_page(phys_start_pfn);
                struct vmem_altmap *altmap;

                altmap = to_vmem_altmap((unsigned long) page);
                if (altmap)
                        map_offset = vmem_altmap_offset(altmap);
        } else {
                resource_size_t start, size;

                start = phys_start_pfn << PAGE_SHIFT;
                size = nr_pages * PAGE_SIZE;

                ret = release_mem_region_adjustable(&iomem_resource, start,
                                        size);
                if (ret) {
                        resource_size_t endres = start + size - 1;

                        pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
                                        &start, &endres, ret);
                }
        }

        clear_zone_contiguous(zone);

        /*
         * We can only remove entire sections
         */
        BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
        BUG_ON(nr_pages % PAGES_PER_SECTION);

        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;

                ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
                map_offset = 0;
                if (ret)
                        break;
        }

        set_zone_contiguous(zone);

        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        get_online_mems();
        mutex_lock(&online_page_callback_lock);

        if (online_page_callback == generic_online_page) {
                online_page_callback = callback;
                rc = 0;
        }

        mutex_unlock(&online_page_callback_lock);
        put_online_mems();

        return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        get_online_mems();
        mutex_lock(&online_page_callback_lock);

        if (online_page_callback == callback) {
                online_page_callback = generic_online_page;
                rc = 0;
        }

        mutex_unlock(&online_page_callback_lock);
        put_online_mems();

        return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
        adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
        __free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
        __online_page_set_limits(page);
        __online_page_increment_counters(page);
        __online_page_free(page);
}

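/*
 * Illustrative note (assumption, not original source text): balloon drivers
 * such as Xen's or Hyper-V's use set_online_page_callback() to intercept
 * page onlining, deciding themselves which hot-added pages reach the buddy
 * allocator, and call restore_online_page_callback() on teardown to fall
 * back to generic_online_page() above.
 */
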
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                        void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;

        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        (*online_page_callback)(page);
                        onlined_pages++;
                }

        online_mem_sections(start_pfn, start_pfn + nr_pages);

        *(unsigned long *)arg = onlined_pages;
        return 0;
}

/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
        struct zone *zone, struct memory_notify *arg)
{
        int nid = zone_to_nid(zone);
        enum zone_type zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_NORMAL,
         * set zone_last to ZONE_NORMAL.
         *
         * If we don't have HIGHMEM nor movable node,
         * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
         * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
         */
        if (N_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * if the memory to be online is in a zone of 0...zone_last, and
         * the zones of 0...zone_last don't have memory before online, we will
         * need to set the node to node_states[N_NORMAL_MEMORY] after
         * the memory is online.
         */
        if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
                arg->status_change_nid_normal = nid;
        else
                arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
        /*
         * If we have movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_HIGHMEM,
         * set zone_last to ZONE_HIGHMEM.
         *
         * If we don't have movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_MOVABLE,
         * set zone_last to ZONE_MOVABLE.
         */
        zone_last = ZONE_HIGHMEM;
        if (N_MEMORY == N_HIGH_MEMORY)
                zone_last = ZONE_MOVABLE;

        if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
                arg->status_change_nid_high = nid;
        else
                arg->status_change_nid_high = -1;
#else
        arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

        /*
         * if the node doesn't have memory before online, we will need to
         * set the node to node_states[N_MEMORY] after the memory
         * is online.
         */
        if (!node_state(nid, N_MEMORY))
                arg->status_change_nid = nid;
        else
                arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_set_state(node, N_NORMAL_MEMORY);

        if (arg->status_change_nid_high >= 0)
                node_set_state(node, N_HIGH_MEMORY);

        node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
                unsigned long nr_pages)
{
        unsigned long old_end_pfn = zone_end_pfn(zone);

        if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
                unsigned long nr_pages)
{
        unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

        if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

void __ref move_pfn_range_to_zone(struct zone *zone,
                unsigned long start_pfn, unsigned long nr_pages)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nid = pgdat->node_id;
        unsigned long flags;

        if (zone_is_empty(zone))
                init_currently_empty_zone(zone, start_pfn, nr_pages);

        clear_zone_contiguous(zone);

        /* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
        pgdat_resize_lock(pgdat, &flags);
        zone_span_writelock(zone);
        resize_zone_range(zone, start_pfn, nr_pages);
        zone_span_writeunlock(zone);
        resize_pgdat_range(pgdat, start_pfn, nr_pages);
        pgdat_resize_unlock(pgdat, &flags);

        /*
         * TODO now we have a visible range of pages which are not associated
         * with their zone properly. Not nice but set_pfnblock_flags_mask
         * expects the zone to span the pfn range. All the pages in the range
         * are reserved so nobody should be touching them, so we should be safe.
         */
        memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);

        set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
                unsigned long nr_pages)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        int zid;

        for (zid = 0; zid <= ZONE_NORMAL; zid++) {
                struct zone *zone = &pgdat->node_zones[zid];

                if (zone_intersects(zone, start_pfn, nr_pages))
                        return zone;
        }

        return &pgdat->node_zones[ZONE_NORMAL];
}

bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
        struct zone *default_zone = default_zone_for_pfn(nid, pfn, nr_pages);

        /*
         * TODO there shouldn't be any inherent reason to have ZONE_NORMAL
         * physically before ZONE_MOVABLE. All we need is that they do not
         * overlap. Historically we didn't allow ZONE_NORMAL after ZONE_MOVABLE
         * though, so let's stick with it for simplicity for now.
         * TODO make sure we do not overlap with ZONE_DEVICE
         */
        if (online_type == MMOP_ONLINE_KERNEL) {
                if (zone_is_empty(movable_zone))
                        return true;
                return movable_zone->zone_start_pfn >= pfn + nr_pages;
        } else if (online_type == MMOP_ONLINE_MOVABLE) {
                return zone_end_pfn(default_zone) <= pfn;
        }

        /* MMOP_ONLINE_KEEP will always succeed and inherits the current zone */
        return online_type == MMOP_ONLINE_KEEP;
}

static inline bool movable_pfn_range(int nid, struct zone *default_zone,
                unsigned long start_pfn, unsigned long nr_pages)
{
        if (!allow_online_pfn_range(nid, start_pfn, nr_pages,
                                MMOP_ONLINE_KERNEL))
                return true;

        if (!movable_node_is_enabled())
                return false;

        return !zone_intersects(default_zone, start_pfn, nr_pages);
}

struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
                unsigned long nr_pages)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = default_zone_for_pfn(nid, start_pfn, nr_pages);

        if (online_type == MMOP_ONLINE_KEEP) {
                struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
                /*
                 * MMOP_ONLINE_KEEP defaults to MMOP_ONLINE_KERNEL but uses
                 * the movable zone if that is not possible (e.g. we are within
                 * or past the existing movable zone). movable_node overrides
                 * this default and defaults to the movable zone.
                 */
                if (movable_pfn_range(nid, zone, start_pfn, nr_pages))
                        zone = movable_zone;
        } else if (online_type == MMOP_ONLINE_MOVABLE) {
                zone = &pgdat->node_zones[ZONE_MOVABLE];
        }

        return zone;
}

/*
 * Associates the given pfn range with the given node and the zone appropriate
 * for the given online type.
 */
static struct zone * __meminit move_pfn_range(int online_type, int nid,
                unsigned long start_pfn, unsigned long nr_pages)
{
        struct zone *zone;

        zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
        move_pfn_range_to_zone(zone, start_pfn, nr_pages);
        return zone;
}

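/*
 * Illustrative note (assumption, not original source text): the online_type
 * values correspond to what userspace writes to a memory block's sysfs
 * "state" file - "online_kernel" (MMOP_ONLINE_KERNEL), "online_movable"
 * (MMOP_ONLINE_MOVABLE) and plain "online" (MMOP_ONLINE_KEEP), the last of
 * which lets zone_for_pfn_range() above pick the zone.
 */
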
/* Must be protected by mem_hotplug_begin() */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
        unsigned long flags;
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
        int nid;
        int ret;
        struct memory_notify arg;

        nid = pfn_to_nid(pfn);
        if (!allow_online_pfn_range(nid, pfn, nr_pages, online_type))
                return -EINVAL;

        /* associate pfn range with the zone */
        zone = move_pfn_range(online_type, nid, pfn, nr_pages);

        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_online(nr_pages, zone, &arg);

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_addition;

        /*
         * If this zone is not populated, then it is not in zonelist.
         * This means the page allocator ignores this zone.
         * So, zonelist must be updated after online.
         */
        mutex_lock(&zonelists_mutex);
        if (!populated_zone(zone)) {
                need_zonelists_rebuild = 1;
                build_all_zonelists(NULL, zone);
        }

        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
                if (need_zonelists_rebuild)
                        zone_pcp_reset(zone);
                mutex_unlock(&zonelists_mutex);
                goto failed_addition;
        }

        zone->present_pages += onlined_pages;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        zone->zone_pgdat->node_present_pages += onlined_pages;
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        if (onlined_pages) {
                node_states_set_node(nid, &arg);
                if (need_zonelists_rebuild)
                        build_all_zonelists(NULL, NULL);
                else
                        zone_pcp_update(zone);
        }

        mutex_unlock(&zonelists_mutex);

        init_per_zone_wmark_min();

        if (onlined_pages) {
                kswapd_run(nid);
                kcompactd_run(nid);
        }

        vm_total_pages = nr_free_pagecache_pages();

        writeback_set_ratelimit();

        if (onlined_pages)
                memory_notify(MEM_ONLINE, &arg);
        return 0;

failed_addition:
        pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
                 (unsigned long long) pfn << PAGE_SHIFT,
                 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
        struct zone *z;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->present_pages = 0;

        pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = PFN_DOWN(start);

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                pgdat = arch_alloc_nodedata(nid);
                if (!pgdat)
                        return NULL;

                arch_refresh_nodedata(nid, pgdat);
        } else {
                /*
                 * Reset the nr_zones, order and classzone_idx before reuse.
                 * Note that kswapd will init kswapd_classzone_idx properly
                 * when it starts in the near future.
                 */
                pgdat->nr_zones = 0;
                pgdat->kswapd_order = 0;
                pgdat->kswapd_classzone_idx = 0;
        }

        /* we can use NODE_DATA(nid) from here */

        /* init node's zones as empty zones, we don't have any present pages.*/
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);
        pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

        /*
         * The node we allocated has no zone fallback lists. To avoid
         * accessing a not-initialized zonelist, build one here.
         */
        mutex_lock(&zonelists_mutex);
        build_all_zonelists(pgdat, NULL);
        mutex_unlock(&zonelists_mutex);

        /*
         * zone->managed_pages is set to an approximate value in
         * free_area_init_core(), which will cause
         * /sys/devices/system/node/nodeX/meminfo to contain wrong data.
         * So reset it to 0 before any memory is onlined.
         */
        reset_node_managed_pages(pgdat);

        /*
         * When memory is hot-added, all the memory is in offline state. So
         * clear all zones' present_pages because they will be updated in
         * online_pages() and offline_pages().
         */
        reset_node_present_pages(pgdat);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        free_percpu(pgdat->per_cpu_nodestats);
        arch_free_nodedata(pgdat);
        return;
}

/**
 * try_online_node - online a node if offlined
 *
 * called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
        pg_data_t *pgdat;
        int ret;

        if (node_online(nid))
                return 0;

        mem_hotplug_begin();
        pgdat = hotadd_new_pgdat(nid, 0);
        if (!pgdat) {
                pr_err("Cannot online node %d due to NULL pgdat\n", nid);
                ret = -ENOMEM;
                goto out;
        }
        node_set_online(nid);
        ret = register_one_node(nid);
        BUG_ON(ret);

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        }

out:
        mem_hotplug_done();
        return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
        u64 start_pfn = PFN_DOWN(start);
        u64 nr_pages = size >> PAGE_SHIFT;

        /* Memory range must be aligned with section */
        if ((start_pfn & ~PAGE_SECTION_MASK) ||
            (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
                pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
                                (unsigned long long)start,
                                (unsigned long long)size);
                return -EINVAL;
        }

        return 0;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
        return device_online(&mem->dev);
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
        u64 start, size;
        pg_data_t *pgdat = NULL;
        bool new_pgdat;
        bool new_node;
        int ret;

        start = res->start;
        size = resource_size(res);

        ret = check_hotplug_memory_range(start, size);
        if (ret)
                return ret;

        {       /* Stupid hack to suppress address-never-null warning */
                void *p = NODE_DATA(nid);
                new_pgdat = !p;
        }

        mem_hotplug_begin();

        /*
         * Add new range to memblock so that when hotadd_new_pgdat() is called
         * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
         * this new range and calculate total pages correctly. The range will
         * be removed at hot-remove time.
         */
        memblock_add_node(start, size, nid);

        new_node = !node_online(nid);
        if (new_node) {
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
                        goto error;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size, true);

        if (ret < 0)
                goto error;

        /* we online node here. we can't roll back from here. */
        node_set_online(nid);

        if (new_node) {
                unsigned long start_pfn = start >> PAGE_SHIFT;
                unsigned long nr_pages = size >> PAGE_SHIFT;

                ret = __register_one_node(nid);
                if (ret)
                        goto register_fail;

                /*
                 * link memory sections under this node. This is already
                 * done when creating memory sections in register_new_memory,
                 * but that depends on having the node registered, so offline
                 * nodes have to go through register_node.
                 * TODO clean up this mess.
                 */
                ret = link_mem_sections(nid, start_pfn, nr_pages);
register_fail:
                /*
                 * If the sysfs file of the new node can't be created, cpus
                 * on the node can't be hot-added. There is no rollback now,
                 * so check it with BUG_ON() to catch it reluctantly..
                 */
                BUG_ON(ret);
        }

        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");

        /* online pages if requested */
        if (online)
                walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
                                  NULL, online_memory_block);

        goto out;

error:
        /* rollback pgdat allocation and others */
        if (new_pgdat && pgdat)
                rollback_node_hotadd(nid, pgdat);
        memblock_remove(start, size);

out:
        mem_hotplug_done();
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory_resource);

int __ref add_memory(int nid, u64 start, u64 size)
{
        struct resource *res;
        int ret;

        res = register_memory_resource(start, size);
        if (IS_ERR(res))
                return PTR_ERR(res);

        ret = add_memory_resource(nid, res, memhp_auto_online);
        if (ret < 0)
                release_memory_resource(res);
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

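/*
 * Usage sketch (illustrative, not from this file): a hot-add handler that
 * discovered a new section-aligned range [start, start + size) on node nid
 * would typically just call
 *
 *      rc = add_memory(nid, start, size);
 *
 * which registers the iomem resource, creates the memmap and memory block
 * devices, and onlines the new blocks immediately when memhp_auto_online
 * is set.
 */
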
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock
 */
static inline int pageblock_free(struct page *page)
{
        return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

        /* If the entire pageblock is free, move to the end of free page */
        if (pageblock_free(page)) {
                int order;
                /* be careful. we don't have locks, page_order can be changed.*/
                order = page_order(page);
                if ((order < MAX_ORDER) && (order >= pageblock_order))
                        return page + (1 << order);
        }

        return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
        struct page *page = pfn_to_page(start_pfn);
        struct page *end_page = page + nr_pages;

        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
                if (!is_pageblock_removable_nolock(page))
                        return false;
                cond_resched();
        }

        /* All pageblocks in the memory block are likely to be hot-removable */
        return true;
}

/*
 * Confirm all pages in a range [start, end) belong to the same zone.
 * When true, return its valid [start, end).
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
                         unsigned long *valid_start, unsigned long *valid_end)
{
        unsigned long pfn, sec_end_pfn;
        unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
        for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
             pfn < end_pfn;
             pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
                /* Make sure the memory section is present first */
                if (!present_section_nr(pfn_to_section_nr(pfn)))
                        continue;
                for (; pfn < sec_end_pfn && pfn < end_pfn;
                     pfn += MAX_ORDER_NR_PAGES) {
                        i = 0;
                        /* This is just a CONFIG_HOLES_IN_ZONE check.*/
                        while ((i < MAX_ORDER_NR_PAGES) &&
                                !pfn_valid_within(pfn + i))
                                i++;
                        if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
                                continue;
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
                        if (!zone)
                                start = pfn + i;
                        zone = page_zone(page);
                        end = pfn + MAX_ORDER_NR_PAGES;
                }
        }

        if (zone) {
                *valid_start = start;
                *valid_end = min(end, end_pfn);
                return 1;
        } else {
                return 0;
        }
}

/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
 * non-lru movable pages and hugepages). We scan pfn because it's much
 * easier than scanning over linked list. This function returns the pfn
 * of the first found movable page if it's found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page;
        for (pfn = start; pfn < end; pfn++) {
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageLRU(page))
                                return pfn;
                        if (__PageMovable(page))
                                return pfn;
                        if (PageHuge(page)) {
                                if (page_huge_active(page))
                                        return pfn;
                                else
                                        pfn = round_up(pfn + 1,
                                                1 << compound_order(page)) - 1;
                        }
                }
        }
        return 0;
}

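/*
 * Illustrative note (assumption, not original source text): for hugetlb
 * pages, scan_movable_pages() above only reports the pfn when the page is
 * active (migratable); otherwise it rounds pfn up past the whole compound
 * page so the tail pages are not rescanned.
 */
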
static struct page *new_node_page(struct page *page, unsigned long private,
                int **result)
{
        int nid = page_to_nid(page);
        nodemask_t nmask = node_states[N_MEMORY];

        /*
         * try to allocate from a different node but reuse this node if there
         * are no other online nodes to be used (e.g. we are offlining a part
         * of the only existing node)
         */
        node_clear(nid, nmask);
        if (nodes_empty(nmask))
                node_set(nid, nmask);

        return new_page_nodemask(page, nid, &nmask);
}

#define NR_OFFLINE_AT_ONCE_PAGES        (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page;
        int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
        int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);

                if (PageHuge(page)) {
                        struct page *head = compound_head(page);
                        pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
                        if (compound_order(head) > PFN_SECTION_SHIFT) {
                                ret = -EBUSY;
                                break;
                        }
                        if (isolate_huge_page(page, &source))
                                move_pages -= 1 << compound_order(head);
                        continue;
                }

                if (!get_page_unless_zero(page))
                        continue;
                /*
                 * We can skip free pages. And we can deal with pages on
                 * LRU and non-lru movable pages.
                 */
                if (PageLRU(page))
                        ret = isolate_lru_page(page);
                else
                        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
                if (!ret) { /* Success */
                        put_page(page);
                        list_add_tail(&page->lru, &source);
                        move_pages--;
                        if (!__PageMovable(page))
                                inc_node_page_state(page, NR_ISOLATED_ANON +
                                                    page_is_file_cache(page));

                } else {
#ifdef CONFIG_DEBUG_VM
                        pr_alert("failed to isolate pfn %lx\n", pfn);
                        dump_page(page, "isolation failed");
#endif
                        put_page(page);
                        /*
                         * Because we don't hold a big zone->lock, we should
                         * check this again here.
                         */
                        if (page_count(page)) {
                                not_managed++;
                                ret = -EBUSY;
                                break;
                        }
                }
        }
        if (!list_empty(&source)) {
                if (not_managed) {
                        putback_movable_pages(&source);
                        goto out;
                }

                /* Allocate a new page from the nearest neighbor node */
                ret = migrate_pages(&source, new_node_page, NULL, 0,
                                        MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
                if (ret)
                        putback_movable_pages(&source);
        }
out:
        return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
                        void *data)
{
        __offline_isolated_pages(start, start + nr_pages);
        return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
                                offline_isolated_pages_cb);
}

/*
 * Check that all pages in range, recorded as memory resource, are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
{
        int ret;
        long offlined = *(long *)data;
        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
        offlined = nr_pages;
        if (!ret)
                *(long *)data += offlined;
        return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        long offlined = 0;
        int ret;

        ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
                        check_pages_isolated_cb);
        if (ret < 0)
                offlined = (long)ret;
        return offlined;
}

static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        movable_node_enabled = true;
#else
        pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
#endif
        return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
                struct zone *zone, struct memory_notify *arg)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long present_pages = 0;
        enum zone_type zt, zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_NORMAL,
         * set zone_last to ZONE_NORMAL.
         *
         * If we don't have HIGHMEM nor movable node,
         * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
         * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
         */
        if (N_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * check whether node_states[N_NORMAL_MEMORY] will be changed.
         * If the memory to be offline is in a zone of 0...zone_last,
         * and it is the last present memory, 0...zone_last will
         * become empty after offline, thus we can determine that we will
         * need to clear the node from node_states[N_NORMAL_MEMORY].
         */
        for (zt = 0; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
                arg->status_change_nid_normal = zone_to_nid(zone);
        else
                arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
        /*
         * If we have movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_HIGHMEM,
         * set zone_last to ZONE_HIGHMEM.
         *
         * If we don't have movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_MOVABLE,
         * set zone_last to ZONE_MOVABLE.
         */
        zone_last = ZONE_HIGHMEM;
        if (N_MEMORY == N_HIGH_MEMORY)
                zone_last = ZONE_MOVABLE;

        for (; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
                arg->status_change_nid_high = zone_to_nid(zone);
        else
                arg->status_change_nid_high = -1;
#else
        arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

        /*
         * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
         */
        zone_last = ZONE_MOVABLE;

        /*
         * check whether node_states[N_HIGH_MEMORY] will be changed.
         * If we try to offline the last present @nr_pages from the node,
         * we can determine that we will need to clear the node from
         * node_states[N_HIGH_MEMORY].
         */
        for (; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (nr_pages >= present_pages)
                arg->status_change_nid = zone_to_nid(zone);
        else
                arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_clear_state(node, N_NORMAL_MEMORY);

        if ((N_MEMORY != N_NORMAL_MEMORY) &&
            (arg->status_change_nid_high >= 0))
                node_clear_state(node, N_HIGH_MEMORY);

        if ((N_MEMORY != N_HIGH_MEMORY) &&
            (arg->status_change_nid >= 0))
                node_clear_state(node, N_MEMORY);
}

static int __ref __offline_pages(unsigned long start_pfn,
                  unsigned long end_pfn, unsigned long timeout)
{
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
        int ret, drain, retry_max, node;
        unsigned long flags;
        unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;

        /* at least, alignment against pageblock is necessary */
        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                return -EINVAL;
        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                return -EINVAL;
        /*
         * This makes hotplug much easier and more readable.
         * We assume this for now.
         */
        if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return -EINVAL;

        zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;

        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, true);
        if (ret)
                return ret;

        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_offline(nr_pages, zone, &arg);

        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_removal;

        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
        retry_max = 5;
repeat:
        /* start memory hot removal */
        ret = -EAGAIN;
        if (time_after(jiffies, expire))
                goto failed_removal;
        ret = -EINTR;
        if (signal_pending(current))
                goto failed_removal;
        ret = 0;
        if (drain) {
                lru_add_drain_all_cpuslocked();
                cond_resched();
                drain_all_pages(zone);
        }

        pfn = scan_movable_pages(start_pfn, end_pfn);
        if (pfn) { /* We have movable pages */
                ret = do_migrate_range(pfn, end_pfn);
                if (!ret) {
                        drain = 1;
                        goto repeat;
                } else {
                        if (ret < 0)
                                if (--retry_max == 0)
                                        goto failed_removal;
                        yield();
                        drain = 1;
                        goto repeat;
                }
        }
        /* drain all zone's lru pagevec, this is asynchronous... */
        lru_add_drain_all_cpuslocked();
        yield();
        /* drain pcp pages, this is synchronous. */
        drain_all_pages(zone);
        /*
         * dissolve free hugepages in the memory block before doing offlining
         * actually in order to make hugetlbfs's object counting consistent.
         */
        ret = dissolve_free_huge_pages(start_pfn, end_pfn);
        if (ret)
                goto failed_removal;
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
                ret = -EBUSY;
                goto failed_removal;
        }
        pr_info("Offlined Pages %ld\n", offlined_pages);
        /*
         * Ok, all of our target is isolated.
         * We cannot do rollback at this point.
         */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags and make migrate type MOVABLE */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        /* removal success */
        adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
        zone->present_pages -= offlined_pages;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        init_per_zone_wmark_min();

        if (!populated_zone(zone)) {
                zone_pcp_reset(zone);
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        } else
                zone_pcp_update(zone);

        node_states_clear_node(node, &arg);
        if (arg.status_change_nid >= 0) {
                kswapd_stop(node);
                kcompactd_stop(node);
        }

        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();

        memory_notify(MEM_OFFLINE, &arg);
        return 0;

failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
                 (unsigned long long) start_pfn << PAGE_SHIFT,
                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        return ret;
}

/* Must be protected by mem_hotplug_begin() */
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}
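
/*
 * Usage sketch (illustrative, not from this file): writing "offline" to
 * /sys/devices/system/memory/memoryN/state ends up here via the memory
 * block device code, with start_pfn/nr_pages describing that block and
 * the 120s timeout above bounding the migration retries.
 */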
#endif /* CONFIG_MEMORY_HOTREMOVE */

/**
 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the return value of func.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
                void *arg, int (*func)(struct memory_block *, void *))
{
        struct memory_block *mem = NULL;
        struct mem_section *section;
        unsigned long pfn, section_nr;
        int ret;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                section_nr = pfn_to_section_nr(pfn);
                if (!present_section_nr(section_nr))
                        continue;

                section = __nr_to_section(section_nr);
                /* same memblock? */
                if (mem)
                        if ((section_nr >= mem->start_section_nr) &&
                            (section_nr <= mem->end_section_nr))
                                continue;

                mem = find_memory_block_hinted(section, mem);
                if (!mem)
                        continue;

                ret = func(mem, arg);
                if (ret) {
                        kobject_put(&mem->dev.kobj);
                        return ret;
                }
        }

        if (mem)
                kobject_put(&mem->dev.kobj);

        return 0;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
        int ret = !is_memblock_offlined(mem);

        if (unlikely(ret)) {
                phys_addr_t beginpa, endpa;

                beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
                endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
                pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
                        &beginpa, &endpa);
        }

        return ret;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
        int cpu;

        for_each_present_cpu(cpu) {
                if (cpu_to_node(cpu) == pgdat->node_id)
                        /*
                         * the cpu on this node isn't removed, and we can't
                         * offline this node.
                         */
                        return -EBUSY;
        }

        return 0;
}

static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
        int cpu;

        for_each_possible_cpu(cpu)
                if (cpu_to_node(cpu) == pgdat->node_id)
                        numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
        int ret;

        ret = check_cpu_on_node(pgdat);
        if (ret)
                return ret;

        /*
         * the node will be offlined when we come here, so we can clear
         * the cpu_to_node() now.
         */

        unmap_cpu_on_node(pgdat);
        return 0;
}

/**
 * try_offline_node
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
        pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long start_pfn = pgdat->node_start_pfn;
        unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);

                if (!present_section_nr(section_nr))
                        continue;

                if (pfn_to_nid(pfn) != nid)
                        continue;

                /*
                 * some memory sections of this node are not removed, and we
                 * can't offline the node now.
                 */
                return;
        }

        if (check_and_unmap_cpu_on_node(pgdat))
                return;

        /*
         * all memory/cpu of this node are removed, we can offline this
         * node now.
         */
        node_set_offline(nid);
        unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);

/**
 * remove_memory
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __ref remove_memory(int nid, u64 start, u64 size)
{
        int ret;

        BUG_ON(check_hotplug_memory_range(start, size));

        mem_hotplug_begin();

        /*
         * All memory blocks must be offlined before removing memory. Check
         * whether all memory blocks in question are offline and trigger a BUG()
         * if this is not the case.
         */
        ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
                                check_memblock_offlined_cb);
        if (ret)
                BUG();

        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");
        memblock_free(start, size);
        memblock_remove(start, size);

        arch_remove_memory(start, size);

        try_offline_node(nid);

        mem_hotplug_done();
}
EXPORT_SYMBOL_GPL(remove_memory);
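
/*
 * Usage sketch (illustrative, not from this file): a hot-remove path would
 * offline all affected memory blocks first and then call remove_memory()
 * under the device hotplug lock:
 *
 *      lock_device_hotplug();
 *      remove_memory(nid, start, size);
 *      unlock_device_hotplug();
 */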
#endif /* CONFIG_MEMORY_HOTREMOVE */