/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() for restoring the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

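/*
 * mem_hotplug_lock serializes memory hot(un)plug against readers that need
 * a stable memory layout: readers take it via get_online_mems(), while the
 * hotplug paths take it for writing via mem_hotplug_begin().
 */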
DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

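/*
 * The "memhp_default_state=" boot parameter selects whether newly added
 * memory blocks are onlined automatically ("online") or left offline
 * ("offline") until userspace onlines them through sysfs.
 */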
static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
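/*
 * Bootmem-allocated memmap and usemap pages are tagged with a type and an
 * extra reference here so that put_page_bootmem() can recognize and free
 * them once the backing memory section is removed.
 */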
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes -
		 * on node0 as well as nodeN. To avoid registering a pfn
		 * against multiple nodes, we check that this pfn does not
		 * already reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

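/*
 * grow_zone_span()/grow_pgdat_span() below only widen the zone and node
 * span bookkeeping when a hot-added section falls outside the current
 * span; the new struct pages themselves are initialized separately by
 * memmap_init_zone() in __add_zone().
 */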
static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone_end_pfn(zone);
	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void resize_zone(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	zone_span_writelock(zone);

	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		/*
		 * keep it consistent with free_area_init_core():
		 * if spanned_pages == 0, then keep start_pfn == 0
		 */
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}

	zone_span_writeunlock(zone);
}

static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	enum zone_type zid = zone_idx(zone);
	int nid = zone->zone_pgdat->node_id;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

static void __ref ensure_zone_is_initialized(struct zone *zone,
			unsigned long start_pfn, unsigned long num_pages)
{
	if (!zone_is_initialized(zone))
		init_currently_empty_zone(zone, start_pfn, num_pages);
}

static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long flags;
	unsigned long z1_start_pfn;

	ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are higher than @z2 */
	if (end_pfn > zone_end_pfn(z2))
		goto out_fail;
	/* the moved-out part must be at the leftmost of @z2 */
	if (start_pfn > z2->zone_start_pfn)
		goto out_fail;
	/* must include/overlap */
	if (end_pfn <= z2->zone_start_pfn)
		goto out_fail;

	/* use start_pfn for z1's start_pfn if z1 is empty */
	if (!zone_is_empty(z1))
		z1_start_pfn = z1->zone_start_pfn;
	else
		z1_start_pfn = start_pfn;

	resize_zone(z1, z1_start_pfn, end_pfn);
	resize_zone(z2, end_pfn, zone_end_pfn(z2));

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z1, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long flags;
	unsigned long z2_end_pfn;

	ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are lower than @z1 */
	if (z1->zone_start_pfn > start_pfn)
		goto out_fail;
	/* the moved-out part must be at the rightmost of @z1 */
	if (zone_end_pfn(z1) > end_pfn)
		goto out_fail;
	/* must include/overlap */
	if (start_pfn >= zone_end_pfn(z1))
		goto out_fail;

	/* use end_pfn for z2's end_pfn if z2 is empty */
	if (!zone_is_empty(z2))
		z2_end_pfn = zone_end_pfn(z2);
	else
		z2_end_pfn = end_pfn;

	resize_zone(z1, z1->zone_start_pfn, start_pfn);
	resize_zone(z2, start_pfn, z2_end_pfn);

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z2, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

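/*
 * Move [start_pfn, end_pfn) into the zone that is zone_shift slots away
 * from the pfns' current zone: a negative shift moves the range into a
 * lower zone via move_pfn_range_left(), a positive one into a higher zone
 * via move_pfn_range_right(). Returns the new zone, or NULL on failure.
 */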
static struct zone * __meminit move_pfn_range(int zone_shift,
		unsigned long start_pfn, unsigned long end_pfn)
{
	struct zone *zone = page_zone(pfn_to_page(start_pfn));
	int ret = 0;

	if (zone_shift < 0)
		ret = move_pfn_range_left(zone + zone_shift, zone,
					  start_pfn, end_pfn);
	else if (zone_shift)
		ret = move_pfn_range_right(zone, zone + zone_shift,
					   start_pfn, end_pfn);

	if (ret)
		return NULL;

	return zone + zone_shift;
}

static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
				      unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags, pfn;

	zone_type = zone - pgdat->node_zones;
	ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);

	/* online_page_range is called later and expects pages reserved */
	for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
		if (!pfn_valid(pfn))
			continue;

		SetPageReserved(pfn_to_page(pfn));
	}
	return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
		unsigned long phys_start_pfn, bool want_memblock)
{
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	if (!want_memblock)
		return 0;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory. It is expected that
 * archs that support memory hotplug will call this function after
 * deciding the zone to which to add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
			unsigned long nr_pages, bool want_memblock)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	struct vmem_altmap *altmap;

	clear_zone_contiguous(zone);

	/* while initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != phys_start_pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			err = -EINVAL;
			goto out;
		}
		altmap->alloc = 0;
	}

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, section_nr_to_pfn(i), want_memblock);

		/*
		 * EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}
	vmemmap_populate_print_last();
out:
	set_zone_contiguous(zone);
	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

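/*
 * Example (sketch): an architecture's arch_add_memory() would typically
 * pick the target zone and then hand the section-aligned range to
 *
 *	__add_pages(nid, zone, start_pfn, nr_pages, want_memblock);
 *
 * which walks the range one memory section at a time via __add_section().
 */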
#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is not the biggest or smallest mem_section in the
	 * zone, it only creates a hole in the zone. So in this case, we need
	 * not change the zone. But perhaps the zone spans only holes, so we
	 * check whether the zone still contains any valid section.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* If the section is the current section, continue the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we
		 * need to shrink pgdat->node_start_pfn and
		 * pgdat->node_spanned_pages. In this case, we find the second
		 * smallest valid mem_section for shrinking the pgdat.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we need
		 * to shrink pgdat->node_spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the pgdat.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is not the biggest or smallest mem_section in the
	 * pgdat, it only creates a hole in the pgdat. So in this case, we
	 * need not change the pgdat. But perhaps the pgdat spans only holes,
	 * so we check whether the pgdat still contains any valid section.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* If the section is the current section, continue the loop */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	unsigned long flags;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

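/*
 * __remove_section() tears down a single memory section: it unregisters
 * the section's sysfs memory device, shrinks the zone and pgdat spans via
 * __remove_zone(), and finally releases the section's memmap through
 * sparse_remove_one_section().
 */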
static int __remove_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms, map_offset);
	return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	unsigned long map_offset = 0;
	int sections_to_remove, ret = 0;

	/* In the ZONE_DEVICE case device driver owns the memory region */
	if (is_dev_zone(zone)) {
		struct page *page = pfn_to_page(phys_start_pfn);
		struct vmem_altmap *altmap;

		altmap = to_vmem_altmap((unsigned long) page);
		if (altmap)
			map_offset = vmem_altmap_offset(altmap);
	} else {
		resource_size_t start, size;

		start = phys_start_pfn << PAGE_SHIFT;
		size = nr_pages * PAGE_SIZE;

		ret = release_mem_region_adjustable(&iomem_resource, start,
					size);
		if (ret) {
			resource_size_t endres = start + size - 1;

			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
					&start, &endres, ret);
		}
	}

	clear_zone_contiguous(zone);

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;

		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
		map_offset = 0;
		if (ret)
			break;
	}

	set_zone_contiguous(zone);

	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

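/*
 * Example (sketch, hypothetical names): a driver that wants to intercept
 * pages as they come online can install its own hook and restore the
 * default when done:
 *
 *	static void my_online_page(struct page *page) { ... }
 *
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 *
 * Only one callback can be installed at a time; registration fails with
 * -EINVAL while another callback is active.
 */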
int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

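/*
 * online_pages_range() is the walk_system_ram_range() callback used by
 * online_pages(): it hands every page in the range to the registered
 * online_page_callback (by default generic_online_page(), above) and then
 * marks the backing mem_sections online.
 */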
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}

	online_mem_sections(start_pfn, start_pfn + nr_pages);

	*(unsigned long *)arg = onlined_pages;
	return 0;
}

/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * if the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before online, we will
	 * need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is online.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * if the node doesn't have memory before online, we will need to
	 * set the node to node_states[N_MEMORY] after the memory
	 * is online.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

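/*
 * zone_can_shift() reports whether [pfn, pfn + nr_pages) can be moved from
 * its current zone into @target without overlapping another initialized
 * zone in between; on success, *zone_shift holds the (possibly negative)
 * distance between the two zones for use by move_pfn_range().
 */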
bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
		   enum zone_type target, int *zone_shift)
{
	struct zone *zone = page_zone(pfn_to_page(pfn));
	enum zone_type idx = zone_idx(zone);
	int i;

	*zone_shift = 0;

	if (idx < target) {
		/* pages must be at end of current zone */
		if (pfn + nr_pages != zone_end_pfn(zone))
			return false;

		/* no zones in use between current zone and target */
		for (i = idx + 1; i < target; i++)
			if (zone_is_initialized(zone - idx + i))
				return false;
	}

	if (target < idx) {
		/* pages must be at beginning of current zone */
		if (pfn != zone->zone_start_pfn)
			return false;

		/* no zones in use between current zone and target */
		for (i = target + 1; i < idx; i++)
			if (zone_is_initialized(zone - idx + i))
				return false;
	}

	*zone_shift = target - idx;
	return true;
}

/* Must be protected by mem_hotplug_begin() */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;
	int zone_shift = 0;

	if (online_type == MMOP_ONLINE_KERNEL) {
		if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
			return -EINVAL;
	} else if (online_type == MMOP_ONLINE_MOVABLE) {
		if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
			return -EINVAL;
	}

	zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
	if (!zone)
		return -EINVAL;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	nid = zone_to_nid(zone);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		build_all_zonelists(NULL, zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		mutex_unlock(&zonelists_mutex);
		goto failed_addition;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(nid, &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL, NULL);
		else
			zone_pcp_update(zone);
	}

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(nid);
		kcompactd_run(nid);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	} else {
		/*
		 * Reset the nr_zones, order and classzone_idx before reuse.
		 * Note that kswapd will init kswapd_classzone_idx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = 0;
	}

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing an uninitialized zonelist, build one here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	/*
	 * zone->managed_pages is set to an approximate value in
	 * free_area_init_core(), which would cause
	 * /sys/device/system/node/nodeX/meminfo to report wrong data.
	 * So reset it to 0 before any memory is onlined.
	 */
	reset_node_managed_pages(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
	return;
}

/**
 * try_online_node - online a node if offlined
 *
 * called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	if (node_online(nid))
		return 0;

	mem_hotplug_begin();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}

out:
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	u64 start_pfn = PFN_DOWN(start);
	u64 nr_pages = size >> PAGE_SHIFT;

	/* Memory range must be aligned with section */
	if ((start_pfn & ~PAGE_SECTION_MASK) ||
	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
		       (unsigned long long)start,
		       (unsigned long long)size);
		return -EINVAL;
	}

	return 0;
}

/*
 * If the movable zone has already been set up, newly added memory should
 * be checked. If its address is higher than the movable zone, it should be
 * added as movable. Without this check, the movable zone may overlap with
 * another zone.
 */
static int should_add_memory_movable(int nid, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	pg_data_t *pgdat = NODE_DATA(nid);
	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;

	if (zone_is_empty(movable_zone))
		return 0;

	if (movable_zone->zone_start_pfn <= start_pfn)
		return 1;

	return 0;
}

int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
		bool for_device)
{
#ifdef CONFIG_ZONE_DEVICE
	if (for_device)
		return ZONE_DEVICE;
#endif
	if (should_add_memory_movable(nid, start, size))
		return ZONE_MOVABLE;

	return zone_default;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
	return device_online(&mem->dev);
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
	u64 start, size;
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}

	mem_hotplug_begin();

	/*
	 * Add new range to memblock so that when hotadd_new_pgdat() is called
	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
	 * this new range and calculate total pages correctly. The range will
	 * be removed at hot-remove time.
	 */
	memblock_add_node(start, size, nid);

	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, false);

	if (ret < 0)
		goto error;

	/* we online node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_node) {
		unsigned long start_pfn = start >> PAGE_SHIFT;
		unsigned long nr_pages = size >> PAGE_SHIFT;

		ret = __register_one_node(nid);
		if (ret)
			goto register_fail;

		/*
		 * link memory sections under this node. This is already
		 * done when creating memory sections in register_new_memory
		 * but that depends on having the node registered, so offline
		 * nodes have to go through register_node.
		 * TODO clean up this mess.
		 */
		ret = link_mem_sections(nid, start_pfn, nr_pages);
register_fail:
		/*
		 * If the sysfs file of the new node can't be created, cpus
		 * on the node can't be hot-added. There is no rollback way
		 * now, so check it with BUG_ON() to catch it reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	/* online pages if requested */
	if (online)
		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
				  NULL, online_memory_block);

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat && pgdat)
		rollback_node_hotadd(nid, pgdat);
	memblock_remove(start, size);

out:
	mem_hotplug_done();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory_resource);

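/*
 * Example (sketch, hypothetical values): a driver that discovered 128MB of
 * new RAM at physical address 0x100000000 on node 0 could register it with
 *
 *	ret = add_memory(0, 0x100000000ULL, 128ULL << 20);
 *
 * The range must be section-aligned (see check_hotplug_memory_range());
 * the new blocks are onlined immediately only if memhp_auto_online is set.
 */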
int __ref add_memory(int nid, u64 start, u64 size)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res, memhp_auto_online);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock
 * will be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return false;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return true;
}

/*
 * Confirm all pages in a range [start, end) belong to the same zone.
 * When true, return its valid [start, end).
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned long *valid_start, unsigned long *valid_end)
{
	unsigned long pfn, sec_end_pfn;
	unsigned long start, end;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
	     pfn < end_pfn;
	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
		/* Make sure the memory section is present first */
		if (!present_section_nr(pfn_to_section_nr(pfn)))
			continue;
		for (; pfn < sec_end_pfn && pfn < end_pfn;
		     pfn += MAX_ORDER_NR_PAGES) {
			i = 0;
			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
			while ((i < MAX_ORDER_NR_PAGES) &&
				!pfn_valid_within(pfn + i))
				i++;
			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
				continue;
			page = pfn_to_page(pfn + i);
			if (zone && page_zone(page) != zone)
				return 0;
			if (!zone)
				start = pfn + i;
			zone = page_zone(page);
			end = pfn + MAX_ORDER_NR_PAGES;
		}
	}

	if (zone) {
		*valid_start = start;
		*valid_end = min(end, end_pfn);
		return 1;
	} else {
		return 0;
	}
}

/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
 * non-lru movable pages and hugepages). We scan pfn because it's much
 * easier than scanning over linked list. This function returns the pfn
 * of the first found movable page if it's found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
			if (__PageMovable(page))
				return pfn;
			if (PageHuge(page)) {
				if (page_huge_active(page))
					return pfn;
				else
					pfn = round_up(pfn + 1,
						1 << compound_order(page)) - 1;
			}
		}
	}
	return 0;
}

static struct page *new_node_page(struct page *page, unsigned long private,
		int **result)
{
	int nid = page_to_nid(page);
	nodemask_t nmask = node_states[N_MEMORY];

	/*
	 * try to allocate from a different node but reuse this node if there
	 * are no other online nodes to be used (e.g. we are offlining a part
	 * of the only existing node)
	 */
	node_clear(nid, nmask);
	if (nodes_empty(nmask))
		node_set(nid, nmask);

	return new_page_nodemask(page, nid, &nmask);
}

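/*
 * do_migrate_range() migrates the movable pages in [start_pfn, end_pfn)
 * off the range, at most NR_OFFLINE_AT_ONCE_PAGES base pages per call:
 * hugepages are isolated with isolate_huge_page(), LRU pages with
 * isolate_lru_page(), non-LRU movable pages with isolate_movable_page(),
 * and everything isolated is handed to migrate_pages() with
 * new_node_page() as the target allocator.
 */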
#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		if (PageHuge(page)) {
			struct page *head = compound_head(page);
			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
			if (compound_order(head) > PFN_SECTION_SHIFT) {
				ret = -EBUSY;
				break;
			}
			if (isolate_huge_page(page, &source))
				move_pages -= 1 << compound_order(head);
			continue;
		}

		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can deal with pages on
		 * LRU and non-lru movable pages.
		 */
		if (PageLRU(page))
			ret = isolate_lru_page(page);
		else
			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			if (!__PageMovable(page))
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			pr_alert("failed to isolate pfn %lx\n", pfn);
			dump_page(page, "isolation failed");
#endif
			put_page(page);
			/*
			 * Because we don't hold the big zone->lock, we should
			 * check this again here.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_movable_pages(&source);
			goto out;
		}

		/* Allocate a new page from the nearest neighbor node */
		ret = migrate_pages(&source, new_node_page, NULL, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_movable_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

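/*
 * The "movable_node" boot parameter requests that hotpluggable memory be
 * kept in ZONE_MOVABLE so that whole nodes can be removed later; it only
 * takes effect when CONFIG_HAVE_MEMBLOCK_NODE_MAP is available.
 */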
static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	movable_node_enabled = true;
#else
	pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is in a zone of 0...zone_last,
	 * and it is the last present memory, 0...zone_last will
	 * become empty after offlining, thus we can determine that we will
	 * need to clear the node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * check whether node_states[N_HIGH_MEMORY] will be changed.
	 * If we try to offline the last present @nr_pages from the node,
	 * we can determine that we will need to clear the node from
	 * node_states[N_HIGH_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}

1734
a16cee10 1735static int __ref __offline_pages(unsigned long start_pfn,
0c0e6195
KH
1736 unsigned long end_pfn, unsigned long timeout)
1737{
1738 unsigned long pfn, nr_pages, expire;
1739 long offlined_pages;
7b78d335 1740 int ret, drain, retry_max, node;
d702909f 1741 unsigned long flags;
a96dfddb 1742 unsigned long valid_start, valid_end;
0c0e6195 1743 struct zone *zone;
7b78d335 1744 struct memory_notify arg;
0c0e6195 1745
0c0e6195
KH
1746 /* at least, alignment against pageblock is necessary */
1747 if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
1748 return -EINVAL;
1749 if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
1750 return -EINVAL;
1751 /* This makes hotplug much easier...and readable.
1752 we assume this for now. .*/
a96dfddb 1753 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
0c0e6195 1754 return -EINVAL;
7b78d335 1755
a96dfddb 1756 zone = page_zone(pfn_to_page(valid_start));
7b78d335
YG
1757 node = zone_to_nid(zone);
1758 nr_pages = end_pfn - start_pfn;
1759
0c0e6195 1760 /* set above range as isolated */
b023f468
WC
1761 ret = start_isolate_page_range(start_pfn, end_pfn,
1762 MIGRATE_MOVABLE, true);
0c0e6195 1763 if (ret)
30467e0b 1764 return ret;
7b78d335
YG
1765
1766 arg.start_pfn = start_pfn;
1767 arg.nr_pages = nr_pages;
d9713679 1768 node_states_check_changes_offline(nr_pages, zone, &arg);
7b78d335
YG
1769
1770 ret = memory_notify(MEM_GOING_OFFLINE, &arg);
1771 ret = notifier_to_errno(ret);
1772 if (ret)
1773 goto failed_removal;
1774
0c0e6195
KH
1775 pfn = start_pfn;
1776 expire = jiffies + timeout;
1777 drain = 0;
1778 retry_max = 5;
1779repeat:
1780 /* start memory hot removal */
1781 ret = -EAGAIN;
1782 if (time_after(jiffies, expire))
1783 goto failed_removal;
1784 ret = -EINTR;
1785 if (signal_pending(current))
1786 goto failed_removal;
1787 ret = 0;
1788 if (drain) {
3f906ba2 1789 lru_add_drain_all_cpuslocked();
0c0e6195 1790 cond_resched();
c0554329 1791 drain_all_pages(zone);
0c0e6195
KH
1792 }
1793
c8721bbb
NH
1794 pfn = scan_movable_pages(start_pfn, end_pfn);
1795 if (pfn) { /* We have movable pages */
0c0e6195
KH
1796 ret = do_migrate_range(pfn, end_pfn);
1797 if (!ret) {
1798 drain = 1;
1799 goto repeat;
1800 } else {
1801 if (ret < 0)
1802 if (--retry_max == 0)
1803 goto failed_removal;
1804 yield();
1805 drain = 1;
1806 goto repeat;
1807 }
1808 }
b3834be5 1809 /* drain all zone's lru pagevec, this is asynchronous... */
3f906ba2 1810 lru_add_drain_all_cpuslocked();
0c0e6195 1811 yield();
b3834be5 1812 /* drain pcp pages, this is synchronous. */
c0554329 1813 drain_all_pages(zone);
c8721bbb
NH
1814 /*
1815 * dissolve free hugepages in the memory block before doing offlining
1816 * actually in order to make hugetlbfs's object counting consistent.
1817 */
082d5b6b
GS
1818 ret = dissolve_free_huge_pages(start_pfn, end_pfn);
1819 if (ret)
1820 goto failed_removal;
0c0e6195
KH
1821 /* check again */
1822 offlined_pages = check_pages_isolated(start_pfn, end_pfn);
1823 if (offlined_pages < 0) {
1824 ret = -EBUSY;
1825 goto failed_removal;
1826 }
e33e33b4 1827 pr_info("Offlined Pages %ld\n", offlined_pages);
b3834be5 1828 /* Ok, all of our target is isolated.
0c0e6195
KH
1829 We cannot do rollback at this point. */
1830 offline_isolated_pages(start_pfn, end_pfn);
dbc0e4ce 1831 /* reset pagetype flags and makes migrate type to be MOVABLE */
0815f3d8 1832 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
0c0e6195 1833 /* removal success */
3dcc0571 1834 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
0c0e6195 1835 zone->present_pages -= offlined_pages;
d702909f
CS
1836
1837 pgdat_resize_lock(zone->zone_pgdat, &flags);
0c0e6195 1838 zone->zone_pgdat->node_present_pages -= offlined_pages;
d702909f 1839 pgdat_resize_unlock(zone->zone_pgdat, &flags);
7b78d335 1840
1b79acc9
KM
1841 init_per_zone_wmark_min();
1842
1e8537ba 1843 if (!populated_zone(zone)) {
340175b7 1844 zone_pcp_reset(zone);
1e8537ba
XQ
1845 mutex_lock(&zonelists_mutex);
1846 build_all_zonelists(NULL, NULL);
1847 mutex_unlock(&zonelists_mutex);
1848 } else
1849 zone_pcp_update(zone);
340175b7 1850
d9713679 1851 node_states_clear_node(node, &arg);
698b1b30 1852 if (arg.status_change_nid >= 0) {
8fe23e05 1853 kswapd_stop(node);
698b1b30
VB
1854 kcompactd_stop(node);
1855 }
bce7394a 1856
0c0e6195
KH
1857 vm_total_pages = nr_free_pagecache_pages();
1858 writeback_set_ratelimit();
7b78d335
YG
1859
1860 memory_notify(MEM_OFFLINE, &arg);
0c0e6195
KH
1861 return 0;
1862
1863failed_removal:
e33e33b4
CY
1864 pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
1865 (unsigned long long) start_pfn << PAGE_SHIFT,
1866 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
7b78d335 1867 memory_notify(MEM_CANCEL_OFFLINE, &arg);
0c0e6195 1868 /* pushback to free area */
0815f3d8 1869 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
0c0e6195
KH
1870 return ret;
1871}
71088785 1872
30467e0b 1873/* Must be protected by mem_hotplug_begin() */
a16cee10
WC
1874int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1875{
1876 return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
1877}
e2ff3940 1878#endif /* CONFIG_MEMORY_HOTREMOVE */
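
/*
 * Illustrative sketch (not part of this file): offlining one memory
 * section's worth of pages while holding the hotplug lock that the
 * comment above offline_pages() requires. The helper name and the
 * start_pfn are hypothetical.
 */
#if 0
static int example_offline_one_section(unsigned long start_pfn)
{
	int ret;

	mem_hotplug_begin();
	ret = offline_pages(start_pfn, PAGES_PER_SECTION);
	mem_hotplug_done();

	return ret;
}
#endif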

/**
 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the first non-zero return value of func, or 0 if func
 * returned 0 for every section walked.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *))
{
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* same memblock? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}
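
/*
 * Illustrative sketch (not part of this file): counting how many memory
 * blocks in a pfn range are still online via a walk_memory_range()
 * callback. The helper names are hypothetical; returning 0 from the
 * callback keeps the walk going, a non-zero value would stop it.
 */
#if 0
static int example_count_online_cb(struct memory_block *mem, void *arg)
{
	unsigned long *nr_online = arg;

	if (!is_memblock_offlined(mem))
		(*nr_online)++;

	return 0;
}

static unsigned long example_count_online(unsigned long start_pfn,
					  unsigned long end_pfn)
{
	unsigned long nr_online = 0;

	walk_memory_range(start_pfn, end_pfn, &nr_online,
			  example_count_online_cb);
	return nr_online;
}
#endif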

#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
			&beginpa, &endpa);
	}

	return ret;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * The cpu on this node isn't removed, and we can't
			 * offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_to_node(cpu) == pgdat->node_id)
			numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
	int ret;

	ret = check_cpu_on_node(pgdat);
	if (ret)
		return ret;

	/*
	 * The node will be offlined when we come here, so we can clear
	 * the cpu_to_node() mapping now.
	 */
	unmap_cpu_on_node(pgdat);
	return 0;
}

/**
 * try_offline_node
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * Some memory sections of this node are not removed, and we
		 * can't offline the node now.
		 */
		return;
	}

	if (check_and_unmap_cpu_on_node(pgdat))
		return;

	/*
	 * All memory/cpu of this node are removed, we can offline this
	 * node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);

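/*
 * Illustrative sketch (not part of this file): the serialization that the
 * NOTE above asks for, using the real lock_device_hotplug() helpers. The
 * wrapper name and the nid are hypothetical.
 */
#if 0
static void example_try_offline_node(int nid)
{
	lock_device_hotplug();
	try_offline_node(nid);
	unlock_device_hotplug();
}
#endif
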
/**
 * remove_memory
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __ref remove_memory(int nid, u64 start, u64 size)
{
	int ret;

	BUG_ON(check_hotplug_memory_range(start, size));

	mem_hotplug_begin();

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and trigger a BUG()
	 * if this is not the case.
	 */
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
				check_memblock_offlined_cb);
	if (ret)
		BUG();

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");
	memblock_free(start, size);
	memblock_remove(start, size);

	arch_remove_memory(start, size);

	try_offline_node(nid);

	mem_hotplug_done();
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
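
/*
 * Illustrative sketch (not part of this file): full removal of a
 * previously offlined range under the locking NOTE above. The wrapper
 * and its arguments are hypothetical; in real callers (e.g. ACPI memory
 * device teardown) nid, start and size come from the firmware description.
 */
#if 0
static void example_remove_memory(int nid, u64 start, u64 size)
{
	lock_device_hotplug();
	remove_memory(nid, start, size);
	unlock_device_hotplug();
}
#endif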