/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function.  Initially it is generic_online_page().  If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

/* The same as the cpu_hotplug lock, but for memory hotplug. */
static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing mem hotplug operation.
         */
        int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} mem_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
        .refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "mem_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
#define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
#define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
#define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
        if (!strcmp(str, "online"))
                memhp_auto_online = true;
        else if (!strcmp(str, "offline"))
                memhp_auto_online = false;

        return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

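/*
 * Usage sketch (not part of the original file): memhp_auto_online above is
 * what add_memory() passes to add_memory_resource() as its "online" flag,
 * and the default can be flipped from the kernel command line:
 *
 *      memhp_default_state=online   -> hot-added memory is onlined at once
 *      memhp_default_state=offline  -> memory stays offline until userspace
 *                                      onlines it through sysfs
 */
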
void get_online_mems(void)
{
        might_sleep();
        if (mem_hotplug.active_writer == current)
                return;
        memhp_lock_acquire_read();
        mutex_lock(&mem_hotplug.lock);
        mem_hotplug.refcount++;
        mutex_unlock(&mem_hotplug.lock);
}

void put_online_mems(void)
{
        if (mem_hotplug.active_writer == current)
                return;
        mutex_lock(&mem_hotplug.lock);

        if (WARN_ON(!mem_hotplug.refcount))
                mem_hotplug.refcount++; /* try to fix things up */

        if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
                wake_up_process(mem_hotplug.active_writer);
        mutex_unlock(&mem_hotplug.lock);
        memhp_lock_release();
}

void mem_hotplug_begin(void)
{
        mem_hotplug.active_writer = current;

        memhp_lock_acquire();
        for (;;) {
                mutex_lock(&mem_hotplug.lock);
                if (likely(!mem_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&mem_hotplug.lock);
                schedule();
        }
}

void mem_hotplug_done(void)
{
        mem_hotplug.active_writer = NULL;
        mutex_unlock(&mem_hotplug.lock);
        memhp_lock_release();
}

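/*
 * Pairing sketch (illustration, not from the original file): readers bracket
 * any walk of hotplug-mutable state with the refcounted side, while hotplug
 * paths take the writer side:
 *
 *      get_online_mems();
 *      // inspect zones/sections; no hotplug operation can complete here
 *      put_online_mems();
 *
 *      mem_hotplug_begin();
 *      // add or remove memory; begin() waits until all readers drain
 *      mem_hotplug_done();
 */
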
/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        if (!res)
                return ERR_PTR(-ENOMEM);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                pr_debug("System RAM resource %pR cannot be added\n", res);
                kfree(res);
                return ERR_PTR(-EEXIST);
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
        return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
                      unsigned long type)
{
        page->freelist = (void *)type;
        SetPagePrivate(page);
        set_page_private(page, info);
        page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
        unsigned long type;

        type = (unsigned long) page->freelist;
        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
               type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

        if (page_ref_dec_return(page) == 1) {
                page->freelist = NULL;
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
                free_reserved_page(page);
        }
}

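/*
 * Note (added for illustration): the registration code below takes one
 * reference per memmap/usemap page with get_page_bootmem(), so a page's
 * refcount records how many registrations still pin it.  Hot-remove drops
 * them with put_page_bootmem(); once page_ref_dec_return() sees the count
 * fall back to 1, the page is handed to the buddy allocator through
 * free_reserved_page().
 */
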
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usemap = __nr_to_section(section_nr)->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        if (!pfn_valid(start_pfn))
                return;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

        usemap = __nr_to_section(section_nr)->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        pfn = pgdat->node_start_pfn;
        end_pfn = pgdat_end_pfn(pgdat);

        /* register section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /*
                 * Some platforms can assign the same pfn to multiple nodes,
                 * on node0 as well as nodeN.  To avoid registering a pfn
                 * against multiple nodes, check that this pfn does not
                 * already reside in some other node.
                 */
                if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
                        register_page_bootmem_info_section(pfn);
        }
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone_end_pfn(zone);
        if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                                zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

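/*
 * Worked example (illustrative numbers): if a zone spans pfns
 * [0x1000, 0x2000) and a hot-added section covers [0x2000, 0x3000), then
 * old_zone_end_pfn == 0x2000, zone_start_pfn stays 0x1000, and
 * spanned_pages becomes max(0x2000, 0x3000) - 0x1000 == 0x2000 pages.
 */
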
static void resize_zone(struct zone *zone, unsigned long start_pfn,
                        unsigned long end_pfn)
{
        zone_span_writelock(zone);

        if (end_pfn - start_pfn) {
                zone->zone_start_pfn = start_pfn;
                zone->spanned_pages = end_pfn - start_pfn;
        } else {
                /*
                 * Keep this consistent with free_area_init_core():
                 * if spanned_pages == 0, then keep zone_start_pfn == 0.
                 */
                zone->zone_start_pfn = 0;
                zone->spanned_pages = 0;
        }

        zone_span_writeunlock(zone);
}

static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
                        unsigned long end_pfn)
{
        enum zone_type zid = zone_idx(zone);
        int nid = zone->zone_pgdat->node_id;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

/* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
 * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */
static int __ref ensure_zone_is_initialized(struct zone *zone,
                        unsigned long start_pfn, unsigned long num_pages)
{
        if (!zone_is_initialized(zone))
                return init_currently_empty_zone(zone, start_pfn, num_pages);

        return 0;
}

static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
                unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;
        unsigned long flags;
        unsigned long z1_start_pfn;

        ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
        if (ret)
                return ret;

        pgdat_resize_lock(z1->zone_pgdat, &flags);

        /* can't move pfns which are higher than @z2 */
        if (end_pfn > zone_end_pfn(z2))
                goto out_fail;
        /* the moved-out part must be at the leftmost of @z2 */
        if (start_pfn > z2->zone_start_pfn)
                goto out_fail;
        /* must include/overlap */
        if (end_pfn <= z2->zone_start_pfn)
                goto out_fail;

        /* use start_pfn for z1's start_pfn if z1 is empty */
        if (!zone_is_empty(z1))
                z1_start_pfn = z1->zone_start_pfn;
        else
                z1_start_pfn = start_pfn;

        resize_zone(z1, z1_start_pfn, end_pfn);
        resize_zone(z2, end_pfn, zone_end_pfn(z2));

        pgdat_resize_unlock(z1->zone_pgdat, &flags);

        fix_zone_id(z1, start_pfn, end_pfn);

        return 0;
out_fail:
        pgdat_resize_unlock(z1->zone_pgdat, &flags);
        return -1;
}

static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
                unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;
        unsigned long flags;
        unsigned long z2_end_pfn;

        ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
        if (ret)
                return ret;

        pgdat_resize_lock(z1->zone_pgdat, &flags);

        /* can't move pfns which are lower than @z1 */
        if (z1->zone_start_pfn > start_pfn)
                goto out_fail;
        /* the moved-out part must be at the rightmost of @z1 */
        if (zone_end_pfn(z1) > end_pfn)
                goto out_fail;
        /* must include/overlap */
        if (start_pfn >= zone_end_pfn(z1))
                goto out_fail;

        /* use end_pfn for z2's end_pfn if z2 is empty */
        if (!zone_is_empty(z2))
                z2_end_pfn = zone_end_pfn(z2);
        else
                z2_end_pfn = end_pfn;

        resize_zone(z1, z1->zone_start_pfn, start_pfn);
        resize_zone(z2, start_pfn, z2_end_pfn);

        pgdat_resize_unlock(z1->zone_pgdat, &flags);

        fix_zone_id(z2, start_pfn, end_pfn);

        return 0;
out_fail:
        pgdat_resize_unlock(z1->zone_pgdat, &flags);
        return -1;
}

static struct zone * __meminit move_pfn_range(int zone_shift,
                unsigned long start_pfn, unsigned long end_pfn)
{
        struct zone *zone = page_zone(pfn_to_page(start_pfn));
        int ret = 0;

        if (zone_shift < 0)
                ret = move_pfn_range_left(zone + zone_shift, zone,
                                          start_pfn, end_pfn);
        else if (zone_shift)
                ret = move_pfn_range_right(zone, zone + zone_shift,
                                           start_pfn, end_pfn);

        if (ret)
                return NULL;

        return zone + zone_shift;
}

static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
                                      unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);

        if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                        pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;
        unsigned long flags, pfn;
        int ret;

        zone_type = zone - pgdat->node_zones;
        ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
        if (ret)
                return ret;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
                        phys_start_pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);

        /* online_page_range is called later and expects pages reserved */
        for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
                if (!pfn_valid(pfn))
                        continue;

                SetPageReserved(pfn_to_page(pfn));
        }
        return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
                                   unsigned long phys_start_pfn)
{
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn);

        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);

        if (ret < 0)
                return ret;

        return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
                      unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;
        struct vmem_altmap *altmap;

        clear_zone_contiguous(zone);

        /* while initializing the mem_map, align the hot-added range to sections */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
        if (altmap) {
                /*
                 * Validate altmap is within bounds of the total request
                 */
                if (altmap->base_pfn != phys_start_pfn
                                || vmem_altmap_offset(altmap) > nr_pages) {
                        pr_warn_once("memory add fail, invalid altmap\n");
                        err = -EINVAL;
                        goto out;
                }
                altmap->alloc = 0;
        }

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(nid, zone, section_nr_to_pfn(i));

                /*
                 * EEXIST is finally dealt with by the ioresource collision
                 * check; see add_memory() => register_memory_resource().
                 * A warning will be printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }
        vmemmap_populate_print_last();
out:
        set_zone_contiguous(zone);
        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

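/*
 * Caller sketch (an approximation, not taken from this file): an
 * architecture's arch_add_memory() typically creates the linear mapping and
 * then hands the range to __add_pages(), roughly:
 *
 *      int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 *      {
 *              struct pglist_data *pgdat = NODE_DATA(nid);
 *              struct zone *zone = pgdat->node_zones +
 *                      zone_for_memory(nid, start, size, ZONE_NORMAL,
 *                                      for_device);
 *              unsigned long start_pfn = start >> PAGE_SHIFT;
 *              unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *              init_memory_mapping(start, start + size); // arch specific
 *              return __add_pages(nid, zone, start_pfn, nr_pages);
 *      }
 */
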
#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
                                     unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        struct mem_section *ms;

        for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(start_pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (unlikely(pfn_to_nid(start_pfn) != nid))
                        continue;

                if (zone && zone != page_zone(pfn_to_page(start_pfn)))
                        continue;

                return start_pfn;
        }

        return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
                                    unsigned long start_pfn,
                                    unsigned long end_pfn)
{
        struct mem_section *ms;
        unsigned long pfn;

        /* pfn is the end pfn of a memory section. */
        pfn = end_pfn - 1;
        for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (unlikely(pfn_to_nid(pfn) != nid))
                        continue;

                if (zone && zone != page_zone(pfn_to_page(pfn)))
                        continue;

                return pfn;
        }

        return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
{
        unsigned long zone_start_pfn = zone->zone_start_pfn;
        unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
        unsigned long zone_end_pfn = z;
        unsigned long pfn;
        struct mem_section *ms;
        int nid = zone_to_nid(zone);

        zone_span_writelock(zone);
        if (zone_start_pfn == start_pfn) {
                /*
                 * If the section is the smallest section in the zone, we
                 * need to shrink zone->zone_start_pfn and
                 * zone->spanned_pages.  In this case, we find the second
                 * smallest valid mem_section for shrinking the zone.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
                                                zone_end_pfn);
                if (pfn) {
                        zone->zone_start_pfn = pfn;
                        zone->spanned_pages = zone_end_pfn - pfn;
                }
        } else if (zone_end_pfn == end_pfn) {
                /*
                 * If the section is the biggest section in the zone, we need
                 * to shrink zone->spanned_pages.  In this case, we find the
                 * second biggest valid mem_section for shrinking the zone.
                 */
                pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
                                               start_pfn);
                if (pfn)
                        zone->spanned_pages = pfn - zone_start_pfn + 1;
        }

        /*
         * If the section is neither the biggest nor the smallest
         * mem_section in the zone, it only creates a hole in the zone, so
         * in that case we need not change the zone.  But perhaps the zone
         * now spans only holes, so check whether any valid section remains.
         */
        pfn = zone_start_pfn;
        for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (page_zone(pfn_to_page(pfn)) != zone)
                        continue;

                /* skip the section being removed and continue the loop */
                if (start_pfn == pfn)
                        continue;

                /* If we find a valid section, we have nothing to do */
                zone_span_writeunlock(zone);
                return;
        }

        /* The zone has no valid section */
        zone->zone_start_pfn = 0;
        zone->spanned_pages = 0;
        zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
                              unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
        unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
        unsigned long pgdat_end_pfn = p;
        unsigned long pfn;
        struct mem_section *ms;
        int nid = pgdat->node_id;

        if (pgdat_start_pfn == start_pfn) {
                /*
                 * If the section is the smallest section in the pgdat, we
                 * need to shrink pgdat->node_start_pfn and
                 * pgdat->node_spanned_pages.  In this case, we find the
                 * second smallest valid mem_section for shrinking the pgdat.
                 */
                pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
                                                pgdat_end_pfn);
                if (pfn) {
                        pgdat->node_start_pfn = pfn;
                        pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
                }
        } else if (pgdat_end_pfn == end_pfn) {
                /*
                 * If the section is the biggest section in the pgdat, we
                 * need to shrink pgdat->node_spanned_pages.  In this case,
                 * we find the second biggest valid mem_section for shrinking
                 * the pgdat.
                 */
                pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
                                               start_pfn);
                if (pfn)
                        pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
        }

        /*
         * If the section is neither the biggest nor the smallest
         * mem_section in the pgdat, it only creates a hole in the pgdat, so
         * in that case we need not change the pgdat.  But perhaps the pgdat
         * now spans only holes, so check whether any valid section remains.
         */
        pfn = pgdat_start_pfn;
        for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (pfn_to_nid(pfn) != nid)
                        continue;

                /* skip the section being removed and continue the loop */
                if (start_pfn == pfn)
                        continue;

                /* If we find a valid section, we have nothing to do */
                return;
        }

        /* The pgdat has no valid section */
        pgdat->node_start_pfn = 0;
        pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int zone_type;
        unsigned long flags;

        zone_type = zone - pgdat->node_zones;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
        shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset)
{
        unsigned long start_pfn;
        int scn_nr;
        int ret = -EINVAL;

        if (!valid_section(ms))
                return ret;

        ret = unregister_memory_section(ms);
        if (ret)
                return ret;

        scn_nr = __section_nr(ms);
        start_pfn = section_nr_to_pfn(scn_nr);
        __remove_zone(zone, start_pfn);

        sparse_remove_one_section(zone, ms, map_offset);
        return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                 unsigned long nr_pages)
{
        unsigned long i;
        unsigned long map_offset = 0;
        int sections_to_remove, ret = 0;

        /* In the ZONE_DEVICE case device driver owns the memory region */
        if (is_dev_zone(zone)) {
                struct page *page = pfn_to_page(phys_start_pfn);
                struct vmem_altmap *altmap;

                altmap = to_vmem_altmap((unsigned long) page);
                if (altmap)
                        map_offset = vmem_altmap_offset(altmap);
        } else {
                resource_size_t start, size;

                start = phys_start_pfn << PAGE_SHIFT;
                size = nr_pages * PAGE_SIZE;

                ret = release_mem_region_adjustable(&iomem_resource, start,
                                        size);
                if (ret) {
                        resource_size_t endres = start + size - 1;

                        pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
                                        &start, &endres, ret);
                }
        }

        clear_zone_contiguous(zone);

        /*
         * We can only remove entire sections
         */
        BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
        BUG_ON(nr_pages % PAGES_PER_SECTION);

        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;

                ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
                map_offset = 0;
                if (ret)
                        break;
        }

        set_zone_contiguous(zone);

        return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
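
/*
 * Caller sketch (an approximation, not from this file): the arch side of
 * hot-remove mirrors the add path, tearing down its mappings and then
 * calling __remove_pages(), roughly:
 *
 *      int arch_remove_memory(u64 start, u64 size)
 *      {
 *              unsigned long start_pfn = start >> PAGE_SHIFT;
 *              unsigned long nr_pages = size >> PAGE_SHIFT;
 *              struct zone *zone = page_zone(pfn_to_page(start_pfn));
 *
 *              // arch-specific teardown of the linear mapping goes here
 *              return __remove_pages(zone, start_pfn, nr_pages);
 *      }
 */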
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        get_online_mems();
        mutex_lock(&online_page_callback_lock);

        if (online_page_callback == generic_online_page) {
                online_page_callback = callback;
                rc = 0;
        }

        mutex_unlock(&online_page_callback_lock);
        put_online_mems();

        return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        get_online_mems();
        mutex_lock(&online_page_callback_lock);

        if (online_page_callback == callback) {
                online_page_callback = generic_online_page;
                rc = 0;
        }

        mutex_unlock(&online_page_callback_lock);
        put_online_mems();

        return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
        adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
        __free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
        __online_page_set_limits(page);
        __online_page_increment_counters(page);
        __online_page_free(page);
}

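/*
 * Driver sketch (hypothetical, for illustration): a balloon driver can take
 * over page onlining with set_online_page_callback() and still reuse the
 * exported __online_page_*() helpers for the pages it decides to release:
 *
 *      static void my_online_page(struct page *page)
 *      {
 *              if (my_driver_wants_page(page)) {  // hypothetical policy
 *                      my_driver_keep_page(page); // hypothetical helper
 *                      return;
 *              }
 *              __online_page_set_limits(page);
 *              __online_page_increment_counters(page);
 *              __online_page_free(page);
 *      }
 *
 *      err = set_online_page_callback(&my_online_page);
 *      ...
 *      restore_online_page_callback(&my_online_page);
 */
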
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                        void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;

        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        (*online_page_callback)(page);
                        onlined_pages++;
                }
        *(unsigned long *)arg = onlined_pages;
        return 0;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
 * normal memory.
 */
static bool can_online_high_movable(struct zone *zone)
{
        return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
        return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which states of node_states will be changed when memory is onlined */
static void node_states_check_changes_online(unsigned long nr_pages,
        struct zone *zone, struct memory_notify *arg)
{
        int nid = zone_to_nid(zone);
        enum zone_type zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_NORMAL,
         * so set zone_last to ZONE_NORMAL.
         *
         * If we have neither HIGHMEM nor a movable node,
         * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
         * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
         */
        if (N_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * If the memory to be onlined is in a zone of 0...zone_last, and
         * the zones of 0...zone_last don't have memory before onlining, we
         * will need to set the node to node_states[N_NORMAL_MEMORY] after
         * the memory is onlined.
         */
        if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
                arg->status_change_nid_normal = nid;
        else
                arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
        /*
         * If we have a movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_HIGHMEM,
         * so set zone_last to ZONE_HIGHMEM.
         *
         * If we don't have a movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_MOVABLE,
         * so set zone_last to ZONE_MOVABLE.
         */
        zone_last = ZONE_HIGHMEM;
        if (N_MEMORY == N_HIGH_MEMORY)
                zone_last = ZONE_MOVABLE;

        if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
                arg->status_change_nid_high = nid;
        else
                arg->status_change_nid_high = -1;
#else
        arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

        /*
         * If the node doesn't have memory before onlining, we will need to
         * set the node to node_states[N_MEMORY] after the memory
         * is onlined.
         */
        if (!node_state(nid, N_MEMORY))
                arg->status_change_nid = nid;
        else
                arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_set_state(node, N_NORMAL_MEMORY);

        if (arg->status_change_nid_high >= 0)
                node_set_state(node, N_HIGH_MEMORY);

        node_set_state(node, N_MEMORY);
}

bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
                   enum zone_type target, int *zone_shift)
{
        struct zone *zone = page_zone(pfn_to_page(pfn));
        enum zone_type idx = zone_idx(zone);
        int i;

        *zone_shift = 0;

        if (idx < target) {
                /* pages must be at end of current zone */
                if (pfn + nr_pages != zone_end_pfn(zone))
                        return false;

                /* no zones in use between current zone and target */
                for (i = idx + 1; i < target; i++)
                        if (zone_is_initialized(zone - idx + i))
                                return false;
        }

        if (target < idx) {
                /* pages must be at beginning of current zone */
                if (pfn != zone->zone_start_pfn)
                        return false;

                /* no zones in use between current zone and target */
                for (i = target + 1; i < idx; i++)
                        if (zone_is_initialized(zone - idx + i))
                                return false;
        }

        *zone_shift = target - idx;
        return true;
}

/* Must be protected by mem_hotplug_begin() */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
        unsigned long flags;
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
        int nid;
        int ret;
        struct memory_notify arg;
        int zone_shift = 0;

        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_mutex.
         */
        zone = page_zone(pfn_to_page(pfn));

        if ((zone_idx(zone) > ZONE_NORMAL ||
            online_type == MMOP_ONLINE_MOVABLE) &&
            !can_online_high_movable(zone))
                return -EINVAL;

        if (online_type == MMOP_ONLINE_KERNEL) {
                if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
                        return -EINVAL;
        } else if (online_type == MMOP_ONLINE_MOVABLE) {
                if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
                        return -EINVAL;
        }

        zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
        if (!zone)
                return -EINVAL;

        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_online(nr_pages, zone, &arg);

        nid = zone_to_nid(zone);

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_addition;

        /*
         * If this zone is not populated, then it is not in zonelist.
         * This means the page allocator ignores this zone.
         * So, zonelist must be updated after online.
         */
        mutex_lock(&zonelists_mutex);
        if (!populated_zone(zone)) {
                need_zonelists_rebuild = 1;
                build_all_zonelists(NULL, zone);
        }

        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
                if (need_zonelists_rebuild)
                        zone_pcp_reset(zone);
                mutex_unlock(&zonelists_mutex);
                goto failed_addition;
        }

        zone->present_pages += onlined_pages;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        zone->zone_pgdat->node_present_pages += onlined_pages;
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        if (onlined_pages) {
                node_states_set_node(nid, &arg);
                if (need_zonelists_rebuild)
                        build_all_zonelists(NULL, NULL);
                else
                        zone_pcp_update(zone);
        }

        mutex_unlock(&zonelists_mutex);

        init_per_zone_wmark_min();

        if (onlined_pages) {
                kswapd_run(nid);
                kcompactd_run(nid);
        }

        vm_total_pages = nr_free_pagecache_pages();

        writeback_set_ratelimit();

        if (onlined_pages)
                memory_notify(MEM_ONLINE, &arg);
        return 0;

failed_addition:
        pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
                 (unsigned long long) pfn << PAGE_SHIFT,
                 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
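
/*
 * Usage note (illustration): userspace reaches online_pages() through the
 * memory-block sysfs interface, e.g.:
 *
 *      echo online         > /sys/devices/system/memory/memoryX/state
 *      echo online_movable > /sys/devices/system/memory/memoryX/state
 *
 * which map to the MMOP_ONLINE_* types handled above via the memory_block
 * state machinery.
 */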

static void reset_node_present_pages(pg_data_t *pgdat)
{
        struct zone *z;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->present_pages = 0;

        pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = PFN_DOWN(start);

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                pgdat = arch_alloc_nodedata(nid);
                if (!pgdat)
                        return NULL;

                arch_refresh_nodedata(nid, pgdat);
        } else {
                /* Reset the nr_zones, order and classzone_idx before reuse */
                pgdat->nr_zones = 0;
                pgdat->kswapd_order = 0;
                pgdat->kswapd_classzone_idx = 0;
        }

        /* we can use NODE_DATA(nid) from here */

        /* init node's zones as empty zones, we don't have any present pages.*/
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);
        pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

        /*
         * The node we allocated has no zone fallback lists.  To avoid
         * accessing a not-initialized zonelist, build one here.
         */
        mutex_lock(&zonelists_mutex);
        build_all_zonelists(pgdat, NULL);
        mutex_unlock(&zonelists_mutex);

        /*
         * zone->managed_pages is set to an approximate value in
         * free_area_init_core(), which would make
         * /sys/devices/system/node/nodeX/meminfo show wrong data.
         * So reset it to 0 before any memory is onlined.
         */
        reset_node_managed_pages(pgdat);

        /*
         * When memory is hot-added, all the memory is in offline state. So
         * clear all zones' present_pages because they will be updated in
         * online_pages() and offline_pages().
         */
        reset_node_present_pages(pgdat);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        free_percpu(pgdat->per_cpu_nodestats);
        arch_free_nodedata(pgdat);
        return;
}


/**
 * try_online_node - online a node if offlined
 *
 * called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
        pg_data_t *pgdat;
        int ret;

        if (node_online(nid))
                return 0;

        mem_hotplug_begin();
        pgdat = hotadd_new_pgdat(nid, 0);
        if (!pgdat) {
                pr_err("Cannot online node %d due to NULL pgdat\n", nid);
                ret = -ENOMEM;
                goto out;
        }
        node_set_online(nid);
        ret = register_one_node(nid);
        BUG_ON(ret);

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        }

out:
        mem_hotplug_done();
        return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
        u64 start_pfn = PFN_DOWN(start);
        u64 nr_pages = size >> PAGE_SHIFT;

        /* Memory range must be aligned with section */
        if ((start_pfn & ~PAGE_SECTION_MASK) ||
            (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
                pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
                       (unsigned long long)start,
                       (unsigned long long)size);
                return -EINVAL;
        }

        return 0;
}

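/*
 * Worked example (illustrative): with SECTION_SIZE_BITS == 27 (as on
 * x86_64), a section is 128MiB, so a range with start = 0x100000000 (4GiB)
 * and size = 0x10000000 (256MiB) passes the check above, while a 64MiB size
 * or a start address that is not 128MiB-aligned fails with -EINVAL.
 */
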
/*
 * If the movable zone has already been set up, newly added memory should be
 * checked.  If its address is higher than the movable zone, it should be
 * added as movable.  Without this check, the movable zone may overlap with
 * another zone.
 */
static int should_add_memory_movable(int nid, u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        pg_data_t *pgdat = NODE_DATA(nid);
        struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;

        if (zone_is_empty(movable_zone))
                return 0;

        if (movable_zone->zone_start_pfn <= start_pfn)
                return 1;

        return 0;
}

int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
                bool for_device)
{
#ifdef CONFIG_ZONE_DEVICE
        if (for_device)
                return ZONE_DEVICE;
#endif
        if (should_add_memory_movable(nid, start, size))
                return ZONE_MOVABLE;

        return zone_default;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
        return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
        u64 start, size;
        pg_data_t *pgdat = NULL;
        bool new_pgdat;
        bool new_node;
        int ret;

        start = res->start;
        size = resource_size(res);

        ret = check_hotplug_memory_range(start, size);
        if (ret)
                return ret;

        {       /* Stupid hack to suppress address-never-null warning */
                void *p = NODE_DATA(nid);
                new_pgdat = !p;
        }

        mem_hotplug_begin();

        /*
         * Add new range to memblock so that when hotadd_new_pgdat() is called
         * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
         * this new range and calculate total pages correctly.  The range will
         * be removed at hot-remove time.
         */
        memblock_add_node(start, size, nid);

        new_node = !node_online(nid);
        if (new_node) {
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
                        goto error;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size, false);

        if (ret < 0)
                goto error;

        /* we online node here. we can't roll back from here. */
        node_set_online(nid);

        if (new_node) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs file of the new node can't be created, cpus
                 * on the node can't be hot-added.  There is no rollback way
                 * now, so check it with BUG_ON() to catch it reluctantly.
                 */
                BUG_ON(ret);
        }

        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");

        /* online pages if requested */
        if (online)
                walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
                                  NULL, online_memory_block);

        goto out;

error:
        /* rollback pgdat allocation and others */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        memblock_remove(start, size);

out:
        mem_hotplug_done();
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory_resource);

int __ref add_memory(int nid, u64 start, u64 size)
{
        struct resource *res;
        int ret;

        res = register_memory_resource(start, size);
        if (IS_ERR(res))
                return PTR_ERR(res);

        ret = add_memory_resource(nid, res, memhp_auto_online);
        if (ret < 0)
                release_memory_resource(res);
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
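
/*
 * Caller sketch (hypothetical, names made up for illustration): a platform
 * driver that discovers a new DIMM would typically resolve the node from
 * firmware and call add_memory(), with error handling elided:
 *
 *      int my_probe_dimm(u64 start, u64 size)
 *      {
 *              int nid = memory_add_physaddr_to_nid(start);
 *
 *              return add_memory(nid, start, size);
 *      }
 *
 * On success the range is registered as "System RAM" and, depending on
 * memhp_auto_online, may already be onlined when this returns.
 */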

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock
 */
static inline int pageblock_free(struct page *page)
{
        return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

        /* If the entire pageblock is free, move to the end of free page */
        if (pageblock_free(page)) {
                int order;
                /* be careful. we don't have locks, page_order can be changed.*/
                order = page_order(page);
                if ((order < MAX_ORDER) && (order >= pageblock_order))
                        return page + (1 << order);
        }

        return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
        struct page *page = pfn_to_page(start_pfn);
        struct page *end_page = page + nr_pages;

        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
                if (!is_pageblock_removable_nolock(page))
                        return false;
                cond_resched();
        }

        /* All pageblocks in the memory block are likely to be hot-removable */
        return true;
}

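/*
 * Usage note (illustration): is_mem_section_removable() backs the per-block
 * sysfs attribute, so userspace can probe the likelihood of success before
 * attempting an offline:
 *
 *      cat /sys/devices/system/memory/memoryX/removable
 */
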
0c0e6195 1486/*
deb88a2a 1487 * Confirm all pages in a range [start, end) belong to the same zone.
a96dfddb 1488 * When true, return its valid [start, end).
0c0e6195 1489 */
a96dfddb
TK
1490int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1491 unsigned long *valid_start, unsigned long *valid_end)
0c0e6195 1492{
5f0f2887 1493 unsigned long pfn, sec_end_pfn;
a96dfddb 1494 unsigned long start, end;
0c0e6195
KH
1495 struct zone *zone = NULL;
1496 struct page *page;
1497 int i;
deb88a2a 1498 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
0c0e6195 1499 pfn < end_pfn;
deb88a2a 1500 pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
5f0f2887
AB
1501 /* Make sure the memory section is present first */
1502 if (!present_section_nr(pfn_to_section_nr(pfn)))
0c0e6195 1503 continue;
5f0f2887
AB
1504 for (; pfn < sec_end_pfn && pfn < end_pfn;
1505 pfn += MAX_ORDER_NR_PAGES) {
1506 i = 0;
1507 /* This is just a CONFIG_HOLES_IN_ZONE check.*/
1508 while ((i < MAX_ORDER_NR_PAGES) &&
1509 !pfn_valid_within(pfn + i))
1510 i++;
1511 if (i == MAX_ORDER_NR_PAGES)
1512 continue;
1513 page = pfn_to_page(pfn + i);
1514 if (zone && page_zone(page) != zone)
1515 return 0;
a96dfddb
TK
1516 if (!zone)
1517 start = pfn + i;
5f0f2887 1518 zone = page_zone(page);
a96dfddb 1519 end = pfn + MAX_ORDER_NR_PAGES;
5f0f2887 1520 }
0c0e6195 1521 }
deb88a2a 1522
a96dfddb
TK
1523 if (zone) {
1524 *valid_start = start;
1525 *valid_end = end;
deb88a2a 1526 return 1;
a96dfddb 1527 } else {
deb88a2a 1528 return 0;
a96dfddb 1529 }
0c0e6195
KH
1530}
1531
1532/*
c8721bbb
NH
1533 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
1534 * and hugepages). We scan pfn because it's much easier than scanning over
1535 * linked list. This function returns the pfn of the first found movable
1536 * page if it's found, otherwise 0.
0c0e6195 1537 */
c8721bbb 1538static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
0c0e6195
KH
1539{
1540 unsigned long pfn;
1541 struct page *page;
1542 for (pfn = start; pfn < end; pfn++) {
1543 if (pfn_valid(pfn)) {
1544 page = pfn_to_page(pfn);
1545 if (PageLRU(page))
1546 return pfn;
c8721bbb 1547 if (PageHuge(page)) {
7e1f049e 1548 if (page_huge_active(page))
c8721bbb
NH
1549 return pfn;
1550 else
1551 pfn = round_up(pfn + 1,
1552 1 << compound_order(page)) - 1;
1553 }
0c0e6195
KH
1554 }
1555 }
1556 return 0;
1557}
1558
394e31d2
XQ
1559static struct page *new_node_page(struct page *page, unsigned long private,
1560 int **result)
1561{
1562 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
1563 int nid = page_to_nid(page);
231e97e2
LZ
1564 nodemask_t nmask = node_states[N_MEMORY];
1565 struct page *new_page = NULL;
394e31d2
XQ
1566
1567 /*
1568 * TODO: allocate a destination hugepage from a nearest neighbor node,
1569 * accordance with memory policy of the user process if possible. For
1570 * now as a simple work-around, we use the next node for destination.
1571 */
1572 if (PageHuge(page))
1573 return alloc_huge_page_node(page_hstate(compound_head(page)),
1574 next_node_in(nid, nmask));
1575
231e97e2 1576 node_clear(nid, nmask);
9bb627be 1577
394e31d2
XQ
1578 if (PageHighMem(page)
1579 || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
1580 gfp_mask |= __GFP_HIGHMEM;
1581
231e97e2
LZ
1582 if (!nodes_empty(nmask))
1583 new_page = __alloc_pages_nodemask(gfp_mask, 0,
394e31d2
XQ
1584 node_zonelist(nid, gfp_mask), &nmask);
1585 if (!new_page)
1586 new_page = __alloc_pages(gfp_mask, 0,
1587 node_zonelist(nid, gfp_mask));
1588
1589 return new_page;
1590}
1591
0c0e6195
KH
1592#define NR_OFFLINE_AT_ONCE_PAGES (256)
1593static int
1594do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1595{
1596 unsigned long pfn;
1597 struct page *page;
1598 int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
1599 int not_managed = 0;
1600 int ret = 0;
1601 LIST_HEAD(source);
1602
1603 for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
1604 if (!pfn_valid(pfn))
1605 continue;
1606 page = pfn_to_page(pfn);
c8721bbb
NH
1607
1608 if (PageHuge(page)) {
1609 struct page *head = compound_head(page);
1610 pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
1611 if (compound_order(head) > PFN_SECTION_SHIFT) {
1612 ret = -EBUSY;
1613 break;
1614 }
1615 if (isolate_huge_page(page, &source))
1616 move_pages -= 1 << compound_order(head);
1617 continue;
1618 }
1619
700c2a46 1620 if (!get_page_unless_zero(page))
0c0e6195
KH
1621 continue;
1622 /*
1623 * We can skip free pages. And we can only deal with pages on
1624 * LRU.
1625 */
62695a84 1626 ret = isolate_lru_page(page);
0c0e6195 1627 if (!ret) { /* Success */
700c2a46 1628 put_page(page);
62695a84 1629 list_add_tail(&page->lru, &source);
0c0e6195 1630 move_pages--;
599d0c95 1631 inc_node_page_state(page, NR_ISOLATED_ANON +
6d9c285a
KM
1632 page_is_file_cache(page));
1633
0c0e6195 1634 } else {
0c0e6195 1635#ifdef CONFIG_DEBUG_VM
e33e33b4 1636 pr_alert("removing pfn %lx from LRU failed\n", pfn);
f0b791a3 1637 dump_page(page, "failed to remove from LRU");
0c0e6195 1638#endif
700c2a46 1639 put_page(page);
25985edc 1640 /* Because we don't have big zone->lock. we should
809c4449
BL
1641 check this again here. */
1642 if (page_count(page)) {
1643 not_managed++;
f3ab2636 1644 ret = -EBUSY;
809c4449
BL
1645 break;
1646 }
0c0e6195
KH
1647 }
1648 }
f3ab2636
BL
1649 if (!list_empty(&source)) {
1650 if (not_managed) {
c8721bbb 1651 putback_movable_pages(&source);
f3ab2636
BL
1652 goto out;
1653 }
74c08f98 1654
394e31d2
XQ
1655 /* Allocate a new page from the nearest neighbor node */
1656 ret = migrate_pages(&source, new_node_page, NULL, 0,
9c620e2b 1657 MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
f3ab2636 1658 if (ret)
c8721bbb 1659 putback_movable_pages(&source);
0c0e6195 1660 }
0c0e6195
KH
1661out:
1662 return ret;
1663}
1664
1665/*
1666 * remove from free_area[] and mark all as Reserved.
1667 */
1668static int
1669offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
1670 void *data)
1671{
1672 __offline_isolated_pages(start, start + nr_pages);
1673 return 0;
1674}
1675
1676static void
1677offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
1678{
908eedc6 1679 walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
0c0e6195
KH
1680 offline_isolated_pages_cb);
1681}
1682
1683/*
1684 * Check all pages in range, recoreded as memory resource, are isolated.
1685 */
1686static int
1687check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
1688 void *data)
1689{
1690 int ret;
1691 long offlined = *(long *)data;
b023f468 1692 ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
0c0e6195
KH
1693 offlined = nr_pages;
1694 if (!ret)
1695 *(long *)data += offlined;
1696 return ret;
1697}
1698
1699static long
1700check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
1701{
1702 long offlined = 0;
1703 int ret;
1704
908eedc6 1705 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
0c0e6195
KH
1706 check_pages_isolated_cb);
1707 if (ret < 0)
1708 offlined = (long)ret;
1709 return offlined;
1710}
1711
09285af7 1712#ifdef CONFIG_MOVABLE_NODE
79a4dcef
TC
1713/*
1714 * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
1715 * normal memory.
1716 */
09285af7
LJ
1717static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1718{
1719 return true;
1720}
79a4dcef 1721#else /* CONFIG_MOVABLE_NODE */
74d42d8f
LJ
1722/* ensure the node has NORMAL memory if it is still online */
1723static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1724{
1725 struct pglist_data *pgdat = zone->zone_pgdat;
1726 unsigned long present_pages = 0;
1727 enum zone_type zt;
1728
1729 for (zt = 0; zt <= ZONE_NORMAL; zt++)
1730 present_pages += pgdat->node_zones[zt].present_pages;
1731
1732 if (present_pages > nr_pages)
1733 return true;
1734
1735 present_pages = 0;
1736 for (; zt <= ZONE_MOVABLE; zt++)
1737 present_pages += pgdat->node_zones[zt].present_pages;
1738
1739 /*
1740 * we can't offline the last normal memory until all
1741 * higher memory is offlined.
1742 */
1743 return present_pages == 0;
1744}
79a4dcef 1745#endif /* CONFIG_MOVABLE_NODE */
74d42d8f 1746
c5320926
TC
1747static int __init cmdline_parse_movable_node(char *p)
1748{
1749#ifdef CONFIG_MOVABLE_NODE
55ac590c 1750 movable_node_enabled = true;
c5320926
TC
1751#else
1752 pr_warn("movable_node option not supported\n");
1753#endif
1754 return 0;
1755}
1756early_param("movable_node", cmdline_parse_movable_node);
1757
d9713679
LJ
1758/* check which state of node_states will be changed when offline memory */
1759static void node_states_check_changes_offline(unsigned long nr_pages,
1760 struct zone *zone, struct memory_notify *arg)
1761{
1762 struct pglist_data *pgdat = zone->zone_pgdat;
1763 unsigned long present_pages = 0;
1764 enum zone_type zt, zone_last = ZONE_NORMAL;
1765
1766 /*
6715ddf9
LJ
1767 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
1768 * contains nodes which have zones of 0...ZONE_NORMAL,
1769 * set zone_last to ZONE_NORMAL.
d9713679 1770 *
6715ddf9
LJ
1771 * If we don't have HIGHMEM nor movable node,
1772 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
1773 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
d9713679 1774 */
6715ddf9 1775 if (N_MEMORY == N_NORMAL_MEMORY)
d9713679
LJ
1776 zone_last = ZONE_MOVABLE;
1777
1778 /*
1779 * check whether node_states[N_NORMAL_MEMORY] will be changed.
1780 * If the memory to be offlined is in a zone of 0...zone_last,
1781 * and it is the last present memory, 0...zone_last will
1782 * become empty after offlining, thus we can determine that we will
1783 * need to clear the node from node_states[N_NORMAL_MEMORY].
1784 */
1785 for (zt = 0; zt <= zone_last; zt++)
1786 present_pages += pgdat->node_zones[zt].present_pages;
1787 if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1788 arg->status_change_nid_normal = zone_to_nid(zone);
1789 else
1790 arg->status_change_nid_normal = -1;
1791
6715ddf9
LJ
1792#ifdef CONFIG_HIGHMEM
1793 /*
1794 * If we have movable node, node_states[N_HIGH_MEMORY]
1795 * contains nodes which have zones of 0...ZONE_HIGHMEM,
1796 * so set zone_last to ZONE_HIGHMEM.
1797 *
1798 * If we don't have movable node, node_states[N_HIGH_MEMORY]
1799 * contains nodes which have zones of 0...ZONE_MOVABLE,
1800 * so set zone_last to ZONE_MOVABLE.
1801 */
1802 zone_last = ZONE_HIGHMEM;
1803 if (N_MEMORY == N_HIGH_MEMORY)
1804 zone_last = ZONE_MOVABLE;
1805
1806 for (; zt <= zone_last; zt++)
1807 present_pages += pgdat->node_zones[zt].present_pages;
1808 if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1809 arg->status_change_nid_high = zone_to_nid(zone);
1810 else
1811 arg->status_change_nid_high = -1;
1812#else
1813 arg->status_change_nid_high = arg->status_change_nid_normal;
1814#endif
1815
d9713679
LJ
1816 /*
1817 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1818 */
1819 zone_last = ZONE_MOVABLE;
1820
1821 /*
1822 * Check whether node_states[N_HIGH_MEMORY] will be changed.
1823 * If we try to offline the last present @nr_pages from the node,
1824 * we can determine that we will need to clear the node from
1825 * node_states[N_HIGH_MEMORY].
1826 */
1827 for (; zt <= zone_last; zt++)
1828 present_pages += pgdat->node_zones[zt].present_pages;
1829 if (nr_pages >= present_pages)
1830 arg->status_change_nid = zone_to_nid(zone);
1831 else
1832 arg->status_change_nid = -1;
1833}
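/*
 * A minimal sketch of a consumer (hypothetical driver code, using the
 * real register_memory_notifier() interface): the status_change_nid*
 * fields filled in above tell notifier callbacks which node_states[]
 * bits would be cleared if the offline succeeds; -1 means "no change".
 */
static int example_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action == MEM_GOING_OFFLINE && arg->status_change_nid >= 0)
		pr_info("node %d is about to lose its last memory\n",
			arg->status_change_nid);
	return NOTIFY_OK;
}

static struct notifier_block example_mem_nb = {
	.notifier_call = example_mem_notifier,
};

/* in driver init: register_memory_notifier(&example_mem_nb); */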
1834
1835static void node_states_clear_node(int node, struct memory_notify *arg)
1836{
1837 if (arg->status_change_nid_normal >= 0)
1838 node_clear_state(node, N_NORMAL_MEMORY);
1839
6715ddf9
LJ
1840 if ((N_MEMORY != N_NORMAL_MEMORY) &&
1841 (arg->status_change_nid_high >= 0))
d9713679 1842 node_clear_state(node, N_HIGH_MEMORY);
6715ddf9
LJ
1843
1844 if ((N_MEMORY != N_HIGH_MEMORY) &&
1845 (arg->status_change_nid >= 0))
1846 node_clear_state(node, N_MEMORY);
d9713679
LJ
1847}
1848
a16cee10 1849static int __ref __offline_pages(unsigned long start_pfn,
0c0e6195
KH
1850 unsigned long end_pfn, unsigned long timeout)
1851{
1852 unsigned long pfn, nr_pages, expire;
1853 long offlined_pages;
7b78d335 1854 int ret, drain, retry_max, node;
d702909f 1855 unsigned long flags;
a96dfddb 1856 unsigned long valid_start, valid_end;
0c0e6195 1857 struct zone *zone;
7b78d335 1858 struct memory_notify arg;
0c0e6195 1859
0c0e6195
KH
1860 /* At the very least, pageblock alignment is necessary */
1861 if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
1862 return -EINVAL;
1863 if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
1864 return -EINVAL;
1865 /* This makes hotplug much easier...and readable.
1866 We assume this for now. */
a96dfddb 1867 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
0c0e6195 1868 return -EINVAL;
7b78d335 1869
a96dfddb 1870 zone = page_zone(pfn_to_page(valid_start));
7b78d335
YG
1871 node = zone_to_nid(zone);
1872 nr_pages = end_pfn - start_pfn;
1873
74d42d8f 1874 if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
30467e0b 1875 return -EINVAL;
74d42d8f 1876
0c0e6195 1877 /* set above range as isolated */
b023f468
WC
1878 ret = start_isolate_page_range(start_pfn, end_pfn,
1879 MIGRATE_MOVABLE, true);
0c0e6195 1880 if (ret)
30467e0b 1881 return ret;
7b78d335
YG
1882
1883 arg.start_pfn = start_pfn;
1884 arg.nr_pages = nr_pages;
d9713679 1885 node_states_check_changes_offline(nr_pages, zone, &arg);
7b78d335
YG
1886
1887 ret = memory_notify(MEM_GOING_OFFLINE, &arg);
1888 ret = notifier_to_errno(ret);
1889 if (ret)
1890 goto failed_removal;
1891
0c0e6195
KH
1892 pfn = start_pfn;
1893 expire = jiffies + timeout;
1894 drain = 0;
1895 retry_max = 5;
1896repeat:
1897 /* start memory hot removal */
1898 ret = -EAGAIN;
1899 if (time_after(jiffies, expire))
1900 goto failed_removal;
1901 ret = -EINTR;
1902 if (signal_pending(current))
1903 goto failed_removal;
1904 ret = 0;
1905 if (drain) {
1906 lru_add_drain_all();
0c0e6195 1907 cond_resched();
c0554329 1908 drain_all_pages(zone);
0c0e6195
KH
1909 }
1910
c8721bbb
NH
1911 pfn = scan_movable_pages(start_pfn, end_pfn);
1912 if (pfn) { /* We have movable pages */
0c0e6195
KH
1913 ret = do_migrate_range(pfn, end_pfn);
1914 if (!ret) {
1915 drain = 1;
1916 goto repeat;
1917 } else {
1918 if (ret < 0)
1919 if (--retry_max == 0)
1920 goto failed_removal;
1921 yield();
1922 drain = 1;
1923 goto repeat;
1924 }
1925 }
b3834be5 1926 /* drain all zones' lru pagevecs; this is asynchronous... */
0c0e6195 1927 lru_add_drain_all();
0c0e6195 1928 yield();
b3834be5 1929 /* drain pcp pages; this is synchronous. */
c0554329 1930 drain_all_pages(zone);
c8721bbb
NH
1931 /*
1932 * Dissolve free hugepages in the memory block before actually
1933 * offlining, in order to keep hugetlbfs's object counting consistent.
1934 */
082d5b6b
GS
1935 ret = dissolve_free_huge_pages(start_pfn, end_pfn);
1936 if (ret)
1937 goto failed_removal;
0c0e6195
KH
1938 /* check again */
1939 offlined_pages = check_pages_isolated(start_pfn, end_pfn);
1940 if (offlined_pages < 0) {
1941 ret = -EBUSY;
1942 goto failed_removal;
1943 }
e33e33b4 1944 pr_info("Offlined Pages %ld\n", offlined_pages);
b3834be5 1945 /* OK, all of our target range is isolated.
0c0e6195
KH
1946 We cannot roll back at this point. */
1947 offline_isolated_pages(start_pfn, end_pfn);
dbc0e4ce 1948 /* reset pagetype flags and make the migratetype MOVABLE */
0815f3d8 1949 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
0c0e6195 1950 /* removal success */
3dcc0571 1951 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
0c0e6195 1952 zone->present_pages -= offlined_pages;
d702909f
CS
1953
1954 pgdat_resize_lock(zone->zone_pgdat, &flags);
0c0e6195 1955 zone->zone_pgdat->node_present_pages -= offlined_pages;
d702909f 1956 pgdat_resize_unlock(zone->zone_pgdat, &flags);
7b78d335 1957
1b79acc9
KM
1958 init_per_zone_wmark_min();
1959
1e8537ba 1960 if (!populated_zone(zone)) {
340175b7 1961 zone_pcp_reset(zone);
1e8537ba
XQ
1962 mutex_lock(&zonelists_mutex);
1963 build_all_zonelists(NULL, NULL);
1964 mutex_unlock(&zonelists_mutex);
1965 } else
1966 zone_pcp_update(zone);
340175b7 1967
d9713679 1968 node_states_clear_node(node, &arg);
698b1b30 1969 if (arg.status_change_nid >= 0) {
8fe23e05 1970 kswapd_stop(node);
698b1b30
VB
1971 kcompactd_stop(node);
1972 }
bce7394a 1973
0c0e6195
KH
1974 vm_total_pages = nr_free_pagecache_pages();
1975 writeback_set_ratelimit();
7b78d335
YG
1976
1977 memory_notify(MEM_OFFLINE, &arg);
0c0e6195
KH
1978 return 0;
1979
1980failed_removal:
e33e33b4
CY
1981 pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
1982 (unsigned long long) start_pfn << PAGE_SHIFT,
1983 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
7b78d335 1984 memory_notify(MEM_CANCEL_OFFLINE, &arg);
0c0e6195 1985 /* push back to the free area */
0815f3d8 1986 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
0c0e6195
KH
1987 return ret;
1988}
71088785 1989
30467e0b 1990/* Must be protected by mem_hotplug_begin() */
a16cee10
WC
1991int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1992{
1993 return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
1994}
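/*
 * A minimal usage sketch (hypothetical caller, illustrative bounds):
 * offline a pageblock-aligned range while honouring the
 * "Must be protected by mem_hotplug_begin()" rule stated above.
 */
static int example_offline_range(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	int ret;

	mem_hotplug_begin();
	ret = offline_pages(start_pfn, nr_pages);
	mem_hotplug_done();
	return ret;
}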
e2ff3940 1995#endif /* CONFIG_MEMORY_HOTREMOVE */
a16cee10 1996
bbc76be6
WC
1997/**
1998 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
1999 * @start_pfn: start pfn of the memory range
e05c4bbf 2000 * @end_pfn: end pfn of the memory range
bbc76be6
WC
2001 * @arg: argument passed to func
2002 * @func: callback for each memory section walked
2003 *
2004 * This function walks through all present mem sections in range
2005 * [start_pfn, end_pfn) and calls func on each mem section.
2006 *
2007 * Returns the return value of func.
2008 */
e2ff3940 2009int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
bbc76be6 2010 void *arg, int (*func)(struct memory_block *, void *))
71088785 2011{
e90bdb7f
WC
2012 struct memory_block *mem = NULL;
2013 struct mem_section *section;
e90bdb7f
WC
2014 unsigned long pfn, section_nr;
2015 int ret;
e90bdb7f
WC
2016
2017 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2018 section_nr = pfn_to_section_nr(pfn);
2019 if (!present_section_nr(section_nr))
2020 continue;
2021
2022 section = __nr_to_section(section_nr);
2023 /* same memblock? */
2024 if (mem)
2025 if ((section_nr >= mem->start_section_nr) &&
2026 (section_nr <= mem->end_section_nr))
2027 continue;
2028
2029 mem = find_memory_block_hinted(section, mem);
2030 if (!mem)
2031 continue;
2032
bbc76be6 2033 ret = func(mem, arg);
e90bdb7f 2034 if (ret) {
bbc76be6
WC
2035 kobject_put(&mem->dev.kobj);
2036 return ret;
e90bdb7f
WC
2037 }
2038 }
2039
2040 if (mem)
2041 kobject_put(&mem->dev.kobj);
2042
bbc76be6
WC
2043 return 0;
2044}
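/*
 * A minimal usage sketch (hypothetical helpers): count the memory block
 * devices backing a physical range. Note that func runs once per
 * memory_block, not once per section, thanks to the hinted lookup above.
 */
static int count_block_cb(struct memory_block *mem, void *arg)
{
	(*(int *)arg)++;
	return 0;	/* non-zero would stop the walk */
}

static int count_memory_blocks(u64 start, u64 size)
{
	int nr = 0;

	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
			  &nr, count_block_cb);
	return nr;
}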
2045
e2ff3940 2046#ifdef CONFIG_MEMORY_HOTREMOVE
d6de9d53 2047static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
bbc76be6
WC
2048{
2049 int ret = !is_memblock_offlined(mem);
2050
349daa0f
RD
2051 if (unlikely(ret)) {
2052 phys_addr_t beginpa, endpa;
2053
2054 beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
2055 endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
756a025f 2056 pr_warn("removing memory failed, because memory [%pa-%pa] is still online\n",
349daa0f
RD
2057 &beginpa, &endpa);
2058 }
bbc76be6
WC
2059
2060 return ret;
2061}
2062
0f1cfe9d 2063static int check_cpu_on_node(pg_data_t *pgdat)
60a5a19e 2064{
60a5a19e
TC
2065 int cpu;
2066
2067 for_each_present_cpu(cpu) {
2068 if (cpu_to_node(cpu) == pgdat->node_id)
2069 /*
2070 * A CPU on this node hasn't been removed yet, so we
2071 * can't offline this node.
2072 */
2073 return -EBUSY;
2074 }
2075
2076 return 0;
2077}
2078
0f1cfe9d 2079static void unmap_cpu_on_node(pg_data_t *pgdat)
e13fe869
WC
2080{
2081#ifdef CONFIG_ACPI_NUMA
e13fe869
WC
2082 int cpu;
2083
2084 for_each_possible_cpu(cpu)
2085 if (cpu_to_node(cpu) == pgdat->node_id)
2086 numa_clear_node(cpu);
2087#endif
2088}
2089
0f1cfe9d 2090static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
e13fe869 2091{
0f1cfe9d 2092 int ret;
e13fe869 2093
0f1cfe9d 2094 ret = check_cpu_on_node(pgdat);
e13fe869
WC
2095 if (ret)
2096 return ret;
2097
2098 /*
2099 * The node is going offline by the time we get here, so we can
2100 * clear its cpu_to_node() mapping now.
2101 */
2102
0f1cfe9d 2103 unmap_cpu_on_node(pgdat);
e13fe869
WC
2104 return 0;
2105}
2106
0f1cfe9d
TK
2107/**
2108 * try_offline_node
2109 *
2110 * Offline a node if all memory sections and cpus of the node have been removed.
2111 *
2112 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
2113 * and online/offline operations before this call.
2114 */
90b30cdc 2115void try_offline_node(int nid)
60a5a19e 2116{
d822b86a
WC
2117 pg_data_t *pgdat = NODE_DATA(nid);
2118 unsigned long start_pfn = pgdat->node_start_pfn;
2119 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
60a5a19e
TC
2120 unsigned long pfn;
2121
2122 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2123 unsigned long section_nr = pfn_to_section_nr(pfn);
2124
2125 if (!present_section_nr(section_nr))
2126 continue;
2127
2128 if (pfn_to_nid(pfn) != nid)
2129 continue;
2130
2131 /*
2132 * Some memory sections of this node have not been removed,
2133 * so we can't offline the node now.
2134 */
2135 return;
2136 }
2137
0f1cfe9d 2138 if (check_and_unmap_cpu_on_node(pgdat))
60a5a19e
TC
2139 return;
2140
2141 /*
2142 * All memory and CPUs of this node have been removed; we can
2143 * offline this node now.
2144 */
2145 node_set_offline(nid);
2146 unregister_one_node(nid);
2147}
90b30cdc 2148EXPORT_SYMBOL(try_offline_node);
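/*
 * A minimal usage sketch (hypothetical caller): per the comment above,
 * the device_hotplug lock must be held around the call.
 */
static void example_retire_node(int nid)
{
	lock_device_hotplug();
	try_offline_node(nid);	/* no-op while sections or CPUs remain */
	unlock_device_hotplug();
}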
60a5a19e 2149
0f1cfe9d
TK
2150/**
2151 * remove_memory
2152 *
2153 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
2154 * and online/offline operations before this call, as required by
2155 * try_offline_node().
2156 */
242831eb 2157void __ref remove_memory(int nid, u64 start, u64 size)
bbc76be6 2158{
242831eb 2159 int ret;
993c1aad 2160
27356f54
TK
2161 BUG_ON(check_hotplug_memory_range(start, size));
2162
bfc8c901 2163 mem_hotplug_begin();
6677e3ea
YI
2164
2165 /*
242831eb
RW
2166 * All memory blocks must be offlined before removing memory. Check
2167 * whether all memory blocks in question are offline and trigger a BUG()
2168 * if this is not the case.
6677e3ea 2169 */
242831eb 2170 ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
d6de9d53 2171 check_memblock_offlined_cb);
bfc8c901 2172 if (ret)
242831eb 2173 BUG();
6677e3ea 2174
46c66c4b
YI
2175 /* remove memmap entry */
2176 firmware_map_remove(start, start + size, "System RAM");
f9126ab9
XQ
2177 memblock_free(start, size);
2178 memblock_remove(start, size);
46c66c4b 2179
24d335ca
WC
2180 arch_remove_memory(start, size);
2181
60a5a19e
TC
2182 try_offline_node(nid);
2183
bfc8c901 2184 mem_hotplug_done();
71088785 2185}
71088785 2186EXPORT_SYMBOL_GPL(remove_memory);
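/*
 * A minimal end-to-end sketch (hypothetical driver path, illustrative
 * names): the canonical ordering is offline first, then remove, since
 * remove_memory() BUGs if any block in the range is still online.
 * device_offline() returns 1 for an already-offline block, which we
 * treat as success here.
 */
static int offline_block_cb(struct memory_block *mem, void *arg)
{
	int ret = device_offline(&mem->dev);

	return ret < 0 ? ret : 0;
}

static int example_hot_remove(int nid, u64 start, u64 size)
{
	int ret;

	lock_device_hotplug();	/* required by remove_memory() */
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
				NULL, offline_block_cb);
	if (!ret)
		remove_memory(nid, start, size);
	unlock_device_hotplug();
	return ret;
}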
aba6efc4 2187#endif /* CONFIG_MEMORY_HOTREMOVE */